@@ -297,6 +297,12 @@ static inline void dec_attach_in_progress(struct cpuset *cs)
297297 mutex_unlock (& cpuset_mutex );
298298}
299299
/*
 * cpuset_v2 - return true when cpuset follows cgroup v2 semantics:
 * either v1 support (CONFIG_CPUSETS_V1) is compiled out entirely, or the
 * cpuset controller is mounted on the default (v2) hierarchy. Centralizes
 * the check that was previously open-coded at every call site below.
 */
300+ static inline bool cpuset_v2 (void )
301+ {
302+ return !IS_ENABLED (CONFIG_CPUSETS_V1 ) ||
303+ cgroup_subsys_on_dfl (cpuset_cgrp_subsys );
304+ }
305+
300306/*
301307 * Cgroup v2 behavior is used on the "cpus" and "mems" control files when
302308 * on default hierarchy or when the cpuset_v2_mode flag is set by mounting
@@ -307,7 +313,7 @@ static inline void dec_attach_in_progress(struct cpuset *cs)
307313 */
308314static inline bool is_in_v2_mode (void )
309315{
/*
 * cpuset_v2() already covers !CONFIG_CPUSETS_V1 and the default
 * hierarchy; the flag term below extends v2 behavior to v1 hierarchies
 * mounted with the cpuset_v2_mode option.
 */
310- return cgroup_subsys_on_dfl ( cpuset_cgrp_subsys ) ||
316+ return cpuset_v2 ( ) ||
311317 (cpuset_cgrp_subsys .root -> flags & CGRP_ROOT_CPUSET_V2_MODE );
312318}
313319
@@ -742,7 +748,7 @@ static int generate_sched_domains(cpumask_var_t **domains,
742748 int nslot ; /* next empty doms[] struct cpumask slot */
743749 struct cgroup_subsys_state * pos_css ;
744750 bool root_load_balance = is_sched_load_balance (& top_cpuset );
745- bool cgrpv2 = cgroup_subsys_on_dfl ( cpuset_cgrp_subsys );
751+ bool cgrpv2 = cpuset_v2 ( );
746752 int nslot_update ;
747753
748754 doms = NULL ;
@@ -1215,7 +1221,7 @@ static void reset_partition_data(struct cpuset *cs)
12151221{
12161222 struct cpuset * parent = parent_cs (cs );
12171223
1218- if (!cgroup_subsys_on_dfl ( cpuset_cgrp_subsys ))
1224+ if (!cpuset_v2 ( ))
12191225 return ;
12201226
12211227 lockdep_assert_held (& callback_lock );
@@ -2097,7 +2103,7 @@ static void update_cpumasks_hier(struct cpuset *cs, struct tmpmasks *tmp,
20972103 */
20982104 if (!cp -> partition_root_state && !force &&
20992105 cpumask_equal (tmp -> new_cpus , cp -> effective_cpus ) &&
2100- (!cgroup_subsys_on_dfl ( cpuset_cgrp_subsys ) ||
2106+ (!cpuset_v2 ( ) ||
21012107 (is_sched_load_balance (parent ) == is_sched_load_balance (cp )))) {
21022108 pos_css = css_rightmost_descendant (pos_css );
21032109 continue ;
@@ -2174,8 +2180,7 @@ static void update_cpumasks_hier(struct cpuset *cs, struct tmpmasks *tmp,
21742180 * from parent if current cpuset isn't a valid partition root
21752181 * and their load balance states differ.
21762182 */
2177- if (cgroup_subsys_on_dfl (cpuset_cgrp_subsys ) &&
2178- !is_partition_valid (cp ) &&
2183+ if (cpuset_v2 () && !is_partition_valid (cp ) &&
21792184 (is_sched_load_balance (parent ) != is_sched_load_balance (cp ))) {
21802185 if (is_sched_load_balance (parent ))
21812186 set_bit (CS_SCHED_LOAD_BALANCE , & cp -> flags );
@@ -2191,8 +2196,7 @@ static void update_cpumasks_hier(struct cpuset *cs, struct tmpmasks *tmp,
21912196 */
21922197 if (!cpumask_empty (cp -> cpus_allowed ) &&
21932198 is_sched_load_balance (cp ) &&
2194- (!cgroup_subsys_on_dfl (cpuset_cgrp_subsys ) ||
2195- is_partition_valid (cp )))
2199+ (!cpuset_v2 () || is_partition_valid (cp )))
21962200 need_rebuild_sched_domains = true;
21972201
21982202 rcu_read_lock ();
@@ -2337,7 +2341,7 @@ static int update_cpumask(struct cpuset *cs, struct cpuset *trialcs,
23372341
23382342 retval = validate_change (cs , trialcs );
23392343
2340- if ((retval == - EINVAL ) && cgroup_subsys_on_dfl ( cpuset_cgrp_subsys )) {
2344+ if ((retval == - EINVAL ) && cpuset_v2 ( )) {
23412345 struct cgroup_subsys_state * css ;
23422346 struct cpuset * cp ;
23432347
@@ -2824,8 +2828,7 @@ int cpuset_update_flag(cpuset_flagbits_t bit, struct cpuset *cs,
28242828 spin_unlock_irq (& callback_lock );
28252829
28262830 if (!cpumask_empty (trialcs -> cpus_allowed ) && balance_flag_changed ) {
2827- if (!IS_ENABLED (CONFIG_CPUSETS_V1 ) ||
2828- cgroup_subsys_on_dfl (cpuset_cgrp_subsys ))
2831+ if (cpuset_v2 ())
28292832 cpuset_force_rebuild ();
28302833 else
28312834 rebuild_sched_domains_locked ();
@@ -3005,8 +3008,7 @@ static int cpuset_can_attach(struct cgroup_taskset *tset)
30053008 * migration permission derives from hierarchy ownership in
30063009 * cgroup_procs_write_permission()).
30073010 */
3008- if (!cgroup_subsys_on_dfl (cpuset_cgrp_subsys ) ||
3009- (cpus_updated || mems_updated )) {
3011+ if (!cpuset_v2 () || (cpus_updated || mems_updated )) {
30103012 ret = security_task_setscheduler (task );
30113013 if (ret )
30123014 goto out_unlock ;
@@ -3120,8 +3122,7 @@ static void cpuset_attach(struct cgroup_taskset *tset)
31203122 * in effective cpus and mems. In that case, we can optimize out
31213123 * by skipping the task iteration and update.
31223124 */
3123- if (cgroup_subsys_on_dfl (cpuset_cgrp_subsys ) &&
3124- !cpus_updated && !mems_updated ) {
3125+ if (cpuset_v2 () && !cpus_updated && !mems_updated ) {
31253126 cpuset_attach_nodemask_to = cs -> effective_mems ;
31263127 goto out ;
31273128 }
@@ -3471,7 +3472,7 @@ cpuset_css_alloc(struct cgroup_subsys_state *parent_css)
34713472 INIT_LIST_HEAD (& cs -> remote_sibling );
34723473
34733474 /* Set CS_MEMORY_MIGRATE for default hierarchy */
3474- if (cgroup_subsys_on_dfl ( cpuset_cgrp_subsys ))
3475+ if (cpuset_v2 ( ))
34753476 __set_bit (CS_MEMORY_MIGRATE , & cs -> flags );
34763477
34773478 return & cs -> css ;
@@ -3498,8 +3499,7 @@ static int cpuset_css_online(struct cgroup_subsys_state *css)
34983499 /*
34993500 * For v2, clear CS_SCHED_LOAD_BALANCE if parent is isolated
35003501 */
3501- if (cgroup_subsys_on_dfl (cpuset_cgrp_subsys ) &&
3502- !is_sched_load_balance (parent ))
3502+ if (cpuset_v2 () && !is_sched_load_balance (parent ))
35033503 clear_bit (CS_SCHED_LOAD_BALANCE , & cs -> flags );
35043504
35053505 cpuset_inc ();
@@ -3566,8 +3566,7 @@ static void cpuset_css_offline(struct cgroup_subsys_state *css)
35663566 cpus_read_lock ();
35673567 mutex_lock (& cpuset_mutex );
35683568
3569- if (!cgroup_subsys_on_dfl (cpuset_cgrp_subsys ) &&
3570- is_sched_load_balance (cs ))
3569+ if (!cpuset_v2 () && is_sched_load_balance (cs ))
35713570 cpuset_update_flag (CS_SCHED_LOAD_BALANCE , cs , 0 );
35723571
35733572 cpuset_dec ();