Commit cbd5021

arndb authored and gregkh committed
random: use offstack cpumask when necessary
[ Upstream commit 5d49f1a ]

The entropy generation function keeps a local cpu mask on the stack,
which can trigger warnings in configurations with a large number of CPUs:

drivers/char/random.c:1292:20: error: stack frame size (1288) exceeds limit (1280) in 'try_to_generate_entropy' [-Werror,-Wframe-larger-than]

Use the cpumask interface to dynamically allocate it in those configurations.

Fixes: 1c21fe0 ("random: spread out jitter callback to different CPUs")
Signed-off-by: Arnd Bergmann <arnd@arndb.de>
Signed-off-by: Jason A. Donenfeld <Jason@zx2c4.com>
Signed-off-by: Sasha Levin <sashal@kernel.org>
1 parent 8ddff99 commit cbd5021
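The fix follows the standard kernel pattern for CPU masks that may be too large for the stack: declare a cpumask_var_t and bracket its use with alloc_cpumask_var()/free_cpumask_var(). The following is a minimal sketch of that pattern, not the actual drivers/char/random.c code; the helper name example_count_timer_cpus() is made up for illustration, while alloc_cpumask_var(), free_cpumask_var(), cpumask_and(), cpumask_weight() and housekeeping_cpumask(HK_TYPE_TIMER) are the real interfaces the patch relies on.

#include <linux/cpumask.h>
#include <linux/sched/isolation.h>
#include <linux/gfp.h>
#include <linux/errno.h>

/* Hypothetical helper illustrating the cpumask_var_t pattern used by the patch. */
static int example_count_timer_cpus(void)
{
	cpumask_var_t mask;	/* a pointer with CONFIG_CPUMASK_OFFSTACK=y, a one-element array otherwise */
	unsigned int num_cpus;

	/* Heap-allocates NR_CPUS bits when offstack; a no-op returning true otherwise. */
	if (!alloc_cpumask_var(&mask, GFP_KERNEL))
		return -ENOMEM;

	/* Same restriction the patch applies: online housekeeping timer CPUs. */
	cpumask_and(mask, housekeeping_cpumask(HK_TYPE_TIMER), cpu_online_mask);
	num_cpus = cpumask_weight(mask);

	free_cpumask_var(mask);
	return num_cpus;
}

With CONFIG_CPUMASK_OFFSTACK=y the mask bits are kmalloc()ed and only a pointer lives in the frame; without it the variable is a one-element on-stack array and the alloc/free calls are effectively no-ops.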

File tree

1 file changed: +12 -7 lines changed


drivers/char/random.c

Lines changed: 12 additions & 7 deletions
@@ -1296,6 +1296,7 @@ static void __cold try_to_generate_entropy(void)
 	struct entropy_timer_state *stack = PTR_ALIGN((void *)stack_bytes, SMP_CACHE_BYTES);
 	unsigned int i, num_different = 0;
 	unsigned long last = random_get_entropy();
+	cpumask_var_t timer_cpus;
 	int cpu = -1;
 
 	for (i = 0; i < NUM_TRIAL_SAMPLES - 1; ++i) {
@@ -1310,13 +1311,15 @@ static void __cold try_to_generate_entropy(void)
 
 	atomic_set(&stack->samples, 0);
 	timer_setup_on_stack(&stack->timer, entropy_timer, 0);
+	if (!alloc_cpumask_var(&timer_cpus, GFP_KERNEL))
+		goto out;
+
 	while (!crng_ready() && !signal_pending(current)) {
 		/*
 		 * Check !timer_pending() and then ensure that any previous callback has finished
 		 * executing by checking timer_delete_sync_try(), before queueing the next one.
 		 */
 		if (!timer_pending(&stack->timer) && timer_delete_sync_try(&stack->timer) >= 0) {
-			struct cpumask timer_cpus;
 			unsigned int num_cpus;
 
 			/*
@@ -1326,19 +1329,19 @@ static void __cold try_to_generate_entropy(void)
 			preempt_disable();
 
 			/* Only schedule callbacks on timer CPUs that are online. */
-			cpumask_and(&timer_cpus, housekeeping_cpumask(HK_TYPE_TIMER), cpu_online_mask);
-			num_cpus = cpumask_weight(&timer_cpus);
+			cpumask_and(timer_cpus, housekeeping_cpumask(HK_TYPE_TIMER), cpu_online_mask);
+			num_cpus = cpumask_weight(timer_cpus);
 			/* In very bizarre case of misconfiguration, fallback to all online. */
 			if (unlikely(num_cpus == 0)) {
-				timer_cpus = *cpu_online_mask;
-				num_cpus = cpumask_weight(&timer_cpus);
+				*timer_cpus = *cpu_online_mask;
+				num_cpus = cpumask_weight(timer_cpus);
 			}
 
 			/* Basic CPU round-robin, which avoids the current CPU. */
 			do {
-				cpu = cpumask_next(cpu, &timer_cpus);
+				cpu = cpumask_next(cpu, timer_cpus);
 				if (cpu >= nr_cpu_ids)
-					cpu = cpumask_first(&timer_cpus);
+					cpu = cpumask_first(timer_cpus);
 			} while (cpu == smp_processor_id() && num_cpus > 1);
 
 			/* Expiring the timer at `jiffies` means it's the next tick. */
@@ -1354,6 +1357,8 @@ static void __cold try_to_generate_entropy(void)
 	}
 	mix_pool_bytes(&stack->entropy, sizeof(stack->entropy));
 
+	free_cpumask_var(timer_cpus);
+out:
 	timer_delete_sync(&stack->timer);
 	timer_destroy_on_stack(&stack->timer);
 }
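For context, cpumask_var_t has two shapes, which is why the call sites above drop the '&' and why the '*timer_cpus = *cpu_online_mask;' fallback compiles in both configurations. The declarations below are a simplified sketch of include/linux/cpumask.h, shown here only as background and not part of this patch.

/* Simplified sketch of the two cpumask_var_t definitions (see include/linux/cpumask.h). */
#ifdef CONFIG_CPUMASK_OFFSTACK
typedef struct cpumask *cpumask_var_t;			/* points at kmalloc()ed bits */
bool alloc_cpumask_var(cpumask_var_t *mask, gfp_t flags);
void free_cpumask_var(cpumask_var_t mask);
#else
typedef struct cpumask cpumask_var_t[1];		/* still lives in the caller's frame */
static inline bool alloc_cpumask_var(cpumask_var_t *mask, gfp_t flags)
{
	return true;					/* nothing to allocate */
}
static inline void free_cpumask_var(cpumask_var_t mask)
{
}
#endif

In both configurations the variable evaluates to a struct cpumask * when passed to cpumask_and(), cpumask_weight() or cpumask_next(), so the same call sites work unchanged whether the mask is on the stack or on the heap.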
