Skip to content

Commit 0a94a4a

Browse files
author
CKI KWF Bot
committed
Merge: bpf: v6.17 stable backport
MR: https://gitlab.com/redhat/centos-stream/src/kernel/centos-stream-9/-/merge_requests/7593 JIRA: https://issues.redhat.com/browse/RHEL-110279 Depends: !7460 Stable backports for the BPF subsystem from upstream kernel version 6.17. Signed-off-by: Jerome Marchand <jmarchan@redhat.com> Approved-by: Toke Høiland-Jørgensen <toke@redhat.com> Approved-by: Viktor Malik <vmalik@redhat.com> Approved-by: Gregory Bell <grbell@redhat.com> Approved-by: iqian <iqian@redhat.com> Approved-by: CKI KWF Bot <cki-ci-bot+kwf-gitlab-com@redhat.com> Merged-by: CKI GitLab Kmaint Pipeline Bot <26919896-cki-kmaint-pipeline-bot@users.noreply.gitlab.com>
2 parents 3cae1ca + 2aaa28a commit 0a94a4a

File tree

14 files changed

+221
-80
lines changed

14 files changed

+221
-80
lines changed

arch/arm64/net/bpf_jit_comp.c

Lines changed: 1 addition & 0 deletions
Original file line number | Diff line number | Diff line change
@@ -399,6 +399,7 @@ static void push_callee_regs(struct jit_ctx *ctx)
399399
emit(A64_PUSH(A64_R(23), A64_R(24), A64_SP), ctx);
400400
emit(A64_PUSH(A64_R(25), A64_R(26), A64_SP), ctx);
401401
emit(A64_PUSH(A64_R(27), A64_R(28), A64_SP), ctx);
402+
ctx->fp_used = true;
402403
} else {
403404
find_used_callee_regs(ctx);
404405
for (i = 0; i + 1 < ctx->nr_used_callee_reg; i += 2) {

include/linux/bpf-cgroup.h

Lines changed: 0 additions & 5 deletions
Original file line number | Diff line number | Diff line change
@@ -72,9 +72,6 @@ to_cgroup_bpf_attach_type(enum bpf_attach_type attach_type)
7272
extern struct static_key_false cgroup_bpf_enabled_key[MAX_CGROUP_BPF_ATTACH_TYPE];
7373
#define cgroup_bpf_enabled(atype) static_branch_unlikely(&cgroup_bpf_enabled_key[atype])
7474

75-
#define for_each_cgroup_storage_type(stype) \
76-
for (stype = 0; stype < MAX_BPF_CGROUP_STORAGE_TYPE; stype++)
77-
7875
struct bpf_cgroup_storage_map;
7976

8077
struct bpf_storage_buffer {
@@ -499,8 +496,6 @@ static inline int bpf_percpu_cgroup_storage_update(struct bpf_map *map,
499496
#define BPF_CGROUP_RUN_PROG_SETSOCKOPT(sock, level, optname, optval, optlen, \
500497
kernel_optval) ({ 0; })
501498

502-
#define for_each_cgroup_storage_type(stype) for (; false; )
503-
504499
#endif /* CONFIG_CGROUP_BPF */
505500

506501
#endif /* _BPF_CGROUP_H */

include/linux/bpf.h

Lines changed: 40 additions & 20 deletions
Original file line numberDiff line numberDiff line change
@@ -205,6 +205,20 @@ enum btf_field_type {
205205
BPF_WORKQUEUE = (1 << 10),
206206
};
207207

208+
enum bpf_cgroup_storage_type {
209+
BPF_CGROUP_STORAGE_SHARED,
210+
BPF_CGROUP_STORAGE_PERCPU,
211+
__BPF_CGROUP_STORAGE_MAX
212+
#define MAX_BPF_CGROUP_STORAGE_TYPE __BPF_CGROUP_STORAGE_MAX
213+
};
214+
215+
#ifdef CONFIG_CGROUP_BPF
216+
# define for_each_cgroup_storage_type(stype) \
217+
for (stype = 0; stype < MAX_BPF_CGROUP_STORAGE_TYPE; stype++)
218+
#else
219+
# define for_each_cgroup_storage_type(stype) for (; false; )
220+
#endif /* CONFIG_CGROUP_BPF */
221+
208222
typedef void (*btf_dtor_kfunc_t)(void *);
209223

210224
struct btf_field_kptr {
@@ -256,6 +270,19 @@ struct bpf_list_node_kern {
256270
void *owner;
257271
} __attribute__((aligned(8)));
258272

273+
/* 'Ownership' of program-containing map is claimed by the first program
274+
* that is going to use this map or by the first program which FD is
275+
* stored in the map to make sure that all callers and callees have the
276+
* same prog type, JITed flag and xdp_has_frags flag.
277+
*/
278+
struct bpf_map_owner {
279+
enum bpf_prog_type type;
280+
bool jited;
281+
bool xdp_has_frags;
282+
u64 storage_cookie[MAX_BPF_CGROUP_STORAGE_TYPE];
283+
const struct btf_type *attach_func_proto;
284+
};
285+
259286
struct bpf_map {
260287
const struct bpf_map_ops *ops;
261288
struct bpf_map *inner_map_meta;
@@ -288,24 +315,15 @@ struct bpf_map {
288315
struct rcu_head rcu;
289316
};
290317
atomic64_t writecnt;
291-
/* 'Ownership' of program-containing map is claimed by the first program
292-
* that is going to use this map or by the first program which FD is
293-
* stored in the map to make sure that all callers and callees have the
294-
* same prog type, JITed flag and xdp_has_frags flag.
295-
*/
296-
struct {
297-
const struct btf_type *attach_func_proto;
298-
spinlock_t lock;
299-
enum bpf_prog_type type;
300-
bool jited;
301-
bool xdp_has_frags;
302-
} owner;
318+
spinlock_t owner_lock;
319+
struct bpf_map_owner *owner;
303320
bool bypass_spec_v1;
304321
bool frozen; /* write-once; write-protected by freeze_mutex */
305322
bool free_after_mult_rcu_gp;
306323
bool free_after_rcu_gp;
307324
atomic64_t sleepable_refcnt;
308325
s64 __percpu *elem_count;
326+
u64 cookie; /* write-once */
309327
};
310328

311329
static inline const char *btf_field_type_name(enum btf_field_type type)
@@ -1025,14 +1043,6 @@ struct bpf_prog_offload {
10251043
u32 jited_len;
10261044
};
10271045

1028-
enum bpf_cgroup_storage_type {
1029-
BPF_CGROUP_STORAGE_SHARED,
1030-
BPF_CGROUP_STORAGE_PERCPU,
1031-
__BPF_CGROUP_STORAGE_MAX
1032-
};
1033-
1034-
#define MAX_BPF_CGROUP_STORAGE_TYPE __BPF_CGROUP_STORAGE_MAX
1035-
10361046
/* The longest tracepoint has 12 args.
10371047
* See include/trace/bpf_probe.h
10381048
*/
@@ -1978,6 +1988,16 @@ static inline bool bpf_map_flags_access_ok(u32 access_flags)
19781988
(BPF_F_RDONLY_PROG | BPF_F_WRONLY_PROG);
19791989
}
19801990

1991+
static inline struct bpf_map_owner *bpf_map_owner_alloc(struct bpf_map *map)
1992+
{
1993+
return kzalloc(sizeof(*map->owner), GFP_ATOMIC);
1994+
}
1995+
1996+
static inline void bpf_map_owner_free(struct bpf_map *map)
1997+
{
1998+
kfree(map->owner);
1999+
}
2000+
19812001
struct bpf_event_entry {
19822002
struct perf_event *event;
19832003
struct file *perf_file;

kernel/bpf/core.c

Lines changed: 57 additions & 30 deletions
Original file line number | Diff line number | Diff line change
@@ -780,7 +780,10 @@ bool is_bpf_text_address(unsigned long addr)
780780

781781
struct bpf_prog *bpf_prog_ksym_find(unsigned long addr)
782782
{
783-
struct bpf_ksym *ksym = bpf_ksym_find(addr);
783+
struct bpf_ksym *ksym;
784+
785+
WARN_ON_ONCE(!rcu_read_lock_held());
786+
ksym = bpf_ksym_find(addr);
784787

785788
return ksym && ksym->prog ?
786789
container_of(ksym, struct bpf_prog_aux, ksym)->prog :
@@ -2307,40 +2310,48 @@ static unsigned int __bpf_prog_ret0_warn(const void *ctx,
23072310
return 0;
23082311
}
23092312

2310-
bool bpf_prog_map_compatible(struct bpf_map *map,
2311-
const struct bpf_prog *fp)
2313+
static bool __bpf_prog_map_compatible(struct bpf_map *map,
2314+
const struct bpf_prog *fp)
23122315
{
23132316
enum bpf_prog_type prog_type = resolve_prog_type(fp);
2314-
bool ret;
23152317
struct bpf_prog_aux *aux = fp->aux;
2318+
enum bpf_cgroup_storage_type i;
2319+
bool ret = false;
2320+
u64 cookie;
23162321

23172322
if (fp->kprobe_override)
2318-
return false;
2319-
2320-
/* XDP programs inserted into maps are not guaranteed to run on
2321-
* a particular netdev (and can run outside driver context entirely
2322-
* in the case of devmap and cpumap). Until device checks
2323-
* are implemented, prohibit adding dev-bound programs to program maps.
2324-
*/
2325-
if (bpf_prog_is_dev_bound(aux))
2326-
return false;
2323+
return ret;
23272324

2328-
spin_lock(&map->owner.lock);
2329-
if (!map->owner.type) {
2330-
/* There's no owner yet where we could check for
2331-
* compatibility.
2332-
*/
2333-
map->owner.type = prog_type;
2334-
map->owner.jited = fp->jited;
2335-
map->owner.xdp_has_frags = aux->xdp_has_frags;
2336-
map->owner.attach_func_proto = aux->attach_func_proto;
2325+
spin_lock(&map->owner_lock);
2326+
/* There's no owner yet where we could check for compatibility. */
2327+
if (!map->owner) {
2328+
map->owner = bpf_map_owner_alloc(map);
2329+
if (!map->owner)
2330+
goto err;
2331+
map->owner->type = prog_type;
2332+
map->owner->jited = fp->jited;
2333+
map->owner->xdp_has_frags = aux->xdp_has_frags;
2334+
map->owner->attach_func_proto = aux->attach_func_proto;
2335+
for_each_cgroup_storage_type(i) {
2336+
map->owner->storage_cookie[i] =
2337+
aux->cgroup_storage[i] ?
2338+
aux->cgroup_storage[i]->cookie : 0;
2339+
}
23372340
ret = true;
23382341
} else {
2339-
ret = map->owner.type == prog_type &&
2340-
map->owner.jited == fp->jited &&
2341-
map->owner.xdp_has_frags == aux->xdp_has_frags;
2342+
ret = map->owner->type == prog_type &&
2343+
map->owner->jited == fp->jited &&
2344+
map->owner->xdp_has_frags == aux->xdp_has_frags;
2345+
for_each_cgroup_storage_type(i) {
2346+
if (!ret)
2347+
break;
2348+
cookie = aux->cgroup_storage[i] ?
2349+
aux->cgroup_storage[i]->cookie : 0;
2350+
ret = map->owner->storage_cookie[i] == cookie ||
2351+
!cookie;
2352+
}
23422353
if (ret &&
2343-
map->owner.attach_func_proto != aux->attach_func_proto) {
2354+
map->owner->attach_func_proto != aux->attach_func_proto) {
23442355
switch (prog_type) {
23452356
case BPF_PROG_TYPE_TRACING:
23462357
case BPF_PROG_TYPE_LSM:
@@ -2353,11 +2364,24 @@ bool bpf_prog_map_compatible(struct bpf_map *map,
23532364
}
23542365
}
23552366
}
2356-
spin_unlock(&map->owner.lock);
2357-
2367+
err:
2368+
spin_unlock(&map->owner_lock);
23582369
return ret;
23592370
}
23602371

2372+
bool bpf_prog_map_compatible(struct bpf_map *map, const struct bpf_prog *fp)
2373+
{
2374+
/* XDP programs inserted into maps are not guaranteed to run on
2375+
* a particular netdev (and can run outside driver context entirely
2376+
* in the case of devmap and cpumap). Until device checks
2377+
* are implemented, prohibit adding dev-bound programs to program maps.
2378+
*/
2379+
if (bpf_prog_is_dev_bound(fp->aux))
2380+
return false;
2381+
2382+
return __bpf_prog_map_compatible(map, fp);
2383+
}
2384+
23612385
static int bpf_check_tail_call(const struct bpf_prog *fp)
23622386
{
23632387
struct bpf_prog_aux *aux = fp->aux;
@@ -2370,7 +2394,7 @@ static int bpf_check_tail_call(const struct bpf_prog *fp)
23702394
if (!map_type_contains_progs(map))
23712395
continue;
23722396

2373-
if (!bpf_prog_map_compatible(map, fp)) {
2397+
if (!__bpf_prog_map_compatible(map, fp)) {
23742398
ret = -EINVAL;
23752399
goto out;
23762400
}
@@ -2931,7 +2955,10 @@ EXPORT_SYMBOL_GPL(bpf_event_output);
29312955

29322956
/* Always built-in helper functions. */
29332957
const struct bpf_func_proto bpf_tail_call_proto = {
2934-
.func = NULL,
2958+
/* func is unused for tail_call, we set it to pass the
2959+
* get_helper_proto check
2960+
*/
2961+
.func = BPF_PTR_POISON,
29352962
.gpl_only = false,
29362963
.ret_type = RET_VOID,
29372964
.arg1_type = ARG_PTR_TO_CTX,

kernel/bpf/helpers.c

Lines changed: 9 additions & 2 deletions
Original file line number | Diff line number | Diff line change
@@ -2777,9 +2777,16 @@ static bool bpf_stack_walker(void *cookie, u64 ip, u64 sp, u64 bp)
27772777
struct bpf_throw_ctx *ctx = cookie;
27782778
struct bpf_prog *prog;
27792779

2780-
if (!is_bpf_text_address(ip))
2781-
return !ctx->cnt;
2780+
/*
2781+
* The RCU read lock is held to safely traverse the latch tree, but we
2782+
* don't need its protection when accessing the prog, since it has an
2783+
* active stack frame on the current stack trace, and won't disappear.
2784+
*/
2785+
rcu_read_lock();
27822786
prog = bpf_prog_ksym_find(ip);
2787+
rcu_read_unlock();
2788+
if (!prog)
2789+
return !ctx->cnt;
27832790
ctx->cnt++;
27842791
if (bpf_is_subprog(prog))
27852792
return true;

kernel/bpf/syscall.c

Lines changed: 13 additions & 6 deletions
Original file line number | Diff line number | Diff line change
@@ -35,6 +35,7 @@
3535
#include <linux/bpf-netns.h>
3636
#include <linux/rcupdate_trace.h>
3737
#include <linux/memcontrol.h>
38+
#include <linux/cookie.h>
3839
#include <linux/trace_events.h>
3940
#include <net/netfilter/nf_bpf_link.h>
4041

@@ -51,6 +52,7 @@
5152
#define BPF_OBJ_FLAG_MASK (BPF_F_RDONLY | BPF_F_WRONLY)
5253

5354
DEFINE_PER_CPU(int, bpf_prog_active);
55+
DEFINE_COOKIE(bpf_map_cookie);
5456
static DEFINE_IDR(prog_idr);
5557
static DEFINE_SPINLOCK(prog_idr_lock);
5658
static DEFINE_IDR(map_idr);
@@ -782,6 +784,7 @@ static void bpf_map_free_deferred(struct work_struct *work)
782784

783785
security_bpf_map_free(map);
784786
bpf_map_release_memcg(map);
787+
bpf_map_owner_free(map);
785788
bpf_map_free(map);
786789
}
787790

@@ -876,12 +879,12 @@ static void bpf_map_show_fdinfo(struct seq_file *m, struct file *filp)
876879
struct bpf_map *map = filp->private_data;
877880
u32 type = 0, jited = 0;
878881

879-
if (map_type_contains_progs(map)) {
880-
spin_lock(&map->owner.lock);
881-
type = map->owner.type;
882-
jited = map->owner.jited;
883-
spin_unlock(&map->owner.lock);
882+
spin_lock(&map->owner_lock);
883+
if (map->owner) {
884+
type = map->owner->type;
885+
jited = map->owner->jited;
884886
}
887+
spin_unlock(&map->owner_lock);
885888

886889
seq_printf(m,
887890
"map_type:\t%u\n"
@@ -1377,10 +1380,14 @@ static int map_create(union bpf_attr *attr)
13771380
if (err < 0)
13781381
goto free_map;
13791382

1383+
preempt_disable();
1384+
map->cookie = gen_cookie_next(&bpf_map_cookie);
1385+
preempt_enable();
1386+
13801387
atomic64_set(&map->refcnt, 1);
13811388
atomic64_set(&map->usercnt, 1);
13821389
mutex_init(&map->freeze_mutex);
1383-
spin_lock_init(&map->owner.lock);
1390+
spin_lock_init(&map->owner_lock);
13841391

13851392
if (attr->btf_key_type_id || attr->btf_value_type_id ||
13861393
/* Even the map's value is a kernel's struct,

kernel/bpf/verifier.c

Lines changed: 7 additions & 2 deletions
Original file line number | Diff line number | Diff line change
@@ -400,7 +400,8 @@ static bool reg_not_null(const struct bpf_reg_state *reg)
400400
type == PTR_TO_MAP_KEY ||
401401
type == PTR_TO_SOCK_COMMON ||
402402
(type == PTR_TO_BTF_ID && is_trusted_reg(reg)) ||
403-
type == PTR_TO_MEM;
403+
type == PTR_TO_MEM ||
404+
type == CONST_PTR_TO_MAP;
404405
}
405406

406407
static struct btf_record *reg_btf_record(const struct bpf_reg_state *reg)
@@ -10477,7 +10478,7 @@ static int get_helper_proto(struct bpf_verifier_env *env, int func_id,
1047710478
return -EINVAL;
1047810479

1047910480
*ptr = env->ops->get_func_proto(func_id, env->prog);
10480-
return *ptr ? 0 : -EINVAL;
10481+
return *ptr && (*ptr)->func ? 0 : -EINVAL;
1048110482
}
1048210483

1048310484
static int check_helper_call(struct bpf_verifier_env *env, struct bpf_insn *insn,
@@ -15132,6 +15133,10 @@ static void regs_refine_cond_op(struct bpf_reg_state *reg1, struct bpf_reg_state
1513215133
if (!is_reg_const(reg2, is_jmp32))
1513315134
break;
1513415135
val = reg_const_value(reg2, is_jmp32);
15136+
/* Forget the ranges before narrowing tnums, to avoid invariant
15137+
* violations if we're on a dead branch.
15138+
*/
15139+
__mark_reg_unbounded(reg1);
1513515140
if (is_jmp32) {
1513615141
t = tnum_and(tnum_subreg(reg1->var_off), tnum_const(~val));
1513715142
reg1->var_off = tnum_with_subreg(reg1->var_off, t);

0 commit comments

Comments (0)