Collect bpf helper arguments related to bpf map #453

Open · wants to merge 8 commits into main
151 changes: 145 additions & 6 deletions bpf/kprobe_pwru.c
@@ -83,6 +83,7 @@ struct event_t {
u64 ts;
u64 print_skb_id;
u64 print_shinfo_id;
u64 print_bpfmap_id;
struct skb_meta meta;
struct tuple tuple;
s64 print_stack_id;
@@ -167,6 +168,13 @@ struct {
__uint(value_size, MAX_STACK_DEPTH * sizeof(u64));
} print_stack_map SEC(".maps");

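/* Per-CPU stash that carries the event between the kprobe and kretprobe
 * of the same bpf_map_lookup_elem call; see the safety argument in
 * kretprobe_bpf_map_lookup_elem below. */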
struct {
__uint(type, BPF_MAP_TYPE_PERCPU_ARRAY);
__uint(max_entries, 1);
__type(key, u32);
__type(value, struct event_t);
} event_stash SEC(".maps");

struct print_skb_value {
u32 len;
char str[PRINT_SKB_STR_SIZE];
@@ -175,6 +183,14 @@ struct print_shinfo_value {
u32 len;
char str[PRINT_SHINFO_STR_SIZE];
};
struct print_bpfmap_value {
u32 id;
char name[16];
u32 key_size;
u32 value_size;
u8 key[256];
u8 value[256];
} __attribute__((packed));
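The struct is packed and fixed-size so that userspace can decode it byte-for-byte with `binary.Read` (see `getBpfMapData` in internal/pwru/output.go below). A sketch of the matching Go layout, assuming these field names; the real `printBpfmapValue` is defined outside this diff:

```go
// printBpfmapValue mirrors the packed C struct print_bpfmap_value:
// 4 + 16 + 4 + 4 + 256 + 256 = 540 bytes, with no padding between fields.
type printBpfmapValue struct {
	ID        uint32
	Name      [16]byte
	KeySize   uint32
	ValueSize uint32
	Key       [256]byte
	Value     [256]byte
}
```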
struct {
__uint(type, BPF_MAP_TYPE_PERCPU_ARRAY);
__uint(max_entries, 1);
@@ -199,6 +215,18 @@ struct {
__type(key, u64);
__type(value, struct print_shinfo_value);
} print_shinfo_map SEC(".maps");
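/* Per-CPU counter from which sync_fetch_and_add() mints unique
 * print_bpfmap_id values. */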
struct {
__uint(type, BPF_MAP_TYPE_PERCPU_ARRAY);
__uint(max_entries, 1);
__type(key, u32);
__type(value, u32);
} print_bpfmap_id_map SEC(".maps");
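/* Captured map metadata plus raw key/value bytes, keyed by
 * print_bpfmap_id; userspace reads and deletes entries on output. */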
struct {
__uint(type, BPF_MAP_TYPE_HASH);
__uint(max_entries, 1024);
__type(key, u64);
__type(value, struct print_bpfmap_value);
} print_bpfmap_map SEC(".maps");

static __always_inline u32
get_netns(struct sk_buff *skb) {
@@ -339,7 +367,7 @@ static __always_inline u64
sync_fetch_and_add(void *id_map) {
u32 *id = bpf_map_lookup_elem(id_map, &ZERO);
if (id)
return ((*id)++) | ((u64)bpf_get_smp_processor_id() << 32);
return ((*id)++) | ((u64)(bpf_get_smp_processor_id() + 1) << 32);
return 0;
}
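The ID minted here puts a per-CPU counter in the low 32 bits and the CPU number plus one in the high 32 bits, so any successfully allocated ID is non-zero; that is what lets the output path test `event.PrintBpfmapId > 0`. A minimal Go illustration of the encoding (names are illustrative):

```go
// composeID mirrors sync_fetch_and_add(): the low 32 bits carry a per-CPU
// counter, the high 32 bits carry cpu+1, so the result is never zero.
func composeID(cpu, counter uint32) uint64 {
	return uint64(cpu+1)<<32 | uint64(counter)
}

// composeID(3, 7) == 0x4_0000_0007: cpu 3, counter 7.
```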

@@ -508,6 +536,12 @@ handle_everything(struct sk_buff *skb, void *ctx, struct event_t *event, u64 *_s
bpf_map_update_elem(&skb_stackid, &skb, &stackid, BPF_ANY);
}

if (CFG.output_caller)
bpf_probe_read_kernel(&event->caller_addr,
sizeof(event->caller_addr),
(void *)PT_REGS_SP((struct pt_regs *)ctx));

event->skb_addr = skb_addr;
event->pid = bpf_get_current_pid_tgid() >> 32;
event->ts = bpf_ktime_get_ns();
event->cpu_id = bpf_get_smp_processor_id();
@@ -523,14 +557,10 @@ kprobe_skb(struct sk_buff *skb, struct pt_regs *ctx, const bool has_get_func_ip,
if (!handle_everything(skb, ctx, &event, _stackid, true))
return BPF_OK;

event.skb_addr = (u64) skb;
event.addr = has_get_func_ip ? bpf_get_func_ip(ctx) : PT_REGS_IP(ctx);
event.type = kprobe_multi ? EVENT_TYPE_KPROBE_MULTI: EVENT_TYPE_KPROBE;
event.param_second = PT_REGS_PARM2(ctx);
event.param_third = PT_REGS_PARM3(ctx);
if (CFG.output_caller)
bpf_probe_read_kernel(&event.caller_addr, sizeof(event.caller_addr), (void *)PT_REGS_SP(ctx));


bpf_map_push_elem(&events, &event, BPF_EXIST);

@@ -618,7 +648,6 @@ int BPF_PROG(fentry_tc, struct sk_buff *skb) {
if (!handle_everything(skb, ctx, &event, NULL, false))
return BPF_OK;

event.skb_addr = (u64) skb;
event.addr = BPF_PROG_ADDR;
event.type = EVENT_TYPE_TC;
bpf_map_push_elem(&events, &event, BPF_EXIST);
@@ -768,4 +797,114 @@ int kretprobe_veth_convert_skb_to_xdp_buff(struct pt_regs *ctx) {
return BPF_OK;
}

static __always_inline void
set_common_bpfmap_info(struct pt_regs *ctx, u64 *event_id,
struct print_bpfmap_value *bpfmap) {
struct bpf_map *map = (struct bpf_map *)PT_REGS_PARM1(ctx);

*event_id = sync_fetch_and_add(&print_bpfmap_id_map);
BPF_CORE_READ_INTO(&bpfmap->id, map, id);
BPF_CORE_READ_STR_INTO(&bpfmap->name, map, name);
BPF_CORE_READ_INTO(&bpfmap->key_size, map, key_size);
BPF_CORE_READ_INTO(&bpfmap->value_size, map, value_size);
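/* Always copy the full 256-byte window; the real key length is
 * map->key_size, which userspace uses to trim the buffer. */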
bpf_probe_read_kernel(&bpfmap->key, sizeof(bpfmap->key), (void *)PT_REGS_PARM2(ctx));
}

SEC("kprobe/bpf_map_update_elem")
int kprobe_bpf_map_update_elem(struct pt_regs *ctx) {
u64 stackid = get_stackid(ctx, true);

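/* Only report map updates performed while a tracked skb is on the
 * current call stack: stackid_skb maps the stack ID back to the skb. */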
struct sk_buff **skb = bpf_map_lookup_elem(&stackid_skb, &stackid);
if (skb && *skb) {
struct event_t event = {};

event.addr = PT_REGS_IP(ctx);
if (!handle_everything(*skb, ctx, &event, &stackid, true))
return BPF_OK;


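/* At 540 bytes the scratch value is too large for the 512-byte BPF
 * stack, so it lives in .bss via static. */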
static struct print_bpfmap_value bpfmap = {};
set_common_bpfmap_info(ctx, &event.print_bpfmap_id, &bpfmap);
bpf_probe_read_kernel(&bpfmap.value,
sizeof(bpfmap.value),
(void *)PT_REGS_PARM3(ctx));

bpf_map_update_elem(&print_bpfmap_map, &event.print_bpfmap_id, &bpfmap, BPF_ANY);
bpf_map_push_elem(&events, &event, BPF_EXIST);
}

return BPF_OK;
}

SEC("kprobe/bpf_map_delete_elem")
int kprobe_bpf_map_delete_elem(struct pt_regs *ctx) {
u64 stackid = get_stackid(ctx, true);

struct sk_buff **skb = bpf_map_lookup_elem(&stackid_skb, &stackid);
if (skb && *skb) {
struct event_t event = {};

event.addr = PT_REGS_IP(ctx);
if (!handle_everything(*skb, ctx, &event, &stackid, true))
return BPF_OK;

static struct print_bpfmap_value bpfmap = {};
set_common_bpfmap_info(ctx, &event.print_bpfmap_id, &bpfmap);

bpf_map_update_elem(&print_bpfmap_map, &event.print_bpfmap_id, &bpfmap, BPF_ANY);
bpf_map_push_elem(&events, &event, BPF_EXIST);
}

return BPF_OK;
}

SEC("kprobe/bpf_map_lookup_elem")
int kprobe_bpf_map_lookup_elem(struct pt_regs *ctx) {
u64 stackid = get_stackid(ctx, true);

struct sk_buff **skb = bpf_map_lookup_elem(&stackid_skb, &stackid);
if (skb && *skb) {
struct event_t event = {};

event.addr = PT_REGS_IP(ctx);
if (!handle_everything(*skb, ctx, &event, &stackid, true))
return BPF_OK;

static struct print_bpfmap_value bpfmap = {};
set_common_bpfmap_info(ctx, &event.print_bpfmap_id, &bpfmap);

bpf_map_update_elem(&print_bpfmap_map, &event.print_bpfmap_id, &bpfmap, BPF_ANY);
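/* For lookups the value pointer is only known on return, so stash the
 * event per-CPU and let the kretprobe fill in the value and emit it. */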
bpf_map_update_elem(&event_stash, &ZERO, &event, BPF_ANY);
}

return BPF_OK;
}

SEC("kretprobe/bpf_map_lookup_elem")
int kretprobe_bpf_map_lookup_elem(struct pt_regs *ctx) {
/* Two assumptions:
* 1. CPU won't be preempted between kprobe and kretprobe of the same
* lookup operation.
* 2. Lookup won't be recursive.
*
* I believe both are true in the current implementation of BPF.
* Therefore, using PERCPU array to stash the event is safe.
*/
struct event_t *event = bpf_map_lookup_elem(&event_stash, &ZERO);
if (!event)
return BPF_OK;

struct print_bpfmap_value *bpfmap = bpf_map_lookup_elem(&print_bpfmap_map,
&event->print_bpfmap_id);
if (!bpfmap)
return BPF_OK;

bpf_probe_read_kernel(&bpfmap->value,
sizeof(bpfmap->value),
(void *)PT_REGS_RC(ctx));

bpf_map_push_elem(&events, event, BPF_EXIST);
return BPF_OK;
}

char __license[] SEC("license") = "Dual BSD/GPL";
47 changes: 46 additions & 1 deletion internal/pwru/kprobe.go
@@ -16,6 +16,7 @@ import (

"github.com/cheggaaa/pb/v3"
"github.com/cilium/ebpf"
"github.com/cilium/ebpf/btf"
"github.com/cilium/ebpf/link"
"golang.org/x/sync/errgroup"
)
@@ -247,7 +248,7 @@ func NewKprober(ctx context.Context, funcs Funcs, coll *ebpf.Collection, a2n Add
return &k
}

func NewNonSkbFuncsKprober(nonSkbFuncs []string, funcs Funcs, coll *ebpf.Collection) *kprober {
func NewNonSkbFuncsKprober(nonSkbFuncs []string, funcs Funcs, bpfmapFuncs map[string]*btf.FuncProto, coll *ebpf.Collection) *kprober {
slices.Sort(nonSkbFuncs)
nonSkbFuncs = slices.Compact(nonSkbFuncs)

@@ -264,6 +265,50 @@ func NewNonSkbFuncsKprober(nonSkbFuncs []string, funcs Funcs, coll *ebpf.Collect
continue
}

if _, ok := bpfmapFuncs[fn]; ok {
if strings.HasSuffix(fn, "_lookup_elem") {
kp, err := link.Kprobe(fn, coll.Programs["kprobe_bpf_map_lookup_elem"], nil)
if err != nil {
if !errors.Is(err, os.ErrNotExist) {
log.Printf("Failed to attach bpf_map_lookup_elem kprobe %s: %s\n", fn, err)
}
continue
}
k.links = append(k.links, kp)

krp, err := link.Kretprobe(fn, coll.Programs["kretprobe_bpf_map_lookup_elem"], nil)
if err != nil {
if !errors.Is(err, os.ErrNotExist) {
log.Printf("Failed to attach bpf_map_lookup_elem kretprobe %s: %s\n", fn, err)
}
continue
}
k.links = append(k.links, krp)

} else if strings.HasSuffix(fn, "_update_elem") {
kp, err := link.Kprobe(fn, coll.Programs["kprobe_bpf_map_update_elem"], nil)
if err != nil {
if !errors.Is(err, os.ErrNotExist) {
log.Printf("Failed to attach bpf_map_update_elem kprobe %s: %s\n", fn, err)
}
continue
}
k.links = append(k.links, kp)

} else if strings.HasSuffix(fn, "_delete_elem") {
kp, err := link.Kprobe(fn, coll.Programs["kprobe_bpf_map_delete_elem"], nil)
if err != nil {
if !errors.Is(err, os.ErrNotExist) {
log.Printf("Failed to attach bpf_map_delete_elem kprobe %s: %s\n", fn, err)
}
continue
}
k.links = append(k.links, kp)
}

continue
}

kp, err := link.Kprobe(fn, coll.Programs["kprobe_skb_by_stackid"], nil)
if err != nil {
if errors.Is(err, os.ErrNotExist) {
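Each kernel symbol matched as a map helper is dispatched by suffix to one of three kprobe programs above (lookups additionally get a paired kretprobe). A table-driven sketch of that mapping, with program names taken from the SEC() entries in bpf/kprobe_pwru.c:

```go
// Suffix of the traced symbol → BPF program that handles it.
var bpfmapProgBySuffix = map[string]string{
	"_lookup_elem": "kprobe_bpf_map_lookup_elem", // plus kretprobe_bpf_map_lookup_elem
	"_update_elem": "kprobe_bpf_map_update_elem",
	"_delete_elem": "kprobe_bpf_map_delete_elem",
}
```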
24 changes: 23 additions & 1 deletion internal/pwru/output.go
@@ -5,6 +5,8 @@
package pwru

import (
"bytes"
"encoding/binary"
"encoding/json"
"errors"
"fmt"
@@ -43,6 +45,7 @@ type output struct {
printSkbMap *ebpf.Map
printShinfoMap *ebpf.Map
printStackMap *ebpf.Map
printBpfmapMap *ebpf.Map
addr2name Addr2Name
writer *os.File
kprobeMulti bool
@@ -88,7 +91,7 @@ func centerAlignString(s string, width int) string {
return fmt.Sprintf("%s%s%s", strings.Repeat(" ", leftPadding), s, strings.Repeat(" ", rightPadding))
}

func NewOutput(flags *Flags, printSkbMap, printShinfoMap, printStackMap *ebpf.Map, addr2Name Addr2Name, kprobeMulti bool, btfSpec *btf.Spec) (*output, error) {
func NewOutput(flags *Flags, printSkbMap, printShinfoMap, printStackMap, printBpfmapMap *ebpf.Map, addr2Name Addr2Name, kprobeMulti bool, btfSpec *btf.Spec) (*output, error) {
writer := os.Stdout

if flags.OutputFile != "" {
@@ -118,6 +121,7 @@ func NewOutput(flags *Flags, printSkbMap, printShinfoMap, printStackMap *ebpf.Ma
printSkbMap: printSkbMap,
printShinfoMap: printShinfoMap,
printStackMap: printStackMap,
printBpfmapMap: printBpfmapMap,
addr2name: addr2Name,
writer: writer,
kprobeMulti: kprobeMulti,
@@ -454,6 +458,10 @@ func (o *output) Print(event *Event) {
fmt.Fprintf(o.writer, "%s", getShinfoData(event, o))
}

if o.flags.OutputBpfmap && event.PrintBpfmapId > 0 {
fmt.Fprintf(o.writer, "%s", getBpfMapData(event, o))
}

fmt.Fprintln(o.writer)
}

Expand Down Expand Up @@ -613,3 +621,17 @@ func getIfacesInNetNs(path string) (map[uint32]string, error) {

return ifaces, nil
}

func getBpfMapData(event *Event, o *output) string {
id := uint64(event.PrintBpfmapId)
b, err := o.printBpfmapMap.LookupBytes(&id)
if err != nil {
return ""
}
// Delete the entry even if decoding fails, so the map does not leak.
defer o.printBpfmapMap.Delete(&id)
bpfmap := printBpfmapValue{}
if err = binary.Read(bytes.NewBuffer(b), byteorder.Native, &bpfmap); err != nil {
return ""
}
return "\n" + bpfmap.String()
}
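`getBpfMapData` fetches the stashed entry by event ID, decodes it into `printBpfmapValue`, deletes it from the map, and renders it through a `String` method that lies outside this diff. A plausible sketch of that method, assuming it trims the fixed 256-byte buffers down to `KeySize`/`ValueSize` (uses the `min` builtin from Go 1.21+):

```go
// String renders the captured map access; Key/Value are hex-encoded and
// trimmed to the lengths reported by the kernel-side struct bpf_map.
func (v printBpfmapValue) String() string {
	name := string(bytes.TrimRight(v.Name[:], "\x00"))
	ks := min(int(v.KeySize), len(v.Key))
	vs := min(int(v.ValueSize), len(v.Value))
	return fmt.Sprintf("bpf map: id=%d name=%s key=0x%x value=0x%x",
		v.ID, name, v.Key[:ks], v.Value[:vs])
}
```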