Loading...
Loading...
eBPF skill for Linux observability and networking. Use when writing eBPF programs with libbpf or bpftrace, attaching kprobes/tracepoints/XDP hooks, debugging verifier errors, working with eBPF maps, or achieving CO-RE portability across kernel versions. Activates on queries about eBPF, bpftool, bpftrace, XDP programs, libbpf, verifier errors, eBPF maps, or kernel tracing with BPF.
npx skill4agent add mohitmishra786/low-level-dev-skills ebpf

Goal?
├── One-liner kernel tracing / scripting → bpftrace
├── Production eBPF program with userspace → libbpf (C) or aya (Rust)
├── Inspect loaded programs and maps → bpftool
└── High-performance packet processing → XDP + libbpf

# Trace all execve calls with comm and args
bpftrace -e 'tracepoint:syscalls:sys_enter_execve { printf("%s %s\n", comm, str(args->filename)); }'
# Count syscalls by process
bpftrace -e 'tracepoint:raw_syscalls:sys_enter { @[comm] = count(); }'
# Latency histogram for read() syscall
bpftrace -e '
tracepoint:syscalls:sys_enter_read { @start[tid] = nsecs; }
tracepoint:syscalls:sys_exit_read { @us = hist((nsecs - @start[tid]) / 1000); delete(@start[tid]); }'
# List available tracepoints
bpftrace -l 'tracepoint:syscalls:*'
bpftrace -l 'kprobe:tcp_*'

// counter.bpf.c — kernel-side
#include <vmlinux.h>
#include <bpf/bpf_helpers.h>
/* Per-PID syscall counter map.
 * Key: PID (TGID, the upper 32 bits of bpf_get_current_pid_tgid()).
 * Value: number of sys_enter_read events observed for that PID. */
struct {
__uint(type, BPF_MAP_TYPE_HASH);
__type(key, u32);
__type(value, u64);
__uint(max_entries, 1024); /* at most 1024 distinct PIDs tracked */
} call_count SEC(".maps");
/* Tracepoint handler: count read() syscall entries per process.
 *
 * Runs concurrently on every CPU, so the increment must be atomic:
 * a plain (*cnt)++ is a racy read-modify-write that loses updates.
 * The miss path uses BPF_NOEXIST so a racing CPU's freshly inserted
 * count is never clobbered; if the insert loses that race, fall back
 * to lookup + atomic add.
 *
 * Returns 0 (tracepoint program return values are ignored). */
SEC("tracepoint/syscalls/sys_enter_read")
int trace_read(struct trace_event_raw_sys_enter *ctx)
{
	/* pid_tgid packs the TGID (userspace "PID") in the upper 32 bits. */
	u32 pid = bpf_get_current_pid_tgid() >> 32;
	u64 *cnt = bpf_map_lookup_elem(&call_count, &pid);
	if (cnt) {
		__sync_fetch_and_add(cnt, 1);
	} else {
		u64 one = 1;
		/* BPF_NOEXIST: fail rather than overwrite if another CPU
		 * inserted this key between our lookup and this update. */
		if (bpf_map_update_elem(&call_count, &pid, &one, BPF_NOEXIST)) {
			cnt = bpf_map_lookup_elem(&call_count, &pid);
			if (cnt)
				__sync_fetch_and_add(cnt, 1);
		}
	}
	return 0;
}
char LICENSE[] SEC("license") = "GPL";

// counter.c — userspace loader
#include <stdio.h>

#include "counter.skel.h"
int main(void) {
struct counter_bpf *skel = counter_bpf__open_and_load();
counter_bpf__attach(skel);
// read map, print results
counter_bpf__destroy(skel);
}# Build with libbpf
clang -g -O2 -target bpf -D__TARGET_ARCH_x86 -I/usr/include/bpf \
-c counter.bpf.c -o counter.bpf.o
bpftool gen skeleton counter.bpf.o > counter.skel.h
gcc -o counter counter.c -lbpf -lelf -lz

| Map type | Key→Value | Use case |
|---|---|---|
| BPF_MAP_TYPE_HASH | arbitrary→arbitrary | Per-PID counters, state |
| BPF_MAP_TYPE_ARRAY | u32→fixed | Config, metrics indexed by CPU |
| BPF_MAP_TYPE_PERCPU_HASH / PERCPU_ARRAY | key→per-CPU val | High-frequency counters without locks |
| BPF_MAP_TYPE_RINGBUF | — | Efficient kernel→userspace events |
| BPF_MAP_TYPE_PERF_EVENT_ARRAY | — | Legacy perf event output |
| BPF_MAP_TYPE_LRU_HASH | key→val | Connection tracking, limited size |
| BPF_MAP_TYPE_PROG_ARRAY | u32→prog | Tail calls, program chaining |
| BPF_MAP_TYPE_XSKMAP | — | AF_XDP socket redirection |

| Error message | Root cause | Fix |
|---|---|---|
| `invalid mem access` | Dereferencing unbounded pointer | Check pointer with null test before use |
| `R0 !read_ok` | Return without setting R0 | Ensure all paths set a return value |
| `jump out of range` | Branch target beyond program end | Restructure conditionals |
| `back-edge from insn` | Backward jump (loop) | Use bounded loops or `bpf_loop()` |
| `unreachable insn` | Dead code after return | Remove dead branches |
| `invalid indirect read from stack` | Stack read of uninitialised bytes | Zero-init structs: `__builtin_memset(&s, 0, sizeof(s))` |
| `misaligned access` | Pointer arithmetic off alignment | Align reads to natural field boundaries |
# Get detailed verifier log
bpftool prog load prog.bpf.o /sys/fs/bpf/prog type kprobe \
2>&1 | head -100
# Check loaded programs
bpftool prog list
bpftool prog dump xlated id 42

// xdp_drop_icmp.bpf.c
#include <vmlinux.h>
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_endian.h>
/* XDP program: drop IPv4 ICMP packets, pass everything else.
 *
 * Every pointer into packet data must be bounds-checked against
 * data_end before any dereference, or the verifier rejects the
 * program — do not reorder or "simplify" these checks. */
SEC("xdp")
int xdp_filter(struct xdp_md *ctx)
{
/* data/data_end are 32-bit offsets widened through long, per XDP ABI. */
void *data_end = (void *)(long)ctx->data_end;
void *data = (void *)(long)ctx->data;
struct ethhdr *eth = data;
/* Verifier-required bounds check: whole Ethernet header in packet. */
if ((void *)(eth + 1) > data_end)
return XDP_PASS;
/* h_proto is network byte order; compare against host-order ETH_P_IP. */
if (bpf_ntohs(eth->h_proto) != ETH_P_IP)
return XDP_PASS;
struct iphdr *ip = (void *)(eth + 1);
/* Bounds check again before touching the IPv4 header. */
if ((void *)(ip + 1) > data_end)
return XDP_PASS;
if (ip->protocol == IPPROTO_ICMP)
return XDP_DROP;
return XDP_PASS;
}
char LICENSE[] SEC("license") = "GPL";

# Attach XDP program to interface
ip link set dev eth0 xdp obj xdp_drop_icmp.bpf.o sec xdp
# Remove
ip link set dev eth0 xdp off
# Use native (driver) mode for best performance
ip link set dev eth0 xdp obj prog.bpf.o sec xdp mode native

XDP return codes: XDP_PASS, XDP_DROP, XDP_TX, XDP_REDIRECT

// Use BTF-based field access (CO-RE aware)
#include <vmlinux.h> // generated from running kernel's BTF
#include <bpf/bpf_core_read.h>
/* kprobe on tcp_connect(): log the destination port of each outgoing
 * TCP connection (output readable via the tracing trace_pipe). */
SEC("kprobe/tcp_connect")
int trace_connect(struct pt_regs *ctx)
{
/* First argument of tcp_connect() is the struct sock * being connected. */
struct sock *sk = (struct sock *)PT_REGS_PARM1(ctx);
/* skc_dport is stored in network byte order, hence bpf_ntohs below. */
u16 dport = BPF_CORE_READ(sk, __sk_common.skc_dport);
// BPF_CORE_READ relocates the field offset at load time
bpf_printk("connect to port %d\n", bpf_ntohs(dport));
return 0;
}# Generate vmlinux.h from running kernel
bpftool btf dump file /sys/kernel/btf/vmlinux format c > vmlinux.h
# Verify BTF is enabled
ls /sys/kernel/btf/vmlinux

Related skills:
- skills/observability/ebpf-rust
- skills/profilers/linux-perf
- skills/runtimes/binary-hardening
- skills/low-level-programming/linux-kernel-modules