udevstats: add new package

This package uses eBPF to do traffic accounting on the WAN port.

Fixes: WIFI-12183
Signed-off-by: John Crispin <john@phrozen.org>
commit 6129f525d5 (parent 2ec381534e)
John Crispin <john@phrozen.org>, 2023-01-23 09:49:13 +01:00
12 changed files with 1595 additions and 2 deletions

File: ucentral-schema/Makefile

@@ -4,10 +4,10 @@ PKG_NAME:=ucentral-schema
PKG_RELEASE:=1
PKG_SOURCE_URL=https://github.com/Telecominfraproject/wlan-ucentral-schema.git
-PKG_MIRROR_HASH:=c4895cd1a46c7a4ffed5c6519c037a291991c70f1339c5e7dec1f0829390f036
+PKG_MIRROR_HASH:=bdb662ee4a4e6ac3bb00c14c9ebcc9a7ab9e7153a8ff3dad8f44edd6f1806f86
PKG_SOURCE_PROTO:=git
PKG_SOURCE_DATE:=2022-05-29
-PKG_SOURCE_VERSION:=47b470f1221b57766b862edc7e4b875d1ae94b2a
+PKG_SOURCE_VERSION:=bb84cc80cc4d63fe3f1e8669086544ea8fb98b37
PKG_MAINTAINER:=John Crispin <john@phrozen.org>
PKG_LICENSE:=BSD-3-Clause

File: ucode-mod-bpf/Makefile (new)

@@ -0,0 +1,40 @@
include $(TOPDIR)/rules.mk
PKG_NAME:=ucode-mod-bpf
PKG_RELEASE:=1
PKG_LICENSE:=ISC
PKG_MAINTAINER:=Felix Fietkau <nbd@nbd.name>
include $(INCLUDE_DIR)/package.mk
include $(INCLUDE_DIR)/nls.mk
define Package/ucode-mod-bpf
SECTION:=utils
CATEGORY:=Utilities
TITLE:=ucode eBPF module
DEPENDS:=+libucode +libbpf
endef
define Package/ucode-mod-bpf/description
The bpf plugin provides functionality for loading and interacting with
eBPF modules. It can load full modules as well as pinned maps/programs,
and supports reading, writing and iterating maps and attaching programs
as tc classifiers.
endef
define Package/ucode-mod-bpf/install
$(INSTALL_DIR) $(1)/usr/lib/ucode
$(CP) $(PKG_BUILD_DIR)/bpf.so $(1)/usr/lib/ucode/
endef
define Build/Configure
endef
define Build/Compile
$(TARGET_CC) $(TARGET_CFLAGS) $(TARGET_LDFLAGS) $(FPIC) \
-Wall -ffunction-sections -Wl,--gc-sections -shared -Wl,--no-as-needed -lbpf \
-o $(PKG_BUILD_DIR)/bpf.so $(PKG_BUILD_DIR)/bpf.c
endef
$(eval $(call BuildPackage,ucode-mod-bpf))
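For illustration, a minimal ucode sketch of how a script might use this module once installed; the object path, program name and map name are placeholders, while the calls map to the functions exported by bpf.c below:

#!/usr/bin/ucode
'use strict';

let bpf = require("bpf");

// Load a compiled eBPF object file (path and names are hypothetical)
let mod = bpf.open_module("/lib/bpf/example.o", {
	"program-type": {
		example_prog: bpf.BPF_PROG_TYPE_SCHED_CLS
	}
});
assert(mod, `open_module failed: ${bpf.error()}`);

// Attach the classifier on eth0 ingress with priority 0x100
let prog = mod.get_program("example_prog");
prog.tc_attach("eth0", "ingress", 0x100);

// Look up a value in one of the object's maps; integer keys work for
// maps with a 4- or 8-byte key, otherwise pass a raw string of key_size bytes
let map = mod.get_map("example_map");
let val = map.get(0);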

File: bpf.c (ucode-mod-bpf, new)

@@ -0,0 +1,814 @@
#include <sys/resource.h>
#include <sys/types.h>
#include <sys/socket.h>
#include <net/if.h>
#include <stdint.h>
#include <stdio.h>
#include <errno.h>
#include <unistd.h>
#include <bpf/bpf.h>
#include <bpf/libbpf.h>
#include "ucode/module.h"
#define err_return_int(err, ...) do { set_error(err, __VA_ARGS__); return -1; } while(0)
#define err_return(err, ...) do { set_error(err, __VA_ARGS__); return NULL; } while(0)
#define TRUE ucv_boolean_new(true)
static uc_resource_type_t *module_type, *map_type, *map_iter_type, *program_type;
static uc_value_t *registry;
static uc_vm_t *debug_vm;
static struct {
int code;
char *msg;
} last_error;
struct uc_bpf_fd {
int fd;
bool close;
};
struct uc_bpf_map {
struct uc_bpf_fd fd; /* must be first */
unsigned int key_size, val_size;
};
struct uc_bpf_map_iter {
int fd;
unsigned int key_size;
bool has_next;
uint8_t key[];
};
__attribute__((format(printf, 2, 3))) static void
set_error(int errcode, const char *fmt, ...)
{
va_list ap;
free(last_error.msg);
last_error.code = errcode;
last_error.msg = NULL;
if (fmt) {
va_start(ap, fmt);
xvasprintf(&last_error.msg, fmt, ap);
va_end(ap);
}
}
static void init_env(void)
{
static bool init_done = false;
struct rlimit limit = {
.rlim_cur = RLIM_INFINITY,
.rlim_max = RLIM_INFINITY,
};
if (init_done)
return;
setrlimit(RLIMIT_MEMLOCK, &limit);
init_done = true;
}
static uc_value_t *
uc_bpf_error(uc_vm_t *vm, size_t nargs)
{
uc_value_t *numeric = uc_fn_arg(0);
const char *msg = last_error.msg;
int code = last_error.code;
uc_stringbuf_t *buf;
const char *s;
if (last_error.code == 0)
return NULL;
set_error(0, NULL);
if (ucv_is_truish(numeric))
return ucv_int64_new(code);
buf = ucv_stringbuf_new();
if (code < 0 && msg) {
ucv_stringbuf_addstr(buf, msg, strlen(msg));
} else {
s = strerror(code);
ucv_stringbuf_addstr(buf, s, strlen(s));
if (msg)
ucv_stringbuf_printf(buf, ": %s", msg);
}
return ucv_stringbuf_finish(buf);
}
static int
uc_bpf_module_set_opts(struct bpf_object *obj, uc_value_t *opts)
{
uc_value_t *val;
if (!opts)
return 0;
if (ucv_type(opts) != UC_OBJECT)
err_return_int(EINVAL, "options argument");
if ((val = ucv_object_get(opts, "rodata", NULL)) != NULL) {
struct bpf_map *map = NULL;
if (ucv_type(val) != UC_STRING)
err_return_int(EINVAL, "rodata type");
while ((map = bpf_object__next_map(obj, map)) != NULL) {
if (!strstr(bpf_map__name(map), ".rodata"))
continue;
break;
}
if (!map)
err_return_int(errno, "rodata map");
if (bpf_map__set_initial_value(map, ucv_string_get(val),
ucv_string_length(val)))
err_return_int(errno, "rodata");
}
if ((val = ucv_object_get(opts, "program-type", NULL)) != NULL) {
if (ucv_type(val) != UC_OBJECT)
err_return_int(EINVAL, "prog_types argument");
ucv_object_foreach(val, name, type) {
struct bpf_program *prog;
if (ucv_type(type) != UC_INTEGER)
err_return_int(EINVAL, "program %s type", name);
prog = bpf_object__find_program_by_name(obj, name);
if (!prog)
err_return_int(-1, "program %s not found", name);
bpf_program__set_type(prog, ucv_int64_get(type));
}
}
return 0;
}
static uc_value_t *
uc_bpf_open_module(uc_vm_t *vm, size_t nargs)
{
DECLARE_LIBBPF_OPTS(bpf_object_open_opts, bpf_opts);
uc_value_t *path = uc_fn_arg(0);
uc_value_t *opts = uc_fn_arg(1);
struct bpf_object *obj;
if (ucv_type(path) != UC_STRING)
err_return(EINVAL, "module path");
init_env();
obj = bpf_object__open_file(ucv_string_get(path), &bpf_opts);
if (libbpf_get_error(obj))
err_return(errno, NULL);
if (uc_bpf_module_set_opts(obj, opts)) {
bpf_object__close(obj);
return NULL;
}
if (bpf_object__load(obj)) {
bpf_object__close(obj);
err_return(errno, NULL);
}
return uc_resource_new(module_type, obj);
}
static uc_value_t *
uc_bpf_map_create(int fd, unsigned int key_size, unsigned int val_size, bool close)
{
struct uc_bpf_map *uc_map;
uc_map = xalloc(sizeof(*uc_map));
uc_map->fd.fd = fd;
uc_map->key_size = key_size;
uc_map->val_size = val_size;
uc_map->fd.close = close;
return uc_resource_new(map_type, uc_map);
}
static uc_value_t *
uc_bpf_open_map(uc_vm_t *vm, size_t nargs)
{
struct bpf_map_info info;
uc_value_t *path = uc_fn_arg(0);
__u32 len = sizeof(info);
int err;
int fd;
if (ucv_type(path) != UC_STRING)
err_return(EINVAL, "module path");
fd = bpf_obj_get(ucv_string_get(path));
if (fd < 0)
err_return(errno, NULL);
err = bpf_obj_get_info_by_fd(fd, &info, &len);
if (err) {
close(fd);
err_return(errno, NULL);
}
return uc_bpf_map_create(fd, info.key_size, info.value_size, true);
}
static uc_value_t *
uc_bpf_open_program(uc_vm_t *vm, size_t nargs)
{
uc_value_t *path = uc_fn_arg(0);
struct uc_bpf_fd *f;
int fd;
if (ucv_type(path) != UC_STRING)
err_return(EINVAL, "module path");
fd = bpf_obj_get(ucv_string_get(path));
if (fd < 0)
err_return(errno, NULL);
f = xalloc(sizeof(*f));
f->fd = fd;
f->close = true;
return uc_resource_new(program_type, f);
}
static uc_value_t *
uc_bpf_module_get_maps(uc_vm_t *vm, size_t nargs)
{
struct bpf_object *obj = uc_fn_thisval("bpf.module");
struct bpf_map *map = NULL;
uc_value_t *rv;
int i = 0;
if (!obj)
err_return(EINVAL, NULL);
rv = ucv_array_new(vm);
bpf_object__for_each_map(map, obj)
ucv_array_set(rv, i++, ucv_string_new(bpf_map__name(map)));
return rv;
}
static uc_value_t *
uc_bpf_module_get_map(uc_vm_t *vm, size_t nargs)
{
struct bpf_object *obj = uc_fn_thisval("bpf.module");
struct bpf_map *map;
uc_value_t *name = uc_fn_arg(0);
int fd;
if (!obj || ucv_type(name) != UC_STRING)
err_return(EINVAL, NULL);
map = bpf_object__find_map_by_name(obj, ucv_string_get(name));
if (!map)
err_return(errno, NULL);
fd = bpf_map__fd(map);
if (fd < 0)
err_return(EINVAL, NULL);
return uc_bpf_map_create(fd, bpf_map__key_size(map), bpf_map__value_size(map), false);
}
static uc_value_t *
uc_bpf_module_get_programs(uc_vm_t *vm, size_t nargs)
{
struct bpf_object *obj = uc_fn_thisval("bpf.module");
struct bpf_program *prog = NULL;
uc_value_t *rv;
int i = 0;
if (!obj)
err_return(EINVAL, NULL);
rv = ucv_array_new(vm);
bpf_object__for_each_program(prog, obj)
ucv_array_set(rv, i++, ucv_string_new(bpf_program__name(prog)));
return rv;
}
static uc_value_t *
uc_bpf_module_get_program(uc_vm_t *vm, size_t nargs)
{
struct bpf_object *obj = uc_fn_thisval("bpf.module");
struct bpf_program *prog;
uc_value_t *name = uc_fn_arg(0);
struct uc_bpf_fd *f;
int fd;
if (!obj || !name || ucv_type(name) != UC_STRING)
err_return(EINVAL, NULL);
prog = bpf_object__find_program_by_name(obj, ucv_string_get(name));
if (!prog)
err_return(errno, NULL);
fd = bpf_program__fd(prog);
if (fd < 0)
err_return(EINVAL, NULL);
f = xalloc(sizeof(*f));
f->fd = fd;
return uc_resource_new(program_type, f);
}
static void *
uc_bpf_map_arg(uc_value_t *val, const char *kind, unsigned int size)
{
static union {
uint32_t u32;
uint64_t u64;
} val_int;
switch (ucv_type(val)) {
case UC_INTEGER:
if (size == 4)
val_int.u32 = ucv_int64_get(val);
else if (size == 8)
val_int.u64 = ucv_int64_get(val);
else
break;
return &val_int;
case UC_STRING:
if (size != ucv_string_length(val))
break;
return ucv_string_get(val);
default:
err_return(EINVAL, "%s type", kind);
}
err_return(EINVAL, "%s size mismatch (expected: %d)", kind, size);
}
static uc_value_t *
uc_bpf_map_get(uc_vm_t *vm, size_t nargs)
{
struct uc_bpf_map *map = uc_fn_thisval("bpf.map");
uc_value_t *a_key = uc_fn_arg(0);
void *key, *val;
if (!map)
err_return(EINVAL, NULL);
key = uc_bpf_map_arg(a_key, "key", map->key_size);
if (!key)
return NULL;
val = alloca(map->val_size);
if (bpf_map_lookup_elem(map->fd.fd, key, val))
return NULL;
return ucv_string_new_length(val, map->val_size);
}
static uc_value_t *
uc_bpf_map_set(uc_vm_t *vm, size_t nargs)
{
struct uc_bpf_map *map = uc_fn_thisval("bpf.map");
uc_value_t *a_key = uc_fn_arg(0);
uc_value_t *a_val = uc_fn_arg(1);
uc_value_t *a_flags = uc_fn_arg(2);
uint64_t flags;
void *key, *val;
if (!map)
err_return(EINVAL, NULL);
key = uc_bpf_map_arg(a_key, "key", map->key_size);
if (!key)
return NULL;
val = uc_bpf_map_arg(a_val, "value", map->val_size);
if (!val)
return NULL;
if (!a_flags)
flags = BPF_ANY;
else if (ucv_type(a_flags) != UC_INTEGER)
err_return(EINVAL, "flags");
else
flags = ucv_int64_get(a_flags);
if (bpf_map_update_elem(map->fd.fd, key, val, flags))
return NULL;
return ucv_string_new_length(val, map->val_size);
}
static uc_value_t *
uc_bpf_map_delete(uc_vm_t *vm, size_t nargs)
{
struct uc_bpf_map *map = uc_fn_thisval("bpf.map");
uc_value_t *a_key = uc_fn_arg(0);
uc_value_t *a_return = uc_fn_arg(1);
void *key, *val = NULL;
int ret;
if (!map)
err_return(EINVAL, NULL);
key = uc_bpf_map_arg(a_key, "key", map->key_size);
if (!key)
return NULL;
if (!ucv_is_truish(a_return)) {
ret = bpf_map_delete_elem(map->fd.fd, key);
return ucv_boolean_new(ret == 0);
}
val = alloca(map->val_size);
if (bpf_map_lookup_and_delete_elem(map->fd.fd, key, val))
return NULL;
return ucv_string_new_length(val, map->val_size);
}
static uc_value_t *
uc_bpf_map_delete_all(uc_vm_t *vm, size_t nargs)
{
struct uc_bpf_map *map = uc_fn_thisval("bpf.map");
uc_value_t *filter = uc_fn_arg(0);
bool has_next;
void *key, *next;
if (!map)
err_return(EINVAL, NULL);
key = alloca(map->key_size);
next = alloca(map->key_size);
has_next = !bpf_map_get_next_key(map->fd.fd, NULL, next);
while (has_next) {
bool skip = false;
memcpy(key, next, map->key_size);
has_next = !bpf_map_get_next_key(map->fd.fd, next, next);
if (ucv_is_callable(filter)) {
uc_value_t *rv;
uc_value_push(ucv_get(filter));
uc_value_push(ucv_string_new_length((const char *)key, map->key_size));
if (uc_call(1) != EXCEPTION_NONE)
break;
rv = uc_vm_stack_pop(vm);
if (!rv)
break;
skip = !ucv_is_truish(rv);
ucv_put(rv);
}
if (!skip)
bpf_map_delete_elem(map->fd.fd, key);
}
return TRUE;
}
static uc_value_t *
uc_bpf_map_iterator(uc_vm_t *vm, size_t nargs)
{
struct uc_bpf_map *map = uc_fn_thisval("bpf.map");
struct uc_bpf_map_iter *iter;
if (!map)
err_return(EINVAL, NULL);
iter = xalloc(sizeof(*iter) + map->key_size);
iter->fd = map->fd.fd;
iter->key_size = map->key_size;
iter->has_next = !bpf_map_get_next_key(iter->fd, NULL, &iter->key);
return uc_resource_new(map_iter_type, iter);
}
static uc_value_t *
uc_bpf_map_iter_next(uc_vm_t *vm, size_t nargs)
{
struct uc_bpf_map_iter *iter = uc_fn_thisval("bpf.map_iter");
uc_value_t *rv;
if (!iter->has_next)
return NULL;
rv = ucv_string_new_length((const char *)iter->key, iter->key_size);
iter->has_next = !bpf_map_get_next_key(iter->fd, &iter->key, &iter->key);
return rv;
}
static uc_value_t *
uc_bpf_map_iter_next_int(uc_vm_t *vm, size_t nargs)
{
struct uc_bpf_map_iter *iter = uc_fn_thisval("bpf.map_iter");
uint64_t intval;
uc_value_t *rv;
if (!iter->has_next)
return NULL;
if (iter->key_size == 4)
intval = *(uint32_t *)iter->key;
else if (iter->key_size == 8)
intval = *(uint64_t *)iter->key;
else
return NULL;
rv = ucv_int64_new(intval);
iter->has_next = !bpf_map_get_next_key(iter->fd, &iter->key, &iter->key);
return rv;
}
static uc_value_t *
uc_bpf_map_foreach(uc_vm_t *vm, size_t nargs)
{
struct uc_bpf_map *map = uc_fn_thisval("bpf.map");
uc_value_t *func = uc_fn_arg(0);
bool has_next;
void *key, *next;
bool ret = false;
key = alloca(map->key_size);
next = alloca(map->key_size);
has_next = !bpf_map_get_next_key(map->fd.fd, NULL, next);
while (has_next) {
uc_value_t *rv;
bool stop;
memcpy(key, next, map->key_size);
has_next = !bpf_map_get_next_key(map->fd.fd, next, next);
uc_value_push(ucv_get(func));
uc_value_push(ucv_string_new_length((const char *)key, map->key_size));
if (uc_call(1) != EXCEPTION_NONE)
break;
rv = uc_vm_stack_pop(vm);
stop = (ucv_type(rv) == UC_BOOLEAN && !ucv_boolean_get(rv));
ucv_put(rv);
if (stop)
break;
ret = true;
}
return ucv_boolean_new(ret);
}
static uc_value_t *
uc_bpf_obj_pin(uc_vm_t *vm, size_t nargs, const char *type)
{
struct uc_bpf_fd *f = uc_fn_thisval(type);
uc_value_t *path = uc_fn_arg(0);
if (ucv_type(path) != UC_STRING)
err_return(EINVAL, NULL);
if (bpf_obj_pin(f->fd, ucv_string_get(path)))
err_return(errno, NULL);
return TRUE;
}
static uc_value_t *
uc_bpf_program_pin(uc_vm_t *vm, size_t nargs)
{
return uc_bpf_obj_pin(vm, nargs, "bpf.program");
}
static uc_value_t *
uc_bpf_map_pin(uc_vm_t *vm, size_t nargs)
{
return uc_bpf_obj_pin(vm, nargs, "bpf.map");
}
static uc_value_t *
uc_bpf_set_tc_hook(uc_value_t *ifname, uc_value_t *type, uc_value_t *prio,
int fd)
{
DECLARE_LIBBPF_OPTS(bpf_tc_hook, hook);
DECLARE_LIBBPF_OPTS(bpf_tc_opts, attach_tc,
.handle = 1);
const char *type_str;
uint64_t prio_val;
if (ucv_type(ifname) != UC_STRING || ucv_type(type) != UC_STRING ||
ucv_type(prio) != UC_INTEGER)
err_return(EINVAL, NULL);
prio_val = ucv_int64_get(prio);
if (prio_val > 0xffff)
err_return(EINVAL, NULL);
type_str = ucv_string_get(type);
if (!strcmp(type_str, "ingress"))
hook.attach_point = BPF_TC_INGRESS;
else if (!strcmp(type_str, "egress"))
hook.attach_point = BPF_TC_EGRESS;
else
err_return(EINVAL, NULL);
hook.ifindex = if_nametoindex(ucv_string_get(ifname));
if (!hook.ifindex)
goto error;
bpf_tc_hook_create(&hook);
attach_tc.priority = prio_val;
if (bpf_tc_detach(&hook, &attach_tc) < 0 && fd < 0)
goto error;
if (fd < 0)
goto out;
attach_tc.prog_fd = fd;
if (bpf_tc_attach(&hook, &attach_tc) < 0)
goto error;
out:
return TRUE;
error:
if (fd >= 0)
err_return(ENOENT, NULL);
return NULL;
}
static uc_value_t *
uc_bpf_program_tc_attach(uc_vm_t *vm, size_t nargs)
{
struct uc_bpf_fd *f = uc_fn_thisval("bpf.program");
uc_value_t *ifname = uc_fn_arg(0);
uc_value_t *type = uc_fn_arg(1);
uc_value_t *prio = uc_fn_arg(2);
if (!f)
err_return(EINVAL, NULL);
return uc_bpf_set_tc_hook(ifname, type, prio, f->fd);
}
static uc_value_t *
uc_bpf_tc_detach(uc_vm_t *vm, size_t nargs)
{
uc_value_t *ifname = uc_fn_arg(0);
uc_value_t *type = uc_fn_arg(1);
uc_value_t *prio = uc_fn_arg(2);
return uc_bpf_set_tc_hook(ifname, type, prio, -1);
}
static int
uc_bpf_debug_print(enum libbpf_print_level level, const char *format,
va_list args)
{
char buf[256], *str = NULL;
uc_value_t *val;
va_list ap;
int size;
va_copy(ap, args);
size = vsnprintf(buf, sizeof(buf), format, ap);
va_end(ap);
if (size > 0 && (unsigned long)size < ARRAY_SIZE(buf) - 1) {
val = ucv_string_new(buf);
goto out;
}
if (vasprintf(&str, format, args) < 0)
return 0;
val = ucv_string_new(str);
free(str);
out:
uc_vm_stack_push(debug_vm, ucv_get(ucv_array_get(registry, 0)));
uc_vm_stack_push(debug_vm, ucv_int64_new(level));
uc_vm_stack_push(debug_vm, val);
if (uc_vm_call(debug_vm, false, 2) == EXCEPTION_NONE)
ucv_put(uc_vm_stack_pop(debug_vm));
return 0;
}
static uc_value_t *
uc_bpf_set_debug_handler(uc_vm_t *vm, size_t nargs)
{
uc_value_t *handler = uc_fn_arg(0);
if (handler && !ucv_is_callable(handler))
err_return(EINVAL, NULL);
debug_vm = vm;
libbpf_set_print(handler ? uc_bpf_debug_print : NULL);
ucv_array_set(registry, 0, ucv_get(handler));
return NULL;
}
static void
register_constants(uc_vm_t *vm, uc_value_t *scope)
{
#define ADD_CONST(x) ucv_object_add(scope, #x, ucv_int64_new(x))
ADD_CONST(BPF_PROG_TYPE_SCHED_CLS);
ADD_CONST(BPF_PROG_TYPE_SCHED_ACT);
ADD_CONST(BPF_ANY);
ADD_CONST(BPF_NOEXIST);
ADD_CONST(BPF_EXIST);
ADD_CONST(BPF_F_LOCK);
}
static const uc_function_list_t module_fns[] = {
{ "get_map", uc_bpf_module_get_map },
{ "get_maps", uc_bpf_module_get_maps },
{ "get_programs", uc_bpf_module_get_programs },
{ "get_program", uc_bpf_module_get_program },
};
static void module_free(void *ptr)
{
struct bpf_object *obj = ptr;
bpf_object__close(obj);
}
static const uc_function_list_t map_fns[] = {
{ "pin", uc_bpf_map_pin },
{ "get", uc_bpf_map_get },
{ "set", uc_bpf_map_set },
{ "delete", uc_bpf_map_delete },
{ "delete_all", uc_bpf_map_delete_all },
{ "foreach", uc_bpf_map_foreach },
{ "iterator", uc_bpf_map_iterator },
};
static void uc_bpf_fd_free(void *ptr)
{
struct uc_bpf_fd *f = ptr;
if (f->close)
close(f->fd);
free(f);
}
static const uc_function_list_t map_iter_fns[] = {
{ "next", uc_bpf_map_iter_next },
{ "next_int", uc_bpf_map_iter_next_int },
};
static const uc_function_list_t prog_fns[] = {
{ "pin", uc_bpf_program_pin },
{ "tc_attach", uc_bpf_program_tc_attach },
};
static const uc_function_list_t global_fns[] = {
{ "error", uc_bpf_error },
{ "set_debug_handler", uc_bpf_set_debug_handler },
{ "open_module", uc_bpf_open_module },
{ "open_map", uc_bpf_open_map },
{ "open_program", uc_bpf_open_program },
{ "tc_detach", uc_bpf_tc_detach },
};
void uc_module_init(uc_vm_t *vm, uc_value_t *scope)
{
uc_function_list_register(scope, global_fns);
register_constants(vm, scope);
registry = ucv_array_new(vm);
uc_vm_registry_set(vm, "bpf.registry", registry);
module_type = uc_type_declare(vm, "bpf.module", module_fns, module_free);
map_type = uc_type_declare(vm, "bpf.map", map_fns, uc_bpf_fd_free);
map_iter_type = uc_type_declare(vm, "bpf.map_iter", map_iter_fns, free);
program_type = uc_type_declare(vm, "bpf.program", prog_fns, uc_bpf_fd_free);
}
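A second sketch, this time for a map that was pinned to bpffs; the pin path is a placeholder, and iterator(), next_int() and delete_all() are the map methods registered above:

let bpf = require("bpf");

// Open a pinned map (hypothetical path)
let map = bpf.open_map("/sys/fs/bpf/example_map");
assert(map, `open_map failed: ${bpf.error()}`);

// Walk all keys; next_int() only works for 4- or 8-byte keys,
// next() returns each key as a raw string instead
let it = map.iterator();
for (let key = it.next_int(); key != null; key = it.next_int())
	print(`key ${key}\n`);

// Visit every key and delete the entries for which the callback
// returns a truthy value
map.delete_all(function(raw_key) {
	return true;
});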

File: patch against ucode (include: add uc_fn_thisval(), new)

@@ -0,0 +1,102 @@
From c0e413c21f7b114a70282041a0049196869dd15f Mon Sep 17 00:00:00 2001
From: Felix Fietkau <nbd@nbd.name>
Date: Wed, 4 Jan 2023 14:12:43 +0100
Subject: [PATCH] include: add uc_fn_thisval()
Can be used to get rid of a layer of pointer indirection in resource type
handlers.
Signed-off-by: Felix Fietkau <nbd@nbd.name>
---
include/ucode/lib.h | 14 +++++++++++++-
include/ucode/types.h | 1 +
types.c | 22 +++++++++++++++++++---
3 files changed, 33 insertions(+), 4 deletions(-)
diff --git a/include/ucode/lib.h b/include/ucode/lib.h
index 0709702..74d8866 100644
--- a/include/ucode/lib.h
+++ b/include/ucode/lib.h
@@ -38,14 +38,26 @@ __hidden void uc_error_message_indent(char **msg);
__hidden uc_value_t *uc_require_library(uc_vm_t *vm, uc_value_t *nameval, bool so_only);
/* vm helper */
+static inline uc_value_t *
+_uc_fn_this_res(uc_vm_t *vm)
+{
+ return vm->callframes.entries[vm->callframes.count - 1].ctx;
+}
static inline void *
_uc_fn_this(uc_vm_t *vm, const char *expected_type)
{
- return ucv_resource_dataptr(vm->callframes.entries[vm->callframes.count - 1].ctx, expected_type);
+ return ucv_resource_dataptr(_uc_fn_this_res(vm), expected_type);
+}
+
+static inline void *
+_uc_fn_thisval(uc_vm_t *vm, const char *expected_type)
+{
+ return ucv_resource_data(_uc_fn_this_res(vm), expected_type);
}
#define uc_fn_this(...) _uc_fn_this(vm, __VA_ARGS__)
+#define uc_fn_thisval(...) _uc_fn_thisval(vm, __VA_ARGS__)
static inline uc_value_t *
_uc_fn_arg(uc_vm_t *vm, size_t nargs, size_t n)
diff --git a/include/ucode/types.h b/include/ucode/types.h
index bae2dd5..22fe9a9 100644
--- a/include/ucode/types.h
+++ b/include/ucode/types.h
@@ -392,6 +392,7 @@ uc_resource_type_t *ucv_resource_type_add(uc_vm_t *, const char *, uc_value_t *,
uc_resource_type_t *ucv_resource_type_lookup(uc_vm_t *, const char *);
uc_value_t *ucv_resource_new(uc_resource_type_t *, void *);
+void *ucv_resource_data(uc_value_t *uv, const char *);
void **ucv_resource_dataptr(uc_value_t *, const char *);
uc_value_t *ucv_regexp_new(const char *, bool, bool, bool, char **);
diff --git a/types.c b/types.c
index 8a7986b..cde2221 100644
--- a/types.c
+++ b/types.c
@@ -1096,8 +1096,8 @@ ucv_resource_new(uc_resource_type_t *type, void *data)
return &res->header;
}
-void **
-ucv_resource_dataptr(uc_value_t *uv, const char *name)
+static uc_resource_t *
+ucv_resource_check(uc_value_t *uv, const char *name)
{
uc_resource_t *res = (uc_resource_t *)uv;
@@ -1109,7 +1109,23 @@ ucv_resource_dataptr(uc_value_t *uv, const char *name)
return NULL;
}
- return &res->data;
+ return res;
+}
+
+void *
+ucv_resource_data(uc_value_t *uv, const char *name)
+{
+ uc_resource_t *res = ucv_resource_check(uv, name);
+
+ return res ? res->data : NULL;
+}
+
+void **
+ucv_resource_dataptr(uc_value_t *uv, const char *name)
+{
+ uc_resource_t *res = ucv_resource_check(uv, name);
+
+ return res ? &res->data : NULL;
}
--
2.34.1

File: udevstats/Makefile (new)

@@ -0,0 +1,51 @@
#
# Copyright (C) 2021 OpenWrt.org
#
# This is free software, licensed under the GNU General Public License v2.
# See /LICENSE for more information.
#
include $(TOPDIR)/rules.mk
include $(INCLUDE_DIR)/kernel.mk
PKG_NAME:=udevstats
PKG_VERSION:=1
PKG_LICENSE:=GPL-2.0
PKG_MAINTAINER:=Felix Fietkau <nbd@nbd.name>
PKG_BUILD_DEPENDS:=bpf-headers
include $(INCLUDE_DIR)/package.mk
include $(INCLUDE_DIR)/bpf.mk
define Package/udevstats
SECTION:=utils
CATEGORY:=Utilities
TITLE:=Device statistics module
DEPENDS:=+ucode +ucode-mod-ubus +ucode-mod-bpf +kmod-sched-bpf +ucode-mod-struct
endef
define Build/Compile
$(call CompileBPF,$(PKG_BUILD_DIR)/udevstats-bpf.c)
endef
define Package/udevstats/conffiles
/etc/config/udevstats
endef
define Package/udevstats/install
$(INSTALL_DIR) \
$(1)/etc/hotplug.d/net \
$(1)/etc/init.d \
$(1)/etc/config \
$(1)/lib/bpf \
$(1)/usr/sbin
$(INSTALL_DATA) $(PKG_BUILD_DIR)/udevstats-bpf.o $(1)/lib/bpf/udevstats.o
$(INSTALL_BIN) ./files/udevstats.init $(1)/etc/init.d/udevstats
$(INSTALL_BIN) ./files/udevstats.uc $(1)/usr/sbin/udevstats
$(INSTALL_DATA) ./files/udevstats.conf $(1)/etc/config/udevstats
$(INSTALL_DATA) ./files/udevstats.hotplug $(1)/etc/hotplug.d/net/10-udevstats
endef
$(eval $(call BuildPackage,udevstats))

File: udevstats/files/udevstats.conf (new)

@@ -0,0 +1,6 @@
config device
option disabled 1
# option name sw0
# list vlan 0
# list vlan 1
# list vlan 2

File: udevstats/files/udevstats.hotplug (new)

@@ -0,0 +1,2 @@
#!/bin/sh
ubus call udevstats check_devices

File: udevstats/files/udevstats.init (new)

@@ -0,0 +1,62 @@
#!/bin/sh /etc/rc.common
# Copyright (c) 2021 OpenWrt.org
START=18
USE_PROCD=1
PROG=/usr/sbin/udevstats
add_vlan() {
local vid="$1"
json_add_array
json_add_int "" "$vid"
[ "$rx" -gt 0 ] && json_add_string "" "rx"
[ "$tx" -gt 0 ] && json_add_string "" "tx"
json_close_array
}
add_device() {
local cfg="$1"
config_get_bool disabled "$cfg" disabled 0
[ "$disabled" -gt 0 ] && return
config_get name "$cfg" name
json_add_array "$name"
config_get_bool rx "$cfg" rx 1
config_get_bool tx "$cfg" tx 1
config_list_foreach "$cfg" vlan add_vlan
json_close_array
}
reload_service() {
json_init
config_load udevstats
json_add_object devices
config_foreach add_device device
json_close_object
ubus call udevstats config_set "$(json_dump)"
}
service_triggers() {
procd_add_reload_trigger udevstats
}
start_service() {
procd_open_instance
procd_set_param command "$PROG"
procd_set_param respawn
procd_close_instance
}
service_started() {
ubus -t 10 wait_for udevstats
[ $? = 0 ] && reload_service
}
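For reference, the reload_service() handler above boils /etc/config/udevstats down to a single config_set call. A hedged ucode sketch of the equivalent invocation; the device name and VLAN IDs are made up, and it assumes the ucode ubus binding's connect()/call() methods:

let ubus = require("ubus");
let conn = ubus.connect();

// Each device maps to an array of VLAN entries: the VLAN id followed
// by any of "rx" / "tx" to enable accounting in that direction
conn.call("udevstats", "config_set", {
	devices: {
		"sw0": [
			[ 0, "rx", "tx" ],
			[ 1, "rx" ]
		]
	}
});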

File: udevstats/files/udevstats.uc (new)

@@ -0,0 +1,249 @@
#!/usr/bin/ucode
'use strict';
let bpf = require("bpf");
let struct = require("struct");
let fs = require("fs");
let ubus = require("ubus");
let uloop = require("uloop");
let PRIO_VAL = 0x200;
bpf.set_debug_handler(function(level, msg) { print(`[${level}] ${msg}`); });
let bpf_mod = bpf.open_module("/lib/bpf/udevstats.o", {
"program-type": {
udevstats_in: bpf.BPF_PROG_TYPE_SCHED_CLS,
udevstats_out: bpf.BPF_PROG_TYPE_SCHED_CLS
}
});
assert(bpf_mod, `Could not load BPF module: ${bpf.error()}`);
bpf.set_debug_handler(null);
let map = bpf_mod.get_map("vlans");
assert(map, `Could not find vlan map in BPF module`);
let prog = {
ingress: bpf_mod.get_program("udevstats_in"),
egress: bpf_mod.get_program("udevstats_out")
};
assert(prog.ingress && prog.egress, "Missing BPF program");
function device_list_init() {
return {
ingress: {},
egress: {}
};
}
let old_hooks = device_list_init();
let hooks = device_list_init();
let old_vlans = {};
let vlans = {};
let vlan_config;
function device_update_start() {
old_hooks = hooks;
hooks = device_list_init();
}
function dev_ifindex(name) {
try {
return int(fs.readfile(`/sys/class/net/${name}/ifindex`));
} catch (e) {
return 0
}
}
function device_hook_get(name, tx) {
let ifindex;
let dev;
let type = tx ? "ingress" : "egress";
dev = hooks[type][name];
if (dev)
return dev;
dev = old_hooks[type][name];
if (dev) {
delete old_hooks[type][name];
} else {
dev = {
vlans: {}
};
}
ifindex = dev_ifindex(name);
if (!ifindex)
return null;
if (dev.ifindex != ifindex)
prog[type].tc_attach(name, type, PRIO_VAL);
dev.ifindex = ifindex;
dev.tx = tx;
hooks[type][name] = dev;
return dev;
}
function device_update_end() {
for (let type in [ "ingress", "egress" ])
for (let dev in old_hooks[type])
bpf.tc_detach(dev, type, PRIO_VAL);
old_hooks = device_list_init();
}
function vlan_update_start() {
old_vlans = vlans;
vlans = {};
device_update_start();
}
function vlan_update_end() {
device_update_end();
for (let key in old_vlans)
map.delete(b64dec(key));
}
function vlan_key(ifindex, vid, tx, ad)
{
return struct.pack("IH??", ifindex, vid, tx, ad);
}
function vlan_add(dev, vid, ad)
{
if (!dev.ifindex)
return;
let key = vlan_key(dev.ifindex, vid, dev.tx, ad);
let keystr = b64enc(key);
if (old_vlans[keystr])
delete old_vlans[keystr];
else
map.set(key, struct.pack("QQ", 0, 0));
vlans[keystr] = true;
}
function vlan_set_config(config)
{
vlan_config = config;
vlan_update_start();
for (let dev in config) {
for (let vlan in config[dev]) {
vlan = [...vlan];
let vid = shift(vlan);
for (let type in vlan) {
let tx;
if (type == "tx")
tx = true;
else if (type == "rx")
tx = false;
else
continue;
let hook = device_hook_get(dev, tx);
vlan_add(hook, vid, false);
}
}
}
vlan_update_end();
}
function vlan_dump_stats()
{
let stats = {};
for (let dev in vlan_config) {
stats[dev] = [];
let vlans = sort(vlan_config[dev], (a, b) => a[0] - b[0]);
for (let vlan in vlans) {
let vlan_stats = {
vid: vlan[0]
};
for (let tx in [ true, false ]) {
let hook = device_hook_get(dev, tx);
if (!hook.ifindex)
continue;
let stats = map.get(vlan_key(hook.ifindex, vlan[0], tx, false));
if (!stats)
continue;
stats = struct.unpack("QQ", stats);
vlan_stats[tx ? "tx" : "rx"] = {
packets: stats[0],
bytes: stats[1]
}
}
push(stats[dev], vlan_stats);
}
}
return stats;
}
function run_service() {
let uctx = ubus.connect();
uctx.publish("udevstats", {
config_set: {
call: function(req) {
if (!req.args.devices)
return ubus.STATUS_INVALID_ARGUMENT;
vlan_set_config(req.args.devices);
return 0;
},
args: {
"devices": {}
}
},
check_devices: {
call: function(req) {
if (vlan_config)
vlan_set_config(vlan_config);
return 0;
},
args: {}
},
reset: {
call: function(req) {
let old_config = vlan_config;
vlan_set_config({});
if (old_config)
vlan_set_config(old_config);
return 0;
},
args: {},
},
dump: {
call: function(req) {
return vlan_dump_stats();
},
args: {}
}
});
try {
uloop.run();
} catch(e) {
warn(`Error: ${e}\n${e.stacktrace[0].context}`);
}
vlan_set_config({});
}
uloop.init();
run_service();
uloop.done();
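To query the accounting data, the dump method can be called over ubus. A small sketch of reading the reply shape produced by vlan_dump_stats() above; it assumes the ucode ubus binding's connect()/call() methods:

let ubus = require("ubus");
let conn = ubus.connect();

// dump returns one array per configured device, with one entry per VLAN:
// { "<device>": [ { vid, rx: { packets, bytes }, tx: { packets, bytes } } ] }
// rx/tx may be missing when no stats are available for that direction
let stats = conn.call("udevstats", "dump");
for (let dev in stats)
	for (let vlan in stats[dev])
		printf("%s vlan %d: rx %d bytes, tx %d bytes\n", dev, vlan.vid,
		       vlan.rx ? vlan.rx.bytes : 0, vlan.tx ? vlan.tx.bytes : 0);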

File: bpf_skb_utils.h (udevstats, new)

@@ -0,0 +1,173 @@
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Copyright (C) 2022 Felix Fietkau <nbd@nbd.name>
*/
#ifndef __BPF_SKB_UTILS_H
#define __BPF_SKB_UTILS_H
#include <uapi/linux/bpf.h>
#include <uapi/linux/if_ether.h>
#include <uapi/linux/ip.h>
#include <uapi/linux/ipv6.h>
#include <linux/ip.h>
#include <net/ipv6.h>
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_endian.h>
struct skb_parser_info {
struct __sk_buff *skb;
__u32 offset;
int proto;
};
static __always_inline void *__skb_data(struct __sk_buff *skb)
{
return (void *)(long)READ_ONCE(skb->data);
}
static __always_inline void *
skb_ptr(struct __sk_buff *skb, __u32 offset, __u32 len)
{
void *ptr = __skb_data(skb) + offset;
void *end = (void *)(long)(skb->data_end);
if (ptr + len >= end)
return NULL;
return ptr;
}
static __always_inline void *
skb_info_ptr(struct skb_parser_info *info, __u32 len)
{
__u32 offset = info->offset;
return skb_ptr(info->skb, offset, len);
}
static __always_inline void
skb_parse_init(struct skb_parser_info *info, struct __sk_buff *skb)
{
*info = (struct skb_parser_info){
.skb = skb
};
}
static __always_inline struct ethhdr *
skb_parse_ethernet(struct skb_parser_info *info)
{
struct ethhdr *eth;
int len;
len = sizeof(*eth) + 2 * sizeof(struct vlan_hdr) + sizeof(struct ipv6hdr);
if (len > info->skb->len)
len = info->skb->len;
bpf_skb_pull_data(info->skb, len);
eth = skb_info_ptr(info, sizeof(*eth));
if (!eth)
return NULL;
info->proto = eth->h_proto;
info->offset += sizeof(*eth);
return eth;
}
static __always_inline struct vlan_hdr *
skb_parse_vlan(struct skb_parser_info *info)
{
struct vlan_hdr *vlh;
if (info->proto != bpf_htons(ETH_P_8021Q) &&
info->proto != bpf_htons(ETH_P_8021AD))
return NULL;
vlh = skb_info_ptr(info, sizeof(*vlh));
if (!vlh)
return NULL;
info->proto = vlh->h_vlan_encapsulated_proto;
info->offset += sizeof(*vlh);
return vlh;
}
static __always_inline struct iphdr *
skb_parse_ipv4(struct skb_parser_info *info, int min_l4_bytes)
{
struct iphdr *iph;
int proto, hdr_len;
__u32 pull_len;
if (info->proto != bpf_htons(ETH_P_IP))
return NULL;
iph = skb_info_ptr(info, sizeof(*iph));
if (!iph)
return NULL;
hdr_len = iph->ihl * 4;
if (hdr_len < sizeof(*iph))
return NULL;
pull_len = info->offset + hdr_len + min_l4_bytes;
if (pull_len > info->skb->len)
pull_len = info->skb->len;
if (bpf_skb_pull_data(info->skb, pull_len))
return NULL;
iph = skb_info_ptr(info, sizeof(*iph));
if (!iph)
return NULL;
info->proto = iph->protocol;
info->offset += hdr_len;
return iph;
}
static __always_inline struct ipv6hdr *
skb_parse_ipv6(struct skb_parser_info *info, int max_l4_bytes)
{
struct ipv6hdr *ip6h;
__u32 pull_len;
if (info->proto != bpf_htons(ETH_P_IPV6))
return NULL;
pull_len = info->offset + sizeof(*ip6h) + max_l4_bytes;
if (pull_len > info->skb->len)
pull_len = info->skb->len;
if (bpf_skb_pull_data(info->skb, pull_len))
return NULL;
ip6h = skb_info_ptr(info, sizeof(*ip6h));
if (!ip6h)
return NULL;
info->proto = READ_ONCE(ip6h->nexthdr);
info->offset += sizeof(*ip6h);
return ip6h;
}
static __always_inline struct tcphdr *
skb_parse_tcp(struct skb_parser_info *info)
{
struct tcphdr *tcph;
if (info->proto != IPPROTO_TCP)
return NULL;
tcph = skb_info_ptr(info, sizeof(*tcph));
if (!tcph)
return NULL;
info->offset += tcph->doff * 4;
return tcph;
}
#endif

File: udevstats-bpf.c (udevstats, new)

@@ -0,0 +1,74 @@
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Copyright (C) 2022 Felix Fietkau <nbd@nbd.name>
*/
#define KBUILD_MODNAME "udevstats"
#include <uapi/linux/bpf.h>
#include <uapi/linux/if_ether.h>
#include <uapi/linux/if_packet.h>
#include <uapi/linux/filter.h>
#include <uapi/linux/pkt_cls.h>
#include <linux/if_vlan.h>
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_endian.h>
#include "bpf_skb_utils.h"
#include "udevstats-bpf.h"
struct {
__uint(type, BPF_MAP_TYPE_HASH);
__uint(key_size, sizeof(struct udevstats_vlan_key));
__type(value, struct udevstats_vlan_stats);
__uint(max_entries, 1000);
__uint(map_flags, BPF_F_NO_PREALLOC);
} vlans SEC(".maps");
static inline int udevstats_handle_packet(struct __sk_buff *skb, int ifindex, bool tx)
{
struct udevstats_vlan_stats *stats;
struct udevstats_vlan_key key = {
.vlan_tx = tx,
.vlan_ifindex = ifindex,
};
struct skb_parser_info info;
struct vlan_hdr *vlan;
__be16 vlan_proto = 0;
skb_parse_init(&info, skb);
if (!skb_parse_ethernet(&info))
return TC_ACT_UNSPEC;
if (skb->vlan_present) {
key.vlan_id = skb->vlan_tci;
vlan_proto = skb->vlan_proto;
} else if ((vlan = skb_parse_vlan(&info)) != NULL) {
vlan_proto = info.proto;
key.vlan_id = bpf_ntohs(vlan->h_vlan_TCI);
}
key.vlan_id &= VLAN_VID_MASK;
key.vlan_is_ad = vlan_proto == bpf_htons(ETH_P_8021AD);
stats = bpf_map_lookup_elem(&vlans, &key);
if (!stats)
return TC_ACT_UNSPEC;
__sync_fetch_and_add(&stats->packets, 1);
__sync_fetch_and_add(&stats->bytes, skb->len);
return TC_ACT_UNSPEC;
}
SEC("tc/egress")
int udevstats_out(struct __sk_buff *skb)
{
return udevstats_handle_packet(skb, skb->ifindex, true);
}
SEC("tc/ingress")
int udevstats_in(struct __sk_buff *skb)
{
return udevstats_handle_packet(skb, skb->ingress_ifindex, false);
}
char _license[] SEC("license") = "GPL";

File: udevstats-bpf.h (udevstats, new)

@@ -0,0 +1,20 @@
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Copyright (C) 2022 Felix Fietkau <nbd@nbd.name>
*/
#ifndef __BPF_UDEVSTATS_H
#define __BPF_UDEVSTATS_H
struct udevstats_vlan_key {
uint32_t vlan_ifindex;
uint16_t vlan_id;
uint8_t vlan_tx;
uint8_t vlan_is_ad;
};
struct udevstats_vlan_stats {
uint64_t packets;
uint64_t bytes;
};
#endif
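These layouts are mirrored by the struct format strings used in udevstats.uc; a small sketch of the correspondence:

let struct = require("struct");

// struct udevstats_vlan_key: "I" = uint32 vlan_ifindex, "H" = uint16 vlan_id,
// "?" = one-byte vlan_tx / vlan_is_ad flags (8 bytes total, no padding)
function stats_key(ifindex, vid, tx, is_ad) {
	return struct.pack("IH??", ifindex, vid, tx, is_ad);
}

// struct udevstats_vlan_stats: two uint64 counters
function decode_stats(raw) {
	let v = struct.unpack("QQ", raw);
	return { packets: v[0], bytes: v[1] };
}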