diff --git a/src/probes/extends/ebpf.probe/src/stackprobe/Makefile b/src/probes/extends/ebpf.probe/src/stackprobe/Makefile new file mode 100644 index 0000000000000000000000000000000000000000..4e3eb36d89334f74d8a8b9c09a05aeb7e7c1d33b --- /dev/null +++ b/src/probes/extends/ebpf.probe/src/stackprobe/Makefile @@ -0,0 +1,43 @@ +include ../mk/var.mk +INCLUDES = $(BASE_INC) + +APP := stackprobe +ON_CPU_PROG := oncpu.bpf.o + +SRC_CPLUS := $(wildcard *.cpp) +SRC_CPLUS += $(CPLUSFILES) + +BPF_C := $(wildcard *.bpf.c) +DEPS := $(patsubst %.bpf.c, %.bpf.o, $(BPF_C)) +DEPS += $(patsubst %.cpp, %.o, $(SRC_CPLUS)) + +SRC_C := $(filter-out $(BPF_C), $(wildcard *.c)) +SRC_C += $(CFILES) + +.PHONY: all clean install + +all: pre deps app +pre: $(OUTPUT) +deps: $(DEPS) +# build bpf code +%.bpf.o: %.bpf.c + $(CLANG) $(CFLAGS) -target bpf $(INCLUDES) -c $(filter %.c,$^) -o $@ + $(LLVM_STRIP) -g $@ + +# build c++ files +%.o: %.cpp + $(C++) -c $^ $(CXXFLAGS) $(INCLUDES) -o $@ + +app: $(APP) +%: %.c $(SRC_C) + $(CC) $(CFLAGS) $(patsubst %.cpp, %.o, $(SRC_CPLUS)) $(INCLUDES) $^ $(LDFLAGS) $(LINK_TARGET) -o $@ + @echo $@ "compiling completed." +clean: + rm -rf $(DEPS) + rm -rf $(APP) + +install: + mkdir -p $(INSTALL_DIR) + cp $(APP) $(INSTALL_DIR) + cp $(ON_CPU_PROG) $(INSTALL_DIR) + diff --git a/src/probes/extends/ebpf.probe/src/stackprobe/debug_elf_reader.c b/src/probes/extends/ebpf.probe/src/stackprobe/debug_elf_reader.c new file mode 100644 index 0000000000000000000000000000000000000000..17d327d25a5d29dc96cc42b225b03a3b980cf8e9 --- /dev/null +++ b/src/probes/extends/ebpf.probe/src/stackprobe/debug_elf_reader.c @@ -0,0 +1,615 @@ +/****************************************************************************** + * Copyright (c) Huawei Technologies Co., Ltd. 2022. All rights reserved. + * gala-gopher licensed under the Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. 
+ * You may obtain a copy of Mulan PSL v2 at: + * http://license.coscl.org.cn/MulanPSL2 + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR FIT FOR A PARTICULAR + * PURPOSE. + * See the Mulan PSL v2 for more details. + * Author: luzhihao + * Create: 2022-08-22 + * Description: debug reader + ******************************************************************************/ +#define _GNU_SOURCE +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include + +#ifdef BPF_PROG_KERN +#undef BPF_PROG_KERN +#endif + +#ifdef BPF_PROG_USER +#undef BPF_PROG_USER +#endif + +#include "bpf.h" +#include "container.h" +#include "gopher_elf.h" +#include "debug_elf_reader.h" + +#if 0 +#define __STAT_INODE "/usr/bin/stat --format=%%i %s" +int get_inode(const char *file, u32 *inode) +{ + char command[COMMAND_LEN]; + char inode_s[INT_LEN]; + + if (access(file, 0) != 0) { + return -1; + } + + command[0] = 0; + inode_s[0] = 0; + (void)snprintf(command, COMMAND_LEN, __STAT_INODE, file); + + if (exec_cmd((const char *)command, inode_s, INT_LEN) < 0) { + return -1; + } + + *inode = (u32)atoi((const char *)inode_s); + return 0; +} +#endif + +#if 1 + +/* + +References https://sourceware.org/gdb/onlinedocs/gdb/Separate-Debug-Files.html + + So, for example, suppose you ask GDB to debug /usr/bin/ls, which has a debug link + that specifies the file ls.debug, and a build ID whose value in hex is abcdef1234. + If the list of the global debug directories includes /usr/lib/debug, then GDB will + look for the following debug information files, in the indicated order: + +- /usr/lib/debug/.build-id/ab/cdef1234.debug +- /usr/bin/ls.debug +- /usr/bin/.debug/ls.debug +- /usr/lib/debug/usr/bin/ls.debug. 
+ +*/ + +#define IS_SYSTEM_ROOT(dir) (!strcmp((dir), "/")) + +static int get_build_id_path(const char* pid_root, const char* dbg_dir, + const char* build_id, char buid_path[], size_t len) +{ + const char *p = build_id + 2; + const char* fmt = "%s%s/.build-id/%c%c/%s.debug"; + (void)snprintf(buid_path, len, fmt, + IS_SYSTEM_ROOT(pid_root) ? "" : pid_root, dbg_dir, build_id[0], build_id[1], p); + + if (access(buid_path, 0) == 0) { + return 0; + } + return -1; +} + +static int __get_path_by_full_path(const char* full_path, char dir[], size_t len) +{ + char *end; + + (void)strncpy(dir, full_path, len - 1); + end = strrchr(dir, '/'); + if (!end) { + return -1; + } + *(end + 1) = 0; + + return 0; +} + +static int get_debug_link_path(const char* pid_root, const char* dbg_dir, + const char* elf, const char* debug_link, char debug_link_path[], size_t len) +{ + size_t elf_dir_len; + char elf_dir[PATH_LEN]; + const char* fmt = "%s%s%s%s"; + + elf_dir[0] = 0; + if (__get_path_by_full_path(elf, elf_dir, PATH_LEN)) { + return -1; + } + + elf_dir_len = strlen(elf_dir); + if (elf_dir_len == 0 || (elf_dir[0] != '/') || (elf_dir[elf_dir_len - 1] != '/')) { + ERROR("[DEBUG_ELF]: Failed to get elf dir(%s).\n", elf); + return -1; + } + + debug_link_path[0] = 0; + + (void)snprintf(debug_link_path, len, fmt, + IS_SYSTEM_ROOT(pid_root) ? "" : pid_root, elf_dir, "", debug_link); + if (access(debug_link_path, 0) == 0) { + return 0; + } + + debug_link_path[0] = 0; + (void)snprintf(debug_link_path, len, fmt, + IS_SYSTEM_ROOT(pid_root) ? "" : pid_root, elf_dir, ".debug/", debug_link); + if (access(debug_link_path, 0) == 0) { + return 0; + } + + debug_link_path[0] = 0; + (void)snprintf(debug_link_path, len, fmt, + IS_SYSTEM_ROOT(pid_root) ? 
"" : pid_root, dbg_dir, elf_dir, debug_link); + if (access(debug_link_path, 0) == 0) { + return 0; + } + + return -1; +} + + +#define __GET_CONTAINER_ID_CMD "/usr/bin/cat /proc/%d/cpuset | awk -F '/' '{print $NF}'" +static int __get_container_id_by_pid(int pid, char container_id[], size_t len) +{ + char cmd[COMMAND_LEN]; + char buf[CONTAINER_ID_LEN]; + + if (len <= CONTAINER_ABBR_ID_LEN) { + return -1; + } + + buf[0] = 0; + cmd[0] = 0; + (void)snprintf(cmd, COMMAND_LEN, __GET_CONTAINER_ID_CMD, pid); + if (exec_cmd((const char *)cmd, buf, CONTAINER_ID_LEN)) { + return -1; + } + + if (strstr(buf, "No such file")) { + return -1; + } + (void)strncpy(container_id, buf, CONTAINER_ABBR_ID_LEN); + return 0; +} + +#define __GET_ROOT_PATH_CMD "/usr/bin/readlink /proc/%d/root" +static int __get_pid_root_path(int pid, char root_path[], size_t len) +{ + char cmd[COMMAND_LEN]; + + root_path[0] = 0; + cmd[0] = 0; + (void)snprintf(cmd, COMMAND_LEN, __GET_ROOT_PATH_CMD, pid); + if (exec_cmd((const char *)cmd, root_path, len)) { + return -1; + } + return 0; +} + +static int get_pid_root_path(int pid, char root_path[], size_t len) +{ + int ret, path_len; + char pid_root[PATH_LEN]; + char container_root[PATH_LEN]; + char container_id[CONTAINER_ABBR_ID_LEN + 1] = {0}; + + pid_root[0] = 0; + if ((ret = __get_pid_root_path(pid, pid_root, PATH_LEN)) && ret != 0) { + return ret; + } + + (void)__get_container_id_by_pid(pid, container_id, CONTAINER_ABBR_ID_LEN + 1); + if (container_id[0] == 0) { + if (IS_SYSTEM_ROOT(pid_root)) { + (void)strncpy(root_path, pid_root, len - 1); + } else { + /* Eliminate end '/' */ + path_len = strlen(pid_root); + if (pid_root[path_len - 1] == '/') { + pid_root[path_len - 1] = 0; + } + (void)strncpy(root_path, pid_root, len - 1); + } + return 0; + } + + container_root[0] = 0; + if ((ret = get_container_merged_path((const char *)container_id, container_root, PATH_LEN)) && ret != 0) { + return ret; + } + + (void)snprintf(root_path, len, "%s%s", container_root, 
IS_SYSTEM_ROOT(pid_root) ? "" : pid_root); + + /* Eliminate end '/' */ + path_len = strlen(root_path); + if (root_path[path_len - 1] == '/') { + root_path[path_len - 1] = 0; + } + return 0; +} + + +#endif + +#if 0 + +#define ELF_SYMBO_ERR_INDEX(elf_symbo, index) (((index) < 0) || (elf_symbo->symbs_count <= (index))) + +static int __search_addr_upper_bound(struct elf_symbo_s* elf_symbo, int bgn, int end, u64 target_addr) +{ + int left = bgn, right = end, mid = 0; + + if ((bgn >= end) || (bgn < 0) || (end < 0)) { + return -1; + } + + while (left < right) { + mid = (left + right) / 2; + if (mid >= elf_symbo->symbs_count) { + return -1; + } + if (target_addr >= elf_symbo->symbs[mid]->start) { + left = mid + 1; + } else { + right = mid - 1; + } + } + + if (ELF_SYMBO_ERR_INDEX(elf_symbo, right)) { + return -1; + } + return target_addr >= elf_symbo->symbs[right]->start ? (right + 1): right; +} + +static int search_symbs(u64 target_addr, char *comm, struct elf_symbo_s* elf_symbo, struct addr_symb_s* addr_symb) +{ + u64 range; + int search_index = __search_addr_upper_bound(elf_symbo, 0, elf_symbo->symbs_count, target_addr); + + // Take a step back. + search_index -= 1; + if (ELF_SYMBO_ERR_INDEX(elf_symbo, search_index)) { + return -1; + } + + range = elf_symbo->symbs[search_index]->start; + + while (!ELF_SYMBO_ERR_INDEX(elf_symbo, search_index) && target_addr >= elf_symbo->symbs[search_index]->start) { + if (target_addr < elf_symbo->symbs[search_index]->start + elf_symbo->symbs[search_index]->size) { + addr_symb->sym = elf_symbo->symbs[search_index]->symb_name; + addr_symb->offset = target_addr - elf_symbo->symbs[search_index]->start; + addr_symb->orign_addr = target_addr; + addr_symb->mod = comm; + return 0; + } + if (range > elf_symbo->symbs[search_index]->start + elf_symbo->symbs[search_index]->size) { + break; + } + // Take a step back. 
+ search_index -= 1; + } + + return -1; +} + +static int __inc_symbs_capability(struct elf_symbo_s* elf_symbo) +{ + u32 new_capa, old_capa; + struct symb_s** new_symbs_capa; + struct symb_s** old_symbs_capa; + + old_capa = elf_symbo->symbs_capability; + new_capa = elf_symbo->symbs_capability + SYMBS_STEP_COUNT; + if (new_capa >= SYMBS_MAX_COUNT) { + return -1; + } + + old_symbs_capa = elf_symbo->__symbs; + + new_symbs_capa = (struct symb_s **)malloc(new_capa * sizeof(struct symb_s *)); + if (!new_symbs_capa) { + return -1; + } + + (void)memset(new_symbs_capa, 0, new_capa * sizeof(struct symb_s *)); + if (old_capa > 0 && old_symbs_capa != NULL) { + (void)memcpy(new_symbs_capa, old_symbs_capa, old_capa * sizeof(struct symb_s *)); + } + if (old_symbs_capa != NULL) { + (void)free(old_symbs_capa); + old_symbs_capa = NULL; + } + elf_symbo->__symbs = new_symbs_capa; + elf_symbo->symbs_capability = new_capa; + return 0; +} + +static ELF_CB_RET __add_symbs(const char *symb, u64 addr_start, u64 size, void *ctx) +{ + struct elf_symbo_s* elf_symbo = ctx; + struct symb_s* new_symb; + + if (elf_symbo->symbs_count >= elf_symbo->symbs_capability) { + if (__inc_symbs_capability(elf_symbo)) { + ERROR("[DEBUG_ELF]: Too many symbos(%s).\n", elf_symbo->file); + return ELF_SYMB_CB_ERR; + } + } + + new_symb = (struct symb_s*)malloc(sizeof(struct symb_s)); + if (!new_symb) { + return ELF_SYMB_CB_ERR; + } + + (void)memset(new_symb, 0, sizeof(struct symb_s)); + new_symb->start = addr_start; + new_symb->size = size; + new_symb->symb_name = strdup(symb); + SPLIT_NEWLINE_SYMBOL(new_symb->symb_name); + + elf_symbo->symbs[elf_symbo->symbs_count++] = new_symb; + return ELF_SYMB_CB_OK; +} + +static int load_elf_symbol(struct elf_symbo_s* elf_symbo) +{ + if (!elf_symbo->file) { + return -1; + } + + if (!access(elf_symbo->file, 0)) { + return -1; + } + + return gopher_iter_elf_file_symb((const char *)(elf_symbo->file), __add_symbs, elf_symbo); +} + +static void __symb_destroy(struct symb_s *symb) +{ 
+ if (!symb) { + return; + } + + if (symb->symb_name) { + (void)free(symb->symb_name); + symb->symb_name = NULL; + } + return; +} + +static void destroy_elf_symbol(struct elf_symbo_s* elf_symbo) +{ + if (!elf_symbo) { + return; + } + + if (elf_symbo->file) { + (void)free(elf_symbo->file); + elf_symbo->file = NULL; + } + + for (int i = 0; i < elf_symbo->symbs_count; i++) { + __symb_destroy(elf_symbo->symbs[i]); + if (elf_symbo->symbs[i]) { + (void)free(elf_symbo->symbs[i]); + elf_symbo->symbs[i] = NULL; + } + } + if (elf_symbo->__symbs) { + (void)free(elf_symbo->__symbs); + elf_symbo->__symbs = NULL; + } + return; +} + +static struct elf_symbo_s* create_elf_symbol(struct elf_reader_s* reader, u32 inode) +{ + struct elf_symbo_s* elf_symbo = malloc(sizeof(struct elf_symbo_s)); + if (!elf_symbo) { + return NULL; + } + (void)memset(elf_symbo, 0, sizeof(struct elf_symbo_s)); + elf_symbo->i_inode = inode; + elf_symbo->refcnt += 1; + return elf_symbo; +} + +static struct elf_symbo_s* find_elf_symbol(struct elf_reader_s* reader, u32 inode) +{ + struct elf_symbo_s *item = NULL; + + H_FIND_I(reader->head, &inode, item); + return item; +} + +static int __symb_cmp(const void *a, const void *b) +{ + struct symb_s **symb1 = (struct symb_s **)a; + struct symb_s **symb2 = (struct symb_s **)b; + + return (*symb1)->start - (*symb2)->start; +} + +static int sort_elf_symbol(struct elf_symbo_s* elf_symbo) +{ + if (elf_symbo->symbs_count == 0) { + return 0; + } + qsort(elf_symbo->symbs, elf_symbo->symbs_count, sizeof(struct symb_s *), __symb_cmp); + return 0; +} + +#endif + +int get_elf_debug_file(struct elf_reader_s* reader, int pid, + const char* elf, const char* elf_link, char debug_file[], size_t len) +{ + int ret; + char debug_link[PATH_LEN]; + char debug_link_path[PATH_LEN]; + char build_id[PATH_LEN]; + char pid_root_path[PATH_LEN]; + char build_id_path[PATH_LEN * 2]; + + // step1: get pid root path. 
+ pid_root_path[0] = 0; + if ((ret = get_pid_root_path(pid, pid_root_path, PATH_LEN)) && ret != 0) { + return ret; + } + + // step2: get elf build-id + build_id[0] = 0; + (void)gopher_get_elf_build_id(elf_link, build_id, PATH_LEN); + if (build_id[0] != 0) { + // step3: get elf build-id path, if not exist, go on... + build_id_path[0] = 0; + ret = get_build_id_path((const char *)pid_root_path, (const char *)reader->global_dbg_dir, + (const char *)build_id, build_id_path, PATH_LEN * 2); + + if (ret == 0) { + (void)strncpy(debug_file, build_id_path, len - 1); + return 0; + } + } + + // step4: get elf debug-link + debug_link[0] = 0; + (void)gopher_get_elf_debug_link(elf_link, debug_link, PATH_LEN); + if (debug_link[0] != 0) { + debug_link_path[0] = 0; + // step5: get debug-link path, if not exist, go on... + ret = get_debug_link_path((const char *)pid_root_path, (const char *)reader->global_dbg_dir, + elf, (const char *)debug_link, debug_link_path, PATH_LEN); + if (ret == 0) { + (void)strncpy(debug_file, debug_link_path, len - 1); + return 0; + } + } + + return -1; +} + +struct elf_reader_s* create_elf_reader(const char *global_dbg_dir) +{ + struct elf_reader_s* reader = malloc(sizeof(struct elf_reader_s)); + if (!reader) { + return NULL; + } + + (void)memset(reader, 0, sizeof(struct elf_reader_s)); + (void)strncpy(reader->global_dbg_dir, global_dbg_dir, PATH_LEN - 1); + return reader; +} + +void destroy_elf_reader(struct elf_reader_s* reader) +{ + if (!reader) { + return; + } + + (void)free(reader); + return; +} +#if 0 + +void rm_elf_symbol(struct elf_reader_s* reader, struct elf_symbo_s* elf_symbol) +{ + struct elf_symbo_s *item = NULL; + + if (!elf_symbol || !reader) { + return; + } + + INFO("[DEBUG_ELF]: Try to delete debug file %s.\n", item->file); + + item = find_elf_symbol(reader, elf_symbol->i_inode); + if (!item) { + return; + } + + if (item->refcnt > 0) { + item->refcnt -= 1; + } + + if (item->refcnt > 0) { + return; + } + + INFO("[DEBUG_ELF]: Succeed to 
delete debug file %s.\n", item->file); + + destroy_elf_symbol(item); + H_DEL(reader->head, item); + (void)free(item); + return; +} + + +struct elf_symbo_s* get_elf_symbol(struct elf_reader_s* reader, int pid, const char *elf, const char *elf_link) +{ + int ret; + u32 inode = 0; + char debug_file[PATH_LEN]; + struct elf_symbo_s *item = NULL, *new_item = NULL; + + if ((ret = get_inode(elf_link, &inode)) && ret != 0) { + return NULL; + } + + item = find_elf_symbol(reader, inode); + if (item) { + item->refcnt += 1; + INFO("[DEBUG_ELF]: Succeed to lkup debug file %s(refcnt = %u).\n", item->file, item->refcnt); + return item; + } + + debug_file[0] = 0; + if ((ret = get_elf_debug_file(reader, pid, elf, elf_link, debug_file, PATH_LEN)) && ret != 0) { + goto err; + } + + if (debug_file[0] == 0) { + goto err; + } + + new_item = create_elf_symbol(reader, inode); + if (!new_item) { + return NULL; + } + + new_item->file = strdup(debug_file); + if (new_item->file == NULL) { + goto err; + } + + if ((ret = load_elf_symbol(new_item)) && ret != 0) { + ERROR("[DEBUG_ELF]: Failed to load symbol(%s).\n", new_item->file); + goto err; + } + + (void)sort_elf_symbol(new_item); + + INFO("[DEBUG_ELF]: Succeed to create debug file %s(refcnt = %u).\n", new_item->file, new_item->refcnt); + + H_ADD_I(reader->head, i_inode, new_item); + + return new_item; +err: + if (new_item) { + destroy_elf_symbol(new_item); + (void)free(new_item); + } + return NULL; +} + +int search_elf_symbol(u64 target_addr, char *comm, struct elf_symbo_s* elf_symbo, struct addr_symb_s* addr_symb) +{ + return search_symbs(target_addr, comm, elf_symbo, addr_symb); +} +#endif diff --git a/src/probes/extends/ebpf.probe/src/stackprobe/debug_elf_reader.h b/src/probes/extends/ebpf.probe/src/stackprobe/debug_elf_reader.h new file mode 100644 index 0000000000000000000000000000000000000000..391d7e57e4a4b9cfad5a787cea9f44f2a9d3254a --- /dev/null +++ b/src/probes/extends/ebpf.probe/src/stackprobe/debug_elf_reader.h @@ -0,0 +1,34 @@ 
+/****************************************************************************** + * Copyright (c) Huawei Technologies Co., Ltd. 2022. All rights reserved. + * gala-gopher licensed under the Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * http://license.coscl.org.cn/MulanPSL2 + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR FIT FOR A PARTICULAR + * PURPOSE. + * See the Mulan PSL v2 for more details. + * Author: Mr.lu + * Create: 2022-08-18 + * Description: DEBUG ELF reader + ******************************************************************************/ +#ifndef __GOPHER_DEBUG_ELF_READER_H__ +#define __GOPHER_DEBUG_ELF_READER_H__ + +#pragma once + +#include "hash.h" +#include "symbol.h" + +struct elf_reader_s { + char global_dbg_dir[PATH_LEN]; // Must NOT end with '/' +}; + +struct elf_reader_s* create_elf_reader(const char *global_dbg_dir); +void destroy_elf_reader(struct elf_reader_s* reader); + +int get_elf_debug_file(struct elf_reader_s* reader, int pid, + const char* elf, const char* elf_link, char debug_file[], size_t len); + + +#endif diff --git a/src/probes/extends/ebpf.probe/src/stackprobe/elf_symb.c b/src/probes/extends/ebpf.probe/src/stackprobe/elf_symb.c new file mode 100644 index 0000000000000000000000000000000000000000..f16060cd5141b1636f5136afeb1e561ccd42024a --- /dev/null +++ b/src/probes/extends/ebpf.probe/src/stackprobe/elf_symb.c @@ -0,0 +1,381 @@ +/****************************************************************************** + * Copyright (c) Huawei Technologies Co., Ltd. 2022. All rights reserved. + * gala-gopher licensed under the Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. 
+ * You may obtain a copy of Mulan PSL v2 at: + * http://license.coscl.org.cn/MulanPSL2 + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR FIT FOR A PARTICULAR + * PURPOSE. + * See the Mulan PSL v2 for more details. + * Author: luzhihao + * Create: 2022-08-22 + * Description: debug reader + ******************************************************************************/ +#define _GNU_SOURCE +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include + +#ifdef BPF_PROG_KERN +#undef BPF_PROG_KERN +#endif + +#ifdef BPF_PROG_USER +#undef BPF_PROG_USER +#endif + +#include "bpf.h" +#include "container.h" +#include "gopher_elf.h" +#include "elf_symb.h" + +static struct elf_symbo_s* __head = NULL; + +#ifdef symbs +#undef symbs +#endif +#define symbs __symbs + +#if 1 +#define __STAT_INODE "/usr/bin/stat --format=%%i %s" +int __get_inode(const char *elf, u32 *inode) +{ + char command[COMMAND_LEN]; + char inode_s[INT_LEN]; + + if (access(elf, 0) != 0) { + return -1; + } + + command[0] = 0; + inode_s[0] = 0; + (void)snprintf(command, COMMAND_LEN, __STAT_INODE, elf); + + if (exec_cmd((const char *)command, inode_s, INT_LEN) < 0) { + return -1; + } + + *inode = (u32)atoi((const char *)inode_s); + return 0; +} + +static void __symb_destroy(struct symb_s *symb) +{ + if (!symb) { + return; + } + + if (symb->symb_name) { + (void)free(symb->symb_name); + symb->symb_name = NULL; + } + return; +} + +static void __destroy_elf_symbol(struct elf_symbo_s* elf_symbo) +{ + if (!elf_symbo) { + return; + } + + if (elf_symbo->elf) { + (void)free(elf_symbo->elf); + elf_symbo->elf = NULL; + } + + for (int i = 0; i < elf_symbo->symbs_count; i++) { + __symb_destroy(elf_symbo->symbs[i]); + if (elf_symbo->symbs[i]) { + (void)free(elf_symbo->symbs[i]); + elf_symbo->symbs[i] = NULL; + } + } + if (elf_symbo->symbs) { + 
(void)free(elf_symbo->symbs); + elf_symbo->symbs = NULL; + } + return; +} + +static int __symb_cmp(const void *a, const void *b) +{ + struct symb_s **symb1 = (struct symb_s **)a; + struct symb_s **symb2 = (struct symb_s **)b; + + return (*symb1)->start - (*symb2)->start; +} + +static int __sort_elf_symbol(struct elf_symbo_s* elf_symbo) +{ + if (elf_symbo->symbs_count == 0) { + return 0; + } + qsort(elf_symbo->symbs, elf_symbo->symbs_count, sizeof(struct symb_s *), __symb_cmp); + return 0; +} + +static struct elf_symbo_s* __lkup_elf_symb(u32 inode) +{ + struct elf_symbo_s *item = NULL; + + H_FIND_I(__head, &inode, item); + return item; +} + +static struct elf_symbo_s* __create_elf_symbol(const char* elf, u32 inode) +{ + struct elf_symbo_s* elf_symbo = malloc(sizeof(struct elf_symbo_s)); + if (!elf_symbo) { + return NULL; + } + (void)memset(elf_symbo, 0, sizeof(struct elf_symbo_s)); + elf_symbo->i_inode = inode; + elf_symbo->elf = strdup(elf); + elf_symbo->refcnt += 1; + return elf_symbo; +} + +static int __inc_symbs_capability(struct elf_symbo_s* elf_symbo) +{ + u32 new_capa, old_capa; + struct symb_s** new_symbs_capa; + struct symb_s** old_symbs_capa; + + old_capa = elf_symbo->symbs_capability; + new_capa = elf_symbo->symbs_capability + SYMBS_STEP_COUNT; + if (new_capa >= SYMBS_MAX_COUNT) { + return -1; + } + + old_symbs_capa = elf_symbo->symbs; + + new_symbs_capa = (struct symb_s **)malloc(new_capa * sizeof(struct symb_s *)); + if (!new_symbs_capa) { + return -1; + } + + (void)memset(new_symbs_capa, 0, new_capa * sizeof(struct symb_s *)); + if (old_capa > 0 && old_symbs_capa != NULL) { + (void)memcpy(new_symbs_capa, old_symbs_capa, old_capa * sizeof(struct symb_s *)); + } + if (old_symbs_capa != NULL) { + (void)free(old_symbs_capa); + old_symbs_capa = NULL; + } + elf_symbo->symbs = new_symbs_capa; + elf_symbo->symbs_capability = new_capa; + return 0; +} + +static ELF_CB_RET __add_symbs(const char *symb, u64 addr_start, u64 size, void *ctx) +{ + struct elf_symbo_s* 
elf_symbo = ctx; + struct symb_s* new_symb; + + if (elf_symbo->symbs_count >= elf_symbo->symbs_capability) { + if (__inc_symbs_capability(elf_symbo)) { + ERROR("[ELF_SYMBOL]: Too many symbos(%s).\n", elf_symbo->elf); + return ELF_SYMB_CB_ERR; + } + } + + new_symb = (struct symb_s*)malloc(sizeof(struct symb_s)); + if (!new_symb) { + return ELF_SYMB_CB_ERR; + } + + (void)memset(new_symb, 0, sizeof(struct symb_s)); + new_symb->start = addr_start; + new_symb->size = size; + new_symb->symb_name = strdup(symb); + SPLIT_NEWLINE_SYMBOL(new_symb->symb_name); + + elf_symbo->symbs[elf_symbo->symbs_count++] = new_symb; + return ELF_SYMB_CB_OK; +} + +static int __load_elf_symbol(struct elf_symbo_s* elf_symbo) +{ + if (!elf_symbo->elf) { + return -1; + } + +#if 0 + if (!access(elf_symbo->elf, 0)) { + return -1; + } +#endif + + return gopher_iter_elf_file_symb((const char *)(elf_symbo->elf), __add_symbs, elf_symbo); +} + + +#define __ERR_INDEX(elf_symb, index) (((index) < 0) || (elf_symb->symbs_count <= (index))) + +static int __search_addr_upper_bound(struct elf_symbo_s* elf_symb, int bgn, int end, u64 target_addr) +{ + int left = bgn, right = end, mid = 0; + + if ((bgn >= end) || (bgn < 0) || (end < 0)) { + return -1; + } + + while (left < right) { + mid = (left + right) / 2; + if (mid >= elf_symb->symbs_count) { + return -1; + } + if (target_addr >= elf_symb->symbs[mid]->start) { + left = mid + 1; + } else { + right = mid - 1; + } + } + + if (__ERR_INDEX(elf_symb, right)) { + return -1; + } + return target_addr >= elf_symb->symbs[right]->start ? (right + 1): right; +} + +static int __do_search_addr(struct elf_symbo_s* elf_symb, + u64 orign_addr, u64 target_addr, const char* comm, struct addr_symb_s* addr_symb) +{ + u64 range; + int search_index = __search_addr_upper_bound(elf_symb, 0, elf_symb->symbs_count, target_addr); + + // Take a step back. 
+ search_index -= 1; + if (__ERR_INDEX(elf_symb, search_index)) { + return -1; + } + + range = elf_symb->symbs[search_index]->start; + + while (!__ERR_INDEX(elf_symb, search_index) && target_addr >= elf_symb->symbs[search_index]->start) { + if (target_addr < elf_symb->symbs[search_index]->start + elf_symb->symbs[search_index]->size) { + addr_symb->sym = elf_symb->symbs[search_index]->symb_name; + addr_symb->offset = target_addr - elf_symb->symbs[search_index]->start; + addr_symb->orign_addr = orign_addr; + addr_symb->mod = (char *)comm; + return 0; + } + if (range > elf_symb->symbs[search_index]->start + elf_symb->symbs[search_index]->size) { + break; + } + // Take a step back. + search_index -= 1; + } + + return -1; +} + + +#endif + +struct elf_symbo_s* get_elf_symb(const char* elf) +{ + int ret; + u32 inode; + struct elf_symbo_s* item = NULL, *new_item = NULL; + + if ((ret = __get_inode(elf, &inode)) && (ret != 0)) { + return NULL; + } + + item = __lkup_elf_symb(inode); + if (item) { + item->refcnt++; + return item; + } + + new_item = __create_elf_symbol(elf, inode); + if (!new_item) { + goto err; + } + + if ((ret = __load_elf_symbol(new_item)) && ret != 0) { + ERROR("[ELF_SYMBOL]: Failed to load symbol(%s).\n", new_item->elf); + goto err; + } + + (void)__sort_elf_symbol(new_item); + + H_ADD_I(__head, i_inode, new_item); + + //INFO("[ELF_SYMBOL]: Succeed to load elf symbs %s(symbs_count = %u).\n", new_item->elf, new_item->symbs_count); + + return new_item; +err: + if (new_item) { + __destroy_elf_symbol(new_item); + (void)free(new_item); + } + return NULL; +} + +void rm_elf_symb(struct elf_symbo_s* elf_symb) +{ + struct elf_symbo_s *item = NULL; + + if (!elf_symb) { + return; + } + + item = __lkup_elf_symb(elf_symb->i_inode); + if (!item) { + return; + } + + if (item->refcnt > 0) { + item->refcnt -= 1; + } + + if (item->refcnt > 0) { + return; + } + + //INFO("[ELF_SYMBOL]: Succeed to delete elf %s.\n", item->elf); + + __destroy_elf_symbol(item); + H_DEL(__head, 
item); + (void)free(item); + return; +} + +int search_elf_symb(struct elf_symbo_s* elf_symb, + u64 orign_addr, u64 target_addr, const char* comm, struct addr_symb_s* addr_symb) +{ + if (elf_symb == NULL) { + return -1; + } + + return __do_search_addr(elf_symb, orign_addr, target_addr, comm, addr_symb); +} + +void deinit_elf_symbs(void) +{ + struct elf_symbo_s *item, *tmp; + + if (!__head) { + return; + } + + H_ITER(__head, item, tmp) { + __destroy_elf_symbol(item); + H_DEL(__head, item); + (void)free(item); + } + __head = NULL; + return; +} diff --git a/src/probes/extends/ebpf.probe/src/stackprobe/elf_symb.h b/src/probes/extends/ebpf.probe/src/stackprobe/elf_symb.h new file mode 100644 index 0000000000000000000000000000000000000000..3dbdb49dc63a14fb04ba6c6e4ba71ec558e8b54d --- /dev/null +++ b/src/probes/extends/ebpf.probe/src/stackprobe/elf_symb.h @@ -0,0 +1,28 @@ +/****************************************************************************** + * Copyright (c) Huawei Technologies Co., Ltd. 2022. All rights reserved. + * gala-gopher licensed under the Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * http://license.coscl.org.cn/MulanPSL2 + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR FIT FOR A PARTICULAR + * PURPOSE. + * See the Mulan PSL v2 for more details. 
+ * Author: Mr.lu + * Create: 2022-08-18 + * Description: elf symb + ******************************************************************************/ +#ifndef __GOPHER_ELF_SYMB_H__ +#define __GOPHER_ELF_SYMB_H__ + +#pragma once + +#include "symbol.h" + +struct elf_symbo_s* get_elf_symb(const char* elf); +void rm_elf_symb(struct elf_symbo_s* elf_symb); +int search_elf_symb(struct elf_symbo_s* elf_symb, + u64 orign_addr, u64 target_addr, const char* comm, struct addr_symb_s* addr_symb); +void deinit_elf_symbs(void); + +#endif diff --git a/src/probes/extends/ebpf.probe/src/stackprobe/flame_graph.c b/src/probes/extends/ebpf.probe/src/stackprobe/flame_graph.c new file mode 100644 index 0000000000000000000000000000000000000000..cde3d21368222d85c794c9e1552a373fb5569aee --- /dev/null +++ b/src/probes/extends/ebpf.probe/src/stackprobe/flame_graph.c @@ -0,0 +1,261 @@ +/****************************************************************************** + * Copyright (c) Huawei Technologies Co., Ltd. 2022. All rights reserved. + * gala-gopher licensed under the Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * http://license.coscl.org.cn/MulanPSL2 + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR FIT FOR A PARTICULAR + * PURPOSE. + * See the Mulan PSL v2 for more details. 
+ * Author: luzhihao + * Create: 2022-08-22 + * Description: flame_graph prog + ******************************************************************************/ +#define _GNU_SOURCE +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include + +#ifdef BPF_PROG_KERN +#undef BPF_PROG_KERN +#endif + +#ifdef BPF_PROG_USER +#undef BPF_PROG_USER +#endif + +#include "bpf.h" +#include "flame_graph.h" + +#if 1 + +static char __test_flame_graph_flags(struct stack_svg_mng_s *svg_mng, enum stack_svg_type_e en_type, u32 flags) +{ + struct stack_flamegraph_s *sfg; + + sfg = &(svg_mng->flame_graphs[en_type]); + if (sfg->flags & flags) { + return 1; + } + return 0; +} + +static void __set_flame_graph_flags(struct stack_svg_mng_s *svg_mng, enum stack_svg_type_e en_type, u32 flags) +{ + struct stack_flamegraph_s *sfg; + + sfg = &(svg_mng->flame_graphs[en_type]); + sfg->flags |= flags; + return; +} + +static void __reset_flame_graph_flags(struct stack_svg_mng_s *svg_mng, enum stack_svg_type_e en_type, u32 flags) +{ + struct stack_flamegraph_s *sfg; + + sfg = &(svg_mng->flame_graphs[en_type]); + sfg->flags &= flags; + return; +} + +static FILE * __open_flame_graph_fp(struct stack_svg_mng_s *svg_mng, enum stack_svg_type_e en_type) +{ + struct stack_flamegraph_s *sfg; + + sfg = &(svg_mng->flame_graphs[en_type]); + if (sfg->fp) { + (void)pclose(sfg->fp); + sfg->fp = NULL; + } + sfg->fp = fopen(sfg->flame_graph_file, "a+"); + if (sfg->fp == NULL) { + ERROR("[FLAMEGRAPH]: open file failed.(%s)\n", sfg->flame_graph_file); + } + return sfg->fp; +} + +static FILE * __get_flame_graph_fp(struct stack_svg_mng_s *svg_mng, enum stack_svg_type_e en_type) +{ + struct stack_flamegraph_s *sfg; + + sfg = &(svg_mng->flame_graphs[en_type]); + return sfg->fp; +} + +static void __mkdir_flame_graph_path(struct stack_svg_mng_s *svg_mng, enum stack_svg_type_e en_type) +{ + FILE *fp; + struct stack_flamegraph_s *sfg; + char commad[COMMAND_LEN]; + + sfg = 
&(svg_mng->flame_graphs[en_type]); + commad[0] = 0; + (void)snprintf(commad, COMMAND_LEN, "/usr/bin/mkdir -p %s", sfg->flame_graph_dir ?: "/"); + fp = popen(commad, "r"); + if (fp != NULL) { + (void)pclose(fp); + } + return; +} + +static char* __get_flame_graph_file(struct stack_svg_mng_s *svg_mng, enum stack_svg_type_e en_type) +{ + struct stack_flamegraph_s *sfg; + + sfg = &(svg_mng->flame_graphs[en_type]); + return sfg->flame_graph_file; +} + +static void __flush_flame_graph_file(struct stack_svg_mng_s *svg_mng, enum stack_svg_type_e en_type) +{ + struct stack_flamegraph_s *sfg; + + sfg = &(svg_mng->flame_graphs[en_type]); + if (sfg->fp) { + (void)fflush(sfg->fp); + } + return; +} + +static void __set_flame_graph_file(struct stack_svg_mng_s *svg_mng, enum stack_svg_type_e en_type) +{ + const char *fmt = "%s/tmp_%s"; + struct stack_flamegraph_s *sfg; + + sfg = &(svg_mng->flame_graphs[en_type]); + sfg->flame_graph_file[0] = 0; + (void)snprintf(sfg->flame_graph_file, PATH_LEN, fmt, sfg->flame_graph_dir ?: "", get_cur_time()); + return; +} + +static void __rm_flame_graph_file(struct stack_svg_mng_s *svg_mng, enum stack_svg_type_e en_type) +{ +#define __COMMAND_LEN (2 * PATH_LEN) + FILE *fp; + char commad[__COMMAND_LEN]; + struct stack_flamegraph_s *sfg; + + sfg = &(svg_mng->flame_graphs[en_type]); + + if (!access(sfg->flame_graph_file, 0)) { + commad[0] = 0; + (void)snprintf(commad, __COMMAND_LEN, "/usr/bin/rm -f %s", sfg->flame_graph_file); + fp = popen(commad, "r"); + if (fp != NULL) { + (void)pclose(fp); + fp = NULL; + } + } + if (sfg->fp) { + (void)fclose(sfg->fp); + sfg->fp = NULL; + } +} + +static void __reopen_flame_graph_file(struct stack_svg_mng_s *svg_mng, enum stack_svg_type_e en_type) +{ + __rm_flame_graph_file(svg_mng, en_type); + __set_flame_graph_file(svg_mng, en_type); + (void)__open_flame_graph_fp(svg_mng, en_type); + __set_flame_graph_flags(svg_mng, en_type, FLAME_GRAPH_NEW); +} + +#define HISTO_TMP_LEN (2 * STACK_SYMBS_LEN) +static char 
__histo_tmp_str[HISTO_TMP_LEN]; +static int __do_wr_stack_histo(struct stack_svg_mng_s *svg_mng, enum stack_svg_type_e en_type, struct stack_trace_histo_s *stack_trace_histo, int first) +{ + FILE * fp = __get_flame_graph_fp(svg_mng, en_type); + if (!fp) { + ERROR("[FLAMEGRAPH]: Invalid fp.\n"); + return -1; + } + + __histo_tmp_str[0] = 0; + + if (first) { + (void)snprintf(__histo_tmp_str, HISTO_TMP_LEN, "%s %llu", + stack_trace_histo->stack_symbs_str, stack_trace_histo->count); + } else { + (void)snprintf(__histo_tmp_str, HISTO_TMP_LEN, "\n%s %llu", + stack_trace_histo->stack_symbs_str, stack_trace_histo->count); + } + (void)fputs(__histo_tmp_str, fp); + return 0; +} + +static void __do_wr_flamegraph(struct stack_svg_mng_s *svg_mng, struct stack_trace_histo_s *head, enum stack_svg_type_e en_type) +{ + int first_flag = 0; + + if (__test_flame_graph_flags(svg_mng, en_type, FLAME_GRAPH_NEW)) { + first_flag = 1; + } + + struct stack_trace_histo_s *item, *tmp; + + H_ITER(head, item, tmp) { + (void)__do_wr_stack_histo(svg_mng, en_type, item, first_flag); + first_flag = 0; + } + + __flush_flame_graph_file(svg_mng, en_type); + __reset_flame_graph_flags(svg_mng, en_type, ~FLAME_GRAPH_NEW); +} + +#endif + +void wr_flamegraph(struct stack_svg_mng_s *svg_mng, struct stack_trace_histo_s *head, enum stack_svg_type_e en_type) +{ + __do_wr_flamegraph(svg_mng, head, en_type); + if (is_svg_tmout(svg_mng, en_type)) { + (void)create_svg_file(svg_mng, + en_type, + __get_flame_graph_file(svg_mng, en_type)); + + __reopen_flame_graph_file(svg_mng, en_type); + } +} + +int set_flame_graph_path(struct stack_svg_mng_s *svg_mng, const char* path, enum stack_svg_type_e en_type) +{ + size_t len; + char dir[PATH_LEN]; + struct stack_flamegraph_s *sfg; + + len = strlen(path); + if (len == 0 || len >= PATH_LEN) { + return -1; + } + + if (len == 1 && path[0] == '/') { + return 0; + } + + dir[0] = 0; + if (path[0] == '/') { + (void)strncpy(dir, path, len - 1); + } else { + (void)strncpy(dir, path, 
PATH_LEN - 1); + } + sfg = &(svg_mng->flame_graphs[en_type]); + sfg->flame_graph_dir = strdup(dir); + + __mkdir_flame_graph_path(svg_mng, en_type); + __set_flame_graph_file(svg_mng, en_type); + if (__open_flame_graph_fp(svg_mng, en_type) == NULL) { + return -1; + } + __set_flame_graph_flags(svg_mng, en_type, FLAME_GRAPH_NEW); + return 0; +} + diff --git a/src/probes/extends/ebpf.probe/src/stackprobe/flame_graph.h b/src/probes/extends/ebpf.probe/src/stackprobe/flame_graph.h new file mode 100644 index 0000000000000000000000000000000000000000..e70b0c9008b402af27ea807afe17ce2f15dcb7bf --- /dev/null +++ b/src/probes/extends/ebpf.probe/src/stackprobe/flame_graph.h @@ -0,0 +1,26 @@ +/****************************************************************************** + * Copyright (c) Huawei Technologies Co., Ltd. 2022. All rights reserved. + * gala-gopher licensed under the Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * http://license.coscl.org.cn/MulanPSL2 + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR FIT FOR A PARTICULAR + * PURPOSE. + * See the Mulan PSL v2 for more details. 
+ * Author: Mr.lu + * Create: 2022-08-18 + * Description: flame graph defined + ******************************************************************************/ +#ifndef __GOPHER_FLAME_GRAPH_H__ +#define __GOPHER_FLAME_GRAPH_H__ + +#pragma once + +#include "svg.h" +#include "stackprobe.h" + +void wr_flamegraph(struct stack_svg_mng_s *svg_mng, struct stack_trace_histo_s *head, enum stack_svg_type_e en_type); +int set_flame_graph_path(struct stack_svg_mng_s *svg_mng, const char* path, enum stack_svg_type_e en_type); + +#endif diff --git a/src/probes/extends/ebpf.probe/src/stackprobe/oncpu.bpf.c b/src/probes/extends/ebpf.probe/src/stackprobe/oncpu.bpf.c new file mode 100644 index 0000000000000000000000000000000000000000..82551167b841599a7e9e707b237a615816589e87 --- /dev/null +++ b/src/probes/extends/ebpf.probe/src/stackprobe/oncpu.bpf.c @@ -0,0 +1,146 @@ +/****************************************************************************** + * Copyright (c) Huawei Technologies Co., Ltd. 2022. All rights reserved. + * gala-gopher licensed under the Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * http://license.coscl.org.cn/MulanPSL2 + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR FIT FOR A PARTICULAR + * PURPOSE. + * See the Mulan PSL v2 for more details. 
+ * Author: luzhihao + * Create: 2022-08-13 + * Description: function stack tracing + ******************************************************************************/ +#ifdef BPF_PROG_USER +#undef BPF_PROG_USER +#endif +#define BPF_PROG_KERN +#include "bpf.h" +#include "stack.h" + +char g_linsence[] SEC("license") = "GPL"; + +#ifndef BPF_F_FAST_STACK_CMP +#define BPF_F_FAST_STACK_CMP (1ULL << 9) +#endif + +#ifndef BPF_F_USER_STACK +#define BPF_F_USER_STACK (1ULL << 8) +#endif + +#ifndef BPF_F_INDEX_MASK +#define BPF_F_INDEX_MASK 0xffffffffULL +#endif + +#ifndef BPF_F_CURRENT_CPU +#define BPF_F_CURRENT_CPU BPF_F_INDEX_MASK +#endif + +#define KERN_STACKID_FLAGS (0 | BPF_F_FAST_STACK_CMP) +#define USER_STACKID_FLAGS (0 | BPF_F_FAST_STACK_CMP | BPF_F_USER_STACK) + +/* + To ensure that BPF continuously collects stack-trace data, BPF provides two data channels (A/B). + One data channel is used to collect stack-trace data, and the other is used to read stack-trace data in user mode. + Two data channel periodically alternate roles. 
+*/ + +struct bpf_map_def SEC("maps") convert_map = { + .type = BPF_MAP_TYPE_ARRAY, + .key_size = sizeof(u32), // const value 0 + .value_size = sizeof(u64), // convert counter + .max_entries = 1, +}; + +/* Data channel A */ +struct bpf_map_def SEC("maps") stackmap_a = { + .type = BPF_MAP_TYPE_STACK_TRACE, + .key_size = sizeof(u32), + .value_size = PERF_MAX_STACK_DEPTH * sizeof(u64), + .max_entries = PERCPU_SAMPLE_COUNT, +}; + +struct bpf_map_def SEC("maps") stackmap_perf_a = { + .type = BPF_MAP_TYPE_PERF_EVENT_ARRAY, + .key_size = sizeof(u32), + .value_size = sizeof(u32), + .max_entries = MAX_CPU, +}; + +/* Data channel B */ +struct bpf_map_def SEC("maps") stackmap_b = { + .type = BPF_MAP_TYPE_STACK_TRACE, + .key_size = sizeof(u32), + .value_size = PERF_MAX_STACK_DEPTH * sizeof(u64), + .max_entries = PERCPU_SAMPLE_COUNT, +}; + +struct bpf_map_def SEC("maps") stackmap_perf_b = { + .type = BPF_MAP_TYPE_PERF_EVENT_ARRAY, + .key_size = sizeof(u32), + .value_size = sizeof(u32), + .max_entries = MAX_CPU, +}; + +static __always_inline u64 get_real_start_time() +{ + struct task_struct* task = (struct task_struct*)bpf_get_current_task(); + if (task) { + struct task_struct* group_leader = _(task->group_leader); + if (group_leader) { +#if (CURRENT_KERNEL_VERSION >= KERNEL_VERSION(5, 10, 0)) + return _(group_leader->start_boottime); +#else + return _(group_leader->real_start_time); +#endif + } + } + return 0; +} + +bpf_section("perf_event") +int function_stack_trace(struct bpf_perf_event_data *ctx) +{ + struct stack_id_s stack_id = {0}; + const u32 zero = 0; + u64 *convert_count = bpf_map_lookup_elem(&convert_map, &zero); + if (!convert_count) { + return -1; + } + + // Obtains the data channel used to collect stack-trace data. 
+ char is_stackmap_a = ((*convert_count % 2) == 0); + + stack_id.pid.proc_id = bpf_get_current_pid_tgid() >> INT_LEN; + if (stack_id.pid.proc_id > 1) { + struct proc_s obj = {.proc_id = stack_id.pid.proc_id}; + if (!is_proc_exist(&obj)) { + return 0; + } + } + stack_id.pid.real_start_time = get_real_start_time(); + (void)bpf_get_current_comm(&stack_id.pid.comm, sizeof(stack_id.pid.comm)); + + if (is_stackmap_a) { + stack_id.kern_stack_id = bpf_get_stackid(ctx, &stackmap_a, KERN_STACKID_FLAGS); + stack_id.user_stack_id = bpf_get_stackid(ctx, &stackmap_a, USER_STACKID_FLAGS); + } else { + stack_id.kern_stack_id = bpf_get_stackid(ctx, &stackmap_b, KERN_STACKID_FLAGS); + stack_id.user_stack_id = bpf_get_stackid(ctx, &stackmap_b, USER_STACKID_FLAGS); + } + + if (stack_id.kern_stack_id < 0 && stack_id.user_stack_id < 0) { + // error. + return -1; + } + + if (is_stackmap_a) { + (void)bpf_perf_event_output(ctx, &stackmap_perf_a, BPF_F_CURRENT_CPU, &stack_id, sizeof(stack_id)); + } else { + (void)bpf_perf_event_output(ctx, &stackmap_perf_b, BPF_F_CURRENT_CPU, &stack_id, sizeof(stack_id)); + } + + return 0; +} + diff --git a/src/probes/extends/ebpf.probe/src/stackprobe/stack.h b/src/probes/extends/ebpf.probe/src/stackprobe/stack.h new file mode 100644 index 0000000000000000000000000000000000000000..0439a1a49dc5ce7a29ab03cf57ff1366ae300aae --- /dev/null +++ b/src/probes/extends/ebpf.probe/src/stackprobe/stack.h @@ -0,0 +1,42 @@ +/****************************************************************************** + * Copyright (c) Huawei Technologies Co., Ltd. 2022. All rights reserved. + * gala-gopher licensed under the Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. 
+ * You may obtain a copy of Mulan PSL v2 at: + * http://license.coscl.org.cn/MulanPSL2 + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR FIT FOR A PARTICULAR + * PURPOSE. + * See the Mulan PSL v2 for more details. + * Author: Mr.lu + * Create: 2022-08-18 + * Description: stack defined + ******************************************************************************/ +#ifndef __GOPHER_STACK_H__ +#define __GOPHER_STACK_H__ + +#pragma once + +#include "common.h" + +#define AGGRE_PERIOD (1 * 60 * 1000) // 1min +#define SAMPLE_PERIOD (10) // 10ms +#define TMOUT_PERIOD (AGGRE_PERIOD / 1000) // Second as unit +#define PROC_CACHE_MAX_COUNT 10 // Cache 10 proc symbols +#define DIV_ROUND_UP(NUM, DEN) ((NUM + DEN - 1) / DEN) + +#define PERCPU_SAMPLE_COUNT (2 * DIV_ROUND_UP(AGGRE_PERIOD, SAMPLE_PERIOD)) + +struct stack_pid_s { + u64 real_start_time; + int proc_id; + char comm[TASK_COMM_LEN]; +}; + +struct stack_id_s { + int user_stack_id; + int kern_stack_id; + struct stack_pid_s pid; +}; + +#endif diff --git a/src/probes/extends/ebpf.probe/src/stackprobe/stackprobe.c b/src/probes/extends/ebpf.probe/src/stackprobe/stackprobe.c new file mode 100644 index 0000000000000000000000000000000000000000..ba64ee313cf4da4fcf4f1d7b8479f4de76894173 --- /dev/null +++ b/src/probes/extends/ebpf.probe/src/stackprobe/stackprobe.c @@ -0,0 +1,1012 @@ +/****************************************************************************** + * Copyright (c) Huawei Technologies Co., Ltd. 2022. All rights reserved. + * gala-gopher licensed under the Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. 
+ * You may obtain a copy of Mulan PSL v2 at: + * http://license.coscl.org.cn/MulanPSL2 + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR FIT FOR A PARTICULAR + * PURPOSE. + * See the Mulan PSL v2 for more details. + * Author: luzhihao + * Create: 2022-08-22 + * Description: stack probe user prog + ******************************************************************************/ +#define _GNU_SOURCE +#include +#include +#include +#include +#include +#include +#include + +#include +#include + +#include +#include +#include +#include + +#ifdef BPF_PROG_KERN +#undef BPF_PROG_KERN +#endif + +#ifdef BPF_PROG_USER +#undef BPF_PROG_USER +#endif + +#include "bpf.h" +#include "args.h" +#include "logs.h" +#include "syscall.h" +#include "symbol.h" +#include "flame_graph.h" +#include "debug_elf_reader.h" +#include "elf_symb.h" +#include "stackprobe.h" + +#define ON_CPU_PROG "./oncpu.bpf.o" +#define IS_IEG_ADDR(addr) ((addr) != 0xcccccccccccccccc && (addr) != 0xffffffffffffffff) + +#define BPF_GET_MAP_FD(obj, map_name) \ + ({ \ + int __fd = -1; \ + struct bpf_map *__map = bpf_object__find_map_by_name((obj), (map_name)); \ + if (__map) { \ + __fd = bpf_map__fd(__map); \ + } \ + __fd; \ + }) + +#define BPF_PIN_MAP_PATH(obj, map_name, path) \ + ({ \ + int __ret = -1; \ + struct bpf_map *__map = bpf_object__find_map_by_name((obj), (map_name)); \ + if (__map) { \ + __ret = bpf_map__set_pin_path(__map, path); \ + } \ + __ret; \ + }) + +static struct probe_params params = {.period = DEFAULT_PERIOD}; +static volatile sig_atomic_t g_stop; + +static struct satck_trace_s *g_st = NULL; + +static void sig_int(int signo) +{ + g_stop = 1; +} + +#if 1 + +int stacktrace_create_log_mgr(struct satck_trace_s *st) +{ + struct log_mgr_s* mgr = create_log_mgr(NULL, 0, 0); + if (!mgr) { + return -1; + } + + (void)strncpy(mgr->debug_path, st->stack_params.logs, PATH_LEN - 1); + 
+ if (init_log_mgr(mgr, 0)) { + return -1; + } + + st->log_mgr = (void *)mgr; + + return 0; +} + +void stacktrace_destroy_log_mgr(struct satck_trace_s *st) +{ + if (!st->log_mgr) { + return; + } + + destroy_log_mgr(st->log_mgr); + return; +} + +#endif + +#if 1 +static int get_stack_map_fd(struct satck_trace_s *st) +{ + if (st->is_stackmap_a) { + return st->stackmap_a_fd; + } else { + return st->stackmap_b_fd; + } +} + +static struct perf_buffer* get_pb(struct satck_trace_s *st) +{ + if (st->is_stackmap_a) { + return st->pb_a; + } else { + return st->pb_b; + } +} + +#endif + +#if 1 // Proc cache + +static void __destroy_proc_cache(struct proc_cache_s *proc_cache) +{ + if (!proc_cache || !proc_cache->proc_symbs) { + return; + } + + proc_delete_all_symbs(proc_cache->proc_symbs); + proc_cache->proc_symbs = NULL; + return; +} + +static void destroy_proc_cache_tbl(struct satck_trace_s *st) +{ + if (!st || !st->proc_cache) { + return; + } + + struct proc_cache_s *proc_hash_tbl = st->proc_cache; + struct proc_cache_s *item, *tmp; + + H_ITER(proc_hash_tbl, item, tmp) { + __destroy_proc_cache(item); + H_DEL(proc_hash_tbl, item); + (void)free(item); + } + st->proc_cache = NULL; + (void)memset(st->proc_cache_mirro, 0, sizeof(struct proc_cache_s *) * PROC_CACHE_MAX_COUNT); + return; +} + +static int __aging_proc_cache(struct satck_trace_s *st, struct proc_cache_s *aging_item) +{ + struct proc_cache_s *item = NULL; + H_FIND(st->proc_cache, &(aging_item->k), sizeof(struct stack_pid_s), item); + if (item) { + st->stats.count[STACK_STATS_PCACHE_DEL]++; + __destroy_proc_cache(item); + H_DEL(st->proc_cache, item); + (void)free(item); + return 0; + } + return -1; +} + +static int __add_proc_cache_mirro(struct satck_trace_s *st, struct proc_cache_s *new_item) +{ + struct proc_cache_s *aging_item; + if (st->proc_cache_mirro_count < PROC_CACHE_MAX_COUNT) { + st->proc_cache_mirro[st->proc_cache_mirro_count] = new_item; + st->proc_cache_mirro_count++; + return 0; + } + + aging_item = 
st->proc_cache_mirro[0]; // Aging based on the creation timing + for (int i = 1; i < PROC_CACHE_MAX_COUNT; i++) { + st->proc_cache_mirro[i - 1] = st->proc_cache_mirro[i]; + } + + st->proc_cache_mirro[PROC_CACHE_MAX_COUNT - 1] = new_item; + return __aging_proc_cache(st, aging_item); +} + +static struct proc_cache_s* __search_proc_cache(struct satck_trace_s *st, struct stack_pid_s *stack_pid) +{ + struct proc_cache_s *item = NULL; + H_FIND(st->proc_cache, stack_pid, sizeof(struct stack_pid_s), item); + return item; +} + +static struct proc_cache_s* __create_proc_cache(struct satck_trace_s *st, struct stack_pid_s *stack_pid) +{ + struct proc_cache_s *new_item; + struct proc_symbs_s* proc_symbs; + + proc_symbs = proc_load_all_symbs(st->elf_reader, stack_pid->proc_id, stack_pid->comm); + if (!proc_symbs) { + return NULL; + } + + new_item = (struct proc_cache_s *)malloc(sizeof(struct proc_cache_s)); + if (!new_item) { + return NULL; + } + + (void)memcpy(&new_item->k, stack_pid, sizeof(struct stack_pid_s)); + new_item->proc_symbs = proc_symbs; + H_ADD_KEYPTR(st->proc_cache, &new_item->k, sizeof(struct stack_pid_s), new_item); + st->stats.count[STACK_STATS_PCACHE_CRT]++; + + if (__add_proc_cache_mirro(st, new_item)) { + // The program continues. 
+ ERROR("[STACKPROBE]: Proc cache add failed.\n"); + } + return new_item; +} + +static int search_user_addr_symb(struct satck_trace_s *st, + struct stack_pid_s *stack_pid, u64 addr, struct addr_symb_s *addr_symb) +{ + struct proc_cache_s* proc_cache; + + proc_cache = __search_proc_cache(st, stack_pid); + if (!proc_cache) { + proc_cache = __create_proc_cache(st, stack_pid); + } + + if (!proc_cache || !proc_cache->proc_symbs) { + return -1; + } + + return proc_search_addr_symb(proc_cache->proc_symbs, addr, addr_symb); +} + +#endif + +#if 1 + +static void clear_raw_stack_trace(struct satck_trace_s *st) +{ + if (!st || !st->raw_stack_traces) { + return; + } + + st->raw_stack_traces->raw_trace_count = 0; +} + +static void destroy_raw_stack_trace(struct satck_trace_s *st) +{ + if (!st || !st->raw_stack_traces) { + return; + } + + (void)free(st->raw_stack_traces); + st->raw_stack_traces = NULL; + return; +} + +static struct raw_stack_trace_s *create_raw_stack_trace(struct satck_trace_s *st) +{ + struct raw_stack_trace_s *raw_stack_trace; + + size_t stack_size = st->cpus_num * PERCPU_SAMPLE_COUNT; + size_t mem_size = sizeof(struct raw_stack_trace_s); + mem_size += (stack_size * sizeof(struct stack_id_s)); + + raw_stack_trace = (struct raw_stack_trace_s *)malloc(mem_size); + if (!raw_stack_trace) { + return NULL; + } + (void)memset(raw_stack_trace, 0, mem_size); + raw_stack_trace->stack_size = stack_size; + return raw_stack_trace; +} + +static int add_raw_stack_id(struct satck_trace_s *st, struct stack_id_s *raw_stack_id) +{ + if (!st || !st->raw_stack_traces) { + return -1; + } + struct raw_stack_trace_s *raw_stack_traces = st->raw_stack_traces; + + if (raw_stack_traces->raw_trace_count >= raw_stack_traces->stack_size) { + return -1; + } + + (void)memcpy(&(raw_stack_traces->raw_traces[raw_stack_traces->raw_trace_count]), + raw_stack_id, sizeof(struct stack_id_s)); + raw_stack_traces->raw_trace_count++; + return 0; +} + +#endif + +#if 1 +static int 
__stack_addrsymbs2string(struct addr_symb_s *addr_symb, int first, char *p, int size) +{ + int ret; + char *symb; + if (size <= 0) { + return -1; + } + +#if 1 + symb = addr_symb->sym ?: addr_symb->mod; + if (first) { + ret = snprintf(p, (size_t)size, "%s", symb); + } else { + ret = snprintf(p, (size_t)size, "; %s", symb); + } + +#else + symb = addr_symb->sym; + if (symb) { + if (first) { + ret = snprintf(p, (size_t)size, "%s", symb); + } else { + ret = snprintf(p, (size_t)size, "; %s", symb); + } + } else { + if (first) { + ret = snprintf(p, (size_t)size, "0x%llx", addr_symb->orign_addr); + } else { + ret = snprintf(p, (size_t)size, "; 0x%llx", addr_symb->orign_addr); + } + } +#endif + return (ret > 0 && ret < size) ? (ret) : -1; +} + +static int __satck_symbs2string(struct stack_symbs_s *stack_symbs, char symbos_str[], size_t size) +{ + int len; + int first_flag = 1; + int remain_len = size; + char *pos = symbos_str; + struct addr_symb_s *addr_symb; + + for (int i = 0; i < PERF_MAX_STACK_DEPTH; i++) { + addr_symb = &(stack_symbs->user_stack_symbs[i]); + if (addr_symb->orign_addr != 0) { + len = __stack_addrsymbs2string(addr_symb, first_flag, pos, remain_len); + if (len < 0) { + return -1; + } + + remain_len -= len; + pos += len; + first_flag = 0; + } + } + + for (int i = 0; i < PERF_MAX_STACK_DEPTH; i++) { + addr_symb = &(stack_symbs->kern_stack_symbs[i]); + if (addr_symb->orign_addr != 0) { + len = __stack_addrsymbs2string(addr_symb, first_flag, pos, remain_len); + if (len < 0) { + return -1; + } + + remain_len -= len; + pos += len; + first_flag = 0; + } + } + + return 0; +} + +static int add_stack_histo(struct satck_trace_s *st, struct stack_symbs_s *stack_symbs) +{ + char str[STACK_SYMBS_LEN]; + struct stack_trace_histo_s *item = NULL, *new_item; + + str[0] = 0; + if (__satck_symbs2string(stack_symbs, str, STACK_SYMBS_LEN)) { + // Statistic error, but program continues + st->stats.count[STACK_STATS_HISTO_ERR]++; + } + + if (str[0] == 0) { +#ifdef GOPHER_DEBUG + 
ERROR("[STACKPROBE]: symbs2str is null(proc = %d, comm = %s).\n", + stack_symbs->pid.proc_id, stack_symbs->pid.comm); +#endif + return -1; + } + + H_FIND_S(st->oncpu_histo_tbl, str, item); + if (item) { + st->stats.count[STACK_STATS_HISTO_FOLDED]++; + item->count++; + return 0; + } + + new_item = (struct stack_trace_histo_s *)malloc(sizeof(struct stack_trace_histo_s)); + if (!new_item) { + return -1; + } + + new_item->stack_symbs_str[0] = 0; + (void)strncpy(new_item->stack_symbs_str, str, STACK_SYMBS_LEN - 1); + new_item->count = 1; + H_ADD_S(st->oncpu_histo_tbl, stack_symbs_str, new_item); + return 0; +} + +static void clear_stack_histo(struct satck_trace_s *st) +{ + if (!st || !st->oncpu_histo_tbl) { + return; + } + + struct stack_trace_histo_s *stack_trace_histo_tbl = st->oncpu_histo_tbl; + struct stack_trace_histo_s *item, *tmp; + + H_ITER(stack_trace_histo_tbl, item, tmp) { + H_DEL(stack_trace_histo_tbl, item); + (void)free(item); + } + st->oncpu_histo_tbl = NULL; +} + +#endif + +#if 1 +static int stack_id2symbs_user(struct satck_trace_s *st, struct stack_id_s *stack_id, + struct addr_symb_s usr_stack_symbs[], size_t size) +{ + int index = 0; + u64 ip[PERF_MAX_STACK_DEPTH] = {0}; + int fd = get_stack_map_fd(st); + + if (bpf_map_lookup_elem(fd, &(stack_id->user_stack_id), ip) != 0) { +#ifdef GOPHER_DEBUG + ERROR("[STACKPROBE]: Failed to id2symbs user stack(map_lkup).\n"); +#endif + st->stats.count[STACK_STATS_MAP_LKUP_ERR]++; + return -1; + } + + for (int i = PERF_MAX_STACK_DEPTH - 1; (i >= 0 && index < size); i--) { + if (ip[i] != 0 && IS_IEG_ADDR(ip[i])) { + if (search_user_addr_symb(st, &(stack_id->pid), ip[i], &(usr_stack_symbs[index]))) { +#ifdef GOPHER_DEBUG + ERROR("[STACKPROBE]: Failed to id2symbs user stack(%s[0x%llx]).\n", + stack_id->pid.comm, ip[i]); +#endif + st->stats.count[STACK_STATS_USR_ADDR_ERR]++; + usr_stack_symbs[index].mod = stack_id->pid.comm; + } else { + st->stats.count[STACK_STATS_USR_ADDR]++; + } + index++; + } + } + return 0; +} + 
+#define __CPU_IDLE "do_idle"
+/* Return 1 if this symbolized address is the kernel idle loop ("do_idle"). */
+static char __is_cpu_idle(struct addr_symb_s *addr_symb)
+{
+    if (addr_symb && addr_symb->sym && !strcmp(addr_symb->sym, __CPU_IDLE)) {
+        return 1;
+    }
+    return 0;
+}
+
+/*
+ * Resolve a kernel stack id into symbols (deepest frame first).
+ * Returns 0 on success, 1 when the stack belongs to the cpu idle loop
+ * (caller skips it), -1 on map lookup failure.
+ */
+static int stack_id2symbs_kern(struct satck_trace_s *st, u32 kern_stack_id,
+        struct addr_symb_s kern_stack_symbs[], size_t size)
+{
+    int index = 0;
+    u64 ip[PERF_MAX_STACK_DEPTH] = {0};
+    int fd = get_stack_map_fd(st);
+
+    if (bpf_map_lookup_elem(fd, &kern_stack_id, ip) != 0) {
+#ifdef GOPHER_DEBUG
+        ERROR("[STACKPROBE]: Failed to id2symbs kern stack(stack_id = %u).\n", kern_stack_id);
+#endif
+        st->stats.count[STACK_STATS_MAP_LKUP_ERR]++;
+        return -1;
+    }
+
+    for (int i = PERF_MAX_STACK_DEPTH - 1; (i >= 0 && index < size); i--) {
+        if (ip[i] != 0 && IS_IEG_ADDR(ip[i])) {
+            if (search_kern_addr_symb(st->ksymbs, ip[i], &(kern_stack_symbs[index]))) {
+#ifdef GOPHER_DEBUG
+                ERROR("[STACKPROBE]: Failed to id2symbs kern stack(0x%llx).\n", ip[i]);
+#endif
+                /* BUGFIX: this counter was inside the GOPHER_DEBUG block, so
+                 * release builds never recorded kernel symbolization errors
+                 * (the user-stack path counts them unconditionally). */
+                st->stats.count[STACK_STATS_KERN_ADDR_ERR]++;
+            } else {
+                st->stats.count[STACK_STATS_KERN_ADDR]++;
+            }
+
+            if (__is_cpu_idle(&kern_stack_symbs[index])) {
+                return 1; // ignore cpu idle
+            }
+
+            index++;
+        }
+    }
+    return 0;
+}
+
+/*
+ * Resolve both halves (kernel + user) of one raw stack id into 'stack_symbs'.
+ * Returns 0 on success, 1 to skip (idle stack), -1 on error.
+ */
+static int stack_id2symbs(struct satck_trace_s *st, struct stack_id_s *stack_id, struct stack_symbs_s *stack_symbs)
+{
+    int ret;
+    (void)memcpy(&(stack_symbs->pid), &(stack_id->pid), sizeof(struct stack_pid_s));
+
+    if (stack_id->kern_stack_id >= 0) {
+        ret = stack_id2symbs_kern(st, stack_id->kern_stack_id,
+                                  stack_symbs->kern_stack_symbs, PERF_MAX_STACK_DEPTH);
+        if (ret) {
+            return ret;
+        }
+    }
+
+    if (stack_id->user_stack_id >= 0) {
+        if (stack_id2symbs_user(st, stack_id,
+                                stack_symbs->user_stack_symbs, PERF_MAX_STACK_DEPTH)) {
+            return -1;
+        }
+    }
+
+    if ((stack_id->user_stack_id >= 0) && (stack_id->kern_stack_id >= 0)) {
+        st->stats.count[STACK_STATS_USR_KERN_ADDR]++;
+    }
+
+    return 0;
+}
+
+/* Sum the cached symbol counts across all procs (main + debug symbol tables). */
+static u64 __stack_count_symb(struct satck_trace_s *st)
+{
+    int i;
+    u64 count = 0;
+    struct 
mod_s* mod; + struct proc_cache_s *item, *tmp; + + H_ITER(st->proc_cache, item, tmp) { + if (!item->proc_symbs) { + continue; + } + + for (i = 0; i < item->proc_symbs->mods_count; i++) { + mod = item->proc_symbs->mods[i]; + if (mod && mod->mod_symbs) { + count += (u64)mod->mod_symbs->symbs_count; + } + + if (mod && mod->debug_symbs) { + count += (u64)mod->debug_symbs->symbs_count; + } + } + } + return count; +} + +static int stack_id2histogram(struct satck_trace_s *st) +{ + int ret; + struct stack_id_s *stack_id; + struct stack_symbs_s stack_symbs; + + if (!st->raw_stack_traces) { + return -1; + } + + for (int i = 0; i < st->raw_stack_traces->raw_trace_count; i++) { + stack_id = &(st->raw_stack_traces->raw_traces[i]); + (void)memset(&stack_symbs, 0, sizeof(stack_symbs)); + ret = stack_id2symbs(st, stack_id, &stack_symbs); + if (ret > 0) { + continue; + } + if (ret < 0) { + return -1; + } + st->stats.count[STACK_STATS_ID2SYMBS]++; + (void)add_stack_histo(st, &stack_symbs); + } + + st->stats.count[STACK_STATS_P_CACHE] = H_COUNT(st->proc_cache); + st->stats.count[STACK_STATS_SYMB_CACHE] = __stack_count_symb(st); + return 0; +} + +#endif + +#if 1 +#define __SVG_TMOUT (3 * 60) // 3min +#define __SVG_DEFAULT_DIR "/var/log/gala-gopher/stacktrace" +#define __FLAME_GRAPH_DEFAULT_DIR "/var/log/gala-gopher/flamegraph" +#define __ELF_DEBUG_DIR "/usr/lib/debug" +#define __GOPHER_LOGS_FILE "/var/log/gala-gopher/stacktrace/logs" + +static void init_stack_params(struct stack_param_s *stack_params) +{ + struct flame_graph_param_s *param; + + (void)strncpy(stack_params->logs, __GOPHER_LOGS_FILE, PATH_LEN - 1); + (void)strncpy(stack_params->debug_dir, __ELF_DEBUG_DIR, PATH_LEN - 1); + stack_params->period = __SVG_TMOUT; + + param = &(stack_params->params[STACK_SVG_ONCPU]); + (void)strncpy(param->svg_dir, __SVG_DEFAULT_DIR, PATH_LEN - 1); + (void)strncpy(param->flame_graph, __FLAME_GRAPH_DEFAULT_DIR, PATH_LEN - 1); + + param = &(stack_params->params[STACK_SVG_OFFCPU]); + 
(void)strncpy(param->svg_dir, __SVG_DEFAULT_DIR, PATH_LEN - 1); + (void)strncpy(param->flame_graph, __FLAME_GRAPH_DEFAULT_DIR, PATH_LEN - 1); + + param = &(stack_params->params[STACK_SVG_IO]); + (void)strncpy(param->svg_dir, __SVG_DEFAULT_DIR, PATH_LEN - 1); + (void)strncpy(param->flame_graph, __FLAME_GRAPH_DEFAULT_DIR, PATH_LEN - 1); + + return; +} +#endif + +static char is_tmout(struct satck_trace_s *st) +{ + time_t current = (time_t)time(NULL); + time_t secs; + + if (current > st->running_times) { + secs = current - st->running_times; + if (secs >= TMOUT_PERIOD) { + st->running_times = current; + return 1; + } + } + return 0; +} + +static void process_loss_data(void *ctx, int cpu, u64 cnt) +{ + if (!g_st || !g_st->raw_stack_traces) { + return; + } + g_st->stats.count[STACK_STATS_LOSS] += cnt; +} + +static void process_raw_stack_trace(void *ctx, int cpu, void *data, u32 size) +{ + if (!g_st || !g_st->raw_stack_traces || !data) { + return; + } + + if (add_raw_stack_id(g_st, (struct stack_id_s *)data)) { + g_st->stats.count[STACK_STATS_LOSS]++; + } else { + g_st->stats.count[STACK_STATS_RAW]++; + } + + return; +} + +static void destroy_stack_trace(struct satck_trace_s **ptr_st) +{ + struct satck_trace_s *st = *ptr_st; + + *ptr_st = NULL; + if (!st) { + return; + } + + if (st->obj) { + bpf_object__close(st->obj); + } + + for (int cpu = 0; cpu < st->cpus_num; cpu++) { + if (st->pmu_fd[cpu] > 0) { + ioctl(st->pmu_fd[cpu], PERF_EVENT_IOC_DISABLE); + close(st->pmu_fd[cpu]); + } + } + +#if 0 + if (st->symb_histogram) { + (void)free(st->symb_histogram); + } + + if (st->stack_histogram) { + (void)free(st->stack_histogram); + } +#endif + if (st->ksymbs) { + destroy_ksymbs_tbl(st->ksymbs); + (void)free(st->ksymbs); + } + + if (st->pb_a) { + perf_buffer__free(st->pb_a); + } + if (st->pb_b) { + perf_buffer__free(st->pb_b); + } + if (st->svg_mng) { + destroy_svg_mng(st->svg_mng); + } + destroy_raw_stack_trace(st); + clear_stack_histo(st); + destroy_proc_cache_tbl(st); + + if 
(st->elf_reader) {
        destroy_elf_reader(st->elf_reader);
    }

    deinit_elf_symbs();

    stacktrace_destroy_log_mgr(st);

    (void)free(st);
    return;
}

/*
 * Allocate and zero-initialize the probe's global context.
 * The struct ends with a flexible array (pmu_fd[]) holding one perf-event fd
 * slot per CPU, hence the "+ cpus_num * sizeof(int)" in the allocation size.
 * Returns the context on success, NULL on failure; partially built state is
 * torn down through destroy_stack_trace().
 */
static struct satck_trace_s *create_stack_trace(void)
{
    int cpus_num = NR_CPUS;
    size_t size = sizeof(struct satck_trace_s) + cpus_num * sizeof(int);
    struct satck_trace_s *st = (struct satck_trace_s *)malloc(size);
    if (!st) {
        return NULL;
    }

    (void)memset(st, 0, size);
    st->cpus_num = cpus_num;

    init_stack_params(&(st->stack_params));

#if 0
    if (stacktrace_create_log_mgr(st)) {
        goto err;
    }
#endif

    // Flame-graph SVG manager, rotating output every stack_params.period seconds.
    st->svg_mng = create_svg_mng(st->stack_params.period);
    if (!st->svg_mng) {
        goto err;
    }

    // Reader for separate debuginfo files (see debug_elf_reader.c).
    st->elf_reader = create_elf_reader(st->stack_params.debug_dir);
    if (!st->elf_reader) {
        goto err;
    }

    if (set_svg_dir(st->svg_mng, st->stack_params.params[STACK_SVG_ONCPU].svg_dir, STACK_SVG_ONCPU)) {
        goto err;
    }

    if (set_flame_graph_path(st->svg_mng, st->stack_params.params[STACK_SVG_ONCPU].flame_graph, STACK_SVG_ONCPU)) {
        goto err;
    }

    st->raw_stack_traces = create_raw_stack_trace(st);
    if (!st->raw_stack_traces) {
        goto err;
    }

    // Kernel symbol table from /proc/kallsyms, sorted below for binary search.
    st->ksymbs = create_ksymbs_tbl();
    if (!st->ksymbs) {
        goto err;
    }

    if (load_kern_syms(st->ksymbs)) {
        ERROR("[STACKPROBE]: Failed to load kern symbols.\n");
        goto err;
    }

    (void)sort_kern_syms(st->ksymbs);

    st->running_times = (time_t)time(NULL);

    INFO("[STACKPROBE]: create stack trace succeed(cpus_num = %d, kern_symbols = %u).\n",
        st->cpus_num, st->ksymbs->ksym_size);
    return st;

err:
    destroy_stack_trace(&st);
    return NULL;
}

/*
 * Load the on-CPU eBPF sampling program, resolve its maps, pin the process
 * map, create the two perf buffers (double-buffered A/B channels), and attach
 * the program to a software CPU-clock perf event on every CPU.
 * Returns 0 on success, -1 on failure.
 * NOTE(review): perf fds already opened when a later iteration fails are not
 * closed on the error path — presumably reclaimed at process exit; confirm.
 */
static int load_bpf_prog(struct satck_trace_s *st)
{
    int ret;

    struct perf_event_attr attr_type_sw = {
        .sample_freq = SAMPLE_PERIOD,   // interpreted as a frequency (freq = 1)
        .freq = 1,
        .type = PERF_TYPE_SOFTWARE,
        .config = PERF_COUNT_SW_CPU_CLOCK,
    };

    ret = bpf_prog_load(ON_CPU_PROG, BPF_PROG_TYPE_PERF_EVENT, &(st->obj), &(st->bpf_prog_fd));
    if (ret) {
        ERROR("[STACKPROBE]: Failed to load bpf prog(err = %d).\n", ret);
        goto err;
    }

    INFO("[STACKPROBE]: load bpf prog succeed(%s).\n", ON_CPU_PROG);

    // The A/B map pairs implement the double-buffered stack-trace channels.
    st->convert_map_fd = BPF_GET_MAP_FD(st->obj, "convert_map");
    st->stackmap_a_fd = BPF_GET_MAP_FD(st->obj, "stackmap_a");
    st->stackmap_b_fd = BPF_GET_MAP_FD(st->obj, "stackmap_b");
    st->stackmap_perf_a_fd = BPF_GET_MAP_FD(st->obj, "stackmap_perf_a");
    st->stackmap_perf_b_fd = BPF_GET_MAP_FD(st->obj, "stackmap_perf_b");

    ret = BPF_PIN_MAP_PATH(st->obj, "proc_obj_map", PROC_MAP_PATH);
    if (ret) {
        ERROR("[STACKPROBE]: Failed to pin bpf map(err = %d).\n", ret);
        goto err;
    }

    st->pb_a = create_pref_buffer2(st->stackmap_perf_a_fd, process_raw_stack_trace, process_loss_data);
    if (!st->pb_a) {
        goto err;
    }

    st->pb_b = create_pref_buffer2(st->stackmap_perf_b_fd, process_raw_stack_trace, process_loss_data);
    if (!st->pb_b) {
        goto err;
    }

    // One sampling perf event per CPU (pid = -1, cpu = n), each driving the BPF prog.
    for(int cpu = 0; cpu < st->cpus_num; cpu++) {
        st->pmu_fd[cpu] = perf_event_open(&attr_type_sw, -1, cpu, -1, 0);
        if (st->pmu_fd[cpu] < 0) {
            ERROR("[STACKPROBE]: Failed open perf event.\n");
            goto err;
        }

        ret = ioctl(st->pmu_fd[cpu], PERF_EVENT_IOC_ENABLE, 0);
        if (ret) {
            ERROR("[STACKPROBE]: Failed to PERF_EVENT_IOC_ENABLE(err = %d).\n", ret);
            goto err;
        }

        ret = ioctl(st->pmu_fd[cpu], PERF_EVENT_IOC_SET_BPF, st->bpf_prog_fd);
        if (ret) {
            ERROR("[STACKPROBE]: Failed to PERF_EVENT_IOC_SET_BPF(err = %d).\n", ret);
            goto err;
        }

        INFO("[STACKPROBE]: perf open and attach bpf succeed(cpu = %d).\n", cpu);
    }

    return 0;

err:
    return -1;
}

// Publish the channel-selection counter to the BPF side (key 0 of convert_map).
static void update_convert_counter(struct satck_trace_s *st)
{
    u32 key = 0;
    (void)bpf_map_update_elem(st->convert_map_fd, &key, &(st->convert_stack_count), BPF_ANY);
}

/*
 * Drain every stack id from a BPF stack map.
 * NOTE(review): the loop deletes next_id and then iterates from the deleted
 * key; bpf_map_get_next_key() restarts from the first key when given a
 * missing key, so this still terminates with an empty map — confirm intended.
 */
static void clear_stackmap(int stackmap_fd)
{
    u32 stackid = 0, next_id;
    while (bpf_map_get_next_key(stackmap_fd, &stackid, &next_id) == 0) {
        bpf_map_delete_elem(stackmap_fd, &next_id);
        stackid = next_id;
    }
}

static void clear_running_ctx(struct 
satck_trace_s *st) +{ + u64 pcache_crt, pcache_del; + clear_raw_stack_trace(st); + clear_stackmap(get_stack_map_fd(st)); + clear_stack_histo(st); + + pcache_del = st->stats.count[STACK_STATS_PCACHE_DEL]; + pcache_crt = st->stats.count[STACK_STATS_PCACHE_CRT]; + (void)memset(&(st->stats), 0, sizeof(st->stats)); + st->stats.count[STACK_STATS_PCACHE_DEL] = pcache_del; + st->stats.count[STACK_STATS_PCACHE_CRT] = pcache_crt; +} + +static void record_running_ctx(struct satck_trace_s *st) +{ +#if 1 //GOPHER_DEBUG + int i, len, ret; + char *pos; + char buf[LINE_BUF_LEN]; + + const char *col[STACK_STATS_MAX] = {"RAW", "LOSS", "HISTO_ERR", "HISTO_FOLD", "ID2SYMBS", + "PCACHE_DEL", "PCACHE_CRT", "KERN_ERR", "USER_ERR", "MAP_LKUP_ERR", + "KERN_OK", "USER_OK", "KERN_USER", "P_CACHE", "SYMB_CACHE"}; + const int offset[STACK_STATS_MAX] = {-8, -8, -10, -12, -10, -12, -12, -10, -10, -14, -9, -9, -11, -9, 12}; + + printf("\n========================================================================================\n"); + + buf[0] = 0; + pos = buf; + len = LINE_BUF_LEN; + for (i = 0; i < STACK_STATS_MAX - 1; i++) { + ret = snprintf(pos, len, "%*s", offset[i], col[i]); + len -= ret; + pos += ret; + } + (void)snprintf(pos, len, "%*s\n", offset[i], col[i]); + + printf(buf); + + buf[0] = 0; + pos = buf; + len = LINE_BUF_LEN; + for (i = 0; i < STACK_STATS_MAX - 1; i++) { + ret = snprintf(pos, len, "%*llu", offset[i], st->stats.count[i]); + len -= ret; + pos += ret; + } + (void)snprintf(pos, len, "%*llu\n", offset[i], st->stats.count[i]); + printf(buf); +#endif + return; +} + +static void running(struct satck_trace_s *st) +{ + int ret; + + // Obtains the data channel used to read stack-trace data. + st->is_stackmap_a = ((st->convert_stack_count % 2) == 0); + struct perf_buffer *pb = get_pb(st); + + // Read raw stack-trace data from current data channel. 
+ while ((ret = perf_buffer__poll(pb, 0)) >= 0) { + if (is_tmout(st)) { + break; + } + if (g_stop) { + break; + } + sleep(1); + } + + // Notify BPF to switch to another channel + st->convert_stack_count++; + update_convert_counter(st); + + (void)stack_id2histogram(st); + // Histogram format to flame graph + wr_flamegraph(st->svg_mng, st->oncpu_histo_tbl, STACK_SVG_ONCPU); + + record_running_ctx(st); + + // Clear the context information of the running environment. + clear_running_ctx(st); +} + +#ifdef EBPF_RLIM_LIMITED +#undef EBPF_RLIM_LIMITED +#endif +#define EBPF_RLIM_LIMITED 500*1024*1024 // 500M +int main(int argc, char **argv) +{ + int err = -1; + + if (signal(SIGINT, sig_int) == SIG_ERR) { + fprintf(stderr, "can't set signal handler: %d\n", errno); + return errno; + } + + err = args_parse(argc, argv, ¶ms); + if (err != 0) { + return -1; + } + + INIT_BPF_APP(stackprobe, EBPF_RLIM_LIMITED); + + g_st = create_stack_trace(); + if (!g_st) { + return -1; + } + + if (load_bpf_prog(g_st)) { + goto err; + } + + // Initializing the BPF Data Channel + update_convert_counter(g_st); + + INFO("[STACKPROBE]: Started successfully.\n"); + + while (!g_stop) { + running(g_st); + sleep(1); + } + +err: + destroy_stack_trace(&g_st); + return -err; +} diff --git a/src/probes/extends/ebpf.probe/src/stackprobe/stackprobe.h b/src/probes/extends/ebpf.probe/src/stackprobe/stackprobe.h new file mode 100644 index 0000000000000000000000000000000000000000..06ec600ed52a11c8f619122773af4e4b971592ba --- /dev/null +++ b/src/probes/extends/ebpf.probe/src/stackprobe/stackprobe.h @@ -0,0 +1,126 @@ +/****************************************************************************** + * Copyright (c) Huawei Technologies Co., Ltd. 2022. All rights reserved. + * gala-gopher licensed under the Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. 
 * You may obtain a copy of Mulan PSL v2 at:
 *     http://license.coscl.org.cn/MulanPSL2
 * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR FIT FOR A PARTICULAR
 * PURPOSE.
 * See the Mulan PSL v2 for more details.
 * Author: Mr.lu
 * Create: 2022-08-18
 * Description: stack probe defined
 ******************************************************************************/
#ifndef __GOPHER_STACK_PROBE_H__
#define __GOPHER_STACK_PROBE_H__

#pragma once

#include "hash.h"
#include "symbol.h"
#include "svg.h"
#include "stack.h"

// Fully symbolized user + kernel stack for one sample.
struct stack_symbs_s {
    struct addr_symb_s user_stack_symbs[PERF_MAX_STACK_DEPTH];
    struct addr_symb_s kern_stack_symbs[PERF_MAX_STACK_DEPTH];
    struct stack_pid_s pid;
};

// Batch of raw (not yet symbolized) stack ids read from the perf channel.
struct raw_stack_trace_s {
    u32 stack_size;       // capacity of raw_traces[]
    u32 raw_trace_count;  // entries currently used
    struct stack_id_s raw_traces[];  // flexible array, sized at allocation
};

#define __FUNC_NAME_LEN 64
#define STACK_SYMBS_LEN (2 * (PERF_MAX_STACK_DEPTH * __FUNC_NAME_LEN)) // KERN + USER
// One folded-stack string and its sample count (flame-graph histogram entry).
struct stack_trace_histo_s {
    H_HANDLE;
    char stack_symbs_str[STACK_SYMBS_LEN];  // hash key
    u64 count;
};

// Per-process symbol-table cache entry, keyed by stack_pid_s.
struct proc_cache_s {
    H_HANDLE;
    struct stack_pid_s k;
    struct proc_symbs_s *proc_symbs;
};

// Indices into stack_stats_s.count[]; names mirror the columns printed by
// record_running_ctx() in stackprobe.c.
enum stack_stats_e {
    STACK_STATS_RAW = 0,
    STACK_STATS_LOSS = 1,
    STACK_STATS_HISTO_ERR,
    STACK_STATS_HISTO_FOLDED,
    STACK_STATS_ID2SYMBS,
    STACK_STATS_PCACHE_DEL,
    STACK_STATS_PCACHE_CRT,
    STACK_STATS_KERN_ADDR_ERR,
    STACK_STATS_USR_ADDR_ERR,
    STACK_STATS_MAP_LKUP_ERR,
    STACK_STATS_KERN_ADDR,
    STACK_STATS_USR_ADDR,
    STACK_STATS_USR_KERN_ADDR,
    STACK_STATS_P_CACHE,
    STACK_STATS_SYMB_CACHE,

    STACK_STATS_MAX
};

struct stack_stats_s {
    u64 count[STACK_STATS_MAX];
};

// Output locations for one flame-graph type.
struct flame_graph_param_s {
    char svg_dir[PATH_LEN];
    char flame_graph[PATH_LEN];
};

struct stack_param_s {
    u32 period;               // SVG rotation period, seconds
    char logs[PATH_LEN];
    char debug_dir[PATH_LEN]; // root of separate debuginfo files
    struct flame_graph_param_s params[STACK_SVG_MAX];
};

// Global probe context.
// NOTE(review): "satck" is a typo for "stack"; kept as-is because the name is
// referenced throughout the probe's sources.
struct satck_trace_s {
    char is_stackmap_a;  // non-zero while the A channel is the active one
    char pad[3];
    int cpus_num;
    int bpf_prog_fd;
    int convert_map_fd;
    int stackmap_a_fd;
    int stackmap_b_fd;
    int stackmap_perf_a_fd;
    int stackmap_perf_b_fd;
    time_t running_times;  // probe start timestamp

    u64 convert_stack_count;  // parity selects the A/B channel

    struct perf_buffer* pb_a;
    struct perf_buffer* pb_b;
    struct bpf_object *obj;

    struct raw_stack_trace_s *raw_stack_traces;
    struct stack_trace_histo_s *oncpu_histo_tbl;

    struct ksymb_tbl_s *ksymbs;
    struct proc_cache_s *proc_cache;
    u32 proc_cache_mirro_count;
    struct proc_cache_s *proc_cache_mirro[PROC_CACHE_MAX_COUNT]; // No release is required.

    struct stack_svg_mng_s *svg_mng;

    struct elf_reader_s *elf_reader;

    struct stack_stats_s stats;

    void* log_mgr;

    struct stack_param_s stack_params;

    int pmu_fd[]; // It must be put to the last.
};

#endif
diff --git a/src/probes/extends/ebpf.probe/src/stackprobe/svg.c b/src/probes/extends/ebpf.probe/src/stackprobe/svg.c
new file mode 100644
index 0000000000000000000000000000000000000000..b77bcc0fb58e7b6a7eede61b930d8086ee150bf4
--- /dev/null
+++ b/src/probes/extends/ebpf.probe/src/stackprobe/svg.c
@@ -0,0 +1,299 @@
/******************************************************************************
 * Copyright (c) Huawei Technologies Co., Ltd. 2022. All rights reserved.
 * gala-gopher licensed under the Mulan PSL v2.
 * You can use this software according to the terms and conditions of the Mulan PSL v2.
 * You may obtain a copy of Mulan PSL v2 at:
 *     http://license.coscl.org.cn/MulanPSL2
 * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR FIT FOR A PARTICULAR
 * PURPOSE.
 * See the Mulan PSL v2 for more details. 
+ * Author: luzhihao + * Create: 2022-08-22 + * Description: svg prog + ******************************************************************************/ +#define _GNU_SOURCE +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include + +#ifdef BPF_PROG_KERN +#undef BPF_PROG_KERN +#endif + +#ifdef BPF_PROG_USER +#undef BPF_PROG_USER +#endif + +#include "bpf.h" +#include "stack.h" +#include "svg.h" + +#define __COMMAND_LEN (2 * COMMAND_LEN) +#define FAMEGRAPH_BIN "/usr/bin/flamegraph.pl" +#define ONCPU_SVG_COMMAND "%s --title=\" %s \" %s %s > %s" + +struct svg_param_s { + char *file_name; + char *params; + char *titile; +}; + +static struct svg_param_s svg_params[STACK_SVG_MAX] = + {{"oncpu", "--countname=us", "On-CPU Time Flame Graph"}, + {"offcpu", "--countname=us", "Off-CPU Time Flame Graph"}, + {"io", "--colors=io --countname=us", "IO Time Flame Graph"}}; + +#if 1 +static void __rm_svg(const char *svg_file) +{ + FILE *fp; + char commad[__COMMAND_LEN]; + const char *fmt = "rm -rf %s"; + + if (access(svg_file, 0) != 0) { + return; + } + + commad[0] = 0; + (void)snprintf(commad, __COMMAND_LEN, fmt, svg_file); + fp = popen(commad, "r"); + if (fp != NULL) { + (void)pclose(fp); + fp = NULL; + INFO("[SVG]: Delete svg file(%s)\n", svg_file); + } +} + +static int __new_svg(const char *flame_graph, const char *svg_file, enum stack_svg_type_e en_type) +{ + const char *flamegraph_bin = FAMEGRAPH_BIN; + char commad[__COMMAND_LEN]; + FILE *fp; + + if (access(flamegraph_bin, 0) != 0) { + ERROR("[SVG]: Please install flame graph rpm.\n"); + return -1; + } + + if (access(flame_graph, 0) != 0) { + ERROR("[SVG]: %s is not exist.\n", flame_graph); + return -1; + } + + commad[0] = 0; + (void)snprintf(commad, __COMMAND_LEN, ONCPU_SVG_COMMAND, + flamegraph_bin, svg_params[en_type].titile, + svg_params[en_type].params, flame_graph, svg_file); + + fp = popen(commad, "r"); + if (fp != NULL) { + (void)pclose(fp); + fp = NULL; + 
INFO("[SVG]: Create svg file(%s)\n", svg_file);
        return 0;
    }

    return -1;
}

// Close the folded-stack file handle and free the directory string.
static void __destroy_flamegraph(struct stack_flamegraph_s *flame_graph)
{
    if (flame_graph->fp) {
        (void)fclose(flame_graph->fp);
    }
    flame_graph->fp = NULL;
    if (flame_graph->flame_graph_dir) {
        (void)free(flame_graph->flame_graph_dir);
    }
    flame_graph->flame_graph_dir = NULL;
    return;
}

/*
 * Free every stored SVG file name and the pointer array itself.
 * NOTE(review): capacity is not reset here; callers must not iterate the
 * slots again before __create_svg_files() re-populates them — confirm.
 */
static void __destroy_svg_files(struct stack_svg_s *svg_files)
{
    char *file;
    for (int i = 0; i < svg_files->capacity; i++) {
        file = svg_files->files[i];
        if (file) {
            (void)free(file);
        }
    }
    if (svg_files->files) {
        (void)free(svg_files->files);
        svg_files->files = NULL;
    }
    return;
}

/*
 * Allocate the ring of SVG file-name slots: one slot per period over a week
 * of retention (WEEKS_TIME / period entries). Returns 0 on success.
 */
static int __create_svg_files(struct stack_svg_s* svg_files, u32 period)
{
    size_t svg_capacity;
    char **files;

    svg_capacity = (size_t)DIV_ROUND_UP(WEEKS_TIME, period);
    files = (char **)malloc(svg_capacity * sizeof(char *));
    if (!files) {
        return -1;
    }
    (void)memset(files, 0, svg_capacity * sizeof(char *));
    svg_files->capacity = svg_capacity;
    svg_files->files = files;
    svg_files->next = 0;
    return 0;
}

/*
 * Produce the path for the next SVG to generate: reclaims (deletes + frees)
 * the name previously stored in the next ring slot, builds a timestamped
 * file name under svg_dir, records it, and advances the ring cursor.
 * Returns 0 on success, -1 if the ring was never allocated.
 * NOTE(review): the strdup() result is stored unchecked — a NULL on OOM is
 * tolerated by the reclaim path above, but confirm that is intended.
 */
static int stack_get_next_svg_file(struct stack_svgs_s* svgs, enum stack_svg_type_e en_type, char svg_file[], size_t size)
{
    int next;
    char svg_name[PATH_LEN];

    if (svgs->svg_files.files == NULL) {
        return -1;
    }

    if (svgs->svg_files.capacity == 0) {
        return -1;
    }

    next = svgs->svg_files.next;
    if (svgs->svg_files.files[next] != NULL) {
        __rm_svg(svgs->svg_files.files[next]);
        (void)free(svgs->svg_files.files[next]);
        svgs->svg_files.files[next] = NULL;
    }

    svg_name[0] = 0;
    (void)snprintf(svg_name, PATH_LEN, "%s_%s.svg", svg_params[en_type].file_name, get_cur_time());

    svg_file[0] = 0;
    (void)snprintf(svg_file, size, "%s/%s", svgs->svg_dir, svg_name);
    __rm_svg(svg_file);

    svgs->svg_files.files[next] = strdup(svg_file);
    next = (next + 1) % svgs->svg_files.capacity;
    svgs->svg_files.next = next;
    return 0;
}
#endif

/*
 * Return 1 (and refresh last_create_time) once per configured period for the
 * given flame-graph type; 0 otherwise. Drives the SVG rotation cadence.
 */
char is_svg_tmout(struct stack_svg_mng_s* svg_mng, enum stack_svg_type_e en_type)
{
    struct stack_svgs_s *svgs = &(svg_mng->svgs[en_type]);
    time_t current = (time_t)time(NULL);
    time_t secs;

    if (current > svgs->last_create_time) {
        secs = current - svgs->last_create_time;
        if (secs >= svgs->period) {
            svgs->last_create_time = current;
            return 1;
        }
    }
    return 0;
}

// Pick the next output path for en_type and render flame_graph into it.
int create_svg_file(struct stack_svg_mng_s* svg_mng, enum stack_svg_type_e en_type, const char *flame_graph)
{
    char svg_file[PATH_LEN];
    struct stack_svgs_s* svgs;

    svgs = &(svg_mng->svgs[en_type]);

    if (stack_get_next_svg_file(svgs, en_type, svg_file, PATH_LEN)) {
        return -1;
    }

    return __new_svg(flame_graph, (const char *)svg_file, en_type);
}

/*
 * Allocate the SVG manager and initialize every flame-graph type with the
 * same default rotation period.
 * NOTE(review): a failure of __create_svg_files() is ignored here; the ring
 * is then NULL and stack_get_next_svg_file() rejects it later — confirm.
 */
struct stack_svg_mng_s* create_svg_mng(u32 default_period)
{
    struct stack_svgs_s *svgs;
    enum stack_svg_type_e en_type = STACK_SVG_ONCPU;
    struct stack_svg_mng_s* svg_mng = malloc(sizeof(struct stack_svg_mng_s));
    if (!svg_mng) {
        return NULL;
    }

    (void)memset(svg_mng, 0, sizeof(struct stack_svg_mng_s));
    for (; en_type < STACK_SVG_MAX; en_type++) {
        svgs = &(svg_mng->svgs[en_type]);
        svgs->last_create_time = (time_t)time(NULL);
        svgs->period = default_period;
        (void)__create_svg_files(&(svgs->svg_files), default_period);
    }
    return svg_mng;
}

// Tear down every per-type ring and flame-graph handle, then the manager.
void destroy_svg_mng(struct stack_svg_mng_s* svg_mng)
{
    struct stack_svgs_s *svgs;
    struct stack_flamegraph_s *flame_graph;
    enum stack_svg_type_e en_type = STACK_SVG_ONCPU;

    if (!svg_mng) {
        return;
    }

    for (; en_type < STACK_SVG_MAX; en_type++) {
        svgs = &(svg_mng->svgs[en_type]);
        __destroy_svg_files(&(svgs->svg_files));

        flame_graph = &(svg_mng->flame_graphs[en_type]);
        __destroy_flamegraph(flame_graph);
    }
    (void)free(svg_mng);
    return;
}

/*
 * Record the output directory for one flame-graph type, dropping any
 * trailing '/'. Returns 0 on success, -1 on NULL manager or bad length.
 */
int set_svg_dir(struct stack_svg_mng_s* svg_mng, const char *dir, enum stack_svg_type_e en_type)
{
    size_t len;
    struct stack_svgs_s *svgs;

    if (!svg_mng) {
        return -1;
    }

    len = 
strlen(dir); + if (len <= 1 || len >= PATH_LEN) { + return -1; + } + + svgs = &(svg_mng->svgs[en_type]); + if (dir[len - 1] == '/') { + (void)strncpy(svgs->svg_dir, dir, len - 1); + } else { + (void)strncpy(svgs->svg_dir, dir, len); + } + return 0; +} + +int set_svg_period(struct stack_svg_mng_s* svg_mng, u32 period, enum stack_svg_type_e en_type) +{ + struct stack_svgs_s *svgs; + + if (!svg_mng) { + return -1; + } + svgs = &(svg_mng->svgs[en_type]); + + __destroy_svg_files(&svgs->svg_files); + if (__create_svg_files(&svgs->svg_files, period)) { + return -1; + } + svgs->period = period; + + return 0; +} + diff --git a/src/probes/extends/ebpf.probe/src/stackprobe/svg.h b/src/probes/extends/ebpf.probe/src/stackprobe/svg.h new file mode 100644 index 0000000000000000000000000000000000000000..fd104de6652c14674bad40bb0543238d7e68cca5 --- /dev/null +++ b/src/probes/extends/ebpf.probe/src/stackprobe/svg.h @@ -0,0 +1,67 @@ +/****************************************************************************** + * Copyright (c) Huawei Technologies Co., Ltd. 2022. All rights reserved. + * gala-gopher licensed under the Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * http://license.coscl.org.cn/MulanPSL2 + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR FIT FOR A PARTICULAR + * PURPOSE. + * See the Mulan PSL v2 for more details. 
 * Author: Mr.lu
 * Create: 2022-08-18
 * Description: svg defined
 ******************************************************************************/
#ifndef __GOPHER_SVG_H__
#define __GOPHER_SVG_H__

#pragma once

/* NOTE(review): the angle-bracket header name below was lost in extraction
 * (FILE/time_t need <stdio.h>/<time.h>) — restore before building. */
#include
#include "stack.h"

#define DAYS_TIME (24 * 60 *60) // 1 DAY
#define WEEKS_TIME (DAYS_TIME * 7) // 1 WEEK

// Flame-graph flavors the probe can produce.
enum stack_svg_type_e {
    STACK_SVG_ONCPU = 0,
    STACK_SVG_OFFCPU = 1,
    STACK_SVG_IO,

    STACK_SVG_MAX
};

// Ring of generated SVG file names; the oldest entry is removed when reused.
struct stack_svg_s {
    int next;         // next ring slot to (re)use
    size_t capacity;  // WEEKS_TIME / period slots (see __create_svg_files)
    char **files;     // heap strings, NULL when slot is empty
};

// Per-type SVG rotation state.
struct stack_svgs_s {
    u32 period; // unit is second
    char svg_dir[PATH_LEN];
    time_t last_create_time;  // drives is_svg_tmout()
    struct stack_svg_s svg_files;
};

#define FLAME_GRAPH_NEW 0x00000001
// Handle to the folded-stack intermediate file fed to flamegraph.pl.
struct stack_flamegraph_s {
    u32 flags;
    FILE *fp;
    char flame_graph_file[PATH_LEN];
    char *flame_graph_dir;
};

struct stack_svg_mng_s {
    struct stack_svgs_s svgs[STACK_SVG_MAX];
    struct stack_flamegraph_s flame_graphs[STACK_SVG_MAX];
};

struct stack_svg_mng_s* create_svg_mng(u32 default_period);
void destroy_svg_mng(struct stack_svg_mng_s* svg_mng);
int set_svg_dir(struct stack_svg_mng_s* svg_mng, const char *dir, enum stack_svg_type_e en_type);
int set_svg_period(struct stack_svg_mng_s* svg_mng, u32 period, enum stack_svg_type_e en_type);
int create_svg_file(struct stack_svg_mng_s* svg_mng, enum stack_svg_type_e en_type, const char *flame_graph);
char is_svg_tmout(struct stack_svg_mng_s* svg_mng, enum stack_svg_type_e en_type);

#endif
diff --git a/src/probes/extends/ebpf.probe/src/stackprobe/symbol.c b/src/probes/extends/ebpf.probe/src/stackprobe/symbol.c
new file mode 100644
index 0000000000000000000000000000000000000000..ae631add3c459ed189f6ec3547b299f938682d64
--- /dev/null
+++ b/src/probes/extends/ebpf.probe/src/stackprobe/symbol.c
@@ -0,0 +1,1175 @@
/******************************************************************************
 * Copyright (c) Huawei Technologies Co., Ltd. 2022. All rights reserved. 
 * gala-gopher licensed under the Mulan PSL v2.
 * You can use this software according to the terms and conditions of the Mulan PSL v2.
 * You may obtain a copy of Mulan PSL v2 at:
 *     http://license.coscl.org.cn/MulanPSL2
 * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR FIT FOR A PARTICULAR
 * PURPOSE.
 * See the Mulan PSL v2 for more details.
 * Author: luzhihao
 * Create: 2022-08-22
 * Description: symbol module
 ******************************************************************************/
/* NOTE(review): the angle-bracket header names below were lost in extraction —
 * restore before building. */
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include

#include "common.h"
#include "gopher_elf.h"
#include "debug_elf_reader.h"
#include "elf_symb.h"
#include "symbol.h"

/* Shorthand aliases so the legacy (#if 0) symbol-array code can say "symbs"
 * while the data actually lives behind mod_symbs. */
#ifdef symbs
#undef symbs
#endif
#define symbs mod_symbs->__symbs

#ifdef symbs_count
#undef symbs_count
#endif
#define symbs_count mod_symbs->symbs_count

#ifdef symbs_capability
#undef symbs_capability
#endif
#define symbs_capability mod_symbs->symbs_capability

// Negative error codes returned by the module-loading helpers below.
enum symbol_err_e {
    GET_MOD_NAME = -2,
    GET_MOD_TYPE = -3,
    GET_MOD_PATH = -4,
    ADD_MOD_RANGE = -5,
    FMT_MAP = -6,
    LOAD_SYMBS = -7,
    SORT_SYMBS = -8,
    GET_ELF_OFFSET = -9,
    ADD_MOD = -10,
};
#define __UNKNOW_NAME "Unknow"
static char __kern_unknow_symb[] = "[kernel]";
/* Everything in this #if 0 section is disabled debug pretty-printing.
 * NOTE(review): the INFO(buf) calls below pass assembled data as the format
 * string — if this section is ever re-enabled, change them to INFO("%s", buf). */
#if 0 // def GOPHER_DEBUG

#define __RANGE_COL_NUM 3
static void __print_range_header(void)
{
    int i, len, ret;
    char *pos;
    char buf[LINE_BUF_LEN];

    const char *col[__RANGE_COL_NUM] = {"START", "END", "OFFSET"};
    const int offset[__RANGE_COL_NUM] = {-24, -22, 24};

    buf[0] = 0;
    pos = buf;
    len = LINE_BUF_LEN;
    for (i = 0; i < __RANGE_COL_NUM - 1; i++) {
        ret = snprintf(pos, len, "%*s", offset[i], col[i]);
        len -= ret;
        pos += ret;
    }
    (void)snprintf(pos, len, "%*s\n", offset[i], col[i]);
    INFO(buf);
}

static void __print_range(struct mod_addr_rage_s *range)
{
    int i, len, ret;
    char *pos;
    char buf[LINE_BUF_LEN];

    const int offset[__RANGE_COL_NUM] = {-24, -22, 24};

    i = 0;
    buf[0] = 0;
    pos = buf;
    len = LINE_BUF_LEN;

    ret = snprintf(pos, len, "0x%*llx", offset[i++], range->start);
    len -= ret;
    pos += ret;

    ret = snprintf(pos, len, "0x%*llx", offset[i++], range->end);
    len -= ret;
    pos += ret;

    (void)snprintf(pos, len, "%*llx\n", offset[i++], range->f_offset);
    INFO(buf);
}

#define __SYMB_COL_NUM 3
static void __print_symbs_header(void)
{
    int i, len, ret;
    char *pos;
    char buf[LINE_BUF_LEN];

    const char *col[__SYMB_COL_NUM] = {"SYMB_NAME", "START", "SIZE"};
    const int offset[__SYMB_COL_NUM] = {-64, -12, 12};

    buf[0] = 0;
    pos = buf;
    len = LINE_BUF_LEN;
    for (i = 0; i < __SYMB_COL_NUM - 1; i++) {
        ret = snprintf(pos, len, "%*s", offset[i], col[i]);
        len -= ret;
        pos += ret;
    }
    (void)snprintf(pos, len, "%*s\n", offset[i], col[i]);
    INFO(buf);
}

static void __print_symbs(struct symb_s *symb)
{
    int i, len, ret;
    char *pos;
    char buf[LINE_BUF_LEN];

    const int offset[__SYMB_COL_NUM] = {-64, -12, 12};

    i = 0;
    buf[0] = 0;
    pos = buf;
    len = LINE_BUF_LEN;

    // GNU "elvis" operator: fall back to the placeholder name when NULL.
    ret = snprintf(pos, len, "%*s", offset[i++], (symb->symb_name ? : __UNKNOW_NAME));
    len -= ret;
    pos += ret;

    ret = snprintf(pos, len, "%*llx", offset[i++], symb->start);
    len -= ret;
    pos += ret;

    (void)snprintf(pos, len, "%*llx\n", offset[i++], symb->size);
    INFO(buf);
}

#define __MOD_COL_NUM 7
static void __print_mods(struct mod_s *mod)
{
    int i, len, ret;
    char *pos;
    char buf[LINE_BUF_LEN];

    const char *col[__MOD_COL_NUM] = {"MOD_NAME", "TYPE", "PATH", "ELF", "ELF_OFFSET",
        "RANGE", "SYMBOS"};
    const int offset[__MOD_COL_NUM] = {-32, -12, -32, -12, -12, -12, 12};

    buf[0] = 0;
    pos = buf;
    len = LINE_BUF_LEN;
    for (i = 0; i < __MOD_COL_NUM - 1; i++) {
        ret = snprintf(pos, len, "%*s", offset[i], col[i]);
        len -= ret;
        pos += ret;
    }
    (void)snprintf(pos, len, "%*s\n", offset[i], col[i]);
    INFO(buf);

    i = 0;
    buf[0] = 0;
    pos = buf;
    len = LINE_BUF_LEN;

    ret = snprintf(pos, len, "%*s", offset[i++], (mod->mod_name ? : __UNKNOW_NAME));
    len -= ret;
    pos += ret;

    ret = snprintf(pos, len, "%*u", offset[i++], mod->mod_type);
    len -= ret;
    pos += ret;

    ret = snprintf(pos, len, "%*s", offset[i++], (mod->mod_path ? : __UNKNOW_NAME));
    len -= ret;
    pos += ret;

    ret = snprintf(pos, len, "%*llx", offset[i++], mod->mod_elf_so_addr);
    len -= ret;
    pos += ret;

    ret = snprintf(pos, len, "%*llx", offset[i++], mod->mod_elf_so_offset);
    len -= ret;
    pos += ret;

    ret = snprintf(pos, len, "%*u", offset[i++], mod->addr_ranges_count);
    len -= ret;
    pos += ret;

    (void)snprintf(pos, len, "%*u\n", offset[i++], mod->symbs_count);
    INFO(buf);

#if 0
    if (mod->symbs_count > 0) {
        __print_symbs_header();
    }
    for (i = 0; i < mod->symbs_count; i++) {
        if (mod->symbs[i]) {
            __print_symbs(mod->symbs[i]);
        }
    }
#endif
    if (mod->addr_ranges_count > 0) {
        __print_range_header();
    }
    for (i = 0; i < mod->addr_ranges_count; i++) {
        __print_range(&mod->addr_ranges[i]);
    }
}

static void __print_proc_ranges(struct proc_symbs_s* proc_symbs)
{
    struct mod_s *mod;
    __print_range_header();

    for (int i = 0; i < proc_symbs->mods_count; i++) {
        mod = proc_symbs->mods[i];
        if (mod) {
            for (int j = 0; j < mod->addr_ranges_count; j++) {
                __print_range(&mod->addr_ranges[j]);
            }
        }
    }
}

static void __print_mod_symbs(struct mod_s *mod)
{
    __print_symbs_header();

    for (int i = 0; i < mod->symbs_count; i++) {
        if (mod->symbs[i]) {
            __print_symbs(mod->symbs[i]);
        }
    }
}

static void __print_proc(struct proc_symbs_s* proc_symbs)
{
    INFO("[SYMBOL]: loaded proc symbos [%s, %d], %u mods\n",
        proc_symbs->comm, proc_symbs->proc_id, proc_symbs->mods_count);
    for (int i = 0; i < proc_symbs->mods_count; i++) {
        if (proc_symbs->mods[i]) {
            __print_mods(proc_symbs->mods[i]);
        }
    }
}
#endif

#if 1
// /proc/kallsyms parsing limits and validity checks.
#define KSYMB_NAME_LEN 64
#define KSYMB_MOD_LEN 64
#define KSYMB_MAX 1000000
#define ADDR_ERR(addr) (((addr) == 0) || ((addr) == 0xFFFFFFFFFFFFFFFF) \
    || ((addr) < KERN_ADDR_SPACE))

#define KSYMB_ERR(symb) IS_KERN_DATA_SYMBOL(symb)

// Release the heap strings of a single kallsyms entry.
static void destroy_ksymbs(struct ksymb_s *ksym)
{
    if (ksym) {
        if (ksym->kmod) {
            (void)free(ksym->kmod);
ksym->kmod = NULL; + } + if (ksym->sym) { + (void)free(ksym->sym); + ksym->sym = NULL; + } + ksym->addr = 0; + } +} + +static int resolve_ksymbs(const char *s, struct ksymb_s* ksymb) +{ + char *p, *p1, *p2; + char symb_type; + char symb[KSYMB_NAME_LEN]; + char kmod[KSYMB_MOD_LEN]; + size_t name_len = 0, mod_len = 0; + + ksymb->addr = strtoull(s, &p, 16); + if (ADDR_ERR(ksymb->addr)) { + goto err; + } + + p++; + symb_type = *p; + if (KSYMB_ERR(symb_type)) { + goto err; + } + + p += 2; // point to kern symbol name + while (*p != ' ' && *p != '\n' && name_len < KSYMB_NAME_LEN) { + symb[name_len++] = *p; + p++; + } + symb[name_len] = 0; + + p1 = strchr(p, '['); + p2 = strchr(p, ']'); + if (p1 && p2) { + p = p1 + 1; + while (*p != ' ' && *p != '\n' && p != p2 && mod_len < KSYMB_MOD_LEN) { + kmod[mod_len++] = *p; + p++; + } + kmod[mod_len] = 0; + } + + if (name_len == 0) { + goto err; + } + + ksymb->sym = (char *)malloc(name_len + 1); + if (!ksymb->sym) { + goto err; + } + + (void)memcpy(ksymb->sym, symb, name_len + 1); + + if (mod_len > 0) { + ksymb->kmod = (char *)malloc(mod_len + 1); + if (!ksymb->kmod) { + goto err; + } + (void)memcpy(ksymb->kmod, kmod, mod_len + 1); + } + return 0; + +err: + destroy_ksymbs(ksymb); + return -1; +} + +void destroy_ksymbs_tbl(struct ksymb_tbl_s *ksym_tbl) +{ + if (!ksym_tbl) { + return; + } + + for(int i = 0; i < ksym_tbl->ksym_size; i++) { + destroy_ksymbs(&(ksym_tbl->ksyms[i])); + } +} + +struct ksymb_tbl_s* create_ksymbs_tbl(void) +{ + size_t size = KSYMB_MAX * sizeof(struct ksymb_s) + sizeof(struct ksymb_tbl_s); + struct ksymb_tbl_s *tbl = (struct ksymb_tbl_s *)malloc(size); + if (!tbl) { + return NULL; + } + (void)memset(tbl, 0, size); + tbl->ksym_size = 0; + return tbl; +} + +int search_kern_addr_symb(struct ksymb_tbl_s *ksymbs, u64 addr, struct addr_symb_s *addr_symb) +{ + int start, end; + int result; + size_t mid; + + if (!ksymbs) { + return -1; + } + + // init data + addr_symb->orign_addr = addr; + addr_symb->sym = NULL; + 
addr_symb->mod = __kern_unknow_symb; + addr_symb->offset = 0; + + start = 0; + end = ksymbs->ksym_size; + + while (start < end) { + mid = start + (end - start) / 2; + + result = addr - ksymbs->ksyms[mid].addr; + if (result < 0) { + end = mid; + } else if (result > 0) { + start = mid + 1; + } else { + addr_symb->sym = ksymbs->ksyms[mid].sym; + addr_symb->mod = ksymbs->ksyms[mid].kmod; + addr_symb->offset = 0; + return 0; + } + } + + if (start >= 1) { + if (ksymbs->ksyms[start - 1].addr < addr) { + addr_symb->sym = ksymbs->ksyms[start - 1].sym; + addr_symb->mod = ksymbs->ksyms[start - 1].kmod; + addr_symb->offset = addr - ksymbs->ksyms[start - 1].addr; + return 0; + } + + if (ksymbs->ksyms[start - 1].addr > addr) { + addr_symb->sym = ksymbs->ksyms[start - 1].sym; + addr_symb->mod = ksymbs->ksyms[start - 1].kmod; + addr_symb->offset = ksymbs->ksyms[start - 1].addr - addr; + return 0; + } + } + + return -1; +} + +static int __ksymb_cmp(const void *key1, const void *key2) +{ + struct ksymb_s *symb1 = ((struct ksymb_s *)key1); + struct ksymb_s *symb2 = ((struct ksymb_s *)key2); + return symb1->addr - symb2->addr; +} + +int sort_kern_syms(struct ksymb_tbl_s *ksymbs) +{ + if (!ksymbs) { + return -1; + } + qsort(ksymbs->ksyms, ksymbs->ksym_size, sizeof(struct ksymb_s), __ksymb_cmp); + return 0; +} + +int load_kern_syms(struct ksymb_tbl_s *ksymbs) +{ + FILE *kallsyms; + char line[LINE_BUF_LEN]; + + if (!ksymbs) { + return -1; + } + + kallsyms = fopen("/proc/kallsyms", "r"); + if (!kallsyms) { + return -2; + } + + line[0] = 0; + while (fgets(line, sizeof(line), kallsyms)) { + if (ksymbs->ksym_size >= KSYMB_MAX) { + ERROR("[SYMBOL]: Too many kern symbols.\n"); + break; + } + if (resolve_ksymbs((const char *)line, &(ksymbs->ksyms[ksymbs->ksym_size]))) { + continue; + } + + ksymbs->ksym_size++; + } + + fclose(kallsyms); + return 0; +} + +#endif + +#if 1 +#if 0 +static void symb_destroy(struct symb_s *symb) +{ + if (!symb) { + return; + } + + if (symb->symb_name) { + 
(void)free(symb->symb_name);
        symb->symb_name = NULL;
    }
    return;
}
#endif
// Free the heap strings of one module descriptor (name and path).
static void mod_info_destroy(struct mod_info_s *mod_info)
{
    if (mod_info->name) {
        (void)free(mod_info->name);
        mod_info->name = NULL;
    }
    if (mod_info->path) {
        (void)free(mod_info->path);
        mod_info->path = NULL;
    }
}

/*
 * Release a module's name/path strings and both symbol tables (regular and
 * debuginfo) via the elf_symb reference counter.
 * NOTE(review): mod itself and addr_ranges are not freed here — the caller
 * (proc_symbs_destroy) frees the mod struct; confirm addr_ranges is inline.
 */
static void mod_destroy(struct mod_s *mod)
{
    if (!mod) {
        return;
    }

    mod_info_destroy(&(mod->__mod_info));

    rm_elf_symb(mod->mod_symbs);
    rm_elf_symb(mod->debug_symbs);

    mod->mod_symbs = NULL;
    mod->debug_symbs = NULL;
    return;
}

/*
 * Destroy every module of a process symbol table.
 * NOTE(review): proc_symbs itself is not freed — presumably the caller owns
 * that allocation; confirm.
 */
static void proc_symbs_destroy(struct proc_symbs_s *proc_symbs)
{
    if (!proc_symbs) {
        return;
    }
    for (int i = 0; i < proc_symbs->mods_count; i++) {
        mod_destroy(proc_symbs->mods[i]);
        if (proc_symbs->mods[i]) {
            (void)free(proc_symbs->mods[i]);
            proc_symbs->mods[i] = NULL;
        }
    }
    return;
}
#endif

/* Disabled legacy symbol-array implementation (superseded by elf_symb). */
#if 0
static int inc_symbs_capability(struct mod_s* mod)
{
    u32 new_capa, old_capa;
    struct symb_s** new_symbs_capa;
    struct symb_s** old_symbs_capa;

    old_capa = mod->symbs_capability;
    new_capa = mod->symbs_capability + SYMBS_STEP_COUNT;
    if (new_capa >= SYMBS_MAX_COUNT) {
        return -1;
    }

    old_symbs_capa = mod->__symbs;

    new_symbs_capa = (struct symb_s **)malloc(new_capa * sizeof(struct symb_s *));
    if (!new_symbs_capa) {
        return -1;
    }

    (void)memset(new_symbs_capa, 0, new_capa * sizeof(struct symb_s *));
    if (old_capa > 0 && old_symbs_capa != NULL) {
        (void)memcpy(new_symbs_capa, old_symbs_capa, old_capa * sizeof(struct symb_s *));
    }
    if (old_symbs_capa != NULL) {
        (void)free(old_symbs_capa);
        old_symbs_capa = NULL;
    }
    mod->__symbs = new_symbs_capa;
    mod->symbs_capability = new_capa;
    return 0;
}

static ELF_CB_RET __add_symbs(const char *symb, u64 addr_start, u64 size, void *ctx)
{
    struct mod_s* mod = ctx;
    struct symb_s* new_symb;

    if (mod->symbs_count >= mod->symbs_capability) {
        if (inc_symbs_capability(mod)) {
            ERROR("[SYMBOL]: Too many 
symbos(%s).\n", mod->mod_name ?: __UNKNOW_NAME); + return ELF_SYMB_CB_ERR; + } + } + + new_symb = (struct symb_s*)malloc(sizeof(struct symb_s)); + if (!new_symb) { + return ELF_SYMB_CB_ERR; + } + + (void)memset(new_symb, 0, sizeof(struct symb_s)); + new_symb->start = addr_start; + new_symb->size = size; + new_symb->symb_name = strdup(symb); + SPLIT_NEWLINE_SYMBOL(new_symb->symb_name); + + mod->symbs[mod->symbs_count++] = new_symb; + return ELF_SYMB_CB_OK; +} + +static int __symb_cmp(const void *a, const void *b) +{ + struct symb_s **symb1 = (struct symb_s **)a; + struct symb_s **symb2 = (struct symb_s **)b; + + return (*symb1)->start - (*symb2)->start; +} + +static int sort_symbs(struct mod_s* mod) +{ + if (!mod) { + return SORT_SYMBS; + } + if (mod->symbs_count == 0) { + return 0; + } + qsort(mod->symbs, mod->symbs_count, sizeof(struct symb_s *), __symb_cmp); + return 0; +} +#endif + +#if 1 +static u64 __get_mod_target_addr(struct mod_s* mod, struct mod_addr_rage_s *range, u64 addr) +{ + if (mod->mod_type == MODULE_SO || mod->mod_type == MODULE_VDSO) { + return addr - (range->start - range->f_offset) + + (mod->mod_elf_so_addr - mod->mod_elf_so_offset); + } else { + return addr; + } +} + +static char is_mod_contain_addr(struct mod_s* mod, u64 addr, u64 *target_addr) +{ + struct mod_addr_rage_s *range; + for (int i = 0; i < mod->addr_ranges_count; i++) { + range = &(mod->addr_ranges[i]); + if (addr >= range->start && addr < range->end) { + *target_addr = __get_mod_target_addr(mod, range, addr); + return 1; + } + } + return 0; +} + +#if 0 +#define MOD_ERR_INDEX(mod, index) (((index) < 0) || (mod->symbs_count <= (index))) + +static int search_addr_upper_bound(struct mod_s* mod, int bgn, int end, u64 target_addr) +{ + int left = bgn, right = end, mid = 0; + + if ((bgn >= end) || (bgn < 0) || (end < 0)) { + return -1; + } + + while (left < right) { + mid = (left + right) / 2; + if (mid >= mod->symbs_count) { + return -1; + } + if (target_addr >= mod->symbs[mid]->start) 
{
            left = mid + 1;
        } else {
            right = mid - 1;
        }
    }

    if (MOD_ERR_INDEX(mod, right)) {
        return -1;
    }
    // If target is still >= symbs[right].start, the upper bound is one past it.
    return target_addr >= mod->symbs[right]->start ? (right + 1): right;
}

/*
 * (Compiled out under #if 0.) Resolve target_addr against a sorted module
 * symbol table: find the upper bound, then walk backwards over symbols whose
 * start <= target_addr looking for one whose [start, start+size) covers it.
 * On a hit, fills addr_symb with the symbol name, offset within the symbol,
 * the original (pre-translation) address, and the module name; returns 0.
 * Returns -1 when no covering symbol is found.
 */
static int __do_mod_search_addr(struct mod_s* mod, u64 orign_addr, u64 target_addr, struct addr_symb_s* addr_symb)
{
    u64 range;
    int search_index = search_addr_upper_bound(mod, 0, mod->symbs_count, target_addr);

    // Take a step back.
    search_index -= 1;
    if (MOD_ERR_INDEX(mod, search_index)) {
        return -1;
    }

    // NOTE(review): `range` is the start of the first candidate; the loop
    // below bails out once an earlier symbol's end no longer reaches it —
    // presumably to stop scanning past overlapping symbols. TODO confirm.
    range = mod->symbs[search_index]->start;

    while (!MOD_ERR_INDEX(mod, search_index) && target_addr >= mod->symbs[search_index]->start) {
        if (target_addr < mod->symbs[search_index]->start + mod->symbs[search_index]->size) {
            addr_symb->sym = mod->symbs[search_index]->symb_name;
            addr_symb->offset = target_addr - mod->symbs[search_index]->start;
            addr_symb->orign_addr = orign_addr;
            addr_symb->mod = mod->mod_name;
            return 0;
        }
        if (range > mod->symbs[search_index]->start + mod->symbs[search_index]->size) {
            break;
        }
        // Take a step back.
+ search_index -= 1; + } + + return -1; +} +#endif +#endif + +#if 1 + +static int load_debug_symbs(struct proc_symbs_s* proc_symbs, struct mod_s* mod) +{ + char debug_file[PATH_LEN]; + + if (mod->mod_type != MODULE_SO && mod->mod_type != MODULE_EXEC) { + return 0; + } + + debug_file[0] = 0; + (void)get_elf_debug_file(mod->elf_reader, + proc_symbs->proc_id, + (const char *)mod->mod_name, + (const char *)mod->mod_path, + debug_file, + PATH_LEN); + + if (debug_file[0] != 0) { + mod->debug_symbs = get_elf_symb((const char *)debug_file); + } + +#if 0 + elf_symb = get_elf_symbol(mod->elf_reader, proc_symbs->proc_id, + (const char *)mod->mod_name, (const char *)mod->mod_path); + if (!elf_symb) { + return -1; + } + + mod->debug_symbs = elf_symb; +#endif + return 0; +} + +static int load_symbs(struct mod_s* mod) +{ + if (!mod || !mod->mod_path) { + return LOAD_SYMBS; + } + + if (mod->mod_type == MODULE_SO || mod->mod_type == MODULE_EXEC) { + mod->mod_symbs = get_elf_symb((const char *)(mod->mod_path)); + if (mod->mod_symbs == NULL) { + ERROR("[SYMBOL]: Failed to load elf %s.\n", mod->mod_path); + return LOAD_SYMBS; + } + return 0; +#if 0 + if (gopher_iter_elf_file_symb((const char *)(mod->mod_path), __add_symbs, mod)) { + ERROR("[SYMBOL]: Failed to load elf %s.\n", mod->mod_path); + return LOAD_SYMBS; + } else { + return 0; + } +#endif + } + + /* + if (mod->type == MODULE_MAP) { + return gopher_iter_perf_map_symb((const char *)(mod->mod_path), __add_symbs, mod); + } + */ + + // TOOD: MODULE_VDSO, MODULE_MAP + if ((mod->mod_type == MODULE_MAP) || (mod->mod_type == MODULE_VDSO)) { + return 0; + } + + return LOAD_SYMBS; +} + +static int get_mod_elf_so_offset(struct mod_s* mod) +{ + if (!mod) { + return GET_ELF_OFFSET; + } + + if (mod->mod_type != MODULE_SO) { + return 0; + } + + if (gopher_get_elf_text_section((const char *)mod->mod_path, + &mod->mod_elf_so_addr, &mod->mod_elf_so_offset)) { + ERROR("[SYMBOL]: Get elf offset failed(%s).\n", mod->mod_path); + return 
GET_ELF_OFFSET;
    }

    return 0;
}

/*
 * Create a mod_s from mod_info, load its symbols, and append it to the
 * process's module list. Takes ownership of mod_info's heap members by
 * copying the struct and zeroing the source (so the caller's
 * mod_info_destroy() cannot double-free). On any failure the new module is
 * destroyed and freed before returning.
 */
static int add_mod(void *elf_reader, struct proc_symbs_s* proc_symbs, struct mod_info_s* mod_info)
{
    int ret;
    struct mod_s* new_mod;

    if (proc_symbs->mods_count >= MOD_MAX_COUNT) {
        return ADD_MOD;
    }

    new_mod = malloc(sizeof(struct mod_s));
    if (!new_mod) {
        return ADD_MOD;
    }
    (void)memset(new_mod, 0, sizeof(struct mod_s));

    (void)memcpy(&(new_mod->__mod_info), mod_info, sizeof(struct mod_info_s));
    (void)memset(mod_info, 0, sizeof(struct mod_info_s)); // avoid refree
    new_mod->elf_reader = elf_reader;

    if ((ret = load_symbs(new_mod)) && ret != 0) {
        goto err;
    }

    // NOTE(review): struct mod_s as declared in symbol.h exposes no
    // `symbs_count` member (only elf_symbo_s has one) — verify this field
    // exists, or whether `new_mod->mod_symbs->symbs_count` was intended.
    if (new_mod->symbs_count == 0) {
        // No symbols is not an error, but the module is dropped.
        ret = 0;
        goto err;
    }

#if 0
    if ((ret = sort_symbs(new_mod)) && ret != 0) {
        goto err;
    }
#endif

    if ((ret = get_mod_elf_so_offset(new_mod)) && ret != 0) {
        goto err;
    }

    // Best effort; missing debug info is fine.
    (void)load_debug_symbs(proc_symbs, new_mod);

    // First address range comes from the maps line that introduced the module.
    new_mod->addr_ranges[0].start = new_mod->mod_start;
    new_mod->addr_ranges[0].end = new_mod->mod_end;
    new_mod->addr_ranges[0].f_offset = new_mod->mod_f_offset;
    new_mod->addr_ranges_count = 1;

    proc_symbs->mods[proc_symbs->mods_count++] = new_mod;
    return 0;

err:
    mod_destroy(new_mod);
    (void)free(new_mod);
    return ret;
}

/*
 * Record an additional executable mapping range for an already-known module
 * (a module can appear in /proc/<pid>/maps several times).
 */
static int add_mod_range(struct mod_s* mod, u64 start, u64 end, u64 f_offset)
{
    if (mod->addr_ranges_count >= MOD_ADDR_RANGE_COUNT) {
        return ADD_MOD_RANGE;
    }

    mod->addr_ranges[mod->addr_ranges_count].start = start;
    mod->addr_ranges[mod->addr_ranges_count].end = end;
    mod->addr_ranges[mod->addr_ranges_count].f_offset = f_offset;
    mod->addr_ranges_count++;
    return 0;
}

/* Return 1 iff the path ends in ".map" (perf-style JIT symbol map). */
static char __is_perf_map(const char *perf_map_file)
{
    char *pos;

    if ((pos = strstr(perf_map_file, ".map")) != NULL) {
        pos += strlen(".map");
        if (*pos == 0) {
            return 1;
        }
    }
    return 0;
}

/*
 * Classify the module by its ELF type (ET_DYN → SO, ET_EXEC → EXEC),
 * falling back to perf-map / vdso name checks. Returns GET_MOD_TYPE for
 * anything unrecognized (type is set to MODULE_UNKNOW).
 */
static int get_mod_type(struct mod_info_s* mod_info)
{
    int elf_type;

    if (!mod_info || !mod_info->path) {
        return GET_MOD_TYPE;
    }

    elf_type = gopher_get_elf_type((const char *)mod_info->path);
    if (elf_type == ET_DYN) {
        mod_info->type = MODULE_SO;
        return 0;
    }

    if (elf_type == ET_EXEC) {
        mod_info->type = MODULE_EXEC;
        return 0;
    }

    if (__is_perf_map((const char *)mod_info->path)) {
        mod_info->type = MODULE_MAP;
        return 0;
    }

    if (!strcmp(mod_info->path, "[vdso]")) {
        mod_info->type = MODULE_VDSO;
        return 0;
    }

    mod_info->type = MODULE_UNKNOW;
    return GET_MOD_TYPE;
}

/*
 * Default path resolution: prefix the maps-file name with the process's
 * root so the file can be opened even if the process is in a container
 * with a different mount namespace.
 */
static void __do_get_mod_path_byname(struct mod_info_s* mod_info, int proc_id)
{
    char *fmt = "/proc/%d/root%s";
    char path[PATH_LEN];

    path[0] = 0;
    (void)snprintf(path, PATH_LEN, fmt, proc_id, mod_info->name);
    mod_info->path = strdup(path);
    return;
}

#define IS_CONTAIN_STR(s, contain_s) (strstr(s, contain_s))
#define IS_BACKEND_MOD(name) IS_CONTAIN_STR(name, "/memfd:")

#define __PATH_LEN (PATH_LEN + 32)
/*
 * Resolve an openable path for the module. Normal modules go through
 * /proc/<pid>/root/<name>; memfd-backed modules have no filesystem path,
 * so scan /proc/<pid>/fd for a descriptor whose inode matches and use that
 * fd symlink instead.
 * NOTE(review): returns -1 on a NULL name but GET_MOD_PATH on other
 * failures — presumably both mean "skip this module"; confirm with callers.
 */
static int get_mod_path(struct mod_info_s* mod_info, int proc_id)
{
    int ret = GET_MOD_PATH;
    char fd_path[PATH_LEN];
    char fd_file[__PATH_LEN];
    DIR *ds = NULL;
    struct stat f_stat;
    struct dirent *dir_entry;

    if (!mod_info->name) {
        return -1;
    }

    if (!IS_BACKEND_MOD(mod_info->name)) {
        __do_get_mod_path_byname(mod_info, proc_id);
        return 0;
    }

    fd_path[0] = 0;
    (void)snprintf(fd_path, PATH_LEN, "/proc/%d/fd", proc_id);
    ds = opendir(fd_path);
    if (!ds) {
        goto err;
    }

    while ((dir_entry = readdir(ds)) != NULL) {
        fd_file[0] = 0;
        (void)snprintf(fd_file, __PATH_LEN, "/proc/%d/fd/%s", proc_id, dir_entry->d_name);
        SPLIT_NEWLINE_SYMBOL(fd_file);
        if (stat(fd_file, &f_stat)) {
            continue;
        }

        // stat() follows the fd symlink, so st_ino is the target's inode.
        if (f_stat.st_ino == mod_info->inode) {
            mod_info->path = strdup(fd_file);
            ret = 0;
            break;
        }
    }

err:
    if (ds) {
        closedir(ds);
    }
    return ret;
}

#define __MOD_CORRECT_TARGET "(deleted)"
#define IS_NUMBER(c) ((c) >= '0' && (c) <= '9')
#define IS_STARTED_STR(s, started_s) (!strncmp(s, started_s, strlen(started_s)))
// Maps-entry names that are not loadable modules (anonymous, stack, heap,
// SysV shm, vsyscall, ...). Note: starts on the next line.
#define
MOD_NAME_ERR(name) IS_NUMBER(name[0]) \
    || IS_STARTED_STR(name, "/SYSV") || IS_STARTED_STR(name, "[vsyscall]") \
    || IS_STARTED_STR(name, "//anon") || IS_STARTED_STR(name, "/dev/zero") \
    || IS_STARTED_STR(name, "[stack") || IS_STARTED_STR(name, "[heap]") \
    || IS_STARTED_STR(name, "/anon_hugepage") || IS_STARTED_STR(name, "[uprobes]")
/*
 * Extract the module name (the last space-separated field) from a
 * /proc/<pid>/maps line. A trailing "(deleted)" marker is cut off first by
 * truncating the line just before it (modifies maps_line in place).
 * Returns GET_MOD_NAME for entries that are not real modules.
 */
static int get_mod_name(struct mod_info_s* mod_info, char *maps_line)
{
    char *end, *name, *target;

    target = strstr(maps_line, __MOD_CORRECT_TARGET);
    if (target) {
        // Truncate at the space preceding "(deleted)".
        end = target - 1;
        *end = 0;
    }

    // Walk back from the terminator to the last space; name follows it.
    end = maps_line + strlen(maps_line);
    while (*end != ' ' && end > maps_line) {
        end--;
    }
    name = end + 1;
    if (MOD_NAME_ERR(name)) {
        return GET_MOD_NAME;
    }

    mod_info->name = strdup(name);
    if (!mod_info->name) {
        return GET_MOD_NAME;
    }

    SPLIT_NEWLINE_SYMBOL(mod_info->name);

    return 0;
}
#endif

#if 1
/* Linear lookup of an already-registered module by name; NULL if absent. */
static struct mod_s* proc_get_mod_by_name(struct proc_symbs_s* proc_symbs, const char *name)
{
    struct mod_s *mod;

    for (int i = 0; i < proc_symbs->mods_count; i++) {
        mod = proc_symbs->mods[i];
        if (mod != NULL && mod->mod_name != NULL && !strcmp(mod->mod_name, name)) {
            return mod;
        }
    }
    return NULL;
}

#define MAPS_PERM_MAX 5
#define MAPS_IS_EXEC_PERM(perm) (perm[2] == 'x')

/*
 * Parse every line of /proc/<pid>/maps. Executable mappings either extend an
 * existing module's address ranges or create a new module (path + type +
 * symbols). Parse failures and add_mod failures abort the walk (is_over);
 * per-line soft failures just skip the line.
 */
static int proc_iter_maps(void *elf_reader, struct proc_symbs_s* proc_symbs, FILE *fp)
{
    int ret = 0, is_over = 0;
    u64 dev_major __maybe_unused;
    u64 dev_minor __maybe_unused;
    struct mod_info_s mod_info;
    struct mod_s *exist_mod;
    char line[LINE_BUF_LEN];
    char maps_perm[MAPS_PERM_MAX];

    while (fgets(line, sizeof(line), fp)) {
        maps_perm[0] = 0;
        (void)memset(&mod_info, 0, sizeof(mod_info));
        ret = 0;
        // Format: start-end perms offset dev_major:dev_minor inode  path
        if (sscanf(line, "%llx-%llx %4s %llx %llx:%llx %llu",
            &mod_info.start, &mod_info.end, maps_perm, &mod_info.f_offset,
            &dev_major, &dev_minor, &mod_info.inode) != 7) {
            ret = FMT_MAP;
            is_over = 1;
            goto next;
        }

        // Only executable mappings can contribute stack-trace symbols.
        if (!MAPS_IS_EXEC_PERM(maps_perm)) {
            goto next;
        }

        if ((ret
            = get_mod_name(&mod_info, line)) && ret != 0) {
            goto next;
        }

        exist_mod = proc_get_mod_by_name(proc_symbs, (const char *)mod_info.name);
        if (exist_mod) {
            // Known module: just record the extra executable range.
            if ((ret = add_mod_range(exist_mod, mod_info.start, mod_info.end, mod_info.f_offset)) && ret != 0) {
                goto next;
            }
            mod_info_destroy(&mod_info);
        } else {
            if ((ret = get_mod_path(&mod_info, proc_symbs->proc_id)) && ret != 0) {
                goto next;
            }

            if ((ret = get_mod_type(&mod_info)) && ret != 0) {
                goto next;
            }

            // add_mod takes ownership of mod_info's members on success
            // (it zeroes mod_info), so the destroy at `next` is safe.
            if ((ret = add_mod(elf_reader, proc_symbs, &mod_info)) && ret != 0) {
                is_over = 1;
                goto next;
            }
        }
        continue;
next:
        mod_info_destroy(&mod_info);
        if (is_over) {
            break;
        }
    }

    return is_over ? ret : 0;
}

/*
 * Build the full symbol view of a process: allocate proc_symbs_s, record
 * pid/comm, then walk /proc/<pid>/maps loading every executable module's
 * symbols. Returns NULL on any failure (everything already loaded is freed).
 * Caller releases the result with proc_delete_all_symbs().
 */
struct proc_symbs_s* proc_load_all_symbs(void *elf_reader, int proc_id, char *comm)
{
    int ret;
    FILE* fp = NULL;
    char maps_file[PATH_LEN];
    struct proc_symbs_s* proc_symbs;

    proc_symbs = (struct proc_symbs_s *)malloc(sizeof(struct proc_symbs_s));
    if (!proc_symbs) {
        return NULL;
    }
    (void)memset(proc_symbs, 0, sizeof(struct proc_symbs_s));
    proc_symbs->proc_id = proc_id;
    // comm buffer was zeroed above, so the copy is always NUL-terminated.
    (void)strncpy(proc_symbs->comm, comm, TASK_COMM_LEN - 1);

    maps_file[0] = 0;
    (void)snprintf(maps_file, PATH_LEN, "/proc/%d/maps", proc_id);
    if (access(maps_file, 0)) {
        // Process already exited (or maps not accessible).
        goto err;
    }
    fp = fopen(maps_file, "r");
    if (!fp){
        ERROR("[SYMBOL]: Open proc maps-file failed.[%s].\n", maps_file);
        goto err;
    }

    if ((ret = proc_iter_maps(elf_reader, proc_symbs, fp)) && ret != 0) {
        ERROR("[SYMBOL]: Iter proc maps failed[proc = %d, ret = %d].\n", proc_id, ret);
        goto err;
    }

    fclose(fp);
#ifdef GOPHER_DEBUG
    __print_proc(proc_symbs);
#endif
    return proc_symbs;
err:
    proc_symbs_destroy(proc_symbs);
    (void)free(proc_symbs);
    if (fp) {
        fclose(fp);
    }
    return NULL;
}

/* Free a process symbol view created by proc_load_all_symbs(). NULL-safe. */
void proc_delete_all_symbs(struct proc_symbs_s *proc_symbs)
{
    if (!proc_symbs) {
        return;
    }

    proc_symbs_destroy(proc_symbs);
    (void)free(proc_symbs);
    return;
}

int
proc_search_addr_symb(struct proc_symbs_s *proc_symbs, + u64 addr, struct addr_symb_s *addr_symb) +{ + int ret = -1, is_contain_range = 0; + u64 target_addr; + + addr_symb->orign_addr = addr; + for (int i = 0; i < proc_symbs->mods_count; i++) { + target_addr = 0; + if (proc_symbs->mods[i]) { + if ((ret = search_elf_symb(proc_symbs->mods[i]->debug_symbs, + addr, addr, proc_symbs->comm, addr_symb)) && ret == 0) { + break; + } + + if (is_mod_contain_addr(proc_symbs->mods[i], addr, &target_addr)) { + is_contain_range = 1; + if ((ret = search_elf_symb(proc_symbs->mods[i]->mod_symbs, + addr, target_addr, proc_symbs->comm, addr_symb)) && ret != 0) { + // __print_mod_symbs(proc_symbs->mods[i]); + } + break; + } + } + } + if (!is_contain_range) { + // __print_proc_ranges(proc_symbs); + } + + return ret; +} +#endif diff --git a/src/probes/extends/ebpf.probe/src/stackprobe/symbol.h b/src/probes/extends/ebpf.probe/src/stackprobe/symbol.h new file mode 100644 index 0000000000000000000000000000000000000000..178646da8a3793166b1db6aa87aa16ee56cef5c5 --- /dev/null +++ b/src/probes/extends/ebpf.probe/src/stackprobe/symbol.h @@ -0,0 +1,139 @@ +/****************************************************************************** + * Copyright (c) Huawei Technologies Co., Ltd. 2022. All rights reserved. + * gala-gopher licensed under the Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * http://license.coscl.org.cn/MulanPSL2 + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR FIT FOR A PARTICULAR + * PURPOSE. + * See the Mulan PSL v2 for more details. 
 * Author: Mr.lu
 * Create: 2022-02-18
 * Description: symbol defined
 ******************************************************************************/
#ifndef __GOPHER_SYMBOL_H__
#define __GOPHER_SYMBOL_H__

// NOTE(review): both an include guard and #pragma once — redundant but harmless.
#pragma once

#include "common.h"
#include "hash.h"

// example: ffff800009294000 t __nft_trace_packet [nf_tables]
// NOTE(review): the non-x86 fallback of 0x0 would classify every address as
// kernel-space (addr > KERN_ADDR_SPACE) — confirm this is intended for arm64.
#if defined(__TARGET_ARCH_x86)
#define KERN_ADDR_SPACE (0x00FFFFFFFFFFFFFF)
#else
#define KERN_ADDR_SPACE (0x0)
#endif

// nm(1)-style type letters that denote data (not text) symbols.
#define IS_KERN_DATA_SYMBOL(S) (((S) == 'B') || ((S) == 'b') || ((S) == 'd') \
    || ((S) == 'R') || ((S) == 'r') || ((S) == 'D'))

// One kernel symbol: address, name, and owning kernel module (if any).
struct ksymb_s {
    u64 addr;
    char *sym;
    char *kmod;
};

// Kernel symbol table; ksyms is a C99 flexible array member.
struct ksymb_tbl_s {
    u32 ksym_size;
    struct ksymb_s ksyms[];
};

// A resolved address: symbol/module pointers borrow from the owning table.
struct addr_symb_s {
    char *sym;      // No release is required.
    char *mod;      // No release is required.
    u64 orign_addr; // the address as originally sampled
    u64 offset;     // offset of orign_addr within the symbol
};

void destroy_ksymbs_tbl(struct ksymb_tbl_s *ksym_tbl);
struct ksymb_tbl_s* create_ksymbs_tbl(void);
int search_kern_addr_symb(struct ksymb_tbl_s *ksymbs, u64 addr, struct addr_symb_s *addr_symb);
int sort_kern_syms(struct ksymb_tbl_s *ksymbs);
int load_kern_syms(struct ksymb_tbl_s *ksymbs);

#define MOD_MAX_COUNT 1000
#define SYMBS_MAX_COUNT 1000000
#define SYMBS_STEP_COUNT 1000
// How a user-space mapping provides symbols.
enum module_type {
    MODULE_UNKNOW = 0,
    MODULE_SO = 1,
    MODULE_EXEC = 2,
    MODULE_MAP,      /* perf-style *.map JIT symbol file */
    MODULE_VDSO /* The virtual dynamically linked shared object.
*/
};

// One symbol inside a module's table.
struct symb_s {
    char *symb_name;
    u64 start;
    u64 size;
};

// An executable mapping range from /proc/<pid>/maps.
// NOTE(review): "rage" looks like a typo for "range"; renaming would touch
// every user of the type, so it is left as-is here.
struct mod_addr_rage_s {
    u64 start;
    u64 end;
    u64 f_offset;
};

// Raw per-mapping info gathered while parsing maps, before a mod_s is built.
struct mod_info_s {
    enum module_type type;
    char *name;          // last field of the maps line (heap-allocated)
    char *path;          // openable path, e.g. /proc/<pid>/root<name>
    u64 elf_so_addr;     // .text virtual address (shared objects only)
    u64 elf_so_offset;   // .text file offset (shared objects only)

    u64 start;           // first executable mapping range
    u64 end;
    u64 f_offset;

    u64 inode;           // backing file inode (used for memfd lookup)
};

// A loaded ELF symbol table, shared via hash table + refcount.
struct elf_symbo_s {
    H_HANDLE;
    u32 i_inode;
    u32 refcnt;
    char *elf;
    u32 symbs_count;
    u32 symbs_capability;
    struct symb_s** __symbs;
};

#define MOD_ADDR_RANGE_COUNT 100
// One user-space module (executable, shared object, vdso, or perf map).
// NOTE(review): the member-alias #defines below (mod_type, mod_name, ...)
// are ordinary global text macros — any other identifier named e.g.
// `mod_type` anywhere these headers are included will be rewritten too.
struct mod_s {
    struct mod_info_s __mod_info;
    #define mod_type __mod_info.type
    #define mod_name __mod_info.name
    #define mod_path __mod_info.path
    #define mod_elf_so_addr __mod_info.elf_so_addr
    #define mod_elf_so_offset __mod_info.elf_so_offset
    #define mod_start __mod_info.start
    #define mod_end __mod_info.end
    #define mod_f_offset __mod_info.f_offset
    #define mod_inode __mod_info.inode

    u32 addr_ranges_count;
    struct mod_addr_rage_s addr_ranges[MOD_ADDR_RANGE_COUNT];

    struct elf_symbo_s *debug_symbs;   // separate debug-info symbols (may be NULL)
    void *elf_reader; // No release is required.

    struct elf_symbo_s *mod_symbs;     // the module's own ELF symbols
};

// Complete symbol view of one process.
struct proc_symbs_s {
    int proc_id;
    char comm[TASK_COMM_LEN];

    u32 mods_count;
    struct mod_s* mods[MOD_MAX_COUNT];
};

struct proc_symbs_s* proc_load_all_symbs(void *elf_reader, int proc_id, char *comm);
void proc_delete_all_symbs(struct proc_symbs_s *proc_symbs);
int proc_search_addr_symb(struct proc_symbs_s *proc_symbs,
    u64 addr, struct addr_symb_s *addr_symb);

#endif