diff --git a/README.rst b/README.rst
index 91aa1e314c56a..a8921793b74ff 100644
--- a/README.rst
+++ b/README.rst
@@ -1,3 +1,9 @@
+====================
+QEMU for AFLplusplus
+====================
+
+This fork of QEMU enables fuzzing userspace ELF binaries under AFL++.
+
 ===========
 QEMU README
 ===========
diff --git a/accel/tcg/cpu-exec.c b/accel/tcg/cpu-exec.c
index 16e4fe3ccd87e..13d74a26e139e 100644
--- a/accel/tcg/cpu-exec.c
+++ b/accel/tcg/cpu-exec.c
@@ -35,6 +35,7 @@
 #include "exec/tb-lookup.h"
 #include "exec/log.h"
 #include "qemu/main-loop.h"
+#include "qemu/selfmap.h"
 #if defined(TARGET_I386) && !defined(CONFIG_USER_ONLY)
 #include "hw/i386/apic.h"
 #endif
@@ -44,6 +45,984 @@
 #include "sysemu/replay.h"
 #include "internal.h"
 
+#include "qemuafl/common.h"
+#include "qemuafl/imported/snapshot-inl.h"
+
+#include <sys/shm.h>
+#include <sys/mman.h>
+#ifndef AFL_QEMU_STATIC_BUILD
+  #include <dlfcn.h>
+#endif
+
+/***************************
+ * VARIOUS AUXILIARY STUFF *
+ ***************************/
+
+/* This is equivalent to afl-as.h: */
+
+static unsigned char
+    dummy[MAP_SIZE]; /* costs MAP_SIZE but saves a few instructions */
+unsigned char *afl_area_ptr = dummy; /* Exported for afl_gen_trace */
+
+/* Exported variables populated by the code patched into elfload.c: */
+
+abi_ulong afl_entry_point, /* ELF entry point (_start) */
+    afl_exit_point,        /* ELF exit point */
+    afl_start_code,        /* .text start pointer */
+    afl_end_code;          /* .text end pointer */
+
+struct vmrange* afl_instr_code;
+
+abi_ulong afl_persistent_addr, afl_persistent_ret_addr;
+unsigned int afl_persistent_cnt;
+
+u8 afl_compcov_level;
+
+__thread abi_ulong afl_prev_loc;
+
+struct cmp_map *__afl_cmp_map;
+
+/* Set in the child process in forkserver mode: */
+
+static int forkserver_installed = 0;
+static int disable_caching = 0;
+
+unsigned char afl_fork_child;
+unsigned int afl_forksrv_pid;
+unsigned char is_persistent;
+target_long persistent_stack_offset;
+unsigned char persistent_first_pass = 1;
+unsigned char persistent_exits;
+unsigned char persistent_save_gpr;
+unsigned char persistent_memory;
+int persisent_retaddr_offset;
+
+struct api_regs saved_regs;
+
+u8 * shared_buf;
+u32 *shared_buf_len;
+u8 sharedmem_fuzzing;
+
+afl_persistent_hook_fn afl_persistent_hook_ptr;
+
+/* Instrumentation ratio: */
+
+unsigned int afl_inst_rms = MAP_SIZE; /* Exported for afl_gen_trace */
+
+/* Function declarations. 
*/ + +static void afl_wait_tsl(CPUState *, int); +static void afl_request_tsl(target_ulong, target_ulong, uint32_t, uint32_t, + TranslationBlock *, int); + +/* Data structures passed around by the translate handlers: */ + +struct afl_tb { + + target_ulong pc; + target_ulong cs_base; + uint32_t flags; + uint32_t cf_mask; + +}; + +struct afl_chain { + + struct afl_tb last_tb; + uint32_t cf_mask; + int tb_exit; + +}; + +struct afl_tsl { + + struct afl_tb tb; + struct afl_chain chain; + char is_chain; + +}; + +/* Some forward decls: */ + +static inline TranslationBlock *tb_find(CPUState *, TranslationBlock *, int, + uint32_t); +static inline void tb_add_jump(TranslationBlock *tb, int n, + TranslationBlock *tb_next); +static void afl_map_shm_fuzz(void); + +/************************* + * ACTUAL IMPLEMENTATION * + *************************/ + +/* Snapshot memory */ + +struct saved_region { + + void* addr; + size_t size; + void* saved; + +}; + +abi_ulong saved_brk; +int lkm_snapshot; +struct saved_region* memory_snapshot; +size_t memory_snapshot_len; + +static void collect_memory_snapshot(void) { + + saved_brk = afl_get_brk(); + + FILE *fp; + char *line = NULL; + size_t len = 0; + ssize_t read; + uint64_t afl_shm_inode = 0; + char *afl_shm_id_str = getenv(SHM_ENV_VAR); + + fp = fopen("/proc/self/maps", "r"); + if (fp == NULL) { + fprintf(stderr, "[AFL] ERROR: cannot open /proc/self/maps\n"); + exit(1); + } + + if (afl_shm_id_str) { + afl_shm_inode = atoi(afl_shm_id_str); + } + + size_t memory_snapshot_allocd = 32; + if (!lkm_snapshot) + memory_snapshot = malloc(memory_snapshot_allocd * + sizeof(struct saved_region)); + + while ((read = getline(&line, &len, fp)) != -1) { + + int fields, dev_maj, dev_min, inode; + uint64_t min, max, offset; + char flag_r, flag_w, flag_x, flag_p; + char path[512] = ""; + + fields = sscanf(line, "%"PRIx64"-%"PRIx64" %c%c%c%c %"PRIx64" %x:%x %d" + " %512s", &min, &max, &flag_r, &flag_w, &flag_x, + &flag_p, &offset, &dev_maj, &dev_min, &inode, path); + + if ((fields < 10) || (fields > 11) || !h2g_valid(min)) + continue; + + int flags = page_get_flags(h2g(min)); + + max = h2g_valid(max - 1) ? max : (uintptr_t)AFL_G2H(GUEST_ADDR_MAX) + 1; + if (page_check_range(h2g(min), max - min, flags) == -1) + continue; + + // When `libcompcov.so` is used, the shared memory used to track coverage + // is picked up here. Obviously, we don't want to reset that, as that + // would erase coverage tracking, so we skip it. 
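+    // (afl-fuzz passes the SysV shm id in SHM_ENV_VAR; a SysV shm mapping
+    // reports that id in the inode column of /proc/self/maps, which is what
+    // the check below matches on.)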
+ if (afl_shm_id_str && inode == afl_shm_inode) continue; + + if (lkm_snapshot) { + + afl_snapshot_include_vmrange((void*)min, (void*)max); + + } else { + + if (!(flags & PROT_WRITE)) continue; + + if (memory_snapshot_allocd == memory_snapshot_len) { + memory_snapshot_allocd *= 2; + memory_snapshot = realloc(memory_snapshot, memory_snapshot_allocd * + sizeof(struct saved_region)); + } + + void* saved = malloc(max - min); + memcpy(saved, (void*)min, max - min); + + size_t i = memory_snapshot_len++; + memory_snapshot[i].addr = (void*)min; + memory_snapshot[i].size = max - min; + memory_snapshot[i].saved = saved; + + } + + } + + if (lkm_snapshot) + afl_snapshot_take(AFL_SNAPSHOT_BLOCK | AFL_SNAPSHOT_FDS); + + fclose(fp); + +} + +static void restore_memory_snapshot(void) { + + afl_set_brk(saved_brk); + + if (lkm_snapshot) { + + afl_snapshot_restore(); + + } else { + + size_t i; + for (i = 0; i < memory_snapshot_len; ++i) { + + // TODO avoid munmap of snapshot pages + + memcpy(memory_snapshot[i].addr, memory_snapshot[i].saved, + memory_snapshot[i].size); + + } + + } + + afl_target_unmap_trackeds(); + +} + +/* Set up SHM region and initialize other stuff. */ + +static void afl_map_shm_fuzz(void) { + + char *id_str = getenv(SHM_FUZZ_ENV_VAR); + + if (id_str) { + + u32 shm_id = atoi(id_str); + u8 *map = (u8 *)shmat(shm_id, NULL, 0); + /* Whooooops. */ + + if (!map || map == (void *)-1) { + + perror("[AFL] ERROR: could not access fuzzing shared memory"); + exit(1); + + } + + shared_buf_len = (u32 *)map; + shared_buf = map + sizeof(u32); + + if (getenv("AFL_DEBUG")) { + + fprintf(stderr, "[AFL] DEBUG: successfully got fuzzing shared memory\n"); + + } + + } else { + + fprintf(stderr, + "[AFL] ERROR: variable for fuzzing shared memory is not set\n"); + exit(1); + + } + +} + +void afl_setup(void) { + + char *id_str = getenv(SHM_ENV_VAR), *inst_r = getenv("AFL_INST_RATIO"); + + int shm_id; + + if (inst_r) { + + unsigned int r; + + r = atoi(inst_r); + + if (r > 100) r = 100; + if (!r) r = 1; + + afl_inst_rms = MAP_SIZE * r / 100; + + } + + if (id_str) { + + shm_id = atoi(id_str); + afl_area_ptr = shmat(shm_id, NULL, 0); + + if (afl_area_ptr == (void *)-1) exit(1); + + /* With AFL_INST_RATIO set to a low value, we want to touch the bitmap + so that the parent doesn't give up on us. 
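+       A single write is enough here: afl-fuzz only checks that the map is
+       not completely blank when the target starts up.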
*/ + + if (inst_r) afl_area_ptr[0] = 1; + + } + + disable_caching = getenv("AFL_QEMU_DISABLE_CACHE") != NULL; + + if (getenv("___AFL_EINS_ZWEI_POLIZEI___")) { // CmpLog forkserver + + id_str = getenv(CMPLOG_SHM_ENV_VAR); + + if (id_str) { + + u32 shm_id = atoi(id_str); + + __afl_cmp_map = shmat(shm_id, NULL, 0); + + if (__afl_cmp_map == (void *)-1) exit(1); + + } + + } + + if (getenv("AFL_INST_LIBS")) { + + afl_start_code = 0; + afl_end_code = (abi_ulong)-1; + + } + + if (getenv("AFL_CODE_START")) + afl_start_code = strtoll(getenv("AFL_CODE_START"), NULL, 16); + if (getenv("AFL_CODE_END")) + afl_end_code = strtoll(getenv("AFL_CODE_END"), NULL, 16); + + int have_names = 0; + if (getenv("AFL_QEMU_INST_RANGES")) { + char *str = getenv("AFL_QEMU_INST_RANGES"); + char *saveptr1, *saveptr2 = NULL, *save_pt1 = NULL; + char *pt1, *pt2, *pt3 = NULL; + + while (1) { + + pt1 = strtok_r(str, ",", &saveptr1); + if (pt1 == NULL) break; + str = NULL; + save_pt1 = strdup(pt1); + + pt2 = strtok_r(pt1, "-", &saveptr2); + pt3 = strtok_r(NULL, "-", &saveptr2); + + struct vmrange* n = calloc(1, sizeof(struct vmrange)); + n->next = afl_instr_code; + + if (pt3 == NULL) { // filename + have_names = 1; + n->start = (target_ulong)-1; + n->end = 0; + n->name = save_pt1; + } else { + n->start = strtoull(pt2, NULL, 16); + n->end = strtoull(pt3, NULL, 16); + if (n->start && n->end) { + n->name = NULL; + free(save_pt1); + } else { + have_names = 1; + n->start = (target_ulong)-1; + n->end = 0; + n->name = save_pt1; + } + } + + afl_instr_code = n; + + } + } + + if (getenv("AFL_QEMU_EXCLUDE_RANGES")) { + char *str = getenv("AFL_QEMU_EXCLUDE_RANGES"); + char *saveptr1, *saveptr2 = NULL, *save_pt1; + char *pt1, *pt2, *pt3 = NULL; + + while (1) { + + pt1 = strtok_r(str, ",", &saveptr1); + if (pt1 == NULL) break; + str = NULL; + save_pt1 = strdup(pt1); + + pt2 = strtok_r(pt1, "-", &saveptr2); + pt3 = strtok_r(NULL, "-", &saveptr2); + + struct vmrange* n = calloc(1, sizeof(struct vmrange)); + n->exclude = true; // These are "exclusion" regions. + n->next = afl_instr_code; + + if (pt3 == NULL) { // filename + have_names = 1; + n->start = (target_ulong)-1; + n->end = 0; + n->name = save_pt1; + } else { + n->start = strtoull(pt2, NULL, 16); + n->end = strtoull(pt3, NULL, 16); + if (n->start && n->end) { + n->name = NULL; + free(save_pt1); + } else { + have_names = 1; + n->start = (target_ulong)-1; + n->end = 0; + n->name = save_pt1; + } + } + + afl_instr_code = n; + + } + } + + if (have_names) { + GSList *map_info = read_self_maps(); + for (GSList *s = map_info; s; s = g_slist_next(s)) { + MapInfo *e = (MapInfo *) s->data; + + if (h2g_valid(e->start)) { + unsigned long min = e->start; + unsigned long max = e->end; + int flags = page_get_flags(h2g(min)); + + max = h2g_valid(max - 1) ? 
max : (uintptr_t) AFL_G2H(GUEST_ADDR_MAX) + 1; + + if (page_check_range(h2g(min), max - min, flags) == -1) { + continue; + } + + // Now that we have a valid guest address region, compare its + // name against the names we care about: + target_ulong gmin = h2g(min); + target_ulong gmax = h2g(max); + + struct vmrange* n = afl_instr_code; + while (n) { + if (n->name && strstr(e->path, n->name)) { + if (gmin < n->start) n->start = gmin; + if (gmax > n->end) n->end = gmax; + break; + } + n = n->next; + } + } + } + free_self_maps(map_info); + } + + if (getenv("AFL_DEBUG") && afl_instr_code) { + struct vmrange* n = afl_instr_code; + while (n) { + if (n->exclude) { + fprintf(stderr, "Exclude range: 0x%lx-0x%lx (%s)\n", + (unsigned long)n->start, (unsigned long)n->end, + n->name ? n->name : ""); + } else { + fprintf(stderr, "Instrument range: 0x%lx-0x%lx (%s)\n", + (unsigned long)n->start, (unsigned long)n->end, + n->name ? n->name : ""); + } + n = n->next; + } + } + + /* Maintain for compatibility */ + if (getenv("AFL_QEMU_COMPCOV")) { afl_compcov_level = 1; } + if (getenv("AFL_COMPCOV_LEVEL")) { + + afl_compcov_level = atoi(getenv("AFL_COMPCOV_LEVEL")); + + } + + /* pthread_atfork() seems somewhat broken in util/rcu.c, and I'm + not entirely sure what is the cause. This disables that + behaviour, and seems to work alright? */ + + rcu_disable_atfork(); + + if (getenv("AFL_QEMU_PERSISTENT_HOOK")) { + +#ifdef AFL_QEMU_STATIC_BUILD + + fprintf(stderr, + "[AFL] ERROR: you cannot use AFL_QEMU_PERSISTENT_HOOK when " + "afl-qemu-trace is static\n"); + exit(1); + +#else + + persistent_save_gpr = 1; + + void *plib = dlopen(getenv("AFL_QEMU_PERSISTENT_HOOK"), RTLD_NOW); + if (!plib) { + + fprintf(stderr, "[AFL] ERROR: invalid AFL_QEMU_PERSISTENT_HOOK=%s - %s\n", + getenv("AFL_QEMU_PERSISTENT_HOOK"), + dlerror()); + exit(1); + + } + + int (*afl_persistent_hook_init_ptr)(void) = + dlsym(plib, "afl_persistent_hook_init"); + if (afl_persistent_hook_init_ptr) + sharedmem_fuzzing = afl_persistent_hook_init_ptr(); + + afl_persistent_hook_ptr = dlsym(plib, "afl_persistent_hook"); + if (!afl_persistent_hook_ptr) { + + fprintf(stderr, + "[AFL] ERROR: failed to find the function " + "\"afl_persistent_hook\" in %s\n", + getenv("AFL_QEMU_PERSISTENT_HOOK")); + exit(1); + + } + +#endif + + } + + if (__afl_cmp_map) return; // no persistent for cmplog + + is_persistent = getenv("AFL_QEMU_PERSISTENT_ADDR") != NULL; + + if (is_persistent) + afl_persistent_addr = strtoll(getenv("AFL_QEMU_PERSISTENT_ADDR"), NULL, 0); + + if (getenv("AFL_QEMU_PERSISTENT_RET")) + afl_persistent_ret_addr = + strtoll(getenv("AFL_QEMU_PERSISTENT_RET"), NULL, 0); + /* If AFL_QEMU_PERSISTENT_RET is not specified patch the return addr */ + + if (getenv("AFL_QEMU_PERSISTENT_GPR")) persistent_save_gpr = 1; + if (getenv("AFL_QEMU_PERSISTENT_MEM")) + persistent_memory = 1; + + if (getenv("AFL_QEMU_PERSISTENT_RETADDR_OFFSET")) + persisent_retaddr_offset = + strtoll(getenv("AFL_QEMU_PERSISTENT_RETADDR_OFFSET"), NULL, 0); + + if (getenv("AFL_QEMU_PERSISTENT_CNT")) + afl_persistent_cnt = strtoll(getenv("AFL_QEMU_PERSISTENT_CNT"), NULL, 0); + else + afl_persistent_cnt = 0; + + if (getenv("AFL_QEMU_PERSISTENT_EXITS")) persistent_exits = 1; + + // TODO persistent exits for other archs not x86 + // TODO persistent mode for other archs not x86 + // TODO cmplog rtn for arm + + if (getenv("AFL_QEMU_SNAPSHOT")) { + + is_persistent = 1; + persistent_save_gpr = 1; + persistent_memory = 1; + persistent_exits = 1; + + if (afl_persistent_addr == 0) + afl_persistent_addr = 
strtoll(getenv("AFL_QEMU_SNAPSHOT"), NULL, 0); + + } + + if (persistent_memory && afl_snapshot_init() >= 0) + lkm_snapshot = 1; + + if (getenv("AFL_DEBUG")) { + if (is_persistent) + fprintf(stderr, "Persistent: 0x%lx [0x%lx] %s%s%s\n", + (unsigned long)afl_persistent_addr, + (unsigned long)afl_persistent_ret_addr, + (persistent_save_gpr ? "gpr ": ""), + (persistent_memory ? "mem ": ""), + (persistent_exits ? "exits ": "")); + } + +} + +/* Fork server logic, invoked once we hit _start. */ + +void afl_forkserver(CPUState *cpu) { + + if (forkserver_installed == 1) return; + forkserver_installed = 1; + + if (getenv("AFL_QEMU_DEBUG_MAPS")) open_self_maps(cpu->env_ptr, 1); + + //u32 map_size = 0; + unsigned char tmp[4] = {0}; + pid_t child_pid; + int t_fd[2]; + u8 child_stopped = 0; + u32 was_killed; + int status = 0; + + if (!getenv("AFL_OLD_FORKSERVER")) { + + // with the max ID value + if (MAP_SIZE <= FS_OPT_MAX_MAPSIZE) + status |= (FS_OPT_SET_MAPSIZE(MAP_SIZE) | FS_OPT_MAPSIZE); + if (lkm_snapshot) status |= FS_OPT_SNAPSHOT; + if (sharedmem_fuzzing != 0) status |= FS_OPT_SHDMEM_FUZZ; + if (status) status |= (FS_OPT_ENABLED | FS_OPT_NEWCMPLOG); + + } + + memcpy(tmp, &status, 4); + if (getenv("AFL_DEBUG")) + fprintf(stderr, "Debug: Sending status 0x%08x\n", status); + + /* Tell the parent that we're alive. If the parent doesn't want + to talk, assume that we're not running in forkserver mode. */ + + if (write(FORKSRV_FD + 1, tmp, 4) != 4) return; + + afl_forksrv_pid = getpid(); + + int first_run = 1; + + if (sharedmem_fuzzing) { + + if (read(FORKSRV_FD, &was_killed, 4) != 4) exit(2); + + if ((was_killed & (0xffffffff & (FS_OPT_ENABLED | FS_OPT_SHDMEM_FUZZ))) == + (FS_OPT_ENABLED | FS_OPT_SHDMEM_FUZZ)) + afl_map_shm_fuzz(); + else { + + fprintf(stderr, + "[AFL] ERROR: afl-fuzz is old and does not support" + " shmem input"); + exit(1); + + } + + } + + /* All right, let's await orders... */ + + while (1) { + + /* Whoops, parent dead? */ + + if (read(FORKSRV_FD, &was_killed, 4) != 4) exit(2); + + /* If we stopped the child in persistent mode, but there was a race + condition and afl-fuzz already issued SIGKILL, write off the old + process. */ + + if (child_stopped && was_killed) { + + child_stopped = 0; + if (waitpid(child_pid, &status, 0) < 0) exit(8); + + } + + if (!child_stopped) { + + /* Establish a channel with child to grab translation commands. We'll + read from t_fd[0], child will write to TSL_FD. */ + + if (pipe(t_fd) || dup2(t_fd[1], TSL_FD) < 0) exit(3); + close(t_fd[1]); + + child_pid = fork(); + if (child_pid < 0) exit(4); + + if (!child_pid) { + + /* Child process. Close descriptors and run free. */ + + afl_fork_child = 1; + close(FORKSRV_FD); + close(FORKSRV_FD + 1); + close(t_fd[0]); + return; + + } + + /* Parent. */ + + close(TSL_FD); + + } else { + + /* Special handling for persistent mode: if the child is alive but + currently stopped, simply restart it with SIGCONT. */ + + kill(child_pid, SIGCONT); + child_stopped = 0; + + } + + /* Parent. */ + + if (write(FORKSRV_FD + 1, &child_pid, 4) != 4) exit(5); + + /* Collect translation requests until child dies and closes the pipe. */ + + afl_wait_tsl(cpu, t_fd[0]); + + /* Get and relay exit status to parent. */ + + if (waitpid(child_pid, &status, is_persistent ? WUNTRACED : 0) < 0) exit(6); + + /* In persistent mode, the child stops itself with SIGSTOP to indicate + a successful run. In this case, we want to wake it up without forking + again. 
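+
+       The full round trip: the child calls raise(SIGSTOP) at the end of an
+       iteration, the waitpid() above reports WIFSTOPPED(status), and on the
+       next run request we resume the child with kill(child_pid, SIGCONT)
+       instead of forking a fresh copy.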
*/ + + if (WIFSTOPPED(status)) + child_stopped = 1; + else if (unlikely(first_run && is_persistent)) { + + fprintf(stderr, "[AFL] ERROR: no persistent iteration executed\n"); + exit(12); // Persistent is wrong + + } + + first_run = 0; + + if (write(FORKSRV_FD + 1, &status, 4) != 4) exit(7); + + } + +} + +/* A simplified persistent mode handler, used as explained in + * llvm_mode/README.md. */ + +static u32 cycle_cnt; + +void afl_persistent_iter(CPUArchState *env) { + + static struct afl_tsl exit_cmd_tsl; + + if (!afl_persistent_cnt || --cycle_cnt) { + + if (persistent_memory) restore_memory_snapshot(); + + if (persistent_save_gpr && !afl_persistent_hook_ptr) { + afl_restore_regs(&saved_regs, env); + } + + if (!disable_caching) { + + memset(&exit_cmd_tsl, 0, sizeof(struct afl_tsl)); + exit_cmd_tsl.tb.pc = (target_ulong)(-1); + + if (write(TSL_FD, &exit_cmd_tsl, sizeof(struct afl_tsl)) != + sizeof(struct afl_tsl)) { + + /* Exit the persistent loop on pipe error */ + afl_area_ptr = dummy; + exit(0); + + } + + } + + // TODO use only pipe + raise(SIGSTOP); + + + // now we have shared_buf updated and ready to use + if (persistent_save_gpr && afl_persistent_hook_ptr) { + + struct api_regs hook_regs = saved_regs; + afl_persistent_hook_ptr(&hook_regs, guest_base, shared_buf, + *shared_buf_len); + afl_restore_regs(&hook_regs, env); + + } + + afl_area_ptr[0] = 1; + afl_prev_loc = 0; + + } else { + + afl_area_ptr = dummy; + exit(0); + + } + +} + +void afl_persistent_loop(CPUArchState *env) { + + if (!afl_fork_child) return; + + if (persistent_first_pass) { + + /* Make sure that every iteration of __AFL_LOOP() starts with a clean slate. + On subsequent calls, the parent will take care of that, but on the first + iteration, it's our job to erase any trace of whatever happened + before the loop. */ + + if (is_persistent) { + + memset(afl_area_ptr, 0, MAP_SIZE); + afl_area_ptr[0] = 1; + afl_prev_loc = 0; + + } + + if (persistent_memory) collect_memory_snapshot(); + + if (persistent_save_gpr) { + + afl_save_regs(&saved_regs, env); + + if (afl_persistent_hook_ptr) { + + struct api_regs hook_regs = saved_regs; + afl_persistent_hook_ptr(&hook_regs, guest_base, shared_buf, + *shared_buf_len); + afl_restore_regs(&hook_regs, env); + + } + + } + + cycle_cnt = afl_persistent_cnt; + persistent_first_pass = 0; + persistent_stack_offset = TARGET_LONG_BITS / 8; + + return; + + } + + if (is_persistent) { + + afl_persistent_iter(env); + + } + +} + +/* This code is invoked whenever QEMU decides that it doesn't have a + translation of a particular block and needs to compute it, or when it + decides to chain two TBs together. When this happens, we tell the parent to + mirror the operation, so that the next fork() has a cached copy. 
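+
+   Each request is a fixed-size record pushed down the TSL_FD pipe; in
+   essence (see afl_request_tsl() below):
+
+     struct afl_tsl t = { .tb = { pc, cs_base, flags, cf_mask },
+                          .is_chain = (last_tb != NULL) };
+     write(TSL_FD, &t, sizeof(t));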
+   */
+
+static void afl_request_tsl(target_ulong pc, target_ulong cb, uint32_t flags,
+                            uint32_t cf_mask, TranslationBlock *last_tb,
+                            int tb_exit) {
+
+  if (disable_caching) return;
+
+  struct afl_tsl t;
+
+  if (!afl_fork_child) return;
+
+  t.tb.pc = pc;
+  t.tb.cs_base = cb;
+  t.tb.flags = flags;
+  t.tb.cf_mask = cf_mask;
+  t.is_chain = (last_tb != NULL);
+
+  if (t.is_chain) {
+
+    t.chain.last_tb.pc = last_tb->pc;
+    t.chain.last_tb.cs_base = last_tb->cs_base;
+    t.chain.last_tb.flags = last_tb->flags;
+    t.chain.cf_mask = cf_mask;
+    t.chain.tb_exit = tb_exit;
+
+  }
+
+  if (write(TSL_FD, &t, sizeof(struct afl_tsl)) != sizeof(struct afl_tsl))
+    return;
+
+}
+
+static inline TranslationBlock *
+afl_tb_lookup(CPUState *cpu, target_ulong pc, target_ulong cs_base,
+              uint32_t flags, uint32_t cf_mask)
+{
+    TranslationBlock *tb;
+    uint32_t hash;
+
+    hash = tb_jmp_cache_hash_func(pc);
+    tb = qatomic_rcu_read(&cpu->tb_jmp_cache[hash]);
+
+    cf_mask &= ~CF_CLUSTER_MASK;
+    cf_mask |= cpu->cluster_index << CF_CLUSTER_SHIFT;
+
+    if (likely(tb &&
+               tb->pc == pc &&
+               tb->cs_base == cs_base &&
+               tb->flags == flags &&
+               tb->trace_vcpu_dstate == *cpu->trace_dstate &&
+               (tb_cflags(tb) & (CF_HASH_MASK | CF_INVALID)) == cf_mask)) {
+        return tb;
+    }
+    tb = tb_htable_lookup(cpu, pc, cs_base, flags, cf_mask);
+    if (tb == NULL) {
+        return NULL;
+    }
+    qatomic_set(&cpu->tb_jmp_cache[hash], tb);
+    return tb;
+}
+
+/* This is the other side of the same channel. Since timeouts are handled by
+   afl-fuzz simply killing the child, we can just wait until the pipe breaks. */
+
+static void afl_wait_tsl(CPUState *cpu, int fd) {
+
+  struct afl_tsl t;
+  TranslationBlock *tb, *last_tb;
+
+  if (disable_caching) return;
+
+  while (1) {
+
+    u8 invalid_pc = 0;
+
+    /* Broken pipe means it's time to return to the fork server routine. */
+
+    if (read(fd, &t, sizeof(struct afl_tsl)) != sizeof(struct afl_tsl)) break;
+
+    /* Exit command for persistent */
+
+    if (t.tb.pc == (target_ulong)(-1)) return;
+
+    tb = afl_tb_lookup(cpu, t.tb.pc, t.tb.cs_base, t.tb.flags, t.tb.cf_mask);
+
+    if (!tb) {
+
+      /* The child may request to translate a block of memory that is not
+         mapped in the parent (e.g. jitted code or dlopened code).
+         This causes a SIGSEGV in gen_intermediate_code() and associated
+         subroutines. We simply avoid caching of such blocks. */
+
+      if (is_valid_addr(t.tb.pc)) {
+
+        mmap_lock();
+        tb = tb_gen_code(cpu, t.tb.pc, t.tb.cs_base, t.tb.flags, t.tb.cf_mask);
+        mmap_unlock();
+
+      } else {
+
+        invalid_pc = 1;
+
+      }
+
+    }
+
+    if (t.is_chain && !invalid_pc) {
+
+      last_tb = afl_tb_lookup(cpu, t.chain.last_tb.pc,
+                              t.chain.last_tb.cs_base,
+                              t.chain.last_tb.flags,
+                              t.chain.cf_mask);
+#define TB_JMP_RESET_OFFSET_INVALID 0xffff
+      if (last_tb && (last_tb->jmp_reset_offset[t.chain.tb_exit] !=
+                      TB_JMP_RESET_OFFSET_INVALID)) {
+
+        tb_add_jump(last_tb, t.chain.tb_exit, tb);
+
+      }
+
+    }
+
+  }
+
+  close(fd);
+
+}
+
 /* -icount align implementation. 
*/ typedef struct SyncClocks { @@ -421,11 +1400,13 @@ static inline TranslationBlock *tb_find(CPUState *cpu, TranslationBlock *tb; target_ulong cs_base, pc; uint32_t flags; + bool was_translated = false, was_chained = false; tb = tb_lookup__cpu_state(cpu, &pc, &cs_base, &flags, cf_mask); if (tb == NULL) { mmap_lock(); tb = tb_gen_code(cpu, pc, cs_base, flags, cf_mask); + was_translated = true; mmap_unlock(); /* We add the TB in the virtual pc hash table for the fast lookup */ qatomic_set(&cpu->tb_jmp_cache[tb_jmp_cache_hash_func(pc)], tb); @@ -442,6 +1423,11 @@ static inline TranslationBlock *tb_find(CPUState *cpu, /* See if we can patch the calling TB. */ if (last_tb) { tb_add_jump(last_tb, tb_exit, tb); + was_chained = true; + } + if (was_translated || was_chained) { + afl_request_tsl(pc, cs_base, flags, cf_mask, + was_chained ? last_tb : NULL, tb_exit); } return tb; } diff --git a/accel/tcg/tcg-runtime.c b/accel/tcg/tcg-runtime.c index d736f4ff553a7..49df924c25847 100644 --- a/accel/tcg/tcg-runtime.c +++ b/accel/tcg/tcg-runtime.c @@ -32,6 +32,283 @@ #include "exec/log.h" #include "tcg/tcg.h" +#include "qemuafl/common.h" + +uint32_t afl_hash_ip(uint64_t); + +void HELPER(afl_entry_routine)(CPUArchState *env) { + + afl_forkserver(env_cpu(env)); + +} + +void HELPER(afl_persistent_routine)(CPUArchState *env) { + + afl_persistent_loop(env); + +} + +void HELPER(afl_compcov_16)(target_ulong cur_loc, target_ulong arg1, + target_ulong arg2) { + + register uintptr_t idx = cur_loc; + + if ((arg1 & 0xff00) == (arg2 & 0xff00)) { INC_AFL_AREA(idx); } + +} + +void HELPER(afl_compcov_32)(target_ulong cur_loc, target_ulong arg1, + target_ulong arg2) { + + register uintptr_t idx = cur_loc; + + if ((arg1 & 0xff000000) == (arg2 & 0xff000000)) { + + INC_AFL_AREA(idx + 2); + if ((arg1 & 0xff0000) == (arg2 & 0xff0000)) { + + INC_AFL_AREA(idx + 1); + if ((arg1 & 0xff00) == (arg2 & 0xff00)) { INC_AFL_AREA(idx); } + + } + + } + +} + +void HELPER(afl_compcov_64)(target_ulong cur_loc, target_ulong arg1, + target_ulong arg2) { + + register uintptr_t idx = cur_loc; + + if ((arg1 & 0xff00000000000000) == (arg2 & 0xff00000000000000)) { + + INC_AFL_AREA(idx + 6); + if ((arg1 & 0xff000000000000) == (arg2 & 0xff000000000000)) { + + INC_AFL_AREA(idx + 5); + if ((arg1 & 0xff0000000000) == (arg2 & 0xff0000000000)) { + + INC_AFL_AREA(idx + 4); + if ((arg1 & 0xff00000000) == (arg2 & 0xff00000000)) { + + INC_AFL_AREA(idx + 3); + if ((arg1 & 0xff000000) == (arg2 & 0xff000000)) { + + INC_AFL_AREA(idx + 2); + if ((arg1 & 0xff0000) == (arg2 & 0xff0000)) { + + INC_AFL_AREA(idx + 1); + if ((arg1 & 0xff00) == (arg2 & 0xff00)) { INC_AFL_AREA(idx); } + + } + + } + + } + + } + + } + + } + +} + +void HELPER(afl_cmplog_8)(target_ulong cur_loc, target_ulong arg1, + target_ulong arg2) { + + register uintptr_t k = (uintptr_t)cur_loc; + u32 hits = 0; + + if (__afl_cmp_map->headers[k].type != CMP_TYPE_INS) + __afl_cmp_map->headers[k].hits = 0; + + if (__afl_cmp_map->headers[k].hits == 0) { + + __afl_cmp_map->headers[k].type = CMP_TYPE_INS; + __afl_cmp_map->headers[k].shape = 0; + + } else { + + hits = __afl_cmp_map->headers[k].hits; + + } + + __afl_cmp_map->headers[k].hits = hits + 1; + + hits &= CMP_MAP_H - 1; + __afl_cmp_map->log[k][hits].v0 = arg1; + __afl_cmp_map->log[k][hits].v1 = arg2; + +} + +void HELPER(afl_cmplog_16)(target_ulong cur_loc, target_ulong arg1, + target_ulong arg2) { + + register uintptr_t k = (uintptr_t)cur_loc; + u32 hits = 0; + + if (__afl_cmp_map->headers[k].type != CMP_TYPE_INS) + __afl_cmp_map->headers[k].hits = 0; + 
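+  /* First visit to this comparison site: claim the slot and record the
+     operand width (shape is the operand size in bytes minus one, so 1 for
+     these 16-bit operands). From then on, hits is the write cursor into
+     the ring of logged operand pairs below. */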
+  if (__afl_cmp_map->headers[k].hits == 0) {
+
+    __afl_cmp_map->headers[k].type = CMP_TYPE_INS;
+    __afl_cmp_map->headers[k].shape = 1;
+
+  } else {
+
+    hits = __afl_cmp_map->headers[k].hits;
+
+  }
+
+  __afl_cmp_map->headers[k].hits = hits + 1;
+
+  hits &= CMP_MAP_H - 1;
+  __afl_cmp_map->log[k][hits].v0 = arg1;
+  __afl_cmp_map->log[k][hits].v1 = arg2;
+
+}
+
+void HELPER(afl_cmplog_32)(target_ulong cur_loc, target_ulong arg1,
+                           target_ulong arg2) {
+
+  register uintptr_t k = (uintptr_t)cur_loc;
+  u32 hits = 0;
+
+  if (__afl_cmp_map->headers[k].type != CMP_TYPE_INS)
+    __afl_cmp_map->headers[k].hits = 0;
+
+  if (__afl_cmp_map->headers[k].hits == 0) {
+
+    __afl_cmp_map->headers[k].type = CMP_TYPE_INS;
+    __afl_cmp_map->headers[k].shape = 3;
+
+  } else {
+
+    hits = __afl_cmp_map->headers[k].hits;
+
+  }
+
+  __afl_cmp_map->headers[k].hits = hits + 1;
+
+  hits &= CMP_MAP_H - 1;
+  __afl_cmp_map->log[k][hits].v0 = arg1;
+  __afl_cmp_map->log[k][hits].v1 = arg2;
+
+}
+
+void HELPER(afl_cmplog_64)(target_ulong cur_loc, target_ulong arg1,
+                           target_ulong arg2) {
+
+  register uintptr_t k = (uintptr_t)cur_loc;
+  u32 hits = 0;
+
+  if (__afl_cmp_map->headers[k].type != CMP_TYPE_INS)
+    __afl_cmp_map->headers[k].hits = 0;
+
+  if (__afl_cmp_map->headers[k].hits == 0) {
+
+    __afl_cmp_map->headers[k].type = CMP_TYPE_INS;
+    __afl_cmp_map->headers[k].shape = 7;
+
+  } else {
+
+    hits = __afl_cmp_map->headers[k].hits;
+
+  }
+
+  __afl_cmp_map->headers[k].hits = hits + 1;
+
+  hits &= CMP_MAP_H - 1;
+  __afl_cmp_map->log[k][hits].v0 = arg1;
+  __afl_cmp_map->log[k][hits].v1 = arg2;
+
+}
+
+#include <sys/mman.h>
+#include "linux-user/qemu.h" /* access_ok decls. */
+
+/*
+static int area_is_mapped(void *ptr, size_t len) {
+
+  char *p = ptr;
+  char *page = (char *)((uintptr_t)p & ~(sysconf(_SC_PAGE_SIZE) - 1));
+
+  int r = msync(page, (p - page) + len, MS_ASYNC);
+  if (r < 0) return errno != ENOMEM;
+  return 1;
+
+}
+*/
+
+void HELPER(afl_cmplog_rtn)(CPUArchState *env) {
+
+#if defined(TARGET_X86_64)
+
+  target_ulong arg1 = env->regs[R_EDI];
+  target_ulong arg2 = env->regs[R_ESI];
+
+#elif defined(TARGET_I386)
+
+  target_ulong *stack = AFL_G2H(env->regs[R_ESP]);
+
+  if (!access_ok(env_cpu(env), VERIFY_READ, env->regs[R_ESP],
+                 sizeof(target_ulong) * 2))
+    return;
+
+  // when this hook is executed, the retaddr is not on stack yet
+  target_ulong arg1 = stack[0];
+  target_ulong arg2 = stack[1];
+
+#else
+
+  // stupid code to make it compile
+  target_ulong arg1 = 0;
+  target_ulong arg2 = 0;
+  return;
+
+#endif
+
+  if (!access_ok(env_cpu(env), VERIFY_READ, arg1, 0x20) ||
+      !access_ok(env_cpu(env), VERIFY_READ, arg2, 0x20))
+    return;
+
+  void *ptr1 = AFL_G2H(arg1);
+  void *ptr2 = AFL_G2H(arg2);
+
+#if defined(TARGET_X86_64) || defined(TARGET_I386)
+  uintptr_t k = (uintptr_t)env->eip;
+#else
+  uintptr_t k = 0;
+#endif
+
+  k = (uintptr_t)(afl_hash_ip((uint64_t)k));
+  k &= (CMP_MAP_W - 1);
+
+  u32 hits = 0;
+
+  if (__afl_cmp_map->headers[k].type != CMP_TYPE_RTN) {
+    __afl_cmp_map->headers[k].type = CMP_TYPE_RTN;
+    __afl_cmp_map->headers[k].hits = 0;
+    __afl_cmp_map->headers[k].shape = 30;
+  } else {
+    hits = __afl_cmp_map->headers[k].hits;
+  }
+
+  __afl_cmp_map->headers[k].hits += 1;
+
+  hits &= CMP_MAP_RTN_H - 1;
+  ((struct cmpfn_operands *)__afl_cmp_map->log[k])[hits].v0_len = 31;
+  ((struct cmpfn_operands *)__afl_cmp_map->log[k])[hits].v1_len = 31;
+  __builtin_memcpy(((struct cmpfn_operands *)__afl_cmp_map->log[k])[hits].v0,
+                   ptr1, 31);
+  __builtin_memcpy(((struct cmpfn_operands *)__afl_cmp_map->log[k])[hits].v1,
+                   ptr2, 31);
+
+}
+
 /* 32-bit 
helpers */
 int32_t HELPER(div_i32)(int32_t arg1, int32_t arg2)
@@ -168,3 +445,541 @@ void HELPER(exit_atomic)(CPUArchState *env)
 {
     cpu_loop_exit_atomic(env_cpu(env), GETPC());
 }
+
+/////////////////////////////////////////////////
+// QASAN
+/////////////////////////////////////////////////
+
+#include "qemuafl/qasan-qemu.h"
+
+// options
+int qasan_max_call_stack = 16; // QASAN_MAX_CALL_STACK
+int qasan_symbolize = 1;       // QASAN_SYMBOLIZE
+int use_qasan = 0;
+
+__thread int qasan_disabled;
+
+__thread struct shadow_stack qasan_shadow_stack;
+
+#ifdef ASAN_GIOVESE
+
+#ifndef DO_NOT_USE_QASAN
+
+#include "qemuafl/asan-giovese-inl.h"
+
+#include <sys/syscall.h>
+#include <unistd.h>
+
+void asan_giovese_populate_context(struct call_context* ctx, target_ulong pc) {
+
+  ctx->size = MIN(qasan_shadow_stack.size, qasan_max_call_stack -1) +1;
+  ctx->addresses = calloc(sizeof(void*), ctx->size);
+
+#ifdef __NR_gettid
+  ctx->tid = (uint32_t)syscall(__NR_gettid);
+#else
+  pthread_id_np_t tid;
+  pthread_t self = pthread_self();
+  pthread_getunique_np(&self, &tid);
+  ctx->tid = (uint32_t)tid;
+#endif
+
+  ctx->addresses[0] = pc;
+
+  if (qasan_shadow_stack.size <= 0) return; // can be negative when a pop does not find a match
+
+  int i, j = 1;
+  for (i = qasan_shadow_stack.first->index -1; i >= 0 && j < qasan_max_call_stack; --i)
+    ctx->addresses[j++] = qasan_shadow_stack.first->buf[i];
+
+  struct shadow_stack_block* b = qasan_shadow_stack.first->next;
+  while (b && j < qasan_max_call_stack) {
+
+    for (i = SHADOW_BK_SIZE-1; i >= 0; --i)
+      ctx->addresses[j++] = b->buf[i];
+
+    b = b->next; // advance to the next block, or this loop never terminates
+
+  }
+
+}
+
+static void addr2line_cmd(char* lib, uintptr_t off, char** function, char** line) {
+
+  if (!qasan_symbolize) goto addr2line_cmd_skip;
+
+  FILE *fp;
+
+  size_t cmd_siz = 128 + strlen(lib);
+  char* cmd = malloc(cmd_siz);
+  snprintf(cmd, cmd_siz, "addr2line -f -e '%s' 0x%lx", lib, off);
+
+  fp = popen(cmd, "r");
+  free(cmd);
+
+  if (fp == NULL) goto addr2line_cmd_skip;
+
+  *function = malloc(PATH_MAX + 32);
+
+  if (!fgets(*function, PATH_MAX + 32, fp) || !strncmp(*function, "??", 2)) {
+
+    free(*function);
+    *function = NULL;
+
+  } else {
+
+    size_t l = strlen(*function);
+    if (l && (*function)[l-1] == '\n')
+      (*function)[l-1] = 0;
+
+  }
+
+  *line = malloc(PATH_MAX + 32);
+
+  if (!fgets(*line, PATH_MAX + 32, fp) || !strncmp(*line, "??:", 3) ||
+      !strncmp(*line, ":?", 2)) {
+
+    free(*line);
+    *line = NULL;
+
+  } else {
+
+    size_t l = strlen(*line);
+    if (l && (*line)[l-1] == '\n')
+      (*line)[l-1] = 0;
+
+  }
+
+  pclose(fp);
+
+  return;
+
+addr2line_cmd_skip:
+  *line = NULL;
+  *function = NULL;
+
+}
+
+char* asan_giovese_printaddr(target_ulong guest_addr) {
+
+  FILE *fp;
+  char *line = NULL;
+  size_t len = 0;
+  ssize_t read;
+
+  fp = fopen("/proc/self/maps", "r");
+  if (fp == NULL)
+    return NULL;
+
+  uint64_t img_min = 0; //, img_max = 0;
+  char img_path[512] = {0};
+
+  while ((read = getline(&line, &len, fp)) != -1) {
+
+    int fields, dev_maj, dev_min, inode;
+    uint64_t min, max, offset;
+    char flag_r, flag_w, flag_x, flag_p;
+    char path[512] = "";
+    fields = sscanf(line, "%"PRIx64"-%"PRIx64" %c%c%c%c %"PRIx64" %x:%x %d"
+                    " %512s", &min, &max, &flag_r, &flag_w, &flag_x,
+                    &flag_p, &offset, &dev_maj, &dev_min, &inode, path);
+
+    if ((fields < 10) || (fields > 11))
+      continue;
+
+    if (h2g_valid(min)) {
+
+      int flags = page_get_flags(h2g(min));
+      max = h2g_valid(max - 1) ? 
max : (uintptr_t)AFL_G2H(GUEST_ADDR_MAX) + 1; + if (page_check_range(h2g(min), max - min, flags) == -1) + continue; + + if (img_min && !strcmp(img_path, path)) { + //img_max = max; + } else { + img_min = min; + //img_max = max; + strncpy(img_path, path, 512); + } + + if (guest_addr >= h2g(min) && guest_addr < h2g(max - 1) + 1) { + + uintptr_t off = guest_addr - h2g(img_min); + + char* s; + char * function = NULL; + char * codeline = NULL; + if (strlen(path)) { + addr2line_cmd(path, off, &function, &codeline); + if (!function) + addr2line_cmd(path, guest_addr, &function, &codeline); + } + + if (function) { + + if (codeline) { + + size_t l = strlen(function) + strlen(codeline) + 32; + s = malloc(l); + snprintf(s, l, " in %s %s", function, codeline); + free(codeline); + + } else { + + size_t l = strlen(function) + strlen(path) + 32; + s = malloc(l); + snprintf(s, l, " in %s (%s+0x%lx)", function, path, + off); + + } + + free(function); + + } else { + + size_t l = strlen(path) + 32; + s = malloc(l); + snprintf(s, l, " (%s+0x%lx)", path, off); + + } + + free(line); + fclose(fp); + return s; + + } + + } + + } + + free(line); + fclose(fp); + + return NULL; + +} + +#endif + +void HELPER(qasan_shadow_stack_push)(target_ulong ptr) { + +#ifndef DO_NOT_USE_QASAN +#if defined(TARGET_ARM) + ptr &= ~1; +#endif + + if (unlikely(!qasan_shadow_stack.first)) { + + qasan_shadow_stack.first = malloc(sizeof(struct shadow_stack_block)); + qasan_shadow_stack.first->index = 0; + qasan_shadow_stack.size = 0; // may be negative due to last pop + qasan_shadow_stack.first->next = NULL; + + } + + qasan_shadow_stack.first->buf[qasan_shadow_stack.first->index++] = ptr; + qasan_shadow_stack.size++; + + if (qasan_shadow_stack.first->index >= SHADOW_BK_SIZE) { + + struct shadow_stack_block* ns = malloc(sizeof(struct shadow_stack_block)); + ns->next = qasan_shadow_stack.first; + ns->index = 0; + qasan_shadow_stack.first = ns; + } +#endif + +} + +void HELPER(qasan_shadow_stack_pop)(target_ulong ptr) { + +#ifndef DO_NOT_USE_QASAN +#if defined(TARGET_ARM) + ptr &= ~1; +#endif + + struct shadow_stack_block* cur_bk = qasan_shadow_stack.first; + if (unlikely(cur_bk == NULL)) return; + + do { + + cur_bk->index--; + qasan_shadow_stack.size--; + + if (cur_bk->index < 0) { + + struct shadow_stack_block* ns = cur_bk->next; + free(cur_bk); + cur_bk = ns; + if (!cur_bk) break; + cur_bk->index--; + } + + } while(cur_bk->buf[cur_bk->index] != ptr); + + qasan_shadow_stack.first = cur_bk; +#endif + +} + +#endif + +target_long qasan_actions_dispatcher(void *cpu_env, + target_long action, target_long arg1, + target_long arg2, target_long arg3) { + +#ifndef DO_NOT_USE_QASAN + CPUArchState *env = cpu_env; + + switch(action) { +#ifdef ASAN_GIOVESE + case QASAN_ACTION_CHECK_LOAD: + if (asan_giovese_guest_loadN(arg1, arg2)) { + asan_giovese_report_and_crash(ACCESS_TYPE_LOAD, arg1, arg2, env); + } + break; + + case QASAN_ACTION_CHECK_STORE: + if (asan_giovese_guest_storeN(arg1, arg2)) { + asan_giovese_report_and_crash(ACCESS_TYPE_STORE, arg1, arg2, env); + } + break; + + case QASAN_ACTION_POISON: + asan_giovese_poison_guest_region(arg1, arg2, arg3); + break; + + case QASAN_ACTION_USER_POISON: + asan_giovese_user_poison_guest_region(arg1, arg2); + break; + + case QASAN_ACTION_UNPOISON: + asan_giovese_unpoison_guest_region(arg1, arg2); + break; + + case QASAN_ACTION_IS_POISON: + return asan_giovese_guest_loadN(arg1, arg2); + + case QASAN_ACTION_ALLOC: { + struct call_context* ctx = calloc(sizeof(struct call_context), 1); + 
asan_giovese_populate_context(ctx, PC_GET(env)); + asan_giovese_alloc_insert(arg1, arg2, ctx); + break; + } + + case QASAN_ACTION_DEALLOC: { + struct chunk_info* ckinfo = asan_giovese_alloc_search(arg1); + if (ckinfo) { + if (ckinfo->start != arg1) + asan_giovese_badfree(arg1, PC_GET(env)); + ckinfo->free_ctx = calloc(sizeof(struct call_context), 1); + asan_giovese_populate_context(ckinfo->free_ctx, PC_GET(env)); + } else { + asan_giovese_badfree(arg1, PC_GET(env)); + } + break; + } +#else + case QASAN_ACTION_CHECK_LOAD: + __asan_loadN(AFL_G2H(arg1), arg2); + break; + + case QASAN_ACTION_CHECK_STORE: + __asan_storeN(AFL_G2H(arg1), arg2); + break; + + case QASAN_ACTION_POISON: + __asan_poison_memory_region(AFL_G2H(arg1), arg2); + break; + + case QASAN_ACTION_USER_POISON: + __asan_poison_memory_region(AFL_G2H(arg1), arg2); + break; + + case QASAN_ACTION_UNPOISON: + __asan_unpoison_memory_region(AFL_G2H(arg1), arg2); + break; + + case QASAN_ACTION_IS_POISON: + return __asan_region_is_poisoned(AFL_G2H(arg1), arg2) != NULL; + + case QASAN_ACTION_ALLOC: + break; + + case QASAN_ACTION_DEALLOC: + break; +#endif + + case QASAN_ACTION_ENABLE: + qasan_disabled = 0; + break; + + case QASAN_ACTION_DISABLE: + qasan_disabled = 1; + break; + + case QASAN_ACTION_SWAP_STATE: { + int r = qasan_disabled; + qasan_disabled = arg1; + return r; + } + + default: + fprintf(stderr, "Invalid QASAN action " TARGET_FMT_ld "\n", action); + abort(); + } +#endif + + return 0; +} + +dh_ctype(tl) HELPER(qasan_fake_instr)(CPUArchState *env, dh_ctype(tl) action, + dh_ctype(tl) arg1, dh_ctype(tl) arg2, + dh_ctype(tl) arg3) { + + return qasan_actions_dispatcher(env, action, arg1, arg2, arg3); + +} + +void HELPER(qasan_load1)(CPUArchState *env, target_ulong addr) { + +#ifndef DO_NOT_USE_QASAN + if (qasan_disabled) return; + + void* ptr = (void*)AFL_G2H(addr); + +#ifdef ASAN_GIOVESE + if (asan_giovese_load1(ptr)) { + asan_giovese_report_and_crash(ACCESS_TYPE_LOAD, addr, 1, env); + } +#else + __asan_load1(ptr); +#endif +#endif + +} + +void HELPER(qasan_load2)(CPUArchState *env, target_ulong addr) { + +#ifndef DO_NOT_USE_QASAN + if (qasan_disabled) return; + + void* ptr = (void*)AFL_G2H(addr); + +#ifdef ASAN_GIOVESE + if (asan_giovese_load2(ptr)) { + asan_giovese_report_and_crash(ACCESS_TYPE_LOAD, addr, 2, env); + } +#else + __asan_load2(ptr); +#endif +#endif + +} + +void HELPER(qasan_load4)(CPUArchState *env, target_ulong addr) { + +#ifndef DO_NOT_USE_QASAN + if (qasan_disabled) return; + + void* ptr = (void*)AFL_G2H(addr); + +#ifdef ASAN_GIOVESE + if (asan_giovese_load4(ptr)) { + asan_giovese_report_and_crash(ACCESS_TYPE_LOAD, addr, 4, env); + } +#else + __asan_load4(ptr); +#endif +#endif + +} + +void HELPER(qasan_load8)(CPUArchState *env, target_ulong addr) { + +#ifndef DO_NOT_USE_QASAN + if (qasan_disabled) return; + + void* ptr = (void*)AFL_G2H(addr); + +#ifdef ASAN_GIOVESE + if (asan_giovese_load8(ptr)) { + asan_giovese_report_and_crash(ACCESS_TYPE_LOAD, addr, 8, env); + } +#else + __asan_load8(ptr); +#endif +#endif + +} + +void HELPER(qasan_store1)(CPUArchState *env, target_ulong addr) { + +#ifndef DO_NOT_USE_QASAN + if (qasan_disabled) return; + + void* ptr = (void*)AFL_G2H(addr); + +#ifdef ASAN_GIOVESE + if (asan_giovese_store1(ptr)) { + asan_giovese_report_and_crash(ACCESS_TYPE_STORE, addr, 1, env); + } +#else + __asan_store1(ptr); +#endif +#endif + +} + +void HELPER(qasan_store2)(CPUArchState *env, target_ulong addr) { + +#ifndef DO_NOT_USE_QASAN + if (qasan_disabled) return; + + void* ptr = (void*)AFL_G2H(addr); + 
+#ifdef ASAN_GIOVESE + if (asan_giovese_store2(ptr)) { + asan_giovese_report_and_crash(ACCESS_TYPE_STORE, addr, 2, env); + } +#else + __asan_store2(ptr); +#endif +#endif + +} + +void HELPER(qasan_store4)(CPUArchState *env, target_ulong addr) { + +#ifndef DO_NOT_USE_QASAN + if (qasan_disabled) return; + + void* ptr = (void*)AFL_G2H(addr); + +#ifdef ASAN_GIOVESE + if (asan_giovese_store4(ptr)) { + asan_giovese_report_and_crash(ACCESS_TYPE_STORE, addr, 4, env); + } +#else + __asan_store4(ptr); +#endif +#endif + +} + +void HELPER(qasan_store8)(CPUArchState *env, target_ulong addr) { + +#ifndef DO_NOT_USE_QASAN + if (qasan_disabled) return; + + void* ptr = (void*)AFL_G2H(addr); + +#ifdef ASAN_GIOVESE + if (asan_giovese_store8(ptr)) { + asan_giovese_report_and_crash(ACCESS_TYPE_STORE, addr, 8, env); + } +#else + __asan_store8(ptr); +#endif +#endif + +} diff --git a/accel/tcg/tcg-runtime.h b/accel/tcg/tcg-runtime.h index 91a5b7e85f59e..9a73f5bb36f55 100644 --- a/accel/tcg/tcg-runtime.h +++ b/accel/tcg/tcg-runtime.h @@ -331,3 +331,28 @@ DEF_HELPER_FLAGS_4(gvec_leu32, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) DEF_HELPER_FLAGS_4(gvec_leu64, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) DEF_HELPER_FLAGS_5(gvec_bitsel, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32) + +DEF_HELPER_FLAGS_1(afl_entry_routine, TCG_CALL_NO_RWG, void, env) +DEF_HELPER_FLAGS_1(afl_persistent_routine, TCG_CALL_NO_RWG, void, env) +DEF_HELPER_FLAGS_1(afl_maybe_log, TCG_CALL_NO_RWG, void, tl) +DEF_HELPER_FLAGS_1(afl_maybe_log_trace, TCG_CALL_NO_RWG, void, tl) +DEF_HELPER_FLAGS_3(afl_compcov_16, TCG_CALL_NO_RWG, void, tl, tl, tl) +DEF_HELPER_FLAGS_3(afl_compcov_32, TCG_CALL_NO_RWG, void, tl, tl, tl) +DEF_HELPER_FLAGS_3(afl_compcov_64, TCG_CALL_NO_RWG, void, tl, tl, tl) +DEF_HELPER_FLAGS_3(afl_cmplog_8, TCG_CALL_NO_RWG, void, tl, tl, tl) +DEF_HELPER_FLAGS_3(afl_cmplog_16, TCG_CALL_NO_RWG, void, tl, tl, tl) +DEF_HELPER_FLAGS_3(afl_cmplog_32, TCG_CALL_NO_RWG, void, tl, tl, tl) +DEF_HELPER_FLAGS_3(afl_cmplog_64, TCG_CALL_NO_RWG, void, tl, tl, tl) +DEF_HELPER_FLAGS_1(afl_cmplog_rtn, TCG_CALL_NO_RWG, void, env) + +DEF_HELPER_FLAGS_5(qasan_fake_instr, TCG_CALL_NO_RWG, tl, env, tl, tl, tl, tl) +DEF_HELPER_FLAGS_2(qasan_load1, TCG_CALL_NO_RWG, void, env, tl) +DEF_HELPER_FLAGS_2(qasan_load2, TCG_CALL_NO_RWG, void, env, tl) +DEF_HELPER_FLAGS_2(qasan_load4, TCG_CALL_NO_RWG, void, env, tl) +DEF_HELPER_FLAGS_2(qasan_load8, TCG_CALL_NO_RWG, void, env, tl) +DEF_HELPER_FLAGS_2(qasan_store1, TCG_CALL_NO_RWG, void, env, tl) +DEF_HELPER_FLAGS_2(qasan_store2, TCG_CALL_NO_RWG, void, env, tl) +DEF_HELPER_FLAGS_2(qasan_store4, TCG_CALL_NO_RWG, void, env, tl) +DEF_HELPER_FLAGS_2(qasan_store8, TCG_CALL_NO_RWG, void, env, tl) +DEF_HELPER_FLAGS_1(qasan_shadow_stack_push, TCG_CALL_NO_RWG, void, tl) +DEF_HELPER_FLAGS_1(qasan_shadow_stack_pop, TCG_CALL_NO_RWG, void, tl) diff --git a/accel/tcg/translate-all.c b/accel/tcg/translate-all.c index bbd919a39328f..cbc9f6818bf14 100644 --- a/accel/tcg/translate-all.c +++ b/accel/tcg/translate-all.c @@ -63,6 +63,85 @@ #include "hw/core/tcg-cpu-ops.h" #include "internal.h" +#include "qemuafl/common.h" +#include "tcg/tcg-op.h" +#include "qemuafl/imported/afl_hash.h" + +#include + +__thread int cur_block_is_good; + +static int afl_track_unstable_log_fd(void) { + static bool initialized = false; + static int track_fd = -1; + if (unlikely(!initialized)) { + char * fname = getenv("AFL_QEMU_TRACK_UNSTABLE"); + if (fname != NULL) { + track_fd = open(fname, O_WRONLY | O_APPEND | O_CREAT, S_IRUSR); + } + initialized = true; + if 
(track_fd > 0) dprintf(track_fd, "QEMU UNSTABLE TRACKING ENABLED\n"); + } + return track_fd; +} + +void HELPER(afl_maybe_log)(target_ulong cur_loc) { + register uintptr_t afl_idx = cur_loc ^ afl_prev_loc; + + INC_AFL_AREA(afl_idx); + + // afl_prev_loc = ((cur_loc & (MAP_SIZE - 1) >> 1)) | + // ((cur_loc & 1) << ((int)ceil(log2(MAP_SIZE)) -1)); + afl_prev_loc = cur_loc >> 1; +} + +void HELPER(afl_maybe_log_trace)(target_ulong cur_loc) { + register uintptr_t afl_idx = cur_loc; + INC_AFL_AREA(afl_idx); +} + +static target_ulong pc_hash(target_ulong x) { + x = ((x >> 16) ^ x) * 0x45d9f3b; + x = ((x >> 16) ^ x) * 0x45d9f3b; + x = (x >> 16) ^ x; + return x; +} + +/* Generates TCG code for AFL's tracing instrumentation. */ +static void afl_gen_trace(target_ulong cur_loc) { + + /* Optimize for cur_loc > afl_end_code, which is the most likely case on + Linux systems. */ + + cur_block_is_good = afl_must_instrument(cur_loc); + + if (!cur_block_is_good) + return; + + /* Looks like QEMU always maps to fixed locations, so ASLR is not a + concern. Phew. But instruction addresses may be aligned. Let's mangle + the value to get something quasi-uniform. */ + + // cur_loc = (cur_loc >> 4) ^ (cur_loc << 8); + // cur_loc &= MAP_SIZE - 1; + cur_loc = (uintptr_t)(afl_hash_ip((uint64_t)cur_loc)); + cur_loc &= (MAP_SIZE - 1); + + /* Implement probabilistic instrumentation by looking at scrambled block + address. This keeps the instrumented locations stable across runs. */ + + if (cur_loc >= afl_inst_rms) return; + + TCGv cur_loc_v = tcg_const_tl(cur_loc); + if (unlikely(afl_track_unstable_log_fd() >= 0)) { + gen_helper_afl_maybe_log_trace(cur_loc_v); + } else { + gen_helper_afl_maybe_log(cur_loc_v); + } + tcg_temp_free(cur_loc_v); + +} + /* #define DEBUG_TB_INVALIDATE */ /* #define DEBUG_TB_FLUSH */ /* make various TB consistency checks */ @@ -1839,6 +1918,102 @@ tb_link_page(TranslationBlock *tb, tb_page_addr_t phys_pc, return tb; } +/* Called with mmap_lock held for user mode emulation. */ +TranslationBlock *afl_gen_edge(CPUState *cpu, unsigned long afl_id) +{ + CPUArchState *env = cpu->env_ptr; + TranslationBlock *tb; + tcg_insn_unit *gen_code_buf; + int gen_code_size, search_size; + + assert_memory_lock(); + + buffer_overflow1: + tb = tcg_tb_alloc(tcg_ctx); + if (unlikely(!tb)) { + /* flush must be done */ + tb_flush(cpu); + mmap_unlock(); + /* Make the execution loop process the flush as soon as possible. */ + cpu->exception_index = EXCP_INTERRUPT; + cpu_loop_exit(cpu); + } + + gen_code_buf = tcg_ctx->code_gen_ptr; + tb->tc.ptr = gen_code_buf; + tb->pc = 0; + tb->cs_base = 0; + tb->flags = 0; + tb->cflags = 0; + tb->trace_vcpu_dstate = *cpu->trace_dstate; + tcg_ctx->tb_cflags = 0; + + tcg_func_start(tcg_ctx); + + tcg_ctx->cpu = env_cpu(env); + + target_ulong afl_loc = afl_id & (MAP_SIZE -1); + //*afl_dynamic_size = MAX(*afl_dynamic_size, afl_loc); + TCGv tmp0 = tcg_const_tl(afl_loc); + gen_helper_afl_maybe_log(tmp0); + tcg_temp_free(tmp0); + tcg_gen_goto_tb(0); + tcg_gen_exit_tb(tb, 0); + + tcg_ctx->cpu = NULL; + + trace_translate_block(tb, tb->pc, tb->tc.ptr); + + /* generate machine code */ + tb->jmp_reset_offset[0] = TB_JMP_RESET_OFFSET_INVALID; + tb->jmp_reset_offset[1] = TB_JMP_RESET_OFFSET_INVALID; + tcg_ctx->tb_jmp_reset_offset = tb->jmp_reset_offset; + if (TCG_TARGET_HAS_direct_jump) { + tcg_ctx->tb_jmp_insn_offset = tb->jmp_target_arg; + tcg_ctx->tb_jmp_target_addr = NULL; + } else { + tcg_ctx->tb_jmp_insn_offset = NULL; + tcg_ctx->tb_jmp_target_addr = tb->jmp_target_arg; + } + + /* ??? 
Overflow could be handled better here. In particular, we
+     don't need to re-do gen_intermediate_code, nor should we re-do
+     the tcg optimization currently hidden inside tcg_gen_code. All
+     that should be required is to flush the TBs, allocate a new TB,
+     re-initialize it per above, and re-do the actual code generation. */
+    gen_code_size = tcg_gen_code(tcg_ctx, tb);
+    if (unlikely(gen_code_size < 0)) {
+        goto buffer_overflow1;
+    }
+    search_size = encode_search(tb, (void *)gen_code_buf + gen_code_size);
+    if (unlikely(search_size < 0)) {
+        goto buffer_overflow1;
+    }
+    tb->tc.size = gen_code_size;
+
+    qatomic_set(&tcg_ctx->code_gen_ptr, (void *)
+        ROUND_UP((uintptr_t)gen_code_buf + gen_code_size + search_size,
+                 CODE_GEN_ALIGN));
+
+    /* init jump list */
+    qemu_spin_init(&tb->jmp_lock);
+    tb->jmp_list_head = (uintptr_t)NULL;
+    tb->jmp_list_next[0] = (uintptr_t)NULL;
+    tb->jmp_list_next[1] = (uintptr_t)NULL;
+    tb->jmp_dest[0] = (uintptr_t)NULL;
+    tb->jmp_dest[1] = (uintptr_t)NULL;
+
+    /* init original jump addresses which have been set during tcg_gen_code() */
+    if (tb->jmp_reset_offset[0] != TB_JMP_RESET_OFFSET_INVALID) {
+        tb_reset_jump(tb, 0);
+    }
+    if (tb->jmp_reset_offset[1] != TB_JMP_RESET_OFFSET_INVALID) {
+        tb_reset_jump(tb, 1);
+    }
+
+    return tb;
+}
+
 /* Called with mmap_lock held for user mode emulation. */
 TranslationBlock *tb_gen_code(CPUState *cpu,
                               target_ulong pc, target_ulong cs_base,
@@ -1914,12 +2089,26 @@ TranslationBlock *tb_gen_code(CPUState *cpu,
     tcg_func_start(tcg_ctx);
 
     tcg_ctx->cpu = env_cpu(env);
+    afl_gen_trace(pc);
     gen_intermediate_code(cpu, tb, max_insns);
     tcg_ctx->cpu = NULL;
     max_insns = tb->icount;
 
     trace_translate_block(tb, tb->pc, tb->tc.ptr);
 
+    /* If we are tracking block instability, then, since afl-fuzz will log the
+       ids of the unstable blocks in fuzzer_stats, we must log these alongside
+       the instruction pointer so that the user can associate them back with
+       the actual binary */
+    int track_fd = afl_track_unstable_log_fd();
+    if (unlikely(track_fd >= 0)) {
+        uint64_t ip = (uint64_t)pc;
+        uintptr_t block_id = (uintptr_t)(afl_hash_ip(ip));
+        block_id &= (MAP_SIZE - 1);
+        dprintf(track_fd, "BLOCK ID: 0x%016" PRIx64 ", PC: 0x%016zx-0x%016zx\n",
+                block_id, ip, ip + tb->size);
+    }
+
     /* generate machine code */
     tb->jmp_reset_offset[0] = TB_JMP_RESET_OFFSET_INVALID;
     tb->jmp_reset_offset[1] = TB_JMP_RESET_OFFSET_INVALID;
diff --git a/accel/tcg/translator.c b/accel/tcg/translator.c
index 2dfc27102f94b..b148671c340a9 100644
--- a/accel/tcg/translator.c
+++ b/accel/tcg/translator.c
@@ -19,6 +19,8 @@
 #include "exec/plugin-gen.h"
 #include "sysemu/replay.h"
 
+#include "qemuafl/common.h"
+
 /* Pairs with tcg_clear_temp_count.
    To be called by #TranslatorOps.{translate_insn,tb_stop} if
    (1) the target is sufficiently clean to support reporting,
@@ -91,6 +93,34 @@ void translator_loop(const TranslatorOps *ops, DisasContextBase *db,
         }
     }
 
+    if (db->pc_next == afl_entry_point) {
+        static bool first = true;
+        /*
+         * We guard this section since we flush the translation cache after
+         * we load the configuration, which in turn means we will need to
+         * re-translate our block. If we were to perform this flush every
+         * time (rather than just when our configuration is first loaded),
+         * we would just end up translating this block repeatedly.
+         */
+        if (first) {
+            afl_setup();
+            /*
+             * We flush the translation cache here since we may already have
+             * translated some blocks and included instrumentation in them
+             * before we have processed the configuration from the
+             * environment variables which configure which ranges to
+             * include and exclude. Therefore we may have some blocks in our
+             * cache which are incorrectly instrumented and cause some
+             * fuzzing stability or performance problems.
+             */
+            tb_flush(cpu);
+            first = false;
+        }
+        gen_helper_afl_entry_routine(cpu_env);
+    } else if (db->pc_next == afl_exit_point) {
+        _exit(0);
+    }
+
     /* Disassemble one instruction.  The translate_insn hook should
        update db->pc_next and db->is_jmp to indicate what should be
        done next -- either exiting this loop or locate the start of
diff --git a/configure b/configure
index cc435e2503fed..0843d10a3a2e7 100755
--- a/configure
+++ b/configure
@@ -798,6 +798,7 @@ Linux)
   linux="yes"
   linux_user="yes"
   vhost_user=${default_feature:-yes}
+  QEMU_LDFLAGS="-ldl $QEMU_LDFLAGS"
 ;;
 esac
@@ -2135,6 +2136,10 @@ EOF
   fi
 fi
 
+if cc_has_warning_flag "-Wno-unused-function"; then
+  QEMU_CFLAGS="$QEMU_CFLAGS -Wno-unused-function"
+fi
+
 # Disable -Wmissing-braces on older compilers that warn even for
 # the "universal" C zero initializer {0}.
 cat > $TMPC << EOF
@@ -5679,7 +5684,9 @@ if test "$gio" = "yes" ; then
     echo "CONFIG_GIO=y" >> $config_host_mak
     echo "GIO_CFLAGS=$gio_cflags" >> $config_host_mak
     echo "GIO_LIBS=$gio_libs" >> $config_host_mak
-    echo "GDBUS_CODEGEN=$gdbus_codegen" >> $config_host_mak
+    if [ -n "$gdbus_codegen" ]; then
+        echo "GDBUS_CODEGEN=$gdbus_codegen" >> $config_host_mak
+    fi
 fi
 echo "CONFIG_TLS_PRIORITY=\"$tls_priority\"" >> $config_host_mak
 if test "$gnutls" = "yes" ; then
diff --git a/contrib/plugins/Makefile b/contrib/plugins/Makefile
index b9d7935e5ef0c..44b355cbb0a4b 100644
--- a/contrib/plugins/Makefile
+++ b/contrib/plugins/Makefile
@@ -18,6 +18,7 @@ NAMES += hotpages
 NAMES += howvec
 NAMES += lockstep
 NAMES += hwprofile
+NAMES += drcov
 
 SONAMES := $(addsuffix .so,$(addprefix lib,$(NAMES)))
 
diff --git a/contrib/plugins/drcov.c b/contrib/plugins/drcov.c
new file mode 100644
index 0000000000000..94fd2764aca5e
--- /dev/null
+++ b/contrib/plugins/drcov.c
@@ -0,0 +1,349 @@
+/*
+ * Copyright (C) 2021, Ivanov Arkady
+ * Copyright (C) 2023, Jean-Romain Garnier
+ *
+ * Drcov - a DynamoRIO-based tool that collects coverage information
+ * from a binary. The primary goal of this script is to have coverage
+ * log files that work in Lighthouse.
+ *
+ * License: GNU GPL, version 2 or later.
+ *   See the COPYING file in the top-level directory. 
+ */ + +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include + +QEMU_PLUGIN_EXPORT int qemu_plugin_version = QEMU_PLUGIN_VERSION; + +static FILE *fp; +static const char *file_name = "file.drcov.trace"; +static GMutex bb_lock; +static GMutex mod_lock; + +typedef struct { + uint32_t start; + uint16_t size; + uint16_t mod_id; + bool exec; +} bb_entry_t; + +typedef struct { + uint16_t id; + uint64_t base; + uint64_t end; + uint64_t entry; + gchar* path; + bool loaded; +} module_entry_t; + +/* Translated blocks */ +static GPtrArray *blocks; + +/* Loaded modules */ +static GPtrArray *modules; +static uint16_t next_mod_id = 0; + +/* Plugin */ + +static void printf_char_array32(uint32_t data) +{ + const uint8_t *bytes = (const uint8_t *)(&data); + fwrite(bytes, sizeof(char), sizeof(data), fp); +} + +static void printf_char_array16(uint16_t data) +{ + const uint8_t *bytes = (const uint8_t *)(&data); + fwrite(bytes, sizeof(char), sizeof(data), fp); +} + +static void printf_mod(gpointer data, gpointer user_data) +{ + module_entry_t *mod = (module_entry_t *)data; + fprintf(fp, "%d, 0x%" PRIx64 ", 0x%" PRIx64 ", 0x%" PRIx64 ", %s\n", + mod->id, mod->base, mod->end, mod->entry, mod->path); + g_free(mod); +} + +static void printf_bb(gpointer data, gpointer user_data) +{ + bb_entry_t *bb = (bb_entry_t *)data; + if (bb->exec) { + printf_char_array32(bb->start); + printf_char_array16(bb->size); + printf_char_array16(bb->mod_id); + } + g_free(bb); +} + +static void printf_header(unsigned long count) +{ + fprintf(fp, "DRCOV VERSION: 2\n"); + fprintf(fp, "DRCOV FLAVOR: drcov-64\n"); + fprintf(fp, "Module Table: version 2, count %d\n", modules->len); + fprintf(fp, "Columns: id, base, end, entry, path\n"); + g_ptr_array_foreach(modules, printf_mod, NULL); + fprintf(fp, "BB Table: %ld bbs\n", count); +} + +static module_entry_t *create_mod_entry(MapInfo *info) +{ + module_entry_t *module = g_new0(module_entry_t, 1); + module->id = next_mod_id++; + module->base = info->start; + module->end = info->end; + module->entry = 0; + module->path = g_strdup(info->path); + module->loaded = true; + return module; +} + +static guint insert_mod_entry(module_entry_t *module, guint start_idx) +{ + module_entry_t *entry; + guint i = start_idx; + guint insert_idx = 0; + + // Find where to insert this modules, if it doesn't already exist, so we + // keep the module list sorted + while (i < modules->len) { + entry = (module_entry_t *)modules->pdata[i]; + + // If the new module ends before the current one starts, insert it here + // to keep the modules array sorted + if (entry->base >= module->end) { + g_ptr_array_insert(modules, i, module); + return i++; + } + + // If the new module starts after the current one ends, we'll insert it + // later + if (entry->end <= module->base) { + i++; + continue; + } + + // Now, two cases remain: the new module is the same as the current + // entry, or the new module is different but has intersecting addresses + + // Start by checking if the two modules match + if ( + entry->base == module->base + && entry->end == module->end + && !strcmp(entry->path, module->path) + ) { + // This module is already in the array, not need to insert it again + entry->loaded = true; + g_free(module); + return i; + } + + // We know this is a new module and there is at least one old module + // with intersecting addresses + + // Mark all modules which start before the new one as unloaded + // Note: there is no need to check entry->end because of the previous + // checks + 
while (entry->base < module->base && i < modules->len) { + entry = (module_entry_t *)modules->pdata[i]; + entry->loaded = false; + i++; + } + + // This is the right place to insert the new module, so save this index + insert_idx = i; + + // We still need to mark all the modules which start before the new one + // ends as unloaded + while (entry->base < module->end && i < modules->len) { + entry = (module_entry_t *)modules->pdata[i]; + entry->loaded = false; + i++; + } + + // Finally, insert the new module + g_ptr_array_insert(modules, insert_idx, module); + return i++; + } + + // If nowhere was found to insert the module, simply append it + g_ptr_array_add(modules, module); + return modules->len; +} + +static void update_mod_entries(void) +{ + guint insert_idx; + module_entry_t *module; + GSList *maps, *iter; + MapInfo *info; + + // Read modules from self_maps, which is unfortunately very slow, and insert + // them in our internal array + module = NULL; + insert_idx = 0; + maps = read_self_maps(); + for (iter = maps; iter; iter = g_slist_next(iter)) { + info = (MapInfo *)iter->data; + // We want to merge contiguous entries for the same file into a single + // module + if (NULL == module) { + // There is no previous entry, create one and merge it later + module = create_mod_entry(info); + } else if (module->end == info->start && !strcmp(module->path, info->path)) { + // This new entry can be merged with the existing module and + // inserted later + module->end = info->end; + continue; + } else if (strlen(info->path) > 0 && info->path[0] != '[') { + // This is a different entry which also happens to be interesting, + // so insert the previous one and create a new + insert_idx = insert_mod_entry(module, insert_idx); + module = create_mod_entry(info); + } + } + + // If there is a module left over, insert it now + if (NULL != module) { + insert_mod_entry(module, insert_idx); + } + + free_self_maps(maps); +} + +static module_entry_t *get_cached_exec_mod_entry(uint64_t pc) +{ + guint i; + module_entry_t *entry; + + // Check if this address is contained within one of the modules we already + // know about + for (i = 0; i < modules->len; i++) { + entry = (module_entry_t *)modules->pdata[i]; + if (pc >= entry->base && pc < entry->end && entry->loaded) { + return entry; + } + } + return NULL; +} + +static module_entry_t *get_exec_mod_entry(uint64_t pc) +{ + module_entry_t *module = NULL; + + g_mutex_lock(&mod_lock); + + // Find module within which pc is contained + // Important: This will not work properly if a module is dynamically loaded + // (e.g. 
using dlopen), unloaded, and then another is loaded at the same + // address + module = get_cached_exec_mod_entry(pc); + + // If none is found, try to reload module list and look again + if (NULL == module) { + update_mod_entries(); + module = get_cached_exec_mod_entry(pc); + } + + g_mutex_unlock(&mod_lock); + return module; +} + +static void count_block(gpointer data, gpointer user_data) +{ + unsigned long *count = (unsigned long *) user_data; + bb_entry_t *bb = (bb_entry_t *)data; + if (bb->exec) { + *count = *count + 1; + } +} + +static void plugin_exit(qemu_plugin_id_t id, void *p) +{ + unsigned long count = 0; + g_mutex_lock(&bb_lock); + g_mutex_lock(&mod_lock); + g_ptr_array_foreach(blocks, count_block, &count); + + /* Print function */ + printf_header(count); + g_ptr_array_foreach(blocks, printf_bb, NULL); + + /* Clear */ + g_ptr_array_free(blocks, true); + g_ptr_array_free(modules, true); + + fclose(fp); + + g_mutex_unlock(&mod_lock); + g_mutex_unlock(&bb_lock); +} + +static void plugin_init(void) +{ + fp = fopen(file_name, "wb"); + blocks = g_ptr_array_sized_new(128); + modules = g_ptr_array_sized_new(16); +} + +static void vcpu_tb_exec(unsigned int cpu_index, void *udata) +{ + bb_entry_t *bb = (bb_entry_t *) udata; + + g_mutex_lock(&bb_lock); + bb->exec = true; + g_mutex_unlock(&bb_lock); +} + +static void vcpu_tb_trans(qemu_plugin_id_t id, struct qemu_plugin_tb *tb) +{ + uint64_t pc = qemu_plugin_tb_vaddr(tb); + size_t n = qemu_plugin_tb_n_insns(tb); + module_entry_t *module = get_exec_mod_entry(pc); + bb_entry_t *bb = g_new0(bb_entry_t, 1); + + for (int i = 0; i < n; i++) { + bb->size += qemu_plugin_insn_size(qemu_plugin_tb_get_insn(tb, i)); + } + + bb->start = module ? (pc - module->base): pc; + bb->mod_id = module ? module->id: -1; + bb->exec = false; + + g_mutex_lock(&bb_lock); + g_ptr_array_add(blocks, bb); + g_mutex_unlock(&bb_lock); + + qemu_plugin_register_vcpu_tb_exec_cb(tb, vcpu_tb_exec, + QEMU_PLUGIN_CB_NO_REGS, + (void *)bb); + +} + +QEMU_PLUGIN_EXPORT +int qemu_plugin_install(qemu_plugin_id_t id, const qemu_info_t *info, + int argc, char **argv) +{ + for (int i = 0; i < argc; i++) { + g_auto(GStrv) tokens = g_strsplit(argv[i], "=", 2); + if (g_strcmp0(tokens[0], "filename") == 0) { + file_name = g_strdup(tokens[1]); + } + } + + plugin_init(); + + qemu_plugin_register_vcpu_tb_trans_cb(id, vcpu_tb_trans); + qemu_plugin_register_atexit_cb(id, plugin_exit, NULL); + + return 0; +} diff --git a/gdbstub.c b/gdbstub.c index 3ee40479b69be..5c15814504071 100644 --- a/gdbstub.c +++ b/gdbstub.c @@ -63,7 +63,13 @@ static int phy_memory_mode; #endif -static inline int target_memory_rw_debug(CPUState *cpu, target_ulong addr, + +void (*signal_handler)(int); +void set_signal_callback(void (*sg)(int)){ + signal_handler = sg; +} + +int target_memory_rw_debug(CPUState *cpu, target_ulong addr, uint8_t *buf, int len, bool is_write) { CPUClass *cc; @@ -377,7 +383,7 @@ typedef struct GDBState { static int sstep_flags = SSTEP_ENABLE|SSTEP_NOIRQ|SSTEP_NOTIMER; /* Retrieves flags for single step mode. */ -static int get_sstep_flags(void) +int get_sstep_flags(void) { /* * In replay mode all events written into the log should be replayed. @@ -467,7 +473,7 @@ int use_gdb_syscalls(void) } /* Resume execution. */ -static inline void gdb_continue(void) +void gdb_continue(void) { #ifdef CONFIG_USER_ONLY @@ -920,7 +926,7 @@ static const char *get_feature_xml(const char *p, const char **newp, return name ? 
xml_builtin[i][1] : NULL; } -static int gdb_read_register(CPUState *cpu, GByteArray *buf, int reg) +int gdb_read_register(CPUState *cpu, GByteArray *buf, int reg) { CPUClass *cc = CPU_GET_CLASS(cpu); CPUArchState *env = cpu->env_ptr; @@ -938,7 +944,7 @@ static int gdb_read_register(CPUState *cpu, GByteArray *buf, int reg) return 0; } -static int gdb_write_register(CPUState *cpu, uint8_t *mem_buf, int reg) +int gdb_write_register(CPUState *cpu, uint8_t *mem_buf, int reg) { CPUClass *cc = CPU_GET_CLASS(cpu); CPUArchState *env = cpu->env_ptr; @@ -1017,7 +1023,7 @@ static inline int xlat_gdb_type(CPUState *cpu, int gdbtype) } #endif -static int gdb_breakpoint_insert(int type, target_ulong addr, target_ulong len) +int gdb_breakpoint_insert(int type, target_ulong addr, target_ulong len) { CPUState *cpu; int err = 0; @@ -1054,7 +1060,7 @@ static int gdb_breakpoint_insert(int type, target_ulong addr, target_ulong len) } } -static int gdb_breakpoint_remove(int type, target_ulong addr, target_ulong len) +int gdb_breakpoint_remove(int type, target_ulong addr, target_ulong len) { CPUState *cpu; int err = 0; @@ -1122,7 +1128,7 @@ static void gdb_breakpoint_remove_all(void) } } -static void gdb_set_cpu_pc(target_ulong pc) +void gdb_set_cpu_pc(target_ulong pc) { CPUState *cpu = gdbserver_state.c_cpu; @@ -3129,49 +3135,66 @@ static void create_default_process(GDBState *s) } #ifdef CONFIG_USER_ONLY +char dbg[100]; int gdb_handlesig(CPUState *cpu, int sig) { - char buf[256]; - int n; + // sprintf(dbg, "sig: %d init: %d fd: %d\n", sig, gdbserver_state.init, gdbserver_state.fd); + // qemu_plugin_outs(dbg); - if (!gdbserver_state.init || gdbserver_state.fd < 0) { - return sig; + if (signal_handler){ + if (sig == GDB_SIGNAL_TRAP){ + signal_handler(sig); + } + else{ + gdbserver_state.signal = sig; + } } + else{ - /* disable single step if it was enabled */ - cpu_single_step(cpu, 0); - tb_flush(cpu); + char buf[256]; + int n; - if (sig != 0) { - snprintf(buf, sizeof(buf), "S%02x", target_signal_to_gdb(sig)); - put_packet(buf); - } - /* put_packet() might have detected that the peer terminated the - connection. */ - if (gdbserver_state.fd < 0) { - return sig; - } + if (!gdbserver_state.init || gdbserver_state.fd < 0) { + return sig; + } - sig = 0; - gdbserver_state.state = RS_IDLE; - gdbserver_state.running_state = 0; - while (gdbserver_state.running_state == 0) { - n = read(gdbserver_state.fd, buf, 256); - if (n > 0) { - int i; + /* disable single step if it was enabled */ + cpu_single_step(cpu, 0); + tb_flush(cpu); + - for (i = 0; i < n; i++) { - gdb_read_byte(buf[i]); + if (sig != 0) { + snprintf(buf, sizeof(buf), "S%02x", target_signal_to_gdb(sig)); + put_packet(buf); + } + /* put_packet() might have detected that the peer terminated the + connection. */ + if (gdbserver_state.fd < 0) { + return sig; + } + + sig = 0; + gdbserver_state.state = RS_IDLE; + gdbserver_state.running_state = 0; + while (gdbserver_state.running_state == 0) { + n = read(gdbserver_state.fd, buf, 256); + if (n > 0) { + int i; + + for (i = 0; i < n; i++) { + gdb_read_byte(buf[i]); + } } - } else { - /* XXX: Connection closed. Should probably wait for another - connection before continuing. */ - if (n == 0) { - close(gdbserver_state.fd); + else { + /* XXX: Connection closed. Should probably wait for another + connection before continuing. 
*/ + if (n == 0) { //Do not close connection if in afl patching mode + close(gdbserver_state.fd); + } + gdbserver_state.fd = -1; + return sig; } - gdbserver_state.fd = -1; - return sig; } } sig = gdbserver_state.signal; @@ -3192,7 +3215,7 @@ void gdb_signalled(CPUArchState *env, int sig) put_packet(buf); } -static void gdb_accept_init(int fd) +void gdb_accept_init(int fd) { init_gdbserver_state(); create_default_process(&gdbserver_state); @@ -3562,4 +3585,4 @@ static void register_types(void) } type_init(register_types); -#endif +#endif \ No newline at end of file diff --git a/linux-user/aarch64/cpu_loop.c b/linux-user/aarch64/cpu_loop.c index 7c42f65706859..e3e6392724ac7 100644 --- a/linux-user/aarch64/cpu_loop.c +++ b/linux-user/aarch64/cpu_loop.c @@ -25,6 +25,8 @@ #include "hw/semihosting/common-semi.h" #include "target/arm/syndrome.h" +#include "qemuafl/common.h" + #define get_user_code_u32(x, gaddr, env) \ ({ abi_long __r = get_user_u32((x), (gaddr)); \ if (!__r && bswap_code(arm_sctlr_b(env))) { \ @@ -89,6 +91,10 @@ void cpu_loop(CPUARMState *env) switch (trapnr) { case EXCP_SWI: + if (persistent_exits && env->xregs[8] == TARGET_NR_exit_group) { + env->pc = afl_persistent_addr; + break; + } ret = do_syscall(env, env->xregs[8], env->xregs[0], diff --git a/linux-user/arm/cpu_loop.c b/linux-user/arm/cpu_loop.c index cadfb7fa43978..0c8f1f66f8be3 100644 --- a/linux-user/arm/cpu_loop.c +++ b/linux-user/arm/cpu_loop.c @@ -24,6 +24,8 @@ #include "cpu_loop-common.h" #include "hw/semihosting/common-semi.h" +#include "qemuafl/common.h" + #define get_user_code_u32(x, gaddr, env) \ ({ abi_long __r = get_user_u32((x), (gaddr)); \ if (!__r && bswap_code(arm_sctlr_b(env))) { \ @@ -404,6 +406,10 @@ void cpu_loop(CPUARMState *env) break; } } else { + if (persistent_exits && n == TARGET_NR_exit_group) { + env->regs[15] = afl_persistent_addr; + break; + } ret = do_syscall(env, n, env->regs[0], diff --git a/linux-user/elfload.c b/linux-user/elfload.c index bab4237e90fd8..8bd4a8d3e23fc 100644 --- a/linux-user/elfload.c +++ b/linux-user/elfload.c @@ -15,6 +15,8 @@ #include "qemu/selfmap.h" #include "qapi/error.h" +#include "qemuafl/common.h" + #ifdef _ARCH_PPC64 #undef ARCH_DLINFO #undef ELF_PLATFORM @@ -394,8 +396,8 @@ static bool init_guest_commpage(void) MAP_ANONYMOUS | MAP_PRIVATE | MAP_FIXED, -1, 0); if (addr == MAP_FAILED) { - perror("Allocating guest commpage"); - exit(EXIT_FAILURE); +// perror("Allocating guest commpage"); +// exit(EXIT_FAILURE); } if (addr != want) { return false; @@ -2443,8 +2445,8 @@ void probe_guest_base(const char *image_name, abi_ulong guest_loaddr, * we are trying to work with that. Otherwise, we have selected * free space and init_guest_commpage must succeeded. 
*/
-        assert(have_guest_base);
-        pgb_fail_in_use(image_name);
+//        assert(have_guest_base);
+//        pgb_fail_in_use(image_name);
     }
 
     assert(QEMU_IS_ALIGNED(guest_base, align));
@@ -2860,9 +2862,11 @@ static void load_elf_image(const char *image_name, int image_fd,
         if (elf_prot & PROT_EXEC) {
             if (vaddr < info->start_code) {
                 info->start_code = vaddr;
+                if (!afl_start_code) afl_start_code = vaddr;
             }
             if (vaddr_ef > info->end_code) {
                 info->end_code = vaddr_ef;
+                if (!afl_end_code) afl_end_code = vaddr_ef;
             }
         }
         if (elf_prot & PROT_WRITE) {
@@ -2908,6 +2912,52 @@ static void load_elf_image(const char *image_name, int image_fd,
         load_symbols(ehdr, image_fd, load_bias);
     }
+    if (!afl_exit_point) {
+        char *ptr;
+        if ((ptr = getenv("AFL_EXITPOINT")) != NULL) {
+            afl_exit_point = strtoul(ptr, NULL, 16);
+#ifdef TARGET_ARM
+            /* The least significant bit indicates Thumb mode. */
+            afl_exit_point = afl_exit_point & ~(target_ulong)1;
+#endif
+            if (getenv("AFL_DEBUG") != NULL)
+                fprintf(stderr, "AFL exitpoint: 0x%lx\n",
+                        (unsigned long)afl_exit_point);
+        }
+    }
+
+    if (!afl_entry_point) {
+        char *ptr;
+        if ((ptr = getenv("AFL_ENTRYPOINT")) != NULL) {
+            afl_entry_point = strtoul(ptr, NULL, 16);
+        } else {
+            // On PowerPC64 the entry point is the _function descriptor_
+            // of the entry function. For AFL to properly initialize,
+            // afl_entry_point needs to be set to the first instruction
+            // actually executed by the target program, as opposed to the
+            // address where the function's descriptor sits in memory.
+            // copied from PPC init_thread
+#if defined(TARGET_PPC64) && !defined(TARGET_ABI32)
+            if (get_ppc64_abi(info) < 2) {
+                uint64_t val;
+                get_user_u64(val, info->entry);
+                afl_entry_point = val + info->load_bias;
+            } else {
+                afl_entry_point = info->entry;
+            }
+#else
+            afl_entry_point = info->entry;
+#endif
+        }
+#ifdef TARGET_ARM
+        /* The least significant bit indicates Thumb mode.
*/ + afl_entry_point = afl_entry_point & ~(target_ulong)1; +#endif + } + if (getenv("AFL_DEBUG") != NULL) + fprintf(stderr, "AFL forkserver entrypoint: 0x%lx\n", + (unsigned long)afl_entry_point); + mmap_unlock(); close(image_fd); diff --git a/linux-user/i386/cpu_loop.c b/linux-user/i386/cpu_loop.c index f813e87294af8..4509f46b95bb9 100644 --- a/linux-user/i386/cpu_loop.c +++ b/linux-user/i386/cpu_loop.c @@ -22,6 +22,8 @@ #include "qemu.h" #include "cpu_loop-common.h" +#include "qemuafl/common.h" + /***********************************************************/ /* CPUX86 core interface */ @@ -211,6 +213,10 @@ void cpu_loop(CPUX86State *env) switch(trapnr) { case 0x80: /* linux syscall from int $0x80 */ + if (persistent_exits && env->regs[R_EAX] == TARGET_NR_exit_group) { + env->eip = afl_persistent_addr; + continue; + } ret = do_syscall(env, env->regs[R_EAX], env->regs[R_EBX], @@ -229,6 +235,11 @@ void cpu_loop(CPUX86State *env) #ifndef TARGET_ABI32 case EXCP_SYSCALL: /* linux syscall from syscall instruction */ + if (afl_fork_child && persistent_exits && + env->regs[R_EAX] == TARGET_NR_exit_group) { + env->eip = afl_persistent_addr; + continue; + } ret = do_syscall(env, env->regs[R_EAX], env->regs[R_EDI], diff --git a/linux-user/i386/target_elf.h b/linux-user/i386/target_elf.h index 1c6142e7da0d7..238a9aba738a0 100644 --- a/linux-user/i386/target_elf.h +++ b/linux-user/i386/target_elf.h @@ -9,6 +9,6 @@ #define I386_TARGET_ELF_H static inline const char *cpu_get_model(uint32_t eflags) { - return "qemu32"; + return "max"; } #endif diff --git a/linux-user/main.c b/linux-user/main.c index 81f48ff54ed4b..cdc4780c4e1dd 100644 --- a/linux-user/main.c +++ b/linux-user/main.c @@ -49,6 +49,8 @@ #include "cpu_loop-common.h" #include "crypto/init.h" +#include "qemuafl/qasan-qemu.h" + char *exec_path; int singlestep; @@ -222,6 +224,73 @@ CPUArchState *cpu_copy(CPUArchState *env) return new_env; } +/* A shorthand way to suppress the warnings that you are ignoring the return value of asprintf() */ +static inline void ignore_result(long long int unused_result) +{ + (void) unused_result; +} + +/* Get libqasan path. */ +#ifndef AFL_PATH + #define AFL_PATH "/usr/local/lib/afl/" +#endif +static char *get_libqasan_path(char *own_loc) +{ + if (!unlikely(own_loc)) { + fprintf(stderr, "BUG: param own_loc is NULL\n"); + exit(EXIT_FAILURE); + } + + char *tmp, *cp = NULL, *rsl, *own_copy; + + tmp = getenv("AFL_PATH"); + if (tmp) { + ignore_result(asprintf(&cp, "%s/libqasan.so", tmp)); + if (access(cp, X_OK)) { + fprintf(stderr, "Unable to find '%s'\n", tmp); + exit(EXIT_FAILURE); + } + + return cp; + } + + own_copy = strdup(own_loc); + rsl = strrchr(own_copy, '/'); + if (rsl) { + *rsl = 0; + + ignore_result(asprintf(&cp, "%s/libqasan.so", own_copy)); + free(own_copy); + + if (!access(cp, X_OK)) { return cp; } + + } else { + free(own_copy); + } + + if (!access(AFL_PATH "/libqasan.so", X_OK)) { + if (cp) { free(cp); } + + return strdup(AFL_PATH "/libqasan.so"); + } + + /* This is an AFL error message, but since it is in QEMU it can't + have all the pretty formatting of AFL without importing + a bunch of AFL pieces. */ + fprintf(stderr, "\n" "" "[-] " "" + "Oops, unable to find the 'libqasan.so' binary. The binary must be " + "built\n" + " separately by following the instructions in " + "qemu_mode/libqasan/README.md. 
" + "If you\n" + " already have the binary installed, you may need to specify " + "AFL_PATH in the\n" + " environment.\n"); + + fprintf(stderr, "Failed to locate 'libqasan.so'.\n"); + exit(EXIT_FAILURE); +} + static void handle_arg_help(const char *arg) { usage(EXIT_SUCCESS); @@ -632,6 +701,18 @@ int main(int argc, char **argv, char **envp) int log_mask; unsigned long max_reserved_va; + use_qasan = !!getenv("AFL_USE_QASAN"); + + if (getenv("QASAN_MAX_CALL_STACK")) + qasan_max_call_stack = atoi(getenv("QASAN_MAX_CALL_STACK")); + if (getenv("QASAN_SYMBOLIZE")) + qasan_symbolize = atoi(getenv("QASAN_SYMBOLIZE")); + +#if defined(ASAN_GIOVESE) && !defined(DO_NOT_USE_QASAN) + if (use_qasan) + asan_giovese_init(); +#endif + error_init(argv[0]); module_call_init(MODULE_INIT_TRACE); qemu_init_cpu_list(); @@ -644,6 +725,45 @@ int main(int argc, char **argv, char **envp) (void) envlist_setenv(envlist, *wrk); } + /* Add AFL_PRELOAD for qasan if it is enabled */ + if(use_qasan) { + char *preload = getenv("AFL_PRELOAD"); + char *libqasan = get_libqasan_path(argv[0]); + + if (!preload) { + setenv("AFL_PRELOAD", libqasan, 0); + } else { + /* NOTE: If there is more than one in the list, LD_PRELOAD allows spaces or colons + as separators (but no escaping provided), but DYLD_INSERT_LIBRARIES allows only colons. + Prefer colons for maximum compatibility, but use space if the string already has any. */ + char * afl_preload; + if (strchr(preload, ' ')) { + ignore_result(asprintf(&afl_preload, "%s %s", libqasan, preload)); + } else { + ignore_result(asprintf(&afl_preload, "%s:%s", libqasan, preload)); + } + + setenv("AFL_PRELOAD", afl_preload, 1); + free(afl_preload); + } + free(libqasan); + } + + /* Expand AFL_PRELOAD to append preload libraries */ + char *afl_preload = getenv("AFL_PRELOAD"); + if (afl_preload) { + /* NOTE: If there is more than one in the list, LD_PRELOAD allows spaces or colons + as separators, but DYLD_INSERT_LIBRARIES allows only colons. + Maybe we should attempt to normalize the list here before we assign it? */ + char * ld_preload; + ignore_result(asprintf(&ld_preload, "LD_PRELOAD=%s", afl_preload)); + envlist_setenv(envlist, ld_preload); + + char * dyld_insert; + ignore_result(asprintf(&dyld_insert, "DYLD_INSERT_LIBRARIES=%s", afl_preload)); + envlist_setenv(envlist, dyld_insert); + } + /* Read the stack limit from the kernel. If it's "unlimited", then we can do little else besides use the default. 
*/
     {
diff --git a/linux-user/mips/cpu_loop.c b/linux-user/mips/cpu_loop.c
index 9d813ece4e75a..7e62cdc20d7f3 100644
--- a/linux-user/mips/cpu_loop.c
+++ b/linux-user/mips/cpu_loop.c
@@ -25,6 +25,9 @@
 #include "internal.h"
 #include "fpu_helper.h"
 
+/* MIPS_PATCH */
+#include "qemuafl/common.h"
+
 # ifdef TARGET_ABI_MIPSO32
 # define MIPS_SYSCALL_NUMBER_UNUSED -1
 static const int8_t mips_syscall_args[] = {
@@ -81,6 +84,18 @@ void cpu_loop(CPUMIPSState *env)
         switch(trapnr) {
         case EXCP_SYSCALL:
+            if (
+                persistent_exits &&
+                (
+                    env->active_tc.gpr[2] == TARGET_NR_exit_group ||
+                    // uclibc may use the following syscall instead of
+                    // exit_group:
+                    env->active_tc.gpr[2] == TARGET_NR_exit
+                )
+            ) {
+                env->active_tc.PC = afl_persistent_addr;
+                continue;
+            }
             env->active_tc.PC += 4;
 # ifdef TARGET_ABI_MIPSO32
             syscall_num = env->active_tc.gpr[2] - 4000;
diff --git a/linux-user/mmap.c b/linux-user/mmap.c
index 7e3b2450368a5..1e0e2dbede511 100644
--- a/linux-user/mmap.c
+++ b/linux-user/mmap.c
@@ -21,6 +21,25 @@
 #include "exec/log.h"
 #include "qemu.h"
 
+#include "qemuafl/common.h"
+#include "qemuafl/interval-tree/interval-tree.inl"
+
+struct mmap_tree_node {
+
+    struct rb_node rb;
+    abi_long start, end;
+    abi_long __subtree_last;
+
+};
+
+#define MMAP_TREE_START(node) ((node)->start)
+#define MMAP_TREE_LAST(node) ((node)->end)
+
+INTERVAL_TREE_DEFINE(struct mmap_tree_node, rb, abi_long, __subtree_last,
+                     MMAP_TREE_START, MMAP_TREE_LAST, static, mmap_tree)
+
+static struct rb_root mmap_tree_root = RB_ROOT;
+
 static pthread_mutex_t mmap_mutex = PTHREAD_MUTEX_INITIALIZER;
 static __thread int mmap_lock_count;
@@ -80,7 +99,7 @@ static int validate_prot_to_pageflags(int *host_prot, int prot)
      * Pages that are executable by the guest will never be executed
      * by the host, but the host will need to be able to read them.
      */
-    *host_prot = (prot & (PROT_READ | PROT_WRITE))
+    *host_prot = (prot & (PROT_READ | PROT_WRITE | PROT_EXEC))
                  | (prot & PROT_EXEC ?
PROT_READ : 0); #ifdef TARGET_AARCH64 @@ -617,6 +636,14 @@ abi_long target_mmap(abi_ulong start, abi_ulong len, int target_prot, log_page_dump(__func__); } tb_invalidate_phys_range(start, start + len); + + if (afl_fork_child && persistent_memory) { + struct mmap_tree_node* node = calloc(sizeof(struct mmap_tree_node), 1); + node->start = start; + node->end = start + len; + mmap_tree_insert(node, &mmap_tree_root); + } + mmap_unlock(); return start; fail: @@ -721,6 +748,17 @@ int target_munmap(abi_ulong start, abi_ulong len) if (ret == 0) { page_set_flags(start, start + len, 0); tb_invalidate_phys_range(start, start + len); + + if (afl_fork_child && persistent_memory) { + struct mmap_tree_node* node = mmap_tree_iter_first(&mmap_tree_root, + start, start + len); + while (node) { + struct mmap_tree_node* next = mmap_tree_iter_next(node, start, + start + len); + mmap_tree_remove(node, &mmap_tree_root); + node = next; + } + } } mmap_unlock(); return ret; @@ -809,8 +847,36 @@ abi_long target_mremap(abi_ulong old_addr, abi_ulong old_size, page_set_flags(old_addr, old_addr + old_size, 0); page_set_flags(new_addr, new_addr + new_size, prot | PAGE_VALID | PAGE_RESET); + + if (afl_fork_child && persistent_memory) { + struct mmap_tree_node* node = mmap_tree_iter_first(&mmap_tree_root, + old_addr, old_addr + old_size); + while (node) { + struct mmap_tree_node* next = mmap_tree_iter_next(node, old_addr, + old_addr + old_size); + mmap_tree_remove(node, &mmap_tree_root); + node = next; + } + + node = calloc(sizeof(struct mmap_tree_node), 1); + node->start = new_addr; + node->end = new_addr + new_size; + mmap_tree_insert(node, &mmap_tree_root); + } } tb_invalidate_phys_range(new_addr, new_addr + new_size); mmap_unlock(); return new_addr; } + +void afl_target_unmap_trackeds(void) { + + struct mmap_tree_node* node = mmap_tree_iter_first(&mmap_tree_root, 0, + (abi_ulong)-1); + while (node) { + struct mmap_tree_node* next = mmap_tree_iter_next(node, 0, (abi_ulong)-1); + target_munmap(node->start, node->end - node->start); + node = next; + } + +} diff --git a/linux-user/signal.c b/linux-user/signal.c index 7eecec46c4070..8b23f67821196 100644 --- a/linux-user/signal.c +++ b/linux-user/signal.c @@ -25,6 +25,8 @@ #include "trace.h" #include "signal-common.h" +#include "qemuafl/qasan-qemu.h" + static struct target_sigaction sigact_table[TARGET_NSIG]; static void host_signal_handler(int host_signum, siginfo_t *info, @@ -626,7 +628,6 @@ static void QEMU_NORETURN dump_core_and_abort(int target_sig) TaskState *ts = (TaskState *)cpu->opaque; int host_sig, core_dumped = 0; struct sigaction act; - host_sig = target_to_host_signal(target_sig); trace_user_force_sig(env, target_sig, host_sig); gdb_signalled(env, target_sig); @@ -937,7 +938,9 @@ static void handle_pending_signal(CPUArchState *cpu_env, int sig, print_taken_signal(sig, &k->info); } - if (handler == TARGET_SIG_DFL) { + int ignore_handling = !!getenv("AFL_QEMU_FORCE_DFL"); + + if (handler == TARGET_SIG_DFL || ignore_handling) { /* default handler : ignore some signal. 
The others are job control or fatal */
         if (sig == TARGET_SIGTSTP || sig == TARGET_SIGTTIN || sig == TARGET_SIGTTOU) {
             kill(getpid(),SIGSTOP);
@@ -945,11 +948,49 @@ static void handle_pending_signal(CPUArchState *cpu_env, int sig,
                    sig != TARGET_SIGURG &&
                    sig != TARGET_SIGWINCH &&
                    sig != TARGET_SIGCONT) {
+
+#if defined(ASAN_GIOVESE) && !defined(DO_NOT_USE_QASAN)
+            if (use_qasan) {
+                if (sig == TARGET_SIGILL ||
+                    sig == TARGET_SIGFPE ||
+                    sig == TARGET_SIGSEGV ||
+                    sig == TARGET_SIGBUS)
+                    asan_giovese_deadly_signal(target_to_host_signal(sig),
+                                               k->info._sifields._sigfault._addr,
+                                               PC_GET(cpu_env), BP_GET(cpu_env),
+                                               SP_GET(cpu_env));
+                else
+                    asan_giovese_deadly_signal(target_to_host_signal(sig),
+                                               PC_GET(cpu_env),
+                                               PC_GET(cpu_env), BP_GET(cpu_env),
+                                               SP_GET(cpu_env));
+            }
+#endif
+
             dump_core_and_abort(sig);
         }
     } else if (handler == TARGET_SIG_IGN) {
         /* ignore sig */
     } else if (handler == TARGET_SIG_ERR) {
+
+#if defined(ASAN_GIOVESE) && !defined(DO_NOT_USE_QASAN)
+        if (use_qasan) {
+            if (sig == TARGET_SIGILL ||
+                sig == TARGET_SIGFPE ||
+                sig == TARGET_SIGSEGV ||
+                sig == TARGET_SIGBUS)
+                asan_giovese_deadly_signal(target_to_host_signal(sig),
+                                           k->info._sifields._sigfault._addr,
+                                           PC_GET(cpu_env), BP_GET(cpu_env),
+                                           SP_GET(cpu_env));
+            else
+                asan_giovese_deadly_signal(target_to_host_signal(sig),
+                                           PC_GET(cpu_env),
+                                           PC_GET(cpu_env), BP_GET(cpu_env),
+                                           SP_GET(cpu_env));
+        }
+#endif
+
         dump_core_and_abort(sig);
     } else {
         /* compute the blocked signals during the handler execution */
diff --git a/linux-user/syscall.c b/linux-user/syscall.c
index 389ec09764734..88d5af05ae7b9 100644
--- a/linux-user/syscall.c
+++ b/linux-user/syscall.c
@@ -95,7 +95,25 @@
 #include
 #include
 #include
+
+#ifdef HAVE_SYS_MOUNT_FSCONFIG
+/*
+ * glibc >= 2.36 linux/mount.h conflicts with sys/mount.h,
+ * which in turn prevents use of linux/fs.h. So we have to
+ * define the constants ourselves for now.
+ */
+#define FS_IOC_GETFLAGS _IOR('f', 1, long)
+#define FS_IOC_SETFLAGS _IOW('f', 2, long)
+#define FS_IOC_GETVERSION _IOR('v', 1, long)
+#define FS_IOC_SETVERSION _IOW('v', 2, long)
+#define FS_IOC_FIEMAP _IOWR('f', 11, struct fiemap)
+#define FS_IOC32_GETFLAGS _IOR('f', 1, int)
+#define FS_IOC32_SETFLAGS _IOW('f', 2, int)
+#define FS_IOC32_GETVERSION _IOR('v', 1, int)
+#define FS_IOC32_SETVERSION _IOW('v', 2, int)
+#else
 #include <linux/fs.h>
+#endif
 #include
 #if defined(CONFIG_FIEMAP)
 #include <linux/fiemap.h>
@@ -134,6 +152,9 @@
 #include "fd-trans.h"
 #include "tcg/tcg.h"
 
+#include "qemuafl/common.h"
+#include "qemuafl/qasan-qemu.h"
+
 #ifndef CLONE_IO
 #define CLONE_IO 0x80000000 /* Clone io context */
 #endif
@@ -885,6 +906,15 @@ void target_set_brk(abi_ulong new_brk)
     brk_page = HOST_PAGE_ALIGN(target_brk);
 }
+abi_ulong afl_get_brk(void) {
+    return target_brk;
+}
+abi_ulong afl_set_brk(abi_ulong new_brk) {
+    abi_ulong old_brk = target_brk;
+    target_brk = new_brk;
+    return old_brk;
+}
+
 //#define DEBUGF_BRK(message, args...) do { fprintf(stderr, (message), ## args); } while (0)
 #define DEBUGF_BRK(message, args...)
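The afl_get_brk()/afl_set_brk() helpers added above are what let the snapshot
machinery reset the guest heap between runs: the program break is read once
when the snapshot is taken and written back when it is restored, so brk()/
sbrk() growth inside one iteration does not leak into the next. A minimal
sketch of that usage pattern (the save_brk_snapshot/restore_brk_snapshot
names are illustrative only, not part of this patch, and assume code living
in linux-user where abi_ulong and the two helpers above are in scope):

    static abi_ulong saved_brk_snapshot;

    /* Record the guest brk once, before the first fuzzing iteration. */
    static void save_brk_snapshot(void) {
        saved_brk_snapshot = afl_get_brk();
    }

    /* Rewind the brk at the end of each iteration, discarding any heap
       the target grew while it ran. */
    static void restore_brk_snapshot(void) {
        if (saved_brk_snapshot)
            afl_set_brk(saved_brk_snapshot);
    }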
@@ -7855,7 +7885,7 @@ static int open_self_cmdline(void *cpu_env, int fd) return 0; } -static int open_self_maps(void *cpu_env, int fd) +int open_self_maps(void *cpu_env, int fd) { CPUState *cpu = env_cpu((CPUArchState *)cpu_env); TaskState *ts = cpu->opaque; @@ -8369,6 +8399,8 @@ static abi_long do_syscall1(void *cpu_env, int num, abi_long arg1, return ret; #endif case TARGET_NR_close: + if (unlikely(arg1 == TSL_FD)) + return 0x00; fd_trans_unregister(arg1); return get_errno(close(arg1)); @@ -8463,7 +8495,7 @@ static abi_long do_syscall1(void *cpu_env, int num, abi_long arg1, #endif case TARGET_NR_execve: { - char **argp, **envp; + char **argp = NULL, **envp = NULL; int argc, envc; abi_ulong gp; abi_ulong guest_argp; @@ -8488,6 +8520,35 @@ static abi_long do_syscall1(void *cpu_env, int num, abi_long arg1, return -TARGET_EFAULT; if (!addr) break; + /* QASAN: remove preloaded library */ + if (use_qasan && !getenv("QASAN_PRESERVE_EXECVE")) { + /* + * If we need to clear the LD_PRELOAD list, run the memory + * lock and unlock methods to inspect the contents within + * the strings. + */ + abi_long len = target_strlen(gp); + if (len < 0) { + return -TARGET_EFAULT; + } + char *env = lock_user(VERIFY_WRITE, gp, (long)(len + 1), 0); + if (!env) + goto execve_efault; + if (!strncmp("LD_PRELOAD=", env, 11)) { + char *p, *q, *r; + if ((q = r = strstr(env +11, "libqasan.so")) != NULL) { + size_t mlen = strlen("libqasan.so"); + while ((r = strstr(p = r + mlen, "libqasan.so")) != NULL) { + while (p < r) + *q++ = *p++; + } + while ((*q++ = *p++) != '\0') + continue; + } + + } + unlock_user(env, gp, (long)(len + 1)); + } envc++; } @@ -12329,8 +12390,19 @@ static abi_long do_syscall1(void *cpu_env, int num, abi_long arg1, return get_errno(safe_tkill((int)arg1, target_to_host_signal(arg2))); case TARGET_NR_tgkill: - return get_errno(safe_tgkill((int)arg1, (int)arg2, - target_to_host_signal(arg3))); + { + int pid = (int)arg1, + tgid = (int)arg2, + sig = (int)arg3; + + /* Not entirely sure if the below is correct for all architectures. 
*/ + + if(afl_forksrv_pid && afl_forksrv_pid == pid && sig == SIGABRT) + pid = tgid = getpid(); + + return get_errno(safe_tgkill(pid, tgid, target_to_host_signal(sig))); + + } #ifdef TARGET_NR_set_robust_list case TARGET_NR_set_robust_list: @@ -13256,6 +13328,15 @@ static abi_long do_syscall1(void *cpu_env, int num, abi_long arg1, return ret; #endif + case QASAN_FAKESYS_NR: + /* QASAN syscall */ + if (use_qasan) { + return qasan_actions_dispatcher(cpu_env, arg1, arg2, arg3, arg4); + } else { + fprintf(stderr, "QAsan syscall unsupported without enabling QASan mode (AFL_USE_QASAN)\n"); + return -TARGET_ENOSYS; + } + default: qemu_log_mask(LOG_UNIMP, "Unsupported syscall: %d\n", num); return -TARGET_ENOSYS; diff --git a/linux-user/x86_64/target_elf.h b/linux-user/x86_64/target_elf.h index 7b76a90de8805..3f628f8d66197 100644 --- a/linux-user/x86_64/target_elf.h +++ b/linux-user/x86_64/target_elf.h @@ -9,6 +9,6 @@ #define X86_64_TARGET_ELF_H static inline const char *cpu_get_model(uint32_t eflags) { - return "qemu64"; + return "max"; } #endif diff --git a/meson b/meson index 776acd2a805c9..d0c68dc11507a 160000 --- a/meson +++ b/meson @@ -1 +1 @@ -Subproject commit 776acd2a805c9b42b4f0375150977df42130317f +Subproject commit d0c68dc11507a47b9b85de508e023d9590d60565 diff --git a/meson.build b/meson.build index 07bc23129af75..3431c7c1e88a2 100644 --- a/meson.build +++ b/meson.build @@ -1149,6 +1149,9 @@ config_host_data.set('HAVE_PTY_H', cc.has_header('pty.h')) config_host_data.set('HAVE_SYS_IOCCOM_H', cc.has_header('sys/ioccom.h')) config_host_data.set('HAVE_SYS_KCOV_H', cc.has_header('sys/kcov.h')) config_host_data.set('HAVE_SYSTEM_FUNCTION', cc.has_function('system', prefix: '#include ')) +config_host_data.set('HAVE_SYS_MOUNT_FSCONFIG', + cc.has_header_symbol('sys/mount.h', 'FSCONFIG_SET_FLAG')) + config_host_data.set('CONFIG_PREADV', cc.has_function('preadv', prefix: '#include ')) diff --git a/pc-bios/keymaps/meson.build b/pc-bios/keymaps/meson.build index 05eda6c0d26df..cbee14ba82bcf 100644 --- a/pc-bios/keymaps/meson.build +++ b/pc-bios/keymaps/meson.build @@ -1,5 +1,5 @@ keymaps = { - 'ar': '-l ar', + 'ar': '-l ara', 'bepo': '-l fr -v dvorak', 'cz': '-l cz', 'da': '-l dk', diff --git a/qemuafl/api.h b/qemuafl/api.h new file mode 100644 index 0000000000000..1d956ab56d83e --- /dev/null +++ b/qemuafl/api.h @@ -0,0 +1,215 @@ +#ifndef __AFL_QEMU_API_H__ +#define __AFL_QEMU_API_H__ + +#include + +#if defined(TARGET_MIPS64) || defined(TARGET_AARCH64) || defined(TARGET_X86_64) || defined(TARGET_PPC64) || defined(TARGET_RISCV64) +# define TARGET_LONG_BITS 64 +#else +# define TARGET_LONG_BITS 32 +#endif + +/* see include/exec/cpu-defs.h */ +#define TARGET_LONG_SIZE (TARGET_LONG_BITS / 8) + +#if TARGET_LONG_SIZE == 4 +typedef int32_t target_long; +typedef uint32_t target_ulong; +#elif TARGET_LONG_SIZE == 8 +typedef int64_t target_long; +typedef uint64_t target_ulong; +#else +#error TARGET_LONG_SIZE undefined +#endif + + +struct x86_regs { + + uint32_t eax, ebx, ecx, edx, edi, esi, ebp; + + union { + uint32_t eip; + uint32_t pc; + }; + union { + uint32_t esp; + uint32_t sp; + }; + union { + uint32_t eflags; + uint32_t flags; + }; + + uint8_t xmm_regs[8][16]; + +}; + +struct x86_64_regs { + + uint64_t rax, rbx, rcx, rdx, rdi, rsi, rbp, + r8, r9, r10, r11, r12, r13, r14, r15; + + union { + uint64_t rip; + uint64_t pc; + }; + union { + uint64_t rsp; + uint64_t sp; + }; + union { + uint64_t rflags; + uint64_t flags; + }; + + uint8_t zmm_regs[32][64]; + +}; + +struct arm_regs { + + uint32_t r0, r1, r2, r3, 
r4, r5, r6, r7, r8, r9, r10; + + union { + uint32_t r11; + uint32_t fp; + }; + union { + uint32_t r12; + uint32_t ip; + }; + union { + uint32_t r13; + uint32_t sp; + }; + union { + uint32_t r14; + uint32_t lr; + }; + union { + uint32_t r15; + uint32_t pc; + }; + + uint32_t cpsr; + + uint8_t vfp_zregs[32][16]; + uint32_t vfp_xregs[16]; + +}; + +struct arm64_regs { + + uint64_t x0, x1, x2, x3, x4, x5, x6, x7, x8, x9, x10; + + union { + uint64_t x11; + uint32_t fp_32; + }; + union { + uint64_t x12; + uint32_t ip_32; + }; + union { + uint64_t x13; + uint32_t sp_32; + }; + union { + uint64_t x14; + uint32_t lr_32; + }; + union { + uint64_t x15; + uint32_t pc_32; + }; + union { + uint64_t x16; + uint64_t ip0; + }; + union { + uint64_t x17; + uint64_t ip1; + }; + + uint64_t x18, x19, x20, x21, x22, x23, x24, x25, x26, x27, x28; + + union { + uint64_t x29; + uint64_t fp; + }; + union { + uint64_t x30; + uint64_t lr; + }; + union { + uint64_t x31; + uint64_t sp; + }; + // the zero register is not saved here ofc + + uint64_t pc; + + uint32_t cpsr; + + uint8_t vfp_zregs[32][16*16]; + uint8_t vfp_pregs[17][32]; + uint32_t vfp_xregs[16]; + +}; + +/* MIPS_PATCH */ +#if defined(TARGET_MIPS) || defined(TARGET_MIPS64) + +// check standalone usage +// if smth in pers hook goes wrong, check constants below with target/mips/cpu.h +#ifndef MIPS_CPU_H +#include +#include "../include/fpu/softfloat-types.h" + +/* MSA Context */ +#define MSA_WRLEN (128) +typedef union wr_t wr_t; +union wr_t { + int8_t b[MSA_WRLEN / 8]; + int16_t h[MSA_WRLEN / 16]; + int32_t w[MSA_WRLEN / 32]; + int64_t d[MSA_WRLEN / 64]; +}; +typedef union fpr_t fpr_t; +union fpr_t { + float64 fd; /* ieee double precision */ + float32 fs[2];/* ieee single precision */ + uint64_t d; /* binary double fixed-point */ + uint32_t w[2]; /* binary single fixed-point */ +/* FPU/MSA register mapping is not tested on big-endian hosts. */ + wr_t wr; /* vector data */ +}; +#define MIPS_DSP_ACC 4 +#endif + +struct mips_regs { + target_ulong r0, at, v0, v1, a0, a1, a2, a3, t0, t1, t2, t3, t4, t5, t6, t7, s0, + s1, s2, s3, s4, s5, s6, s7, t8, t9, k0, k1, gp, sp, fp, ra; + #if defined(TARGET_MIPS64) + /* + * For CPUs using 128-bit GPR registers, we put the lower halves in gpr[]) + * and the upper halves in gpr_hi[]. + */ + uint64_t gpr_hi[32]; + #endif /* TARGET_MIPS64 */ + target_ulong HI[MIPS_DSP_ACC]; + target_ulong LO[MIPS_DSP_ACC]; + target_ulong ACX[MIPS_DSP_ACC]; + target_ulong PC; + fpr_t fpr[32]; +}; +#endif + +struct ppc_regs { + target_ulong gpr[32]; /* general purpose registers */ + target_ulong lr; + target_ulong ctr; + uint32_t crf[8]; /* condition register */ +}; + +#endif diff --git a/qemuafl/asan-giovese-inl.h b/qemuafl/asan-giovese-inl.h new file mode 100644 index 0000000000000..74f6a9bf53d34 --- /dev/null +++ b/qemuafl/asan-giovese-inl.h @@ -0,0 +1,1536 @@ +/******************************************************************************* +BSD 2-Clause License + +Copyright (c) 2020-2021, Andrea Fioraldi +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + +1. Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. + +2. Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. 
+ +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +*******************************************************************************/ + +#include "asan-giovese.h" +#include +#include +#include +#include +#include +#include +#include +#include + +#define DEFAULT_REDZONE_SIZE 128 + +// ------------------------------------------------------------------------- // +// Alloc +// ------------------------------------------------------------------------- // + +#include "interval-tree/rbtree.h" +#include "interval-tree/interval_tree_generic.h" + +// TODO use a mutex for locking insert/delete + +struct alloc_tree_node { + + struct rb_node rb; + struct chunk_info ckinfo; + target_ulong __subtree_last; + +}; + +#define START(node) ((node)->ckinfo.start) +#define LAST(node) ((node)->ckinfo.end) + +INTERVAL_TREE_DEFINE(struct alloc_tree_node, rb, target_ulong, __subtree_last, + START, LAST, static, alloc_tree) + +static struct rb_root root = RB_ROOT; + +struct chunk_info* asan_giovese_alloc_search(target_ulong query) { + + struct alloc_tree_node* node = alloc_tree_iter_first(&root, query, query); + if (node) return &node->ckinfo; + return NULL; + +} + +void asan_giovese_alloc_insert(target_ulong start, target_ulong end, + struct call_context* alloc_ctx) { + + struct alloc_tree_node* prev_node = alloc_tree_iter_first(&root, start, end); + while (prev_node) { + + struct alloc_tree_node* n = alloc_tree_iter_next(prev_node, start, end); + free(prev_node->ckinfo.alloc_ctx); + free(prev_node->ckinfo.free_ctx); + alloc_tree_remove(prev_node, &root); + prev_node = n; + + } + + struct alloc_tree_node* node = calloc(sizeof(struct alloc_tree_node), 1); + node->ckinfo.start = start; + node->ckinfo.end = end; + node->ckinfo.alloc_ctx = alloc_ctx; + alloc_tree_insert(node, &root); + +} + +// ------------------------------------------------------------------------- // +// Init +// ------------------------------------------------------------------------- // + +void* __ag_high_shadow = HIGH_SHADOW_ADDR; +void* __ag_low_shadow = LOW_SHADOW_ADDR; + +void asan_giovese_init(void) { + +#if UINTPTR_MAX == 0xffffffff + fprintf(stderr, "ERROR: Cannot allocate sanitizer shadow memory on 32 bit " + "platforms."); + exit(1); +#else + assert(mmap(__ag_high_shadow, HIGH_SHADOW_SIZE, PROT_READ | PROT_WRITE, + MAP_PRIVATE | MAP_FIXED | MAP_NORESERVE | MAP_ANON, -1, + 0) != MAP_FAILED); + + assert(mmap(__ag_low_shadow, LOW_SHADOW_SIZE, PROT_READ | PROT_WRITE, + MAP_PRIVATE | MAP_FIXED | MAP_NORESERVE | MAP_ANON, -1, + 0) != MAP_FAILED); + + assert(mmap(GAP_SHADOW_ADDR, GAP_SHADOW_SIZE, PROT_NONE, + MAP_PRIVATE | MAP_FIXED | MAP_NORESERVE | MAP_ANON, -1, + 0) != MAP_FAILED); +#endif + +} + +// ------------------------------------------------------------------------- // +// Checks +// 
------------------------------------------------------------------------- // + +int asan_giovese_load1(void* ptr) { + + uintptr_t h = (uintptr_t)ptr; + int8_t* shadow_addr = (int8_t*)(h >> 3) + SHADOW_OFFSET; + int8_t k = *shadow_addr; + return k != 0 && (intptr_t)((h & 7) + 1) > k; + +} + +int asan_giovese_load2(void* ptr) { + + uintptr_t h = (uintptr_t)ptr; + int8_t* shadow_addr = (int8_t*)(h >> 3) + SHADOW_OFFSET; + int8_t k = *shadow_addr; + return k != 0 && (intptr_t)((h & 7) + 2) > k; + +} + +int asan_giovese_load4(void* ptr) { + + uintptr_t h = (uintptr_t)ptr; + int8_t* shadow_addr = (int8_t*)(h >> 3) + SHADOW_OFFSET; + int8_t k = *shadow_addr; + return k != 0 && (intptr_t)((h & 7) + 4) > k; + +} + +int asan_giovese_load8(void* ptr) { + + uintptr_t h = (uintptr_t)ptr; + int8_t* shadow_addr = (int8_t*)(h >> 3) + SHADOW_OFFSET; + return (*shadow_addr); + +} + +int asan_giovese_store1(void* ptr) { + + uintptr_t h = (uintptr_t)ptr; + int8_t* shadow_addr = (int8_t*)(h >> 3) + SHADOW_OFFSET; + int8_t k = *shadow_addr; + return k != 0 && (intptr_t)((h & 7) + 1) > k; + +} + +int asan_giovese_store2(void* ptr) { + + uintptr_t h = (uintptr_t)ptr; + int8_t* shadow_addr = (int8_t*)(h >> 3) + SHADOW_OFFSET; + int8_t k = *shadow_addr; + return k != 0 && (intptr_t)((h & 7) + 2) > k; + +} + +int asan_giovese_store4(void* ptr) { + + uintptr_t h = (uintptr_t)ptr; + int8_t* shadow_addr = (int8_t*)(h >> 3) + SHADOW_OFFSET; + int8_t k = *shadow_addr; + return k != 0 && (intptr_t)((h & 7) + 4) > k; + +} + +int asan_giovese_store8(void* ptr) { + + uintptr_t h = (uintptr_t)ptr; + int8_t* shadow_addr = (int8_t*)(h >> 3) + SHADOW_OFFSET; + return (*shadow_addr); + +} + +int asan_giovese_loadN(void* ptr, size_t n) { + + if (!n) return 0; + + uintptr_t start = (uintptr_t)ptr; + uintptr_t end = start + n; + uintptr_t last_8 = end & ~7; + + if (start & 0x7) { + + uintptr_t next_8 = (start & ~7) + 8; + size_t first_size = next_8 - start; + + if (n <= first_size) { + + uintptr_t h = start; + int8_t* shadow_addr = (int8_t*)(h >> 3) + SHADOW_OFFSET; + int8_t k = *shadow_addr; + return k != 0 && ((intptr_t)((h & 7) + n) > k); + + } + + uintptr_t h = start; + int8_t* shadow_addr = (int8_t*)(h >> 3) + SHADOW_OFFSET; + int8_t k = *shadow_addr; + if (k != 0 && ((intptr_t)((h & 7) + first_size) > k)) return 1; + + start = next_8; + + } + + while (start < last_8) { + + uintptr_t h = start; + int8_t* shadow_addr = (int8_t*)(h >> 3) + SHADOW_OFFSET; + if (*shadow_addr) return 1; + start += 8; + + } + + if (last_8 != end) { + + uintptr_t h = start; + size_t last_size = end - last_8; + int8_t* shadow_addr = (int8_t*)(h >> 3) + SHADOW_OFFSET; + int8_t k = *shadow_addr; + return k != 0 && ((intptr_t)((h & 7) + last_size) > k); + + } + + return 0; + +} + +int asan_giovese_storeN(void* ptr, size_t n) { + + if (!n) return 0; + + uintptr_t start = (uintptr_t)ptr; + uintptr_t end = start + n; + uintptr_t last_8 = end & ~7; + + if (start & 0x7) { + + uintptr_t next_8 = (start & ~7) + 8; + size_t first_size = next_8 - start; + + if (n <= first_size) { + + uintptr_t h = start; + int8_t* shadow_addr = (int8_t*)(h >> 3) + SHADOW_OFFSET; + int8_t k = *shadow_addr; + return k != 0 && ((intptr_t)((h & 7) + n) > k); + + } + + uintptr_t h = start; + int8_t* shadow_addr = (int8_t*)(h >> 3) + SHADOW_OFFSET; + int8_t k = *shadow_addr; + if (k != 0 && ((intptr_t)((h & 7) + first_size) > k)) return 1; + + start = next_8; + + } + + while (start < last_8) { + + uintptr_t h = start; + int8_t* shadow_addr = (int8_t*)(h >> 3) + SHADOW_OFFSET; + if 
(*shadow_addr) return 1; + start += 8; + + } + + if (last_8 != end) { + + uintptr_t h = start; + size_t last_size = end - last_8; + int8_t* shadow_addr = (int8_t*)(h >> 3) + SHADOW_OFFSET; + int8_t k = *shadow_addr; + return k != 0 && ((intptr_t)((h & 7) + last_size) > k); + + } + + return 0; + +} + +int asan_giovese_guest_loadN(target_ulong addr, size_t n) { + + if (!n) return 0; + + target_ulong start = addr; + target_ulong end = start + n; + target_ulong last_8 = end & ~7; + + if (start & 0x7) { + + target_ulong next_8 = (start & ~7) + 8; + size_t first_size = next_8 - start; + + if (n <= first_size) { + + uintptr_t h = (uintptr_t)AFL_G2H(start); + int8_t* shadow_addr = (int8_t*)(h >> 3) + SHADOW_OFFSET; + int8_t k = *shadow_addr; + return k != 0 && ((intptr_t)((h & 7) + n) > k); + + } + + uintptr_t h = (uintptr_t)AFL_G2H(start); + int8_t* shadow_addr = (int8_t*)(h >> 3) + SHADOW_OFFSET; + int8_t k = *shadow_addr; + if (k != 0 && ((intptr_t)((h & 7) + first_size) > k)) return 1; + + start = next_8; + + } + + while (start < last_8) { + + uintptr_t h = (uintptr_t)AFL_G2H(start); + int8_t* shadow_addr = (int8_t*)(h >> 3) + SHADOW_OFFSET; + if (*shadow_addr) return 1; + start += 8; + + } + + if (last_8 != end) { + + uintptr_t h = (uintptr_t)AFL_G2H(start); + size_t last_size = end - last_8; + int8_t* shadow_addr = (int8_t*)(h >> 3) + SHADOW_OFFSET; + int8_t k = *shadow_addr; + return k != 0 && ((intptr_t)((h & 7) + last_size) > k); + + } + + return 0; + +} + +int asan_giovese_guest_storeN(target_ulong addr, size_t n) { + + if (!n) return 0; + + target_ulong start = addr; + target_ulong end = start + n; + target_ulong last_8 = end & ~7; + + if (start & 0x7) { + + target_ulong next_8 = (start & ~7) + 8; + size_t first_size = next_8 - start; + + if (n <= first_size) { + + uintptr_t h = (uintptr_t)AFL_G2H(start); + int8_t* shadow_addr = (int8_t*)(h >> 3) + SHADOW_OFFSET; + int8_t k = *shadow_addr; + return k != 0 && ((intptr_t)((h & 7) + n) > k); + + } + + uintptr_t h = (uintptr_t)AFL_G2H(start); + int8_t* shadow_addr = (int8_t*)(h >> 3) + SHADOW_OFFSET; + int8_t k = *shadow_addr; + if (k != 0 && ((intptr_t)((h & 7) + first_size) > k)) return 1; + + start = next_8; + + } + + while (start < last_8) { + + uintptr_t h = (uintptr_t)AFL_G2H(start); + int8_t* shadow_addr = (int8_t*)(h >> 3) + SHADOW_OFFSET; + if (*shadow_addr) return 1; + start += 8; + + } + + if (last_8 != end) { + + uintptr_t h = (uintptr_t)AFL_G2H(start); + size_t last_size = end - last_8; + int8_t* shadow_addr = (int8_t*)(h >> 3) + SHADOW_OFFSET; + int8_t k = *shadow_addr; + return k != 0 && ((intptr_t)((h & 7) + last_size) > k); + + } + + return 0; + +} + +// ------------------------------------------------------------------------- // +// Poison +// ------------------------------------------------------------------------- // + +int asan_giovese_poison_region(void* ptr, size_t n, + uint8_t poison_byte) { + + if (!n) return 0; + + uintptr_t start = (uintptr_t)ptr; + uintptr_t end = start + n; + uintptr_t last_8 = end & ~7; + + if (start & 0x7) { + + target_ulong next_8 = (start & ~7) + 8; + size_t first_size = next_8 - start; + + if (n < first_size) return 0; + + uintptr_t h = start; + uint8_t* shadow_addr = (uint8_t*)(h >> 3) + SHADOW_OFFSET; + *shadow_addr = 8 - first_size; + + start = next_8; + + } + + while (start < last_8) { + + uintptr_t h = start; + uint8_t* shadow_addr = (uint8_t*)(h >> 3) + SHADOW_OFFSET; + *shadow_addr = poison_byte; + start += 8; + + } + + return 1; + +} + +int asan_giovese_user_poison_region(void* ptr, 
size_t n) { + + return asan_giovese_poison_region(ptr, n, ASAN_USER); + +} + +int asan_giovese_unpoison_region(void* ptr, size_t n) { + + target_ulong start = (uintptr_t)ptr; + target_ulong end = start + n; + + while (start < end) { + + uintptr_t h = start; + uint8_t* shadow_addr = (uint8_t*)(h >> 3) + SHADOW_OFFSET; + *shadow_addr = 0; + start += 8; + + } + + return 1; + +} + +int asan_giovese_poison_guest_region(target_ulong addr, size_t n, + uint8_t poison_byte) { + + if (!n) return 0; + + target_ulong start = addr; + target_ulong end = start + n; + target_ulong last_8 = end & ~7; + + if (start & 0x7) { + + target_ulong next_8 = (start & ~7) + 8; + size_t first_size = next_8 - start; + + if (n < first_size) return 0; + + uintptr_t h = (uintptr_t)AFL_G2H(start); + uint8_t* shadow_addr = (uint8_t*)(h >> 3) + SHADOW_OFFSET; + *shadow_addr = 8 - first_size; + + start = next_8; + + } + + while (start < last_8) { + + uintptr_t h = (uintptr_t)AFL_G2H(start); + uint8_t* shadow_addr = (uint8_t*)(h >> 3) + SHADOW_OFFSET; + *shadow_addr = poison_byte; + start += 8; + + } + + return 1; + +} + +int asan_giovese_user_poison_guest_region(target_ulong addr, size_t n) { + + return asan_giovese_poison_guest_region(addr, n, ASAN_USER); + +} + +int asan_giovese_unpoison_guest_region(target_ulong addr, size_t n) { + + target_ulong start = addr; + target_ulong end = start + n; + + while (start < end) { + + uintptr_t h = (uintptr_t)AFL_G2H(start); + uint8_t* shadow_addr = (uint8_t*)(h >> 3) + SHADOW_OFFSET; + *shadow_addr = 0; + start += 8; + + } + + return 1; + +} + + +// ------------------------------------------------------------------------- // +// Report +// ------------------------------------------------------------------------- // + +// from https://gist.github.com/RabaDabaDoba/145049536f815903c79944599c6f952a + +// Regular text +#define ANSI_COLOR_BLK "\e[0;30m" +#define ANSI_COLOR_RED "\e[0;31m" +#define ANSI_COLOR_GRN "\e[0;32m" +#define ANSI_COLOR_YEL "\e[0;33m" +#define ANSI_COLOR_BLU "\e[0;34m" +#define ANSI_COLOR_MAG "\e[0;35m" +#define ANSI_COLOR_CYN "\e[0;36m" +#define ANSI_COLOR_WHT "\e[0;37m" + +// High intensty text +#define ANSI_COLOR_HBLK "\e[0;90m" +#define ANSI_COLOR_HRED "\e[0;91m" +#define ANSI_COLOR_HGRN "\e[0;92m" +#define ANSI_COLOR_HYEL "\e[0;93m" +#define ANSI_COLOR_HBLU "\e[0;94m" +#define ANSI_COLOR_HMAG "\e[0;95m" +#define ANSI_COLOR_HCYN "\e[0;96m" +#define ANSI_COLOR_HWHT "\e[0;97m" + +// Reset +#define ANSI_COLOR_RESET "\e[0m" + +static const char* shadow_color_map[] = { + + "" /* 0x0 */, + "" /* 0x1 */, + "" /* 0x2 */, + "" /* 0x3 */, + "" /* 0x4 */, + "" /* 0x5 */, + "" /* 0x6 */, + "" /* 0x7 */, + "" /* 0x8 */, + "" /* 0x9 */, + "" /* 0xa */, + "" /* 0xb */, + "" /* 0xc */, + "" /* 0xd */, + "" /* 0xe */, + "" /* 0xf */, + "" /* 0x10 */, + "" /* 0x11 */, + "" /* 0x12 */, + "" /* 0x13 */, + "" /* 0x14 */, + "" /* 0x15 */, + "" /* 0x16 */, + "" /* 0x17 */, + "" /* 0x18 */, + "" /* 0x19 */, + "" /* 0x1a */, + "" /* 0x1b */, + "" /* 0x1c */, + "" /* 0x1d */, + "" /* 0x1e */, + "" /* 0x1f */, + "" /* 0x20 */, + "" /* 0x21 */, + "" /* 0x22 */, + "" /* 0x23 */, + "" /* 0x24 */, + "" /* 0x25 */, + "" /* 0x26 */, + "" /* 0x27 */, + "" /* 0x28 */, + "" /* 0x29 */, + "" /* 0x2a */, + "" /* 0x2b */, + "" /* 0x2c */, + "" /* 0x2d */, + "" /* 0x2e */, + "" /* 0x2f */, + "" /* 0x30 */, + "" /* 0x31 */, + "" /* 0x32 */, + "" /* 0x33 */, + "" /* 0x34 */, + "" /* 0x35 */, + "" /* 0x36 */, + "" /* 0x37 */, + "" /* 0x38 */, + "" /* 0x39 */, + "" /* 0x3a */, + "" /* 0x3b */, + "" /* 0x3c 
*/, + "" /* 0x3d */, + "" /* 0x3e */, + "" /* 0x3f */, + "" /* 0x40 */, + "" /* 0x41 */, + "" /* 0x42 */, + "" /* 0x43 */, + "" /* 0x44 */, + "" /* 0x45 */, + "" /* 0x46 */, + "" /* 0x47 */, + "" /* 0x48 */, + "" /* 0x49 */, + "" /* 0x4a */, + "" /* 0x4b */, + "" /* 0x4c */, + "" /* 0x4d */, + "" /* 0x4e */, + "" /* 0x4f */, + "" /* 0x50 */, + "" /* 0x51 */, + "" /* 0x52 */, + "" /* 0x53 */, + "" /* 0x54 */, + "" /* 0x55 */, + "" /* 0x56 */, + "" /* 0x57 */, + "" /* 0x58 */, + "" /* 0x59 */, + "" /* 0x5a */, + "" /* 0x5b */, + "" /* 0x5c */, + "" /* 0x5d */, + "" /* 0x5e */, + "" /* 0x5f */, + "" /* 0x60 */, + "" /* 0x61 */, + "" /* 0x62 */, + "" /* 0x63 */, + "" /* 0x64 */, + "" /* 0x65 */, + "" /* 0x66 */, + "" /* 0x67 */, + "" /* 0x68 */, + "" /* 0x69 */, + "" /* 0x6a */, + "" /* 0x6b */, + "" /* 0x6c */, + "" /* 0x6d */, + "" /* 0x6e */, + "" /* 0x6f */, + "" /* 0x70 */, + "" /* 0x71 */, + "" /* 0x72 */, + "" /* 0x73 */, + "" /* 0x74 */, + "" /* 0x75 */, + "" /* 0x76 */, + "" /* 0x77 */, + "" /* 0x78 */, + "" /* 0x79 */, + "" /* 0x7a */, + "" /* 0x7b */, + "" /* 0x7c */, + "" /* 0x7d */, + "" /* 0x7e */, + "" /* 0x7f */, + "" /* 0x80 */, + "" /* 0x81 */, + "" /* 0x82 */, + "" /* 0x83 */, + "" /* 0x84 */, + "" /* 0x85 */, + "" /* 0x86 */, + "" /* 0x87 */, + "" /* 0x88 */, + "" /* 0x89 */, + "" /* 0x8a */, + "" /* 0x8b */, + "" /* 0x8c */, + "" /* 0x8d */, + "" /* 0x8e */, + "" /* 0x8f */, + "" /* 0x90 */, + "" /* 0x91 */, + "" /* 0x92 */, + "" /* 0x93 */, + "" /* 0x94 */, + "" /* 0x95 */, + "" /* 0x96 */, + "" /* 0x97 */, + "" /* 0x98 */, + "" /* 0x99 */, + "" /* 0x9a */, + "" /* 0x9b */, + "" /* 0x9c */, + "" /* 0x9d */, + "" /* 0x9e */, + "" /* 0x9f */, + "" /* 0xa0 */, + "" /* 0xa1 */, + "" /* 0xa2 */, + "" /* 0xa3 */, + "" /* 0xa4 */, + "" /* 0xa5 */, + "" /* 0xa6 */, + "" /* 0xa7 */, + "" /* 0xa8 */, + "" /* 0xa9 */, + "" /* 0xaa */, + "" /* 0xab */, + ANSI_COLOR_HRED /* 0xac */, + "" /* 0xad */, + "" /* 0xae */, + "" /* 0xaf */, + "" /* 0xb0 */, + "" /* 0xb1 */, + "" /* 0xb2 */, + "" /* 0xb3 */, + "" /* 0xb4 */, + "" /* 0xb5 */, + "" /* 0xb6 */, + "" /* 0xb7 */, + "" /* 0xb8 */, + "" /* 0xb9 */, + "" /* 0xba */, + ANSI_COLOR_HYEL /* 0xbb */, + "" /* 0xbc */, + "" /* 0xbd */, + "" /* 0xbe */, + "" /* 0xbf */, + "" /* 0xc0 */, + "" /* 0xc1 */, + "" /* 0xc2 */, + "" /* 0xc3 */, + "" /* 0xc4 */, + "" /* 0xc5 */, + "" /* 0xc6 */, + "" /* 0xc7 */, + "" /* 0xc8 */, + "" /* 0xc9 */, + ANSI_COLOR_HBLU /* 0xca */, + ANSI_COLOR_HBLU /* 0xcb */, + "" /* 0xcc */, + "" /* 0xcd */, + "" /* 0xce */, + "" /* 0xcf */, + "" /* 0xd0 */, + "" /* 0xd1 */, + "" /* 0xd2 */, + "" /* 0xd3 */, + "" /* 0xd4 */, + "" /* 0xd5 */, + "" /* 0xd6 */, + "" /* 0xd7 */, + "" /* 0xd8 */, + "" /* 0xd9 */, + "" /* 0xda */, + "" /* 0xdb */, + "" /* 0xdc */, + "" /* 0xdd */, + "" /* 0xde */, + "" /* 0xdf */, + "" /* 0xe0 */, + "" /* 0xe1 */, + "" /* 0xe2 */, + "" /* 0xe3 */, + "" /* 0xe4 */, + "" /* 0xe5 */, + "" /* 0xe6 */, + "" /* 0xe7 */, + "" /* 0xe8 */, + "" /* 0xe9 */, + "" /* 0xea */, + "" /* 0xeb */, + "" /* 0xec */, + "" /* 0xed */, + "" /* 0xee */, + "" /* 0xef */, + "" /* 0xf0 */, + ANSI_COLOR_HRED /* 0xf1 */, + ANSI_COLOR_HRED /* 0xf2 */, + ANSI_COLOR_HRED /* 0xf3 */, + "" /* 0xf4 */, + ANSI_COLOR_HMAG /* 0xf5 */, + ANSI_COLOR_HCYN /* 0xf6 */, + ANSI_COLOR_HBLU /* 0xf7 */, + ANSI_COLOR_HMAG /* 0xf8 */, + ANSI_COLOR_HRED /* 0xf9 */, + ANSI_COLOR_HRED /* 0xfa */, + ANSI_COLOR_HRED /* 0xfb */, + ANSI_COLOR_HBLU /* 0xfc */, + ANSI_COLOR_HMAG /* 0xfd */, + ANSI_COLOR_HYEL /* 0xfe */, + "" /* 0xff */ + +}; + +static 
const char* access_type_str[] = {"READ", "WRITE"}; + +static const char* poisoned_strerror(uint8_t poison_byte) { + + switch (poison_byte) { + + case ASAN_HEAP_RZ: + case ASAN_HEAP_LEFT_RZ: + case ASAN_HEAP_RIGHT_RZ: return "heap-buffer-overflow"; + case ASAN_HEAP_FREED: return "heap-use-after-free"; + + } + + return "use-after-poison"; + +} + +static int poisoned_find_error(target_ulong addr, size_t n, + target_ulong* fault_addr, + const char** err_string) { + + target_ulong start = addr; + target_ulong end = start + n; + int have_partials = 0; + + while (start < end) { + + uintptr_t rs = (uintptr_t)AFL_G2H(start); + int8_t* shadow_addr = (int8_t*)(rs >> 3) + SHADOW_OFFSET; + switch (*shadow_addr) { + + case ASAN_VALID: have_partials = 0; break; + case ASAN_PARTIAL1: + case ASAN_PARTIAL2: + case ASAN_PARTIAL3: + case ASAN_PARTIAL4: + case ASAN_PARTIAL5: + case ASAN_PARTIAL6: + case ASAN_PARTIAL7: { + + have_partials = 1; + target_ulong a = (start & ~7) + *shadow_addr; + if (*fault_addr == 0 && a >= start && a < end) *fault_addr = a; + break; + + } + + default: { + + if (*fault_addr == 0) *fault_addr = start; + *err_string = poisoned_strerror(*shadow_addr); + return 1; + + } + + } + + start += 8; + + } + + if (have_partials) { + + uintptr_t rs = (uintptr_t)AFL_G2H((end & ~7) + 8); + uint8_t* last_shadow_addr = (uint8_t*)(rs >> 3) + SHADOW_OFFSET; + *err_string = poisoned_strerror(*last_shadow_addr); + return 1; + + } + + if (*fault_addr == 0) *fault_addr = addr; + *err_string = "use-after-poison"; + return 1; + +} + +#define _MEM2SHADOW(x) ((uint8_t*)((uintptr_t)AFL_G2H(x) >> 3) + SHADOW_OFFSET) + +#define _MEM2SHADOWPRINT(x) shadow_color_map[*_MEM2SHADOW(x)], *_MEM2SHADOW(x) + +static int print_shadow_line(target_ulong addr) { + + fprintf(stderr, + " 0x%012" PRIxPTR ": %s%02x" ANSI_COLOR_RESET + " %s%02x" ANSI_COLOR_RESET " %s%02x" ANSI_COLOR_RESET + " %s%02x" ANSI_COLOR_RESET " %s%02x" ANSI_COLOR_RESET + " %s%02x" ANSI_COLOR_RESET " %s%02x" ANSI_COLOR_RESET + " %s%02x" ANSI_COLOR_RESET " %s%02x" ANSI_COLOR_RESET + " %s%02x" ANSI_COLOR_RESET " %s%02x" ANSI_COLOR_RESET + " %s%02x" ANSI_COLOR_RESET " %s%02x" ANSI_COLOR_RESET + " " + "%s%02x" ANSI_COLOR_RESET " %s%02x" ANSI_COLOR_RESET + " %s%02x" ANSI_COLOR_RESET "\n", + (uintptr_t)_MEM2SHADOW(addr), _MEM2SHADOWPRINT(addr), + _MEM2SHADOWPRINT(addr + 8), _MEM2SHADOWPRINT(addr + 16), + _MEM2SHADOWPRINT(addr + 24), _MEM2SHADOWPRINT(addr + 32), + _MEM2SHADOWPRINT(addr + 40), _MEM2SHADOWPRINT(addr + 48), + _MEM2SHADOWPRINT(addr + 56), _MEM2SHADOWPRINT(addr + 64), + _MEM2SHADOWPRINT(addr + 72), _MEM2SHADOWPRINT(addr + 80), + _MEM2SHADOWPRINT(addr + 88), _MEM2SHADOWPRINT(addr + 96), + _MEM2SHADOWPRINT(addr + 104), _MEM2SHADOWPRINT(addr + 112), + _MEM2SHADOWPRINT(addr + 120)); + + return 1; + +} + +static int print_shadow_line_fault(target_ulong addr, target_ulong fault_addr) { + + int i = (fault_addr - addr) / 8; + const char* format = + "=>0x%012" PRIxPTR ": %s%02x" ANSI_COLOR_RESET " %s%02x" ANSI_COLOR_RESET + " %s%02x" ANSI_COLOR_RESET " %s%02x" ANSI_COLOR_RESET + " %s%02x" ANSI_COLOR_RESET " %s%02x" ANSI_COLOR_RESET + " %s%02x" ANSI_COLOR_RESET " %s%02x" ANSI_COLOR_RESET + " %s%02x" ANSI_COLOR_RESET " %s%02x" ANSI_COLOR_RESET + " " + "%s%02x" ANSI_COLOR_RESET " %s%02x" ANSI_COLOR_RESET + " %s%02x" ANSI_COLOR_RESET " %s%02x" ANSI_COLOR_RESET + " %s%02x" ANSI_COLOR_RESET " %s%02x" ANSI_COLOR_RESET "\n"; + switch (i) { + + case 0: + format = "=>0x%012" PRIxPTR ":[%s%02x" ANSI_COLOR_RESET + "]%s%02x" ANSI_COLOR_RESET " %s%02x" ANSI_COLOR_RESET + " 
%s%02x" ANSI_COLOR_RESET " %s%02x" ANSI_COLOR_RESET + " %s%02x" ANSI_COLOR_RESET " %s%02x" ANSI_COLOR_RESET + " %s%02x" ANSI_COLOR_RESET " %s%02x" ANSI_COLOR_RESET + " %s%02x" ANSI_COLOR_RESET " %s%02x" ANSI_COLOR_RESET + " %s%02x" ANSI_COLOR_RESET + " " + "%s%02x" ANSI_COLOR_RESET " %s%02x" ANSI_COLOR_RESET + " %s%02x" ANSI_COLOR_RESET " %s%02x" ANSI_COLOR_RESET "\n"; + break; + case 1: + format = "=>0x%012" PRIxPTR ": %s%02x" ANSI_COLOR_RESET + "[%s%02x" ANSI_COLOR_RESET "]%s%02x" ANSI_COLOR_RESET + " %s%02x" ANSI_COLOR_RESET " %s%02x" ANSI_COLOR_RESET + " %s%02x" ANSI_COLOR_RESET " %s%02x" ANSI_COLOR_RESET + " %s%02x" ANSI_COLOR_RESET " %s%02x" ANSI_COLOR_RESET + " %s%02x" ANSI_COLOR_RESET " %s%02x" ANSI_COLOR_RESET + " %s%02x" ANSI_COLOR_RESET + " " + "%s%02x" ANSI_COLOR_RESET " %s%02x" ANSI_COLOR_RESET + " %s%02x" ANSI_COLOR_RESET " %s%02x" ANSI_COLOR_RESET "\n"; + break; + case 2: + format = "=>0x%012" PRIxPTR ": %s%02x" ANSI_COLOR_RESET + " %s%02x" ANSI_COLOR_RESET "[%s%02x" ANSI_COLOR_RESET + "]%s%02x" ANSI_COLOR_RESET " %s%02x" ANSI_COLOR_RESET + " %s%02x" ANSI_COLOR_RESET " %s%02x" ANSI_COLOR_RESET + " %s%02x" ANSI_COLOR_RESET " %s%02x" ANSI_COLOR_RESET + " %s%02x" ANSI_COLOR_RESET " %s%02x" ANSI_COLOR_RESET + " %s%02x" ANSI_COLOR_RESET + " " + "%s%02x" ANSI_COLOR_RESET " %s%02x" ANSI_COLOR_RESET + " %s%02x" ANSI_COLOR_RESET " %s%02x" ANSI_COLOR_RESET "\n"; + break; + case 3: + format = "=>0x%012" PRIxPTR ": %s%02x" ANSI_COLOR_RESET + " %s%02x" ANSI_COLOR_RESET " %s%02x" ANSI_COLOR_RESET + "[%s%02x" ANSI_COLOR_RESET "]%s%02x" ANSI_COLOR_RESET + " %s%02x" ANSI_COLOR_RESET " %s%02x" ANSI_COLOR_RESET + " %s%02x" ANSI_COLOR_RESET " %s%02x" ANSI_COLOR_RESET + " %s%02x" ANSI_COLOR_RESET " %s%02x" ANSI_COLOR_RESET + " %s%02x" ANSI_COLOR_RESET + " " + "%s%02x" ANSI_COLOR_RESET " %s%02x" ANSI_COLOR_RESET + " %s%02x" ANSI_COLOR_RESET " %s%02x" ANSI_COLOR_RESET "\n"; + break; + case 4: + format = "=>0x%012" PRIxPTR ": %s%02x" ANSI_COLOR_RESET + " %s%02x" ANSI_COLOR_RESET " %s%02x" ANSI_COLOR_RESET + " %s%02x" ANSI_COLOR_RESET "[%s%02x" ANSI_COLOR_RESET + "]%s%02x" ANSI_COLOR_RESET " %s%02x" ANSI_COLOR_RESET + " %s%02x" ANSI_COLOR_RESET " %s%02x" ANSI_COLOR_RESET + " %s%02x" ANSI_COLOR_RESET " %s%02x" ANSI_COLOR_RESET + " %s%02x" ANSI_COLOR_RESET + " " + "%s%02x" ANSI_COLOR_RESET " %s%02x" ANSI_COLOR_RESET + " %s%02x" ANSI_COLOR_RESET " %s%02x" ANSI_COLOR_RESET "\n"; + break; + case 5: + format = "=>0x%012" PRIxPTR ": %s%02x" ANSI_COLOR_RESET + " %s%02x" ANSI_COLOR_RESET " %s%02x" ANSI_COLOR_RESET + " %s%02x" ANSI_COLOR_RESET " %s%02x" ANSI_COLOR_RESET + "[%s%02x" ANSI_COLOR_RESET "]%s%02x" ANSI_COLOR_RESET + " %s%02x" ANSI_COLOR_RESET " %s%02x" ANSI_COLOR_RESET + " %s%02x" ANSI_COLOR_RESET " %s%02x" ANSI_COLOR_RESET + " %s%02x" ANSI_COLOR_RESET + " " + "%s%02x" ANSI_COLOR_RESET " %s%02x" ANSI_COLOR_RESET + " %s%02x" ANSI_COLOR_RESET " %s%02x" ANSI_COLOR_RESET "\n"; + break; + case 6: + format = "=>0x%012" PRIxPTR ": %s%02x" ANSI_COLOR_RESET + " %s%02x" ANSI_COLOR_RESET " %s%02x" ANSI_COLOR_RESET + " %s%02x" ANSI_COLOR_RESET " %s%02x" ANSI_COLOR_RESET + " %s%02x" ANSI_COLOR_RESET "[%s%02x" ANSI_COLOR_RESET + "]%s%02x" ANSI_COLOR_RESET " %s%02x" ANSI_COLOR_RESET + " %s%02x" ANSI_COLOR_RESET " %s%02x" ANSI_COLOR_RESET + " %s%02x" ANSI_COLOR_RESET + " " + "%s%02x" ANSI_COLOR_RESET " %s%02x" ANSI_COLOR_RESET + " %s%02x" ANSI_COLOR_RESET " %s%02x" ANSI_COLOR_RESET "\n"; + break; + case 7: + format = "=>0x%012" PRIxPTR ": %s%02x" ANSI_COLOR_RESET + " %s%02x" ANSI_COLOR_RESET " %s%02x" 
ANSI_COLOR_RESET + " %s%02x" ANSI_COLOR_RESET " %s%02x" ANSI_COLOR_RESET + " %s%02x" ANSI_COLOR_RESET " %s%02x" ANSI_COLOR_RESET + "[%s%02x" ANSI_COLOR_RESET "]%s%02x" ANSI_COLOR_RESET + " %s%02x" ANSI_COLOR_RESET " %s%02x" ANSI_COLOR_RESET + " %s%02x" ANSI_COLOR_RESET + " " + "%s%02x" ANSI_COLOR_RESET " %s%02x" ANSI_COLOR_RESET + " %s%02x" ANSI_COLOR_RESET " %s%02x" ANSI_COLOR_RESET "\n"; + break; + case 8: + format = "=>0x%012" PRIxPTR ": %s%02x" ANSI_COLOR_RESET + " %s%02x" ANSI_COLOR_RESET " %s%02x" ANSI_COLOR_RESET + " %s%02x" ANSI_COLOR_RESET " %s%02x" ANSI_COLOR_RESET + " %s%02x" ANSI_COLOR_RESET " %s%02x" ANSI_COLOR_RESET + " %s%02x" ANSI_COLOR_RESET "[%s%02x" ANSI_COLOR_RESET + "]%s%02x" ANSI_COLOR_RESET " %s%02x" ANSI_COLOR_RESET + " %s%02x" ANSI_COLOR_RESET + " " + "%s%02x" ANSI_COLOR_RESET " %s%02x" ANSI_COLOR_RESET + " %s%02x" ANSI_COLOR_RESET " %s%02x" ANSI_COLOR_RESET "\n"; + break; + case 9: + format = "=>0x%012" PRIxPTR ": %s%02x" ANSI_COLOR_RESET + " %s%02x" ANSI_COLOR_RESET " %s%02x" ANSI_COLOR_RESET + " %s%02x" ANSI_COLOR_RESET " %s%02x" ANSI_COLOR_RESET + " %s%02x" ANSI_COLOR_RESET " %s%02x" ANSI_COLOR_RESET + " %s%02x" ANSI_COLOR_RESET " %s%02x" ANSI_COLOR_RESET + "[%s%02x" ANSI_COLOR_RESET "]%s%02x" ANSI_COLOR_RESET + " %s%02x" ANSI_COLOR_RESET + " " + "%s%02x" ANSI_COLOR_RESET " %s%02x" ANSI_COLOR_RESET + " %s%02x" ANSI_COLOR_RESET " %s%02x" ANSI_COLOR_RESET "\n"; + break; + case 10: + format = "=>0x%012" PRIxPTR ": %s%02x" ANSI_COLOR_RESET + " %s%02x" ANSI_COLOR_RESET " %s%02x" ANSI_COLOR_RESET + " %s%02x" ANSI_COLOR_RESET " %s%02x" ANSI_COLOR_RESET + " %s%02x" ANSI_COLOR_RESET " %s%02x" ANSI_COLOR_RESET + " %s%02x" ANSI_COLOR_RESET " %s%02x" ANSI_COLOR_RESET + " %s%02x" ANSI_COLOR_RESET "[%s%02x" ANSI_COLOR_RESET + "]%s%02x" ANSI_COLOR_RESET + " " + "%s%02x" ANSI_COLOR_RESET " %s%02x" ANSI_COLOR_RESET + " %s%02x" ANSI_COLOR_RESET " %s%02x" ANSI_COLOR_RESET "\n"; + break; + case 11: + format = "=>0x%012" PRIxPTR ": %s%02x" ANSI_COLOR_RESET + " %s%02x" ANSI_COLOR_RESET " %s%02x" ANSI_COLOR_RESET + " %s%02x" ANSI_COLOR_RESET " %s%02x" ANSI_COLOR_RESET + " %s%02x" ANSI_COLOR_RESET " %s%02x" ANSI_COLOR_RESET + " %s%02x" ANSI_COLOR_RESET " %s%02x" ANSI_COLOR_RESET + " %s%02x" ANSI_COLOR_RESET + " " + "%s%02x" ANSI_COLOR_RESET "[%s%02x" ANSI_COLOR_RESET + "]%s%02x" ANSI_COLOR_RESET " %s%02x" ANSI_COLOR_RESET + " %s%02x" ANSI_COLOR_RESET " %s%02x" ANSI_COLOR_RESET "\n"; + break; + case 12: + format = "=>0x%012" PRIxPTR ": %s%02x" ANSI_COLOR_RESET + " %s%02x" ANSI_COLOR_RESET " %s%02x" ANSI_COLOR_RESET + " %s%02x" ANSI_COLOR_RESET " %s%02x" ANSI_COLOR_RESET + " %s%02x" ANSI_COLOR_RESET " %s%02x" ANSI_COLOR_RESET + " %s%02x" ANSI_COLOR_RESET " %s%02x" ANSI_COLOR_RESET + " %s%02x" ANSI_COLOR_RESET " %s%02x" ANSI_COLOR_RESET + " " + "%s%02x" ANSI_COLOR_RESET "[%s%02x" ANSI_COLOR_RESET + "]%s%02x" ANSI_COLOR_RESET " %s%02x" ANSI_COLOR_RESET + " %s%02x" ANSI_COLOR_RESET "\n"; + break; + case 13: + format = "=>0x%012" PRIxPTR ": %s%02x" ANSI_COLOR_RESET + " %s%02x" ANSI_COLOR_RESET " %s%02x" ANSI_COLOR_RESET + " %s%02x" ANSI_COLOR_RESET " %s%02x" ANSI_COLOR_RESET + " %s%02x" ANSI_COLOR_RESET " %s%02x" ANSI_COLOR_RESET + " %s%02x" ANSI_COLOR_RESET " %s%02x" ANSI_COLOR_RESET + " %s%02x" ANSI_COLOR_RESET " %s%02x" ANSI_COLOR_RESET + " %s%02x" ANSI_COLOR_RESET + " " + "%s%02x" ANSI_COLOR_RESET "[%s%02x" ANSI_COLOR_RESET + "]%s%02x" ANSI_COLOR_RESET " %s%02x" ANSI_COLOR_RESET "\n"; + break; + case 14: + format = "=>0x%012" PRIxPTR ": %s%02x" ANSI_COLOR_RESET + " %s%02x" 
ANSI_COLOR_RESET " %s%02x" ANSI_COLOR_RESET + " %s%02x" ANSI_COLOR_RESET " %s%02x" ANSI_COLOR_RESET + " %s%02x" ANSI_COLOR_RESET " %s%02x" ANSI_COLOR_RESET + " %s%02x" ANSI_COLOR_RESET " %s%02x" ANSI_COLOR_RESET + " %s%02x" ANSI_COLOR_RESET " %s%02x" ANSI_COLOR_RESET + " %s%02x" ANSI_COLOR_RESET + " " + "%s%02x" ANSI_COLOR_RESET " %s%02x" ANSI_COLOR_RESET + "[%s%02x" ANSI_COLOR_RESET "]%s%02x" ANSI_COLOR_RESET "\n"; + break; + case 15: + format = "=>0x%012" PRIxPTR ": %s%02x" ANSI_COLOR_RESET + " %s%02x" ANSI_COLOR_RESET " %s%02x" ANSI_COLOR_RESET + " %s%02x" ANSI_COLOR_RESET " %s%02x" ANSI_COLOR_RESET + " %s%02x" ANSI_COLOR_RESET " %s%02x" ANSI_COLOR_RESET + " %s%02x" ANSI_COLOR_RESET " %s%02x" ANSI_COLOR_RESET + " %s%02x" ANSI_COLOR_RESET " %s%02x" ANSI_COLOR_RESET + " %s%02x" ANSI_COLOR_RESET + " " + "%s%02x" ANSI_COLOR_RESET " %s%02x" ANSI_COLOR_RESET + " %s%02x" ANSI_COLOR_RESET "[%s%02x" ANSI_COLOR_RESET "]\n"; + break; + + } + + fprintf(stderr, format, (uintptr_t)_MEM2SHADOW(addr), _MEM2SHADOWPRINT(addr), + _MEM2SHADOWPRINT(addr + 8), _MEM2SHADOWPRINT(addr + 16), + _MEM2SHADOWPRINT(addr + 24), _MEM2SHADOWPRINT(addr + 32), + _MEM2SHADOWPRINT(addr + 40), _MEM2SHADOWPRINT(addr + 48), + _MEM2SHADOWPRINT(addr + 56), _MEM2SHADOWPRINT(addr + 64), + _MEM2SHADOWPRINT(addr + 72), _MEM2SHADOWPRINT(addr + 80), + _MEM2SHADOWPRINT(addr + 88), _MEM2SHADOWPRINT(addr + 96), + _MEM2SHADOWPRINT(addr + 104), _MEM2SHADOWPRINT(addr + 112), + _MEM2SHADOWPRINT(addr + 120)); + + return 1; + +} + +#undef _MEM2SHADOW +#undef _MEM2SHADOWPRINT + +static void print_shadow(target_ulong addr) { + + target_ulong center = addr & ~127; + print_shadow_line(center - 16 * 8 * 5); + print_shadow_line(center - 16 * 8 * 4); + print_shadow_line(center - 16 * 8 * 3); + print_shadow_line(center - 16 * 8 * 2); + print_shadow_line(center - 16 * 8); + print_shadow_line_fault(center, addr); + print_shadow_line(center + 16 * 8); + print_shadow_line(center + 16 * 8 * 2); + print_shadow_line(center + 16 * 8 * 3); + print_shadow_line(center + 16 * 8 * 4); + print_shadow_line(center + 16 * 8 * 5); + +} + +static void print_alloc_location_chunk(struct chunk_info* ckinfo, + target_ulong fault_addr) { + + if (fault_addr >= ckinfo->start && fault_addr < ckinfo->end) + fprintf(stderr, + ANSI_COLOR_HGRN + "0x" TARGET_FMT_lx " is located " TARGET_FMT_ld + " bytes inside of " TARGET_FMT_ld "-byte region [0x" + TARGET_FMT_lx ",0x" TARGET_FMT_lx ")" ANSI_COLOR_RESET "\n", + fault_addr, fault_addr - ckinfo->start, ckinfo->end - ckinfo->start, + ckinfo->start, ckinfo->end); + else if (ckinfo->start >= fault_addr) + fprintf(stderr, + ANSI_COLOR_HGRN + "0x" TARGET_FMT_lx " is located " TARGET_FMT_ld + " bytes to the left of " TARGET_FMT_ld "-byte region [0x" + TARGET_FMT_lx ",0x" TARGET_FMT_lx ")" ANSI_COLOR_RESET "\n", + fault_addr, ckinfo->start - fault_addr, ckinfo->end - ckinfo->start, + ckinfo->start, ckinfo->end); + else + fprintf(stderr, + ANSI_COLOR_HGRN + "0x" TARGET_FMT_lx " is located " TARGET_FMT_ld + " bytes to the right of " TARGET_FMT_ld "-byte region [0x" + TARGET_FMT_lx ",0x" TARGET_FMT_lx ")" ANSI_COLOR_RESET "\n", + fault_addr, fault_addr - ckinfo->end, ckinfo->end - ckinfo->start, + ckinfo->start, ckinfo->end); + + if (ckinfo->free_ctx) { + + fprintf(stderr, + ANSI_COLOR_HMAG "freed by thread T%d here:" ANSI_COLOR_RESET "\n", + ckinfo->free_ctx->tid); + size_t i; + for (i = 0; i < ckinfo->free_ctx->size; ++i) { + + char* printable = asan_giovese_printaddr(ckinfo->free_ctx->addresses[i]); + if (printable) + fprintf(stderr, " #%zu 
0x" TARGET_FMT_lx "%s\n", i, + ckinfo->free_ctx->addresses[i], printable); + else + fprintf(stderr, " #%zu 0x" TARGET_FMT_lx "\n", i, + ckinfo->free_ctx->addresses[i]); + } + + fputc('\n', stderr); + + fprintf(stderr, + ANSI_COLOR_HMAG + "previously allocated by thread T%d here:" ANSI_COLOR_RESET "\n", + ckinfo->free_ctx->tid); + + } else + + fprintf(stderr, + ANSI_COLOR_HMAG "allocated by thread T%d here:" ANSI_COLOR_RESET + "\n", + ckinfo->alloc_ctx->tid); + + size_t i; + for (i = 0; i < ckinfo->alloc_ctx->size; ++i) { + + char* printable = asan_giovese_printaddr(ckinfo->alloc_ctx->addresses[i]); + if (printable) + fprintf(stderr, " #%zu 0x" TARGET_FMT_lx "%s\n", i, + ckinfo->alloc_ctx->addresses[i], printable); + else + fprintf(stderr, " #%zu 0x" TARGET_FMT_lx "\n", i, + ckinfo->alloc_ctx->addresses[i]); + + } + + fputc('\n', stderr); + +} + +static void print_alloc_location(target_ulong addr, target_ulong fault_addr) { + + struct chunk_info* ckinfo = asan_giovese_alloc_search(fault_addr); + if (!ckinfo && addr != fault_addr) ckinfo = asan_giovese_alloc_search(addr); + + if (ckinfo) { + + print_alloc_location_chunk(ckinfo, fault_addr); + return; + + } + + int i = 0; + while (!ckinfo && i < DEFAULT_REDZONE_SIZE) + ckinfo = asan_giovese_alloc_search(fault_addr - (i++)); + if (ckinfo) { + + print_alloc_location_chunk(ckinfo, fault_addr); + return; + + } + + i = 0; + while (!ckinfo && i < DEFAULT_REDZONE_SIZE) + ckinfo = asan_giovese_alloc_search(fault_addr + (i++)); + if (ckinfo) { + + print_alloc_location_chunk(ckinfo, fault_addr); + return; + + } + + fprintf(stderr, "Address 0x" TARGET_FMT_lx " is a wild pointer.\n", + fault_addr); + +} + +int asan_giovese_report_and_crash(int access_type, target_ulong addr, size_t n, + CPUArchState *env) { + + target_ulong pc= PC_GET(env); + target_ulong bp= BP_GET(env); + target_ulong sp= SP_GET(env); + struct call_context ctx; + asan_giovese_populate_context(&ctx, pc); + target_ulong fault_addr = 0; + const char* error_type; + + if (!poisoned_find_error(addr, n, &fault_addr, &error_type)) return 0; + + fprintf(stderr, + "=================================================================\n" + ANSI_COLOR_HRED "==%d==ERROR: " ASAN_NAME_STR ": %s on address 0x" + TARGET_FMT_lx " at pc 0x" TARGET_FMT_lx " bp 0x" TARGET_FMT_lx + " sp 0x" TARGET_FMT_lx ANSI_COLOR_RESET "\n", + getpid(), error_type, addr, pc, bp, sp); + + fprintf(stderr, + ANSI_COLOR_HBLU "%s of size %zu at 0x" TARGET_FMT_lx " thread T%d" + ANSI_COLOR_RESET "\n", + access_type_str[access_type], n, addr, ctx.tid); + size_t i; + for (i = 0; i < ctx.size; ++i) { + + char* printable = asan_giovese_printaddr(ctx.addresses[i]); + if (printable) + fprintf(stderr, " #%zu 0x" TARGET_FMT_lx "%s\n", i, ctx.addresses[i], + printable); + else + fprintf(stderr, " #%zu 0x" TARGET_FMT_lx "\n", i, ctx.addresses[i]); + + } + + fputc('\n', stderr); + + print_alloc_location(addr, fault_addr); + + const char* printable_pc = asan_giovese_printaddr(pc); + if (!printable_pc) printable_pc = ""; + fprintf(stderr, + "SUMMARY: " ASAN_NAME_STR + ": %s%s\n" + "Shadow bytes around the buggy address:\n", + error_type, printable_pc); + + print_shadow(fault_addr); + + fprintf( + stderr, + "Shadow byte legend (one shadow byte represents 8 application bytes):\n" + " Addressable: 00\n" + " Partially addressable: 01 02 03 04 05 06 07\n" + " Heap left redzone: " ANSI_COLOR_HRED "fa" ANSI_COLOR_RESET "\n" + " Heap right redzone: " ANSI_COLOR_HRED "fb" ANSI_COLOR_RESET "\n" + " Freed heap region: " ANSI_COLOR_HMAG "fd" 
ANSI_COLOR_RESET "\n"
+      //" Stack left redzone: " ANSI_COLOR_HRED "f1" ANSI_COLOR_RESET "\n"
+      //" Stack mid redzone: " ANSI_COLOR_HRED "f2" ANSI_COLOR_RESET "\n"
+      //" Stack right redzone: " ANSI_COLOR_HRED "f3" ANSI_COLOR_RESET "\n"
+      //" Stack after return: " ANSI_COLOR_HMAG "f5" ANSI_COLOR_RESET "\n"
+      //" Stack use after scope: " ANSI_COLOR_HMAG "f8" ANSI_COLOR_RESET "\n"
+      //" Global redzone: " ANSI_COLOR_HRED "f9" ANSI_COLOR_RESET "\n"
+      //" Global init order: " ANSI_COLOR_HCYN "f6" ANSI_COLOR_RESET "\n"
+      " Poisoned by user: " ANSI_COLOR_HBLU "f7" ANSI_COLOR_RESET "\n"
+      //" Container overflow: " ANSI_COLOR_HBLU "fc" ANSI_COLOR_RESET "\n"
+      //" Array cookie: " ANSI_COLOR_HRED "ac" ANSI_COLOR_RESET "\n"
+      //" Intra object redzone: " ANSI_COLOR_HYEL "bb" ANSI_COLOR_RESET "\n"
+      " ASan internal: " ANSI_COLOR_HYEL "fe" ANSI_COLOR_RESET "\n"
+      //" Left alloca redzone: " ANSI_COLOR_HBLU "ca" ANSI_COLOR_RESET "\n"
+      //" Right alloca redzone: " ANSI_COLOR_HBLU "cb" ANSI_COLOR_RESET "\n"
+      " Shadow gap: cc\n"
+      "==%d==ABORTING\n",
+      getpid());
+
+  /*
+   * Rather than aborting the host, we signal a DATA ABORT in the guest and
+   * abort the cpu_loop. This results in the generation of a SIGSEGV and the
+   * subsequent generation (if configured) of a core file for the guest, which
+   * is much more useful for debugging purposes.
+   *
+   * It should be noted, however, that these core files have a few
+   * limitations. Firstly, the CPU program counter is set to the start of the
+   * basic block rather than to the faulting instruction (hence neither the
+   * context printed above nor the core file has an up-to-date instruction
+   * pointer), and secondly, the core file does not include the address of the
+   * fault (a limitation of the core file format).
+   *
+   * When translating basic blocks, the DisasContext structure carries the
+   * instruction pointer, thereby allowing RIP-relative instructions to be
+   * properly translated. During execution, however, it is the CPUArchState
+   * which carries the register context (including the instruction pointer).
+   * Since the instruction pointer at the point of execution is fixed for any
+   * given instruction, its value can be incorporated at instrumentation time
+   * rather than at execution time. This avoids the overhead of updating the
+   * CPUArchState at the completion of each instruction. This state must
+   * nevertheless be updated at the start of each block to allow functionality
+   * such as execution tracing (the -d exec argument) to work properly.
+   */
+
+  /*
+   * Queue a SIGSEGV representing our fault.
+   */
+  target_siginfo_t info = {
+    .si_signo = TARGET_SIGSEGV,
+    .si_errno = 0,
+    .si_code = TARGET_SEGV_MAPERR,
+    ._sifields._sigfault._addr = fault_addr
+  };
+  queue_signal(env, info.si_signo, QEMU_SI_FAULT, &info);
+
+  /*
+   * Set the CPU state to represent an interrupt. This is sufficient to cause
+   * the cpu_loop to break out and handle the queued exceptions.
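+   *
+   * Roughly, what happens next in QEMU's linux-user loop (a sketch of
+   * existing QEMU code, not part of this patch) is:
+   *
+   *   trapnr = cpu_exec(cs);           // returns with EXCP_INTERRUPT
+   *   process_pending_signals(env);    // delivers the queued TARGET_SIGSEGV
+   *
+   * so the guest either runs its own SIGSEGV handler or is terminated with
+   * a core dump, as if the bad access had faulted natively.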
+   */
+  CPUState *cs = env_cpu(env);
+  cs->exception_index = EXCP_INTERRUPT;
+  cpu_loop_exit(cs);
+
+  return 0;
+}
+
+static const char* signal_to_string[] = {
+  [SIGHUP] = "HUP",
+  [SIGINT] = "INT",
+  [SIGQUIT] = "QUIT",
+  [SIGILL] = "ILL",
+  [SIGTRAP] = "TRAP",
+  [SIGABRT] = "ABRT",
+  [SIGBUS] = "BUS",
+  [SIGFPE] = "FPE",
+  [SIGKILL] = "KILL",
+  [SIGUSR1] = "USR1",
+  [SIGSEGV] = "SEGV",
+  [SIGUSR2] = "USR2",
+  [SIGPIPE] = "PIPE",
+  [SIGALRM] = "ALRM",
+  [SIGTERM] = "TERM",
+#ifdef SIGSTKFLT
+  [SIGSTKFLT] = "STKFLT",
+#endif
+  [SIGCHLD] = "CHLD",
+  [SIGCONT] = "CONT",
+  [SIGSTOP] = "STOP",
+  [SIGTSTP] = "TSTP",
+  [SIGTTIN] = "TTIN",
+  [SIGTTOU] = "TTOU",
+  [SIGURG] = "URG",
+  [SIGXCPU] = "XCPU",
+  [SIGXFSZ] = "XFSZ",
+  [SIGVTALRM] = "VTALRM",
+  [SIGPROF] = "PROF",
+  [SIGWINCH] = "WINCH",
+  [SIGIO] = "IO",
+  [SIGPWR] = "PWR",
+  [SIGSYS] = "SYS",
+};
+
+int asan_giovese_deadly_signal(int signum, target_ulong addr, target_ulong pc,
+                               target_ulong bp, target_ulong sp) {
+
+  struct call_context ctx;
+  asan_giovese_populate_context(&ctx, pc);
+  const char* error_type = signal_to_string[signum];
+
+  fprintf(stderr,
+          ASAN_NAME_STR ":DEADLYSIGNAL\n"
+          "=================================================================\n"
+          ANSI_COLOR_HRED "==%d==ERROR: " ASAN_NAME_STR
+          ": %s on unknown address 0x" TARGET_FMT_lx " (pc 0x" TARGET_FMT_lx
+          " bp 0x" TARGET_FMT_lx " sp 0x" TARGET_FMT_lx " T%d)" ANSI_COLOR_RESET
+          "\n",
+          getpid(), error_type, addr, pc, bp, sp, ctx.tid);
+
+  size_t i;
+  for (i = 0; i < ctx.size; ++i) {
+
+    char* printable = asan_giovese_printaddr(ctx.addresses[i]);
+    if (printable)
+      fprintf(stderr, " #%zu 0x" TARGET_FMT_lx "%s\n", i, ctx.addresses[i],
+              printable);
+    else
+      fprintf(stderr, " #%zu 0x" TARGET_FMT_lx "\n", i, ctx.addresses[i]);
+
+  }
+
+  fputc('\n', stderr);
+  fprintf(stderr, ASAN_NAME_STR " cannot provide additional info.\n");
+
+  const char* printable_pc = asan_giovese_printaddr(pc);
+  if (!printable_pc) printable_pc = "";
+  fprintf(stderr,
+          "SUMMARY: " ASAN_NAME_STR
+          ": %s%s\n", error_type, printable_pc);
+
+  fprintf(stderr, "==%d==ABORTING\n", getpid());
+  return signum;
+
+}
+
+int asan_giovese_badfree(target_ulong addr, target_ulong pc) {
+
+  struct call_context ctx;
+  asan_giovese_populate_context(&ctx, pc);
+
+  fprintf(stderr,
+          "================================================================="
+          "\n" ANSI_COLOR_HRED "==%d==ERROR: " ASAN_NAME_STR
+          ": attempting free on address which was not malloc()-ed: 0x"
+          TARGET_FMT_lx " in thread T%d" ANSI_COLOR_RESET "\n", getpid(), addr,
+          ctx.tid);
+
+  size_t i;
+  for (i = 0; i < ctx.size; ++i) {
+
+    char* printable = asan_giovese_printaddr(ctx.addresses[i]);
+    if (printable)
+      fprintf(stderr, " #%zu 0x" TARGET_FMT_lx "%s\n", i, ctx.addresses[i],
+              printable);
+    else
+      fprintf(stderr, " #%zu 0x" TARGET_FMT_lx "\n", i, ctx.addresses[i]);
+
+  }
+
+  fputc('\n', stderr);
+  print_alloc_location(addr, addr);
+
+  const char* printable_pc = asan_giovese_printaddr(pc);
+  if (!printable_pc) printable_pc = "";
+  fprintf(stderr,
+          "SUMMARY: " ASAN_NAME_STR
+          ": bad-free %s\n", printable_pc);
+
+  fprintf(stderr, "==%d==ABORTING\n", getpid());
+  signal(SIGABRT, SIG_DFL);
+  abort();
+
+}
+
diff --git a/qemuafl/asan-giovese.h b/qemuafl/asan-giovese.h
new file mode 100644
index 0000000000000..499c52e2e0a3f
--- /dev/null
+++ b/qemuafl/asan-giovese.h
@@ -0,0 +1,155 @@
+/*******************************************************************************
+BSD 2-Clause License
+
+Copyright (c) 2020-2021, Andrea Fioraldi <andreafioraldi@gmail.com>
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+1. Redistributions of source code must retain the above copyright notice, this
+   list of conditions and the following disclaimer.
+
+2. Redistributions in binary form must reproduce the above copyright notice,
+   this list of conditions and the following disclaimer in the documentation
+   and/or other materials provided with the distribution.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
+FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*******************************************************************************/
+
+#ifndef __ASAN_GIOVESE_H__
+#define __ASAN_GIOVESE_H__
+
+#include <stdio.h>
+#include <stdint.h>
+#include <stddef.h>
+#include "common.h"
+
+#ifndef ASAN_NAME_STR
+#define ASAN_NAME_STR "AddressSanitizer"
+#endif
+
+#define HIGH_SHADOW_ADDR ((void*)0x02008fff7000ULL)
+#define LOW_SHADOW_ADDR ((void*)0x00007fff8000ULL)
+#define GAP_SHADOW_ADDR ((void*)0x00008fff7000ULL)
+
+#define HIGH_SHADOW_SIZE (0xdfff0000fffULL)
+#define LOW_SHADOW_SIZE (0xfffefffULL)
+#define GAP_SHADOW_SIZE (0x1ffffffffffULL)
+
+#define SHADOW_OFFSET (0x7fff8000ULL)
+
+/* shadow map byte values */
+#define ASAN_VALID 0x00
+#define ASAN_PARTIAL1 0x01
+#define ASAN_PARTIAL2 0x02
+#define ASAN_PARTIAL3 0x03
+#define ASAN_PARTIAL4 0x04
+#define ASAN_PARTIAL5 0x05
+#define ASAN_PARTIAL6 0x06
+#define ASAN_PARTIAL7 0x07
+#define ASAN_ARRAY_COOKIE 0xac
+#define ASAN_STACK_RZ 0xf0
+#define ASAN_STACK_LEFT_RZ 0xf1
+#define ASAN_STACK_MID_RZ 0xf2
+#define ASAN_STACK_RIGHT_RZ 0xf3
+#define ASAN_STACK_FREED 0xf5
+#define ASAN_STACK_OOSCOPE 0xf8
+#define ASAN_GLOBAL_RZ 0xf9
+#define ASAN_HEAP_RZ 0xe9
+#define ASAN_USER 0xf7
+#define ASAN_HEAP_LEFT_RZ 0xfa
+#define ASAN_HEAP_RIGHT_RZ 0xfb
+#define ASAN_HEAP_FREED 0xfd
+
+enum {
+
+  ACCESS_TYPE_LOAD,
+  ACCESS_TYPE_STORE,
+
+};
+
+struct call_context {
+
+  target_ulong* addresses;
+  uint32_t tid;
+  uint32_t size;
+
+};
+
+struct chunk_info {
+
+  target_ulong start;
+  target_ulong end;
+  struct call_context* alloc_ctx;
+  struct call_context* free_ctx; // NULL if chunk is allocated
+
+};
+
+extern void* __ag_high_shadow;
+extern void* __ag_low_shadow;
+
+// ------------------------------------------------------------------------- //
+// Virtual functions, you have to implement them
+// ------------------------------------------------------------------------- //
+
+///////////////////////////////////////////////////////////////////////////////
+void asan_giovese_populate_context(struct call_context* ctx, target_ulong pc);
+char* asan_giovese_printaddr(target_ulong addr);
+///////////////////////////////////////////////////////////////////////////////
+
+// ------------------------------------------------------------------------- //
+// Exposed functions
+// ------------------------------------------------------------------------- //
+
+void asan_giovese_init(void);
+
+// these have to be fast, ptr is a host pointer
+
+int asan_giovese_load1(void* ptr);
+int asan_giovese_load2(void* ptr);
+int asan_giovese_load4(void* ptr);
+int asan_giovese_load8(void* ptr);
+int asan_giovese_store1(void* ptr);
+int asan_giovese_store2(void* ptr);
+int asan_giovese_store4(void* ptr);
+int asan_giovese_store8(void* ptr);
+int asan_giovese_loadN(void* ptr, size_t n);
+int asan_giovese_storeN(void* ptr, size_t n);
+int asan_giovese_guest_loadN(target_ulong addr, size_t n);
+int asan_giovese_guest_storeN(target_ulong addr, size_t n);
+
+int asan_giovese_poison_region(void* ptr, size_t n,
+                               uint8_t poison_byte);
+int asan_giovese_user_poison_region(void* ptr, size_t n);
+int asan_giovese_unpoison_region(void* ptr, size_t n);
+
+int asan_giovese_poison_guest_region(target_ulong addr, size_t n, uint8_t poison_byte);
+int asan_giovese_user_poison_guest_region(target_ulong addr, size_t n);
+int asan_giovese_unpoison_guest_region(target_ulong addr, size_t n);
+
+// addr is a guest pointer
+
+int asan_giovese_report_and_crash(int access_type, target_ulong addr, size_t n,
+                                  CPUArchState *env);
+
+int asan_giovese_deadly_signal(int signum, target_ulong addr, target_ulong pc,
+                               target_ulong bp, target_ulong sp);
+
+int asan_giovese_badfree(target_ulong addr, target_ulong pc);
+
+struct chunk_info* asan_giovese_alloc_search(target_ulong query);
+void asan_giovese_alloc_insert(target_ulong start, target_ulong end,
+                               struct call_context* alloc_ctx);
+
+#endif
+
diff --git a/qemuafl/common.h b/qemuafl/common.h
new file mode 100644
index 0000000000000..6384a07cc08ef
--- /dev/null
+++ b/qemuafl/common.h
@@ -0,0 +1,198 @@
+/*
+   american fuzzy lop++ - high-performance binary-only instrumentation
+   -------------------------------------------------------------------
+
+   Originally written by Andrew Griffiths <agriffiths@google.com> and
+   Michal Zalewski <lcamtuf@google.com>
+
+   TCG instrumentation and block chaining support by Andrea Biondo
+   <andrea.biondo965@gmail.com>
+
+   QEMU 3.1.1 port, TCG thread-safety, CompareCoverage and NeverZero
+   counters by Andrea Fioraldi <andreafioraldi@gmail.com>
+
+   Copyright 2015, 2016, 2017 Google Inc. All rights reserved.
+   Copyright 2019-2020 AFLplusplus Project. All rights reserved.
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at:
+
+     http://www.apache.org/licenses/LICENSE-2.0
+
+   This code is a shim patched into the separately-distributed source
+   code of QEMU 3.1.0. It leverages the built-in QEMU tracing functionality
+   to implement AFL-style instrumentation and to take care of the remaining
+   parts of the AFL fork server logic.
+
+   The resulting QEMU binary is essentially a standalone instrumentation
+   tool; for an example of how to leverage it for other purposes, you can
+   have a look at afl-showmap.c.
+
+ */
+
+#ifndef __AFL_QEMU_COMMON
+#define __AFL_QEMU_COMMON
+
+#include "imported/config.h"
+#include "imported/types.h"
+#include "imported/cmplog.h"
+#include "api.h"
+
+/* We use one additional file descriptor to relay "needs translation"
+   messages between the child and the fork server.
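+
+   As a rough sketch of the protocol (the request structures live in the
+   patched accel/tcg/cpu-exec.c): when the forked child translates a block
+   the server has never seen, it sends a small descriptor of that block,
+
+     write(TSL_FD, &t, sizeof(t));
+
+   and the fork server, which never execs the target, translates the same
+   block in its own address space, so later children fork off with an
+   already-warm translation cache.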
*/ + +#define TSL_FD (FORKSRV_FD - 1) + +#define AFL_G2H g2h_untagged + +#if defined(TARGET_X86_64) +#define api_regs x86_64_regs +#elif defined(TARGET_I386) +#define api_regs x86_regs +#elif defined(TARGET_AARCH64) +#define api_regs arm64_regs +#elif defined(TARGET_ARM) +#define api_regs arm_regs +/* MIPS_PATCH */ +#elif defined(TARGET_MIPS) || defined(TARGET_MIPS64) +#define api_regs mips_regs +#elif defined(TARGET_PPC) +#define api_regs ppc_regs +#else +struct generic_api_regs { int v; }; +#define api_regs generic_api_regs +#endif + +/* NeverZero */ + +#if (defined(__x86_64__) || defined(__i386__)) && defined(AFL_QEMU_NOT_ZERO) + #define INC_AFL_AREA(loc) \ + asm volatile( \ + "addb $1, (%0, %1, 1)\n" \ + "adcb $0, (%0, %1, 1)\n" \ + : /* no out */ \ + : "r"(afl_area_ptr), "r"(loc) \ + : "memory", "eax") +#else + #define INC_AFL_AREA(loc) afl_area_ptr[loc]++ +#endif + +typedef void (*afl_persistent_hook_fn)(struct api_regs *regs, + uint64_t guest_base, + uint8_t *input_buf, + uint32_t input_buf_len); + +/* Declared in afl-qemu-cpu-inl.h */ + +// This structure is used for tracking which +// code is to be instrumented via afl_instr_code. +struct vmrange { + target_ulong start, end; + char* name; + bool exclude; // Exclude this region rather than include it + struct vmrange* next; +}; + +extern struct vmrange* afl_instr_code; +extern unsigned char *afl_area_ptr; +extern unsigned int afl_inst_rms; +extern abi_ulong afl_entry_point, afl_exit_point, afl_start_code, afl_end_code; +extern abi_ulong afl_persistent_addr; +extern abi_ulong afl_persistent_ret_addr; +extern u8 afl_compcov_level; +extern unsigned char afl_fork_child; +extern unsigned int afl_forksrv_pid; +extern unsigned char is_persistent; +extern target_long persistent_stack_offset; +extern unsigned char persistent_first_pass; +extern unsigned char persistent_exits; +extern unsigned char persistent_save_gpr; +extern unsigned char persistent_memory; +extern int persisent_retaddr_offset; +extern int use_qasan; +extern __thread int cur_block_is_good; +extern struct api_regs saved_regs; + +extern u8 * shared_buf; +extern u32 *shared_buf_len; +extern u8 sharedmem_fuzzing; + +extern afl_persistent_hook_fn afl_persistent_hook_ptr; + +extern __thread abi_ulong afl_prev_loc; + +extern struct cmp_map *__afl_cmp_map; + +void afl_setup(void); +void afl_forkserver(CPUState *cpu); +void afl_persistent_iter(CPUArchState *env); +void afl_persistent_loop(CPUArchState *env); + +// void afl_debug_dump_saved_regs(void); + +void afl_gen_tcg_plain_call(void *func); + +void afl_float_compcov_log_32(target_ulong cur_loc, float32 arg1, float32 arg2, + void *status); +void afl_float_compcov_log_64(target_ulong cur_loc, float64 arg1, float64 arg2, + void *status); +void afl_float_compcov_log_80(target_ulong cur_loc, floatx80 arg1, + floatx80 arg2); + +abi_ulong afl_get_brk(void); +abi_ulong afl_set_brk(abi_ulong new_brk); + +#if defined(TARGET_X86_64) || defined(TARGET_I386) || defined(TARGET_AARCH64) || defined(TARGET_ARM) || defined(TARGET_MIPS) || defined(TARGET_MIPS64) || defined(TARGET_PPC) +void afl_save_regs(struct api_regs* regs, CPUArchState* env); +void afl_restore_regs(struct api_regs* regs, CPUArchState* env); +#else +static void afl_save_regs(struct api_regs* regs, CPUArchState* env) {} +static void afl_restore_regs(struct api_regs* regs, CPUArchState* env) {} +#endif + +void afl_target_unmap_trackeds(void); + +int open_self_maps(void *cpu_env, int fd); + +TranslationBlock *afl_gen_edge(CPUState *cpu, unsigned long afl_id); + +/* Check if an 
address is valid in the current mapping */ + +static inline int is_valid_addr(target_ulong addr) { + + int flags; + target_ulong page; + + page = addr & TARGET_PAGE_MASK; + + flags = page_get_flags(page); + if (!(flags & PAGE_VALID) || !(flags & PAGE_READ)) return 0; + + return 1; + +} + +static inline int afl_must_instrument(target_ulong addr) { + + // Reject any exclusion regions + for (struct vmrange* n = afl_instr_code; n; n = n->next) { + if (n->exclude && addr < n->end && addr >= n->start) + return 0; + } + + // Check for inclusion in instrumentation regions + if (addr < afl_end_code && addr >= afl_start_code) + return 1; + + for (struct vmrange* n = afl_instr_code; n; n = n->next) { + if (!n->exclude && addr < n->end && addr >= n->start) + return 1; + } + + return 0; + +} + +#endif + diff --git a/qemuafl/cpu-translate.h b/qemuafl/cpu-translate.h new file mode 100644 index 0000000000000..bb3e9598dab0d --- /dev/null +++ b/qemuafl/cpu-translate.h @@ -0,0 +1,177 @@ +/* + american fuzzy lop++ - high-performance binary-only instrumentation + ------------------------------------------------------------------- + + Originally written by Andrew Griffiths and + Michal Zalewski + + TCG instrumentation and block chaining support by Andrea Biondo + + + QEMU 3.1.1 port, TCG thread-safety, CompareCoverage and NeverZero + counters by Andrea Fioraldi + + Copyright 2015, 2016, 2017 Google Inc. All rights reserved. + Copyright 2019-2020 AFLplusplus Project. All rights reserved. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at: + + http://www.apache.org/licenses/LICENSE-2.0 + + This code is a shim patched into the separately-distributed source + code of QEMU 3.1.0. It leverages the built-in QEMU tracing functionality + to implement AFL-style instrumentation and to take care of the remaining + parts of the AFL fork server logic. + + The resulting QEMU binary is essentially a standalone instrumentation + tool; for an example of how to leverage it for other purposes, you can + have a look at afl-showmap.c. 
+
+ */
+
+#include "common.h"
+#include "tcg/tcg.h"
+#include "tcg/tcg-op.h"
+
+uint32_t afl_hash_ip(uint64_t);
+
+#if TARGET_LONG_BITS == 64
+  #define _DEFAULT_MO MO_64
+#else
+  #define _DEFAULT_MO MO_32
+#endif
+
+static void afl_gen_compcov(target_ulong cur_loc, TCGv arg1, TCGv arg2,
+                            MemOp ot, int is_imm) {
+
+  if (!afl_must_instrument(cur_loc)) return;
+
+  if (__afl_cmp_map) {
+
+    cur_loc = (uintptr_t)(afl_hash_ip((uint64_t)cur_loc));
+    cur_loc &= (CMP_MAP_W - 1);
+
+    TCGv cur_loc_v = tcg_const_tl(cur_loc);
+
+    switch (ot & MO_SIZE) {
+
+      case MO_64:
+        gen_helper_afl_cmplog_64(cur_loc_v, arg1, arg2);
+        break;
+      case MO_32:
+        gen_helper_afl_cmplog_32(cur_loc_v, arg1, arg2);
+        break;
+      case MO_16:
+        gen_helper_afl_cmplog_16(cur_loc_v, arg1, arg2);
+        break;
+      case MO_8:
+        gen_helper_afl_cmplog_8(cur_loc_v, arg1, arg2);
+        break;
+      default:
+        break;
+
+    }
+
+    tcg_temp_free(cur_loc_v);
+
+  } else if (afl_compcov_level) {
+
+    if (!is_imm && afl_compcov_level < 2) return;
+
+    cur_loc = (uintptr_t)(afl_hash_ip((uint64_t)cur_loc));
+    cur_loc &= (MAP_SIZE - 1);
+
+    if (cur_loc >= afl_inst_rms) return;
+
+    TCGv cur_loc_v = tcg_const_tl(cur_loc);
+
+    switch (ot & MO_SIZE) {
+
+      case MO_64:
+        gen_helper_afl_compcov_64(cur_loc_v, arg1, arg2);
+        break;
+      case MO_32:
+        gen_helper_afl_compcov_32(cur_loc_v, arg1, arg2);
+        break;
+      case MO_16:
+        gen_helper_afl_compcov_16(cur_loc_v, arg1, arg2);
+        break;
+      default:
+        break;
+
+    }
+
+    tcg_temp_free(cur_loc_v);
+
+  }
+
+}
+
+/* Routines for debug */
+/*
+static void log_x86_saved_gpr(void) {
+
+  static const char reg_names[CPU_NB_REGS][4] = {
+
+#ifdef TARGET_X86_64
+    [R_EAX] = "rax",
+    [R_EBX] = "rbx",
+    [R_ECX] = "rcx",
+    [R_EDX] = "rdx",
+    [R_ESI] = "rsi",
+    [R_EDI] = "rdi",
+    [R_EBP] = "rbp",
+    [R_ESP] = "rsp",
+    [8] = "r8",
+    [9] = "r9",
+    [10] = "r10",
+    [11] = "r11",
+    [12] = "r12",
+    [13] = "r13",
+    [14] = "r14",
+    [15] = "r15",
+#else
+    [R_EAX] = "eax",
+    [R_EBX] = "ebx",
+    [R_ECX] = "ecx",
+    [R_EDX] = "edx",
+    [R_ESI] = "esi",
+    [R_EDI] = "edi",
+    [R_EBP] = "ebp",
+    [R_ESP] = "esp",
+#endif
+
+  };
+
+  int i;
+  for (i = 0; i < CPU_NB_REGS; ++i) {
+
+    fprintf(stderr, "%s = %lx\n", reg_names[i], persistent_saved_gpr[i]);
+
+  }
+
+}
+
+static void log_x86_sp_content(void) {
+
+  fprintf(stderr, ">> SP = %lx -> %lx\n", persistent_saved_gpr[R_ESP],
+*(unsigned long*)persistent_saved_gpr[R_ESP]);
+
+}*/
+
+static void restore_sp_for_persistent(TCGv sp) {
+
+  if (!persistent_save_gpr && afl_persistent_ret_addr == 0) {
+
+    TCGv_ptr stack_off_ptr = tcg_const_ptr(&persistent_stack_offset);
+    TCGv stack_off = tcg_temp_new();
+    tcg_gen_ld_tl(stack_off, stack_off_ptr, 0);
+    tcg_gen_sub_tl(sp, sp, stack_off);
+    tcg_temp_free(stack_off);
+
+  }
+
+}
+
diff --git a/qemuafl/imported/afl_hash.h b/qemuafl/imported/afl_hash.h
new file mode 100644
index 0000000000000..c7075a08071bb
--- /dev/null
+++ b/qemuafl/imported/afl_hash.h
@@ -0,0 +1,74 @@
+
+#ifndef _AFL_HASH_H
+
+#define _AFL_HASH_H
+
+/* This is an excerpt of xxhash/XXH3 to prevent colliding with the xxhash that
+   is in QEMU */
+
+#pragma GCC optimize 3
+
+#include <stdint.h>
+#include <stddef.h>
+#include <stdlib.h>
+
+uint32_t afl_hash_ip(uint64_t ip);
+uint64_t AFL_readLE64(const void *memPtr);
+uint64_t AFL_rrmxmx(uint64_t h64, uint64_t len);
+
+#define AFL_rotl64(x, r) (((x) << (r)) | ((x) >> (64 - (r))))
+
+inline uint64_t AFL_readLE64(const void *memPtr) {
+
+  const uint8_t *bytePtr = (const uint8_t *)memPtr;
+  return bytePtr[0] | ((uint64_t)bytePtr[1] << 8) | ((uint64_t)bytePtr[2] << 16) |
+         ((uint64_t)bytePtr[3] << 24) |
((uint64_t)bytePtr[4] << 32) | + ((uint64_t)bytePtr[5] << 40) | ((uint64_t)bytePtr[6] << 48) | + ((uint64_t)bytePtr[7] << 56); + +} + +inline uint64_t AFL_rrmxmx(uint64_t h64, uint64_t len) { + + /* this mix is inspired by Pelle Evensen's rrmxmx */ + h64 ^= AFL_rotl64(h64, 49) ^ AFL_rotl64(h64, 24); + h64 *= 0x9FB21C651E98DF25ULL; + h64 ^= (h64 >> 35) + len; + h64 *= 0x9FB21C651E98DF25ULL; + return h64 ^ (h64 >> 28); + +} + +inline uint32_t afl_hash_ip(uint64_t ip) { + + const uint8_t secret[] = { + + 0xb8, 0xfe, 0x6c, 0x39, 0x23, 0xa4, 0x4b, 0xbe, 0x7c, 0x01, 0x81, 0x2c, + 0xf7, 0x21, 0xad, 0x1c, 0xde, 0xd4, 0x6d, 0xe9, 0x83, 0x90, 0x97, 0xdb, + 0x72, 0x40, 0xa4, 0xa4, 0xb7, 0xb3, 0x67, 0x1f, 0xcb, 0x79, 0xe6, 0x4e, + 0xcc, 0xc0, 0xe5, 0x78, 0x82, 0x5a, 0xd0, 0x7d, 0xcc, 0xff, 0x72, 0x21, + 0xb8, 0x08, 0x46, 0x74, 0xf7, 0x43, 0x24, 0x8e, 0xe0, 0x35, 0x90, 0xe6, + 0x81, 0x3a, 0x26, 0x4c, 0x3c, 0x28, 0x52, 0xbb, 0x91, 0xc3, 0x00, 0xcb, + 0x88, 0xd0, 0x65, 0x8b, 0x1b, 0x53, 0x2e, 0xa3, 0x71, 0x64, 0x48, 0x97, + 0xa2, 0x0d, 0xf9, 0x4e, 0x38, 0x19, 0xef, 0x46, 0xa9, 0xde, 0xac, 0xd8, + 0xa8, 0xfa, 0x76, 0x3f, 0xe3, 0x9c, 0x34, 0x3f, 0xf9, 0xdc, 0xbb, 0xc7, + 0xc7, 0x0b, 0x4f, 0x1d, 0x8a, 0x51, 0xe0, 0x4b, 0xcd, 0xb4, 0x59, 0x31, + 0xc8, 0x9f, 0x7e, 0xc9, 0xd9, 0x78, 0x73, 0x64, 0xea, 0xc5, 0xac, 0x83, + 0x34, 0xd3, 0xeb, 0xc3, 0xc5, 0x81, 0xa0, 0xff, 0xfa, 0x13, 0x63, 0xeb, + 0x17, 0x0d, 0xdd, 0x51, 0xb7, 0xf0, 0xda, 0x49, 0xd3, 0x16, 0x55, 0x26, + 0x29, 0xd4, 0x68, 0x9e, 0x2b, 0x16, 0xbe, 0x58, 0x7d, 0x47, 0xa1, 0xfc, + 0x8f, 0xf8, 0xb8, 0xd1, 0x7a, 0xd0, 0x31, 0xce, 0x45, 0xcb, 0x3a, 0x8f, + 0x95, 0x16, 0x04, 0x28, 0xaf, 0xd7, 0xfb, 0xca, 0xbb, 0x4b, 0x40, 0x7e, + +}; + + uint32_t const input1 = (uint32_t)(ip & 0xffffffff); + uint32_t const input2 = (uint32_t)(ip >> 32); + uint64_t const bitflip = (AFL_readLE64(secret + 8) ^ AFL_readLE64(secret + 16)); + uint64_t const input64 = input2 + (((uint64_t)input1) << 32); + uint64_t const keyed = input64 ^ bitflip; + return AFL_rrmxmx(keyed, 8); + +} + +#endif diff --git a/qemuafl/imported/cmplog.h b/qemuafl/imported/cmplog.h new file mode 100644 index 0000000000000..e45b1092d45d5 --- /dev/null +++ b/qemuafl/imported/cmplog.h @@ -0,0 +1,92 @@ +/* + american fuzzy lop++ - cmplog header + ------------------------------------ + + Originally written by Michal Zalewski + + Forkserver design by Jann Horn + + Now maintained by Marc Heuse , + Heiko Eissfeldt , + Andrea Fioraldi , + Dominik Maier + + Copyright 2016, 2017 Google Inc. All rights reserved. + Copyright 2019-2024 AFLplusplus Project. All rights reserved. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at: + + https://www.apache.org/licenses/LICENSE-2.0 + + Shared code to handle the shared memory. This is used by the fuzzer + as well the other components like afl-tmin, afl-showmap, etc... 
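+
+   In this QEMU fork the map is populated at translation time:
+   afl_gen_compcov() in qemuafl/cpu-translate.h emits the
+   gen_helper_afl_cmplog_{8,16,32,64} calls that fill a cmp_header and its
+   cmp_operands slots for each instrumented comparison site (a note on this
+   patch; the header itself is imported verbatim from AFL++).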
+ + */ + +#ifndef _AFL_CMPLOG_H +#define _AFL_CMPLOG_H + +#include "config.h" + +#define CMPLOG_LVL_MAX 3 + +#define CMP_MAP_W 65536 +#define CMP_MAP_H 32 +#define CMP_MAP_RTN_H (CMP_MAP_H / 2) + +#define SHAPE_BYTES(x) (x + 1) + +#define CMP_TYPE_INS 0 +#define CMP_TYPE_RTN 1 + +struct cmp_header { // 16 bit = 2 bytes + + unsigned hits : 6; // up to 63 entries, we have CMP_MAP_H = 32 + unsigned shape : 5; // 31+1 bytes max + unsigned type : 1; // 2: cmp, rtn + unsigned attribute : 4; // 16 for arithmetic comparison types + +} __attribute__((packed)); + +struct cmp_operands { + + u64 v0; + u64 v0_128; + u64 v0_256_0; // u256 is unsupported by any compiler for now, so future use + u64 v0_256_1; + u64 v1; + u64 v1_128; + u64 v1_256_0; + u64 v1_256_1; + u8 unused[8]; // 2 bits could be used for "is constant operand" + +} __attribute__((packed)); + +struct cmpfn_operands { + + u8 v0[32]; + u8 v1[32]; + u8 v0_len; + u8 v1_len; + u8 unused[6]; // 2 bits could be used for "is constant operand" + +} __attribute__((packed)); + +typedef struct cmp_operands cmp_map_list[CMP_MAP_H]; + +struct cmp_map { + + struct cmp_header headers[CMP_MAP_W]; + struct cmp_operands log[CMP_MAP_W][CMP_MAP_H]; + +}; + +/* Execs the child */ + +struct afl_forkserver; +void cmplog_exec_child(struct afl_forkserver *fsrv, char **argv); + +#endif + diff --git a/qemuafl/imported/config.h b/qemuafl/imported/config.h new file mode 100644 index 0000000000000..5ac0540d4efd3 --- /dev/null +++ b/qemuafl/imported/config.h @@ -0,0 +1,565 @@ +/* + american fuzzy lop++ - vaguely configurable bits + ------------------------------------------------ + + Originally written by Michal Zalewski + + Now maintained by Marc Heuse , + Dominik Maier + Andrea Fioraldi , + Heiko Eissfeldt , + + Copyright 2016, 2017 Google Inc. All rights reserved. + Copyright 2019-2024 AFLplusplus Project. All rights reserved. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at: + + https://www.apache.org/licenses/LICENSE-2.0 + + */ + +#ifndef _HAVE_CONFIG_H +#define _HAVE_CONFIG_H + +/* Version string: */ + +// c = release, a = volatile github dev, e = experimental branch +#define VERSION "++4.32a" + +/****************************************************** + * * + * Settings that may be of interest to power users: * + * * + ******************************************************/ + +/* Default shared memory map size. Most targets just need a coverage map + between 20-250kb. Plus there is an auto-detection feature in afl-fuzz. + However if a target has problematic constructors and init arrays then + this can fail. Hence afl-fuzz deploys a larger default map. The largest + map seen so far is the xlsx fuzzer for libreoffice which is 5MB. + At runtime this value can be overridden via AFL_MAP_SIZE. + Default: 8MB (defined in bytes) */ +#define DEFAULT_SHMEM_SIZE (8 * 1024 * 1024) + +/* Default time until when no more coverage finds are happening afl-fuzz + switches to exploitation mode. It automatically switches back when new + coverage is found. 
+   Default: 1000 (seconds) */
+#define STRATEGY_SWITCH_TIME 1000
+
+/* Default file permission umode when creating files (default: 0600) */
+#define DEFAULT_PERMISSION 0600
+
+#ifdef __APPLE__
+  #include <TargetConditionals.h>
+  #if TARGET_OS_IOS
+    #undef DEFAULT_PERMISSION
+    #define DEFAULT_PERMISSION 0666
+  #endif
+#endif
+#ifdef __ANDROID__
+  #undef DEFAULT_PERMISSION
+  #define DEFAULT_PERMISSION 0666
+#endif
+
+/* SkipDet's global configuration */
+
+#define MINIMAL_BLOCK_SIZE 64
+#define SMALL_DET_TIME (60 * 1000 * 1000U)
+#define MAXIMUM_INF_EXECS (16 * 1024U)
+#define MAXIMUM_QUICK_EFF_EXECS (64 * 1024U)
+#define THRESHOLD_DEC_TIME (20 * 60 * 1000U)
+
+/* Set the probability of selecting eff_bytes 3 times more than the original;
+   now disabled */
+#define EFF_HAVOC_RATE 3
+
+/* CMPLOG/REDQUEEN TUNING
+ *
+ * Here you can modify tuning and solving options for CMPLOG.
+ * Note that these are run-time options for afl-fuzz, no target
+ * recompilation required.
+ *
+ */
+
+/* If a redqueen pass finds more than one solution, try to combine them? */
+#define CMPLOG_COMBINE
+
+/* Minimum % of the corpus to perform cmplog on. Default: 5% */
+#define CMPLOG_CORPUS_PERCENT 5U
+
+/* Number of potential positions from which we decide if cmplog becomes
+   useless, default 12288 */
+#define CMPLOG_POSITIONS_MAX (12 * 1024)
+
+/* Maximum allowed fails per CMP value. Default: 96 */
+#define CMPLOG_FAIL_MAX 96
+
+/*
+ * Effective fuzzing with selective feeding inputs
+ */
+
+#define MAX_EXTRA_SAN_BINARY 4
+
+/* -------------------------------------*/
+/* Now non-cmplog configuration options */
+/* -------------------------------------*/
+
+/* If a persistent target keeps state and found crashes are not reproducible,
+   enable this option and set the AFL_PERSISTENT_RECORD env variable to a
+   number. That number of test cases, up to and including the crash case,
+   will be kept and written to the crash/ directory as RECORD:... files.
+   Note that every crash will be written, not only unique ones! */
+
+// #define AFL_PERSISTENT_RECORD
+
+/* Adds support in compiler-rt to replay persistent records in @@-style
+ * harnesses */
+
+// #define AFL_PERSISTENT_REPLAY_ARGPARSE
+
+/* console output colors: There are three ways to configure this behavior
+ * 1. default: colored outputs fixed on: defined USE_COLOR && defined
+ *    ALWAYS_COLORED The env var. AFL_NO_COLOR will have no effect
+ * 2. defined USE_COLOR && !defined ALWAYS_COLORED
+ *    -> depending on env var AFL_NO_COLOR=1 colors can be switched off
+ *    at run-time. Default is to use colors.
+ * 3. colored outputs fixed off: !defined USE_COLOR
+ *    The env var. AFL_NO_COLOR will have no effect
+ */
+
+/* Comment out to disable terminal colors (note that this makes afl-analyze
+   a lot less nice): */
+
+#define USE_COLOR
+
+#ifdef USE_COLOR
+  /* Comment in to always enable terminal colors */
+  /* Comment out to enable runtime controlled terminal colors via AFL_NO_COLOR
+   */
+  #define ALWAYS_COLORED 1
+#endif
+
+/* StatsD config
+   Config can be adjusted via AFL_STATSD_HOST and AFL_STATSD_PORT environment
+   variable.
+*/
+#define STATSD_UPDATE_SEC 1
+#define STATSD_DEFAULT_PORT 8125
+#define STATSD_DEFAULT_HOST "127.0.0.1"
+
+/* If you want to have the original AFL internal memory corruption checks.
+   Disabled by default for speed. It is better to use "make ASAN_BUILD=1". */
+
+// #define _WANT_ORIGINAL_AFL_ALLOC
+
+/* Comment out to disable fancy boxes and use poor man's 7-bit UI: */
+
+#ifndef DISABLE_FANCY
+  #define FANCY_BOXES
+#endif
+
+/* Default timeout for fuzzed code (milliseconds).
This is the upper bound, + also used for detecting hangs; the actual value is auto-scaled: */ + +#define EXEC_TIMEOUT 1000U + +/* Timeout rounding factor when auto-scaling (milliseconds): */ + +#define EXEC_TM_ROUND 20U + +/* 64bit arch MACRO */ +#if (defined(__x86_64__) || defined(__arm64__) || defined(__aarch64__)) + #define WORD_SIZE_64 1 +#endif + +/* Default memory limit for child process (MB) 0 = disabled : */ + +#define MEM_LIMIT 0U + +/* Default memory limit when running in QEMU mode (MB) 0 = disabled : */ + +#define MEM_LIMIT_QEMU 0U + +/* Default memory limit when running in Unicorn mode (MB) 0 = disabled : */ + +#define MEM_LIMIT_UNICORN 0U + +/* Number of calibration cycles per every new test case (and for test + cases that show variable behavior): */ + +#define CAL_CYCLES_FAST 3U +#define CAL_CYCLES 7U +#define CAL_CYCLES_LONG 12U + +/* Number of subsequent timeouts before abandoning an input file: */ + +#define TMOUT_LIMIT 250U + +/* Maximum number of unique hangs or crashes to record: */ + +#define KEEP_UNIQUE_HANG 500U +#define KEEP_UNIQUE_CRASH 10000U + +/* Baseline number of random tweaks during a single 'havoc' stage: */ + +#define HAVOC_CYCLES 256U +#define HAVOC_CYCLES_INIT 1024U + +/* Maximum multiplier for the above (should be a power of two, beware + of 32-bit int overflows): */ + +#define HAVOC_MAX_MULT 64U +#define HAVOC_MAX_MULT_MOPT 64U + +/* Absolute minimum number of havoc cycles (after all adjustments): */ + +#define HAVOC_MIN 12U + +/* Power Schedule Divisor */ +#define POWER_BETA 1U +#define MAX_FACTOR (POWER_BETA * 32) + +/* Maximum stacking for havoc-stage tweaks. The actual value is calculated + like this: + + n = random between 1 and HAVOC_STACK_POW2 + stacking = 2^n + + In other words, the default (n = 4) produces 2, 4, 8, 16 + stacked tweaks: */ + +#define HAVOC_STACK_POW2 4U + +/* Caps on block sizes for cloning and deletion operations. Each of these + ranges has a 33% probability of getting picked, except for the first + two cycles where smaller blocks are favored: */ + +#define HAVOC_BLK_SMALL 32U +#define HAVOC_BLK_MEDIUM 128U +#define HAVOC_BLK_LARGE 1500U + +/* Extra-large blocks, selected very rarely (<5% of the time): */ + +#define HAVOC_BLK_XL 32768U + +/* Probabilities of skipping non-favored entries in the queue, expressed as + percentages: */ + +#define SKIP_TO_NEW_PROB 99 /* ...when there are new, pending favorites */ +#define SKIP_NFAV_OLD_PROB 95 /* ...no new favs, cur entry already fuzzed */ +#define SKIP_NFAV_NEW_PROB 75 /* ...no new favs, cur entry not fuzzed yet */ + +/* Splicing cycle count: */ + +#define SPLICE_CYCLES 15 + +/* Nominal per-splice havoc cycle length: */ + +#define SPLICE_HAVOC 32 + +/* Maximum offset for integer addition / subtraction stages: */ + +#define ARITH_MAX 35 + +/* Limits for the test case trimmer. The absolute minimum chunk size; and + the starting and ending divisors for chopping up the input file: */ + +#define TRIM_MIN_BYTES 4 +#define TRIM_START_STEPS 16 +#define TRIM_END_STEPS 1024 + +/* Maximum size of input file, in bytes (keep under 100MB, default 1MB): + (note that if this value is changed, several areas in afl-cc.c, afl-fuzz.c + and afl-fuzz-state.c have to be changed as well! 
*/
+
+#define MAX_FILE (1 * 1024 * 1024L)
+
+/* The same, for the test case minimizer: */
+
+#define TMIN_MAX_FILE (10 * 1024 * 1024L)
+
+/* Block normalization steps for afl-tmin: */
+
+#define TMIN_SET_MIN_SIZE 4
+#define TMIN_SET_STEPS 128
+
+/* Maximum dictionary token size (-x), in bytes: */
+
+#define MAX_DICT_FILE 128
+
+/* Length limits for auto-detected dictionary tokens: */
+
+#define MIN_AUTO_EXTRA 3
+#define MAX_AUTO_EXTRA 32
+
+/* Maximum number of user-specified dictionary tokens to use in deterministic
+   steps; past this point, the "extras/user" step will still be carried out,
+   but with proportionally lower odds: */
+
+#define MAX_DET_EXTRAS 256
+
+/* Maximum number of auto-extracted dictionary tokens to actually use in
+   fuzzing (first value), and to keep in memory as candidates. The latter
+   should be much higher than the former. */
+
+#define USE_AUTO_EXTRAS 4096
+#define MAX_AUTO_EXTRAS (USE_AUTO_EXTRAS * 8)
+
+/* Scaling factor for the effector map used to skip some of the more
+   expensive deterministic steps. The actual divisor is set to
+   2^EFF_MAP_SCALE2 bytes: */
+
+#define EFF_MAP_SCALE2 3
+
+/* Minimum input file length at which the effector logic kicks in: */
+
+#define EFF_MIN_LEN 128
+
+/* Maximum effector density past which everything is just fuzzed
+   unconditionally (%): */
+
+#define EFF_MAX_PERC 90
+
+/* UI refresh frequency (Hz): */
+
+#define UI_TARGET_HZ 5
+
+/* Fuzzer stats file, queue stats and plot update intervals (sec): */
+
+#define STATS_UPDATE_SEC 60
+#define PLOT_UPDATE_SEC 5
+#define QUEUE_UPDATE_SEC 1800
+
+/* Smoothing divisor for CPU load and exec speed stats (1 - no smoothing). */
+
+#define AVG_SMOOTHING 16
+
+/* Sync interval (every n havoc cycles): */
+
+#define SYNC_INTERVAL 8
+
+/* Sync time (minimum time between syncing in ms, time is halved for -M main
+   nodes) - default is 20 minutes: */
+
+#define SYNC_TIME (20 * 60 * 1000)
+
+/* Output directory reuse grace period (minutes): */
+
+#define OUTPUT_GRACE 25
+
+/* Uncomment to use simple file names (id_NNNNNN): */
+
+// #define SIMPLE_FILES
+
+/* List of interesting values to use in fuzzing. */
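+
+/* These values are boundary cases (signed/unsigned overflow edges,
+   off-by-one sizes for common buffers) that afl-fuzz substitutes into the
+   input during the deterministic "interesting values" stages and reuses,
+   via the same tables, in havoc mutations. */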
+
+#define INTERESTING_8 \
+  -128,    /* Overflow signed 8-bit when decremented */ \
+  -1,      /*                                        */ \
+  0,       /*                                        */ \
+  1,       /*                                        */ \
+  16,      /* One-off with common buffer size        */ \
+  32,      /* One-off with common buffer size        */ \
+  64,      /* One-off with common buffer size        */ \
+  100,     /* One-off with common buffer size        */ \
+  127      /* Overflow signed 8-bit when incremented */
+
+#define INTERESTING_8_LEN 9
+
+#define INTERESTING_16 \
+  -32768,  /* Overflow signed 16-bit when decremented */ \
+  -129,    /* Overflow signed 8-bit                   */ \
+  128,     /* Overflow signed 8-bit                   */ \
+  255,     /* Overflow unsig 8-bit when incremented   */ \
+  256,     /* Overflow unsig 8-bit                    */ \
+  512,     /* One-off with common buffer size         */ \
+  1000,    /* One-off with common buffer size         */ \
+  1024,    /* One-off with common buffer size         */ \
+  4096,    /* One-off with common buffer size         */ \
+  32767    /* Overflow signed 16-bit when incremented */
+
+#define INTERESTING_16_LEN 10
+
+#define INTERESTING_32 \
+  -2147483648LL, /* Overflow signed 32-bit when decremented */ \
+  -100663046,    /* Large negative number (endian-agnostic) */ \
+  -32769,        /* Overflow signed 16-bit                  */ \
+  32768,         /* Overflow signed 16-bit                  */ \
+  65535,         /* Overflow unsig 16-bit when incremented  */ \
+  65536,         /* Overflow unsig 16 bit                   */ \
+  100663045,     /* Large positive number (endian-agnostic) */ \
+  2139095040,    /* float infinity                          */ \
+  2147483647     /* Overflow signed 32-bit when incremented */
+
+#define INTERESTING_32_LEN 9
+
+/***********************************************************
+ *                                                         *
+ *  Really exotic stuff you probably don't want to touch:  *
+ *                                                         *
+ ***********************************************************/
+
+/* Call count interval between reseeding the PRNG from /dev/urandom: */
+
+#define RESEED_RNG 2500000
+
+/* The default maximum testcase cache size in MB, 0 = disable.
+   A value between 50 and 250 is a good default value. Note that the
+   number of entries will be auto assigned if not specified via the
+   AFL_TESTCACHE_ENTRIES env variable */
+
+#define TESTCASE_CACHE_SIZE 50
+
+/* Maximum line length passed from GCC to 'as' and used for parsing
+   configuration files: */
+
+#define MAX_LINE 8192
+
+/* Environment variable used to pass SHM ID to the called program. */
+
+#define SHM_ENV_VAR "__AFL_SHM_ID"
+
+/* Environment variable used to pass SHM FUZZ ID to the called program. */
+
+#define SHM_FUZZ_ENV_VAR "__AFL_SHM_FUZZ_ID"
+
+/* Other less interesting, internal-only variables. */
+
+#define CLANG_ENV_VAR "__AFL_CLANG_MODE"
+#define AS_LOOP_ENV_VAR "__AFL_AS_LOOPCHECK"
+#define PERSIST_ENV_VAR "__AFL_PERSISTENT"
+#define DEFER_ENV_VAR "__AFL_DEFER_FORKSRV"
+
+/* In-code signatures for deferred and persistent mode. */
+
+#define PERSIST_SIG "##SIG_AFL_PERSISTENT##"
+#define DEFER_SIG "##SIG_AFL_DEFER_FORKSRV##"
+
+/* Distinctive bitmap signature used to indicate failed execution: */
+
+#define EXEC_FAIL_SIG 0xfee1dead
+
+/* Distinctive exit code used to indicate MSAN trip condition: */
+
+#define MSAN_ERROR 86
+
+/* Distinctive exit code used to indicate LSAN trip condition: */
+
+#define LSAN_ERROR 23
+
+/* Designated file descriptors for forkserver commands (the application will
+   use FORKSRV_FD and FORKSRV_FD + 1): */
+
+#define FORKSRV_FD 198
+
+/* Fork server init timeout multiplier: we'll wait the user-selected
+   timeout plus this much for the fork server to spin up. */
+
+#define FORK_WAIT_MULT 10
+
+/* Calibration timeout adjustments, to be a bit more generous when resuming
+   fuzzing sessions or trying to calibrate already-added internal finds.
+ The first value is a percentage, the other is in milliseconds: */ + +#define CAL_TMOUT_PERC 125 +#define CAL_TMOUT_ADD 50 + +/* Number of chances to calibrate a case before giving up: */ + +#define CAL_CHANCES 3 + +/* Map size for the traced binary (2^MAP_SIZE_POW2). Must be greater than + 2; you probably want to keep it under 18 or so for performance reasons + (adjusting AFL_INST_RATIO when compiling is probably a better way to solve + problems with complex programs). You need to recompile the target binary + after changing this - otherwise, SEGVs may ensue. */ + +#define MAP_SIZE_POW2 16 + +/* Do not change this unless you really know what you are doing. */ + +#define MAP_SIZE (1U << MAP_SIZE_POW2) +#if MAP_SIZE <= 2097152 + #define MAP_INITIAL_SIZE (2 << 20) // = 2097152 +#else + #define MAP_INITIAL_SIZE MAP_SIZE +#endif + +/* Maximum allocator request size (keep well under INT_MAX): */ + +#define MAX_ALLOC 0x40000000 + +/* A made-up hashing seed: */ + +#define HASH_CONST 0xa5b35705 + +/* Constants for afl-gotcpu to control busy loop timing: */ + +#define CTEST_TARGET_MS 5000 +#define CTEST_CORE_TRG_MS 1000 +#define CTEST_BUSY_CYCLES (10 * 1000 * 1000) + +/* Enable NeverZero counters in QEMU mode */ + +#define AFL_QEMU_NOT_ZERO + +/* AFL RedQueen */ + +#define CMPLOG_SHM_ENV_VAR "__AFL_CMPLOG_SHM_ID" + +/* ASAN SHM ID */ +#define AFL_ASAN_FUZZ_SHM_ENV_VAR "__AFL_ASAN_SHM_ID" + +/* CPU Affinity lockfile env var */ + +#define CPU_AFFINITY_ENV_VAR "__AFL_LOCKFILE" + +/* Uncomment this to use inferior block-coverage-based instrumentation. Note + that you need to recompile the target binary for this to have any effect: */ + +// #define COVERAGE_ONLY + +/* Uncomment this to ignore hit counts and output just one bit per tuple. + As with the previous setting, you will need to recompile the target + binary: */ + +// #define SKIP_COUNTS + +/* Uncomment this to use instrumentation data to record newly discovered paths, + but do not use them as seeds for fuzzing. This is useful for conveniently + measuring coverage that could be attained by a "dumb" fuzzing algorithm: */ + +// #define IGNORE_FINDS + +/* Text mutations */ + +/* Minimum length of a queue input to be evaluated for "is_ascii"? */ + +#define AFL_TXT_MIN_LEN 12 + +/* Maximum length of a queue input to be evaluated for "is_ascii"? */ + +#define AFL_TXT_MAX_LEN 65535 + +/* What is the minimum percentage of ascii characters present to be classified + as "is_ascii"? */ + +#define AFL_TXT_MIN_PERCENT 99 + +/* How often to perform ASCII mutations 0 = disable, 1-8 are good values */ + +#define AFL_TXT_BIAS 6 + +/* Maximum length of a string to tamper with */ + +#define AFL_TXT_STRING_MAX_LEN 1024 + +/* Maximum mutations on a string */ + +#define AFL_TXT_STRING_MAX_MUTATIONS 6 + +#endif /* ! _HAVE_CONFIG_H */ + diff --git a/qemuafl/imported/snapshot-inl.h b/qemuafl/imported/snapshot-inl.h new file mode 100644 index 0000000000000..e577b013d6cc7 --- /dev/null +++ b/qemuafl/imported/snapshot-inl.h @@ -0,0 +1,115 @@ +/* + american fuzzy lop++ - snapshot helpers routines + ------------------------------------------------ + + Originally written by Michal Zalewski + + Forkserver design by Jann Horn + + Now maintained by Marc Heuse , + Heiko Eissfeldt , + Andrea Fioraldi , + Dominik Maier + + Copyright 2016, 2017 Google Inc. All rights reserved. + Copyright 2019-2024 AFLplusplus Project. All rights reserved. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. 
+   You may obtain a copy of the License at:
+
+     https://www.apache.org/licenses/LICENSE-2.0
+
+ */
+
+// From AFL-Snapshot-LKM/include/afl_snapshot.h (must be kept synced)
+
+#include <sys/ioctl.h>
+#include <fcntl.h>
+#include <unistd.h>
+
+#define AFL_SNAPSHOT_FILE_NAME "/dev/afl_snapshot"
+
+#define AFL_SNAPSHOT_IOCTL_MAGIC 44313
+
+#define AFL_SNAPSHOT_IOCTL_DO _IO(AFL_SNAPSHOT_IOCTL_MAGIC, 1)
+#define AFL_SNAPSHOT_IOCTL_CLEAN _IO(AFL_SNAPSHOT_IOCTL_MAGIC, 2)
+#define AFL_SNAPSHOT_EXCLUDE_VMRANGE \
+  _IOR(AFL_SNAPSHOT_IOCTL_MAGIC, 3, struct afl_snapshot_vmrange_args *)
+#define AFL_SNAPSHOT_INCLUDE_VMRANGE \
+  _IOR(AFL_SNAPSHOT_IOCTL_MAGIC, 4, struct afl_snapshot_vmrange_args *)
+#define AFL_SNAPSHOT_IOCTL_TAKE _IOR(AFL_SNAPSHOT_IOCTL_MAGIC, 5, int)
+#define AFL_SNAPSHOT_IOCTL_RESTORE _IO(AFL_SNAPSHOT_IOCTL_MAGIC, 6)
+
+// Trace newly mmapped areas and unmap them on restore.
+#define AFL_SNAPSHOT_MMAP 1
+// Do not snapshot any page (by default, all writable non-shared pages
+// are snapshotted).
+#define AFL_SNAPSHOT_BLOCK 2
+// Snapshot file descriptor state, close newly opened descriptors
+#define AFL_SNAPSHOT_FDS 4
+// Snapshot register state
+#define AFL_SNAPSHOT_REGS 8
+// Perform a restore when exit_group is invoked
+#define AFL_SNAPSHOT_EXIT 16
+// TODO(andrea) allow non-COW snapshots (high perf on small processes)
+// Disable COW, restore all the snapshotted pages
+#define AFL_SNAPSHOT_NOCOW 32
+// Do not snapshot stack pages
+#define AFL_SNAPSHOT_NOSTACK 64
+
+struct afl_snapshot_vmrange_args {
+
+  unsigned long start, end;
+
+};
+
+static int afl_snapshot_dev_fd;
+
+static int afl_snapshot_init(void) {
+
+  afl_snapshot_dev_fd = open(AFL_SNAPSHOT_FILE_NAME, 0);
+  return afl_snapshot_dev_fd;
+
+}
+
+static void afl_snapshot_exclude_vmrange(void *start, void *end) {
+
+  struct afl_snapshot_vmrange_args args = {(unsigned long)start,
+                                           (unsigned long)end};
+  ioctl(afl_snapshot_dev_fd, AFL_SNAPSHOT_EXCLUDE_VMRANGE, &args);
+
+}
+
+static void afl_snapshot_include_vmrange(void *start, void *end) {
+
+  struct afl_snapshot_vmrange_args args = {(unsigned long)start,
+                                           (unsigned long)end};
+  ioctl(afl_snapshot_dev_fd, AFL_SNAPSHOT_INCLUDE_VMRANGE, &args);
+
+}
+
+static int afl_snapshot_take(int config) {
+
+  return ioctl(afl_snapshot_dev_fd, AFL_SNAPSHOT_IOCTL_TAKE, config);
+
+}
+
+static int afl_snapshot_do(void) {
+
+  return ioctl(afl_snapshot_dev_fd, AFL_SNAPSHOT_IOCTL_DO);
+
+}
+
+static void afl_snapshot_restore(void) {
+
+  ioctl(afl_snapshot_dev_fd, AFL_SNAPSHOT_IOCTL_RESTORE);
+
+}
+
+static void afl_snapshot_clean(void) {
+
+  ioctl(afl_snapshot_dev_fd, AFL_SNAPSHOT_IOCTL_CLEAN);
+
+}
+
diff --git a/qemuafl/imported/types.h b/qemuafl/imported/types.h
new file mode 100644
index 0000000000000..d370bcfba28ab
--- /dev/null
+++ b/qemuafl/imported/types.h
@@ -0,0 +1,204 @@
+/*
+   american fuzzy lop++ - type definitions and minor macros
+   --------------------------------------------------------
+
+   Originally written by Michal Zalewski <lcamtuf@google.com>
+
+   Now maintained by Marc Heuse <mh@mh-sec.de>,
+   Heiko Eissfeldt <heiko.eissfeldt@hexco.de>,
+   Andrea Fioraldi <andreafioraldi@gmail.com>,
+   Dominik Maier <mail@dmnk.co>
+
+   Copyright 2016, 2017 Google Inc. All rights reserved.
+   Copyright 2019-2024 AFLplusplus Project. All rights reserved.
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at:
+
+     https://www.apache.org/licenses/LICENSE-2.0
+
+ */
+
+#ifndef _HAVE_TYPES_H
+#define _HAVE_TYPES_H
+
+#include <stdint.h>
+#include <stdlib.h>
+#include "config.h"
+
+typedef uint8_t u8;
+typedef uint16_t u16;
+typedef uint32_t u32;
+#ifdef WORD_SIZE_64
+typedef unsigned __int128 uint128_t;
+typedef uint128_t u128;
+#endif
+
+/* Extended forkserver option values */
+
+/* Reporting errors */
+#define FS_OPT_ERROR 0xf800008f
+#define FS_OPT_GET_ERROR(x) ((x & 0x00ffff00) >> 8)
+#define FS_OPT_SET_ERROR(x) ((x & 0x0000ffff) << 8)
+#define FS_ERROR_MAP_SIZE 1
+#define FS_ERROR_MAP_ADDR 2
+#define FS_ERROR_SHM_OPEN 4
+#define FS_ERROR_SHMAT 8
+#define FS_ERROR_MMAP 16
+#define FS_ERROR_OLD_CMPLOG 32
+#define FS_ERROR_OLD_CMPLOG_QEMU 64
+
+/* New Forkserver */
+#define FS_NEW_VERSION_MIN 1
+#define FS_NEW_VERSION_MAX 1
+#define FS_NEW_ERROR 0xeffe0000
+#define FS_NEW_OPT_MAPSIZE 0x00000001     // parameter: 32 bit value
+#define FS_NEW_OPT_SHDMEM_FUZZ 0x00000002 // parameter: none
+#define FS_NEW_OPT_AUTODICT 0x00000800    // autodictionary data
+
+/* Reporting options */
+#define FS_OPT_ENABLED 0x80000001
+#define FS_OPT_MAPSIZE 0x40000000
+#define FS_OPT_SNAPSHOT 0x20000000
+#define FS_OPT_AUTODICT 0x10000000
+#define FS_OPT_SHDMEM_FUZZ 0x01000000
+#define FS_OPT_NEWCMPLOG 0x02000000
+#define FS_OPT_OLD_AFLPP_WORKAROUND 0x0f000000
+// FS_OPT_MAX_MAPSIZE is 8388608 = 0x800000 = 2^23 = 1 << 23
+#define FS_OPT_MAX_MAPSIZE ((0x00fffffeU >> 1) + 1)
+#define FS_OPT_GET_MAPSIZE(x) (((x & 0x00fffffe) >> 1) + 1)
+#define FS_OPT_SET_MAPSIZE(x) \
+  (x <= 1 || x > FS_OPT_MAX_MAPSIZE ? 0 : ((x - 1) << 1))
+
+typedef unsigned long long u64;
+
+typedef int8_t s8;
+typedef int16_t s16;
+typedef int32_t s32;
+typedef int64_t s64;
+#ifdef WORD_SIZE_64
+typedef __int128 int128_t;
+typedef int128_t s128;
+#endif
+
+#ifndef MIN
+  #define MIN(a, b) \
+    ({ \
+ \
+      __typeof__(a) _a = (a); \
+      __typeof__(b) _b = (b); \
+      _a < _b ? _a : _b; \
+ \
+    })
+
+  #define MAX(a, b) \
+    ({ \
+ \
+      __typeof__(a) _a = (a); \
+      __typeof__(b) _b = (b); \
+      _a > _b ? _a : _b; \
+ \
+    })
+
+#endif /* !MIN */
+
+#define SWAP16(_x) \
+  ({ \
+ \
+    u16 _ret = (_x); \
+    (u16)((_ret << 8) | (_ret >> 8)); \
+ \
+  })
+
+#define SWAP32(_x) \
+  ({ \
+ \
+    u32 _ret = (_x); \
+    (u32)((_ret << 24) | (_ret >> 24) | ((_ret << 8) & 0x00FF0000) | \
+          ((_ret >> 8) & 0x0000FF00)); \
+ \
+  })
+
+#define SWAP64(_x) \
+  ({ \
+ \
+    u64 _ret = (_x); \
+    _ret = \
+      (_ret & 0x00000000FFFFFFFF) << 32 | (_ret & 0xFFFFFFFF00000000) >> 32; \
+    _ret = \
+      (_ret & 0x0000FFFF0000FFFF) << 16 | (_ret & 0xFFFF0000FFFF0000) >> 16; \
+    _ret = \
+      (_ret & 0x00FF00FF00FF00FF) << 8 | (_ret & 0xFF00FF00FF00FF00) >> 8; \
+    _ret; \
+ \
+  })
+
+// It is impossible to define 128 bit constants, so ...
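The FS_OPT macros above pack the map size into the low bits of the forkserver status word. A round trip looks like this (an illustrative check, not code from the patch):

    #include <assert.h>

    /* Assumes the u32 typedef and FS_OPT_* macros from types.h above. */
    static void mapsize_roundtrip(void) {
      u32 status = FS_OPT_ENABLED | FS_OPT_MAPSIZE |
                   FS_OPT_SET_MAPSIZE(65536);

      /* The reader masks out the flag bits before decoding. */
      if (status & FS_OPT_MAPSIZE)
        assert(FS_OPT_GET_MAPSIZE(status) == 65536);
    }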
+#ifdef WORD_SIZE_64 + #define SWAPN(_x, _l) \ + ({ \ + \ + u128 _res = (_x), _ret; \ + char *d = (char *)&_ret, *s = (char *)&_res; \ + int i; \ + for (i = 0; i < 16; i++) \ + d[15 - i] = s[i]; \ + u32 sr = 128U - ((_l) << 3U); \ + (_ret >>= sr); \ + (u128) _ret; \ + \ + }) +#endif + +#define SWAPNN(_x, _y, _l) \ + ({ \ + \ + char *d = (char *)(_x), *s = (char *)(_y); \ + u32 i, l = (_l) - 1; \ + for (i = 0; i <= l; i++) \ + d[l - i] = s[i]; \ + \ + }) + +#ifdef AFL_LLVM_PASS + #if defined(__linux__) || !defined(__ANDROID__) + #define AFL_SR(s) (srandom(s)) + #define AFL_R(x) (random() % (x)) + #else + #define AFL_SR(s) ((void)s) + #define AFL_R(x) (arc4random_uniform(x)) + #endif +#else + #if defined(__linux__) || !defined(__ANDROID__) + #define SR(s) (srandom(s)) + #define R(x) (random() % (x)) + #else + #define SR(s) ((void)s) + #define R(x) (arc4random_uniform(x)) + #endif +#endif /* ^AFL_LLVM_PASS */ + +#define STRINGIFY_INTERNAL(x) #x +#define STRINGIFY(x) STRINGIFY_INTERNAL(x) + +#define MEM_BARRIER() __asm__ volatile("" ::: "memory") + +#if __GNUC__ < 6 + #ifndef likely + #define likely(_x) (_x) + #endif + #ifndef unlikely + #define unlikely(_x) (_x) + #endif +#else + #ifndef likely + #define likely(_x) __builtin_expect(!!(_x), 1) + #endif + #ifndef unlikely + #define unlikely(_x) __builtin_expect(!!(_x), 0) + #endif +#endif + +#endif /* ! _HAVE_TYPES_H */ + diff --git a/qemuafl/interval-tree/.gitignore b/qemuafl/interval-tree/.gitignore new file mode 100644 index 0000000000000..c4535366ac14c --- /dev/null +++ b/qemuafl/interval-tree/.gitignore @@ -0,0 +1,3 @@ +*.o +*~ +interval-tree-test diff --git a/qemuafl/interval-tree/COPYING b/qemuafl/interval-tree/COPYING new file mode 100644 index 0000000000000..5eb91743bdb04 --- /dev/null +++ b/qemuafl/interval-tree/COPYING @@ -0,0 +1,20 @@ +From interval_tree_generic.h: + + Interval Trees + (C) 2012 Michel Lespinasse + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 2 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program; if not, write to the Free Software + Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + + include/linux/interval_tree_generic.h diff --git a/qemuafl/interval-tree/compiler.h b/qemuafl/interval-tree/compiler.h new file mode 100644 index 0000000000000..1c820b5a06d09 --- /dev/null +++ b/qemuafl/interval-tree/compiler.h @@ -0,0 +1,17 @@ +#ifndef __INT_COMPILER_H__ +#define __INT_COMPILER_H__ + +/** + * container_of - cast a member of a structure out to the containing structure + * @ptr: the pointer to the member. + * @type: the type of the container struct this is embedded in. + * @member: the name of the member within the struct. 
+ * + */ +#ifndef container_of +#define container_of(ptr, type, member) ({ \ + const typeof( ((type *)0)->member ) *__mptr = (ptr); \ + (type *)( (char *)__mptr - offsetof(type,member) );}) +#endif + +#endif /* __INT_COMPILER_H__ */ diff --git a/qemuafl/interval-tree/interval-tree.inl b/qemuafl/interval-tree/interval-tree.inl new file mode 100644 index 0000000000000..2d253df6ba3ae --- /dev/null +++ b/qemuafl/interval-tree/interval-tree.inl @@ -0,0 +1,2 @@ +#include "interval_tree_generic.h" +#include "rbtree.inl" diff --git a/qemuafl/interval-tree/interval_tree_generic.h b/qemuafl/interval-tree/interval_tree_generic.h new file mode 100644 index 0000000000000..e26c7322ceba6 --- /dev/null +++ b/qemuafl/interval-tree/interval_tree_generic.h @@ -0,0 +1,193 @@ +/* + Interval Trees + (C) 2012 Michel Lespinasse + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 2 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program; if not, write to the Free Software + Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + + include/linux/interval_tree_generic.h +*/ + +#include + +#include "rbtree_augmented.h" + +/* + * Template for implementing interval trees + * + * ITSTRUCT: struct type of the interval tree nodes + * ITRB: name of struct rb_node field within ITSTRUCT + * ITTYPE: type of the interval endpoints + * ITSUBTREE: name of ITTYPE field within ITSTRUCT holding last-in-subtree + * ITSTART(n): start endpoint of ITSTRUCT node n + * ITLAST(n): last endpoint of ITSTRUCT node n + * ITSTATIC: 'static' or empty + * ITPREFIX: prefix to use for the inline tree definitions + * + * Note - before using this, please consider if non-generic version + * (interval_tree.h) would work for you... 
+ */ + +#define INTERVAL_TREE_DEFINE(ITSTRUCT, ITRB, ITTYPE, ITSUBTREE, \ + ITSTART, ITLAST, ITSTATIC, ITPREFIX) \ + \ +/* Callbacks for augmented rbtree insert and remove */ \ + \ +static inline ITTYPE ITPREFIX ## _compute_subtree_last(ITSTRUCT *node) \ +{ \ + ITTYPE max = ITLAST(node), subtree_last; \ + if (node->ITRB.rb_left) { \ + subtree_last = rb_entry(node->ITRB.rb_left, \ + ITSTRUCT, ITRB)->ITSUBTREE; \ + if (max < subtree_last) \ + max = subtree_last; \ + } \ + if (node->ITRB.rb_right) { \ + subtree_last = rb_entry(node->ITRB.rb_right, \ + ITSTRUCT, ITRB)->ITSUBTREE; \ + if (max < subtree_last) \ + max = subtree_last; \ + } \ + return max; \ +} \ + \ +RB_DECLARE_CALLBACKS(static, ITPREFIX ## _augment, ITSTRUCT, ITRB, \ + ITTYPE, ITSUBTREE, ITPREFIX ## _compute_subtree_last) \ + \ +/* Insert / remove interval nodes from the tree */ \ + \ +ITSTATIC void ITPREFIX ## _insert(ITSTRUCT *node, struct rb_root *root) \ +{ \ + struct rb_node **link = &root->rb_node, *rb_parent = NULL; \ + ITTYPE start = ITSTART(node), last = ITLAST(node); \ + ITSTRUCT *parent; \ + \ + while (*link) { \ + rb_parent = *link; \ + parent = rb_entry(rb_parent, ITSTRUCT, ITRB); \ + if (parent->ITSUBTREE < last) \ + parent->ITSUBTREE = last; \ + if (start < ITSTART(parent)) \ + link = &parent->ITRB.rb_left; \ + else \ + link = &parent->ITRB.rb_right; \ + } \ + \ + node->ITSUBTREE = last; \ + rb_link_node(&node->ITRB, rb_parent, link); \ + rb_insert_augmented(&node->ITRB, root, &ITPREFIX ## _augment); \ +} \ + \ +ITSTATIC void ITPREFIX ## _remove(ITSTRUCT *node, struct rb_root *root) \ +{ \ + rb_erase_augmented(&node->ITRB, root, &ITPREFIX ## _augment); \ +} \ + \ +/* \ + * Iterate over intervals intersecting [start;last] \ + * \ + * Note that a node's interval intersects [start;last] iff: \ + * Cond1: ITSTART(node) <= last \ + * and \ + * Cond2: start <= ITLAST(node) \ + */ \ + \ +static ITSTRUCT * \ +ITPREFIX ## _subtree_search(ITSTRUCT *node, ITTYPE start, ITTYPE last) \ +{ \ + while (true) { \ + /* \ + * Loop invariant: start <= node->ITSUBTREE \ + * (Cond2 is satisfied by one of the subtree nodes) \ + */ \ + if (node->ITRB.rb_left) { \ + ITSTRUCT *left = rb_entry(node->ITRB.rb_left, \ + ITSTRUCT, ITRB); \ + if (start <= left->ITSUBTREE) { \ + /* \ + * Some nodes in left subtree satisfy Cond2. \ + * Iterate to find the leftmost such node N. \ + * If it also satisfies Cond1, that's the \ + * match we are looking for. Otherwise, there \ + * is no matching interval as nodes to the \ + * right of N can't satisfy Cond1 either. 
\ + */ \ + node = left; \ + continue; \ + } \ + } \ + if (ITSTART(node) <= last) { /* Cond1 */ \ + if (start <= ITLAST(node)) /* Cond2 */ \ + return node; /* node is leftmost match */ \ + if (node->ITRB.rb_right) { \ + node = rb_entry(node->ITRB.rb_right, \ + ITSTRUCT, ITRB); \ + if (start <= node->ITSUBTREE) \ + continue; \ + } \ + } \ + return NULL; /* No match */ \ + } \ +} \ + \ +ITSTATIC ITSTRUCT * \ +ITPREFIX ## _iter_first(struct rb_root *root, ITTYPE start, ITTYPE last) \ +{ \ + ITSTRUCT *node; \ + \ + if (!root->rb_node) \ + return NULL; \ + node = rb_entry(root->rb_node, ITSTRUCT, ITRB); \ + if (node->ITSUBTREE < start) \ + return NULL; \ + return ITPREFIX ## _subtree_search(node, start, last); \ +} \ + \ +ITSTATIC ITSTRUCT * \ +ITPREFIX ## _iter_next(ITSTRUCT *node, ITTYPE start, ITTYPE last) \ +{ \ + struct rb_node *rb = node->ITRB.rb_right, *prev; \ + \ + while (true) { \ + /* \ + * Loop invariants: \ + * Cond1: ITSTART(node) <= last \ + * rb == node->ITRB.rb_right \ + * \ + * First, search right subtree if suitable \ + */ \ + if (rb) { \ + ITSTRUCT *right = rb_entry(rb, ITSTRUCT, ITRB); \ + if (start <= right->ITSUBTREE) \ + return ITPREFIX ## _subtree_search(right, \ + start, last); \ + } \ + \ + /* Move up the tree until we come from a node's left child */ \ + do { \ + rb = rb_parent(&node->ITRB); \ + if (!rb) \ + return NULL; \ + prev = &node->ITRB; \ + node = rb_entry(rb, ITSTRUCT, ITRB); \ + rb = node->ITRB.rb_right; \ + } while (prev == rb); \ + \ + /* Check if the node intersects [start;last] */ \ + if (last < ITSTART(node)) /* !Cond1 */ \ + return NULL; \ + else if (start <= ITLAST(node)) /* Cond2 */ \ + return node; \ + } \ +} diff --git a/qemuafl/interval-tree/rbtree.h b/qemuafl/interval-tree/rbtree.h new file mode 100644 index 0000000000000..da67ade044c4b --- /dev/null +++ b/qemuafl/interval-tree/rbtree.h @@ -0,0 +1,108 @@ +/* + Red Black Trees + (C) 1999 Andrea Arcangeli + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 2 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program; if not, write to the Free Software + Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + + linux/include/linux/rbtree.h + + To use rbtrees you'll have to implement your own insert and search cores. + This will avoid us to use callbacks and to drop drammatically performances. + I know it's not the cleaner way, but in C (not in C++) to get + performances and genericity... + + See Documentation/rbtree.txt for documentation and samples. 
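Looking back at interval_tree_generic.h just above: a concrete instantiation makes the template parameters less abstract. For a hypothetical guest address-range node (all names invented for the example):

    #include <stdbool.h>

    struct range_node {
      struct rb_node rb;            /* ITRB */
      unsigned long  start, last;   /* interval is [start; last] */
      unsigned long  subtree_last;  /* ITSUBTREE */
    };

    #define RANGE_START(n) ((n)->start)
    #define RANGE_LAST(n)  ((n)->last)

    INTERVAL_TREE_DEFINE(struct range_node, rb, unsigned long,
                         subtree_last, RANGE_START, RANGE_LAST,
                         static, range_tree)

    /* This emits range_tree_insert(), range_tree_remove(),
       range_tree_iter_first() and range_tree_iter_next(); e.g.: */
    static int overlaps(struct rb_root *root, unsigned long a,
                        unsigned long b) {
      return range_tree_iter_first(root, a, b) != NULL;
    }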
+*/ + +#ifndef _LINUX_RBTREE_H +#define _LINUX_RBTREE_H + +#include +#include "compiler.h" + +struct rb_node { + unsigned long __rb_parent_color; + struct rb_node *rb_right; + struct rb_node *rb_left; +} __attribute__((aligned(sizeof(long)))); + /* The alignment might seem pointless, but allegedly CRIS needs it */ + +struct rb_root { + struct rb_node *rb_node; +}; + + +#define rb_parent(r) ((struct rb_node *)((r)->__rb_parent_color & ~3)) + +#define RB_ROOT (struct rb_root) { NULL, } +#define rb_entry(ptr, type, member) container_of(ptr, type, member) + +#define RB_EMPTY_ROOT(root) ((root)->rb_node == NULL) + +/* 'empty' nodes are nodes that are known not to be inserted in an rbtree */ +#define RB_EMPTY_NODE(node) \ + ((node)->__rb_parent_color == (unsigned long)(node)) +#define RB_CLEAR_NODE(node) \ + ((node)->__rb_parent_color = (unsigned long)(node)) + + +extern void rb_insert_color(struct rb_node *, struct rb_root *); +extern void rb_erase(struct rb_node *, struct rb_root *); + + +/* Find logical next and previous nodes in a tree */ +extern struct rb_node *rb_next(const struct rb_node *); +extern struct rb_node *rb_prev(const struct rb_node *); +extern struct rb_node *rb_first(const struct rb_root *); +extern struct rb_node *rb_last(const struct rb_root *); + +/* Postorder iteration - always visit the parent after its children */ +extern struct rb_node *rb_first_postorder(const struct rb_root *); +extern struct rb_node *rb_next_postorder(const struct rb_node *); + +/* Fast replacement of a single node without remove/rebalance/add/rebalance */ +extern void rb_replace_node(struct rb_node *victim, struct rb_node *new, + struct rb_root *root); + +static inline void rb_link_node(struct rb_node * node, struct rb_node * parent, + struct rb_node ** rb_link) +{ + node->__rb_parent_color = (unsigned long)parent; + node->rb_left = node->rb_right = NULL; + + *rb_link = node; +} + +#define rb_entry_safe(ptr, type, member) \ + ({ typeof(ptr) ____ptr = (ptr); \ + ____ptr ? rb_entry(____ptr, type, member) : NULL; \ + }) + +/** + * rbtree_postorder_for_each_entry_safe - iterate over rb_root in post order of + * given type safe against removal of rb_node entry + * + * @pos: the 'type *' to use as a loop cursor. + * @n: another 'type *' to use as temporary storage + * @root: 'rb_root *' of the rbtree. + * @field: the name of the rb_node field within 'type'. + */ +#define rbtree_postorder_for_each_entry_safe(pos, n, root, field) \ + for (pos = rb_entry_safe(rb_first_postorder(root), typeof(*pos), field); \ + pos && ({ n = rb_entry_safe(rb_next_postorder(&pos->field), \ + typeof(*pos), field); 1; }); \ + pos = n) + +#endif /* _LINUX_RBTREE_H */ diff --git a/qemuafl/interval-tree/rbtree.inl b/qemuafl/interval-tree/rbtree.inl new file mode 100644 index 0000000000000..a5b0d313df992 --- /dev/null +++ b/qemuafl/interval-tree/rbtree.inl @@ -0,0 +1,549 @@ +/* + Red Black Trees + (C) 1999 Andrea Arcangeli + (C) 2002 David Woodhouse + (C) 2012 Michel Lespinasse + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 2 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. 
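As the rbtree.h comment above says, the insert core is the caller's job: walk down to a NULL child slot, rb_link_node(), then rb_insert_color() to rebalance. A sketch for a node keyed by an unsigned long (example type, not from this patch):

    struct my_node {
      struct rb_node rb;
      unsigned long  key;
    };

    static void my_tree_insert(struct rb_root *root, struct my_node *new) {
      struct rb_node **link = &root->rb_node, *parent = NULL;

      while (*link) {
        struct my_node *cur;
        parent = *link;
        cur  = rb_entry(parent, struct my_node, rb);
        link = new->key < cur->key ? &parent->rb_left : &parent->rb_right;
      }

      rb_link_node(&new->rb, parent, link);  /* attach as a red leaf */
      rb_insert_color(&new->rb, root);       /* restore RB invariants */
    }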
+ + You should have received a copy of the GNU General Public License + along with this program; if not, write to the Free Software + Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + + linux/lib/rbtree.c +*/ + +#include +#include "rbtree_augmented.h" + +/* + * red-black trees properties: http://en.wikipedia.org/wiki/Rbtree + * + * 1) A node is either red or black + * 2) The root is black + * 3) All leaves (NULL) are black + * 4) Both children of every red node are black + * 5) Every simple path from root to leaves contains the same number + * of black nodes. + * + * 4 and 5 give the O(log n) guarantee, since 4 implies you cannot have two + * consecutive red nodes in a path and every red node is therefore followed by + * a black. So if B is the number of black nodes on every simple path (as per + * 5), then the longest possible path due to 4 is 2B. + * + * We shall indicate color with case, where black nodes are uppercase and red + * nodes will be lowercase. Unknown color nodes shall be drawn as red within + * parentheses and have some accompanying text comment. + */ + +static inline void rb_set_black(struct rb_node *rb) +{ + rb->__rb_parent_color |= RB_BLACK; +} + +static inline struct rb_node *rb_red_parent(struct rb_node *red) +{ + return (struct rb_node *)red->__rb_parent_color; +} + +/* + * Helper function for rotations: + * - old's parent and color get assigned to new + * - old gets assigned new as a parent and 'color' as a color. + */ +static inline void +__rb_rotate_set_parents(struct rb_node *old, struct rb_node *new, + struct rb_root *root, int color) +{ + struct rb_node *parent = rb_parent(old); + new->__rb_parent_color = old->__rb_parent_color; + rb_set_parent_color(old, new, color); + __rb_change_child(old, new, parent, root); +} + +static inline void +__rb_insert(struct rb_node *node, struct rb_root *root, + void (*augment_rotate)(struct rb_node *old, struct rb_node *new)) +{ + struct rb_node *parent = rb_red_parent(node), *gparent, *tmp; + + while (true) { + /* + * Loop invariant: node is red + * + * If there is a black parent, we are done. + * Otherwise, take some corrective action as we don't + * want a red root or two consecutive red nodes. + */ + if (!parent) { + rb_set_parent_color(node, NULL, RB_BLACK); + break; + } else if (rb_is_black(parent)) + break; + + gparent = rb_red_parent(parent); + + tmp = gparent->rb_right; + if (parent != tmp) { /* parent == gparent->rb_left */ + if (tmp && rb_is_red(tmp)) { + /* + * Case 1 - color flips + * + * G g + * / \ / \ + * p u --> P U + * / / + * n n + * + * However, since g's parent might be red, and + * 4) does not allow this, we need to recurse + * at g. + */ + rb_set_parent_color(tmp, gparent, RB_BLACK); + rb_set_parent_color(parent, gparent, RB_BLACK); + node = gparent; + parent = rb_parent(node); + rb_set_parent_color(node, parent, RB_RED); + continue; + } + + tmp = parent->rb_right; + if (node == tmp) { + /* + * Case 2 - left rotate at parent + * + * G G + * / \ / \ + * p U --> n U + * \ / + * n p + * + * This still leaves us in violation of 4), the + * continuation into Case 3 will fix that. 
+ */ + parent->rb_right = tmp = node->rb_left; + node->rb_left = parent; + if (tmp) + rb_set_parent_color(tmp, parent, + RB_BLACK); + rb_set_parent_color(parent, node, RB_RED); + augment_rotate(parent, node); + parent = node; + tmp = node->rb_right; + } + + /* + * Case 3 - right rotate at gparent + * + * G P + * / \ / \ + * p U --> n g + * / \ + * n U + */ + gparent->rb_left = tmp; /* == parent->rb_right */ + parent->rb_right = gparent; + if (tmp) + rb_set_parent_color(tmp, gparent, RB_BLACK); + __rb_rotate_set_parents(gparent, parent, root, RB_RED); + augment_rotate(gparent, parent); + break; + } else { + tmp = gparent->rb_left; + if (tmp && rb_is_red(tmp)) { + /* Case 1 - color flips */ + rb_set_parent_color(tmp, gparent, RB_BLACK); + rb_set_parent_color(parent, gparent, RB_BLACK); + node = gparent; + parent = rb_parent(node); + rb_set_parent_color(node, parent, RB_RED); + continue; + } + + tmp = parent->rb_left; + if (node == tmp) { + /* Case 2 - right rotate at parent */ + parent->rb_left = tmp = node->rb_right; + node->rb_right = parent; + if (tmp) + rb_set_parent_color(tmp, parent, + RB_BLACK); + rb_set_parent_color(parent, node, RB_RED); + augment_rotate(parent, node); + parent = node; + tmp = node->rb_left; + } + + /* Case 3 - left rotate at gparent */ + gparent->rb_right = tmp; /* == parent->rb_left */ + parent->rb_left = gparent; + if (tmp) + rb_set_parent_color(tmp, gparent, RB_BLACK); + __rb_rotate_set_parents(gparent, parent, root, RB_RED); + augment_rotate(gparent, parent); + break; + } + } +} + +/* + * Inline version for rb_erase() use - we want to be able to inline + * and eliminate the dummy_rotate callback there + */ +static inline void +____rb_erase_color(struct rb_node *parent, struct rb_root *root, + void (*augment_rotate)(struct rb_node *old, struct rb_node *new)) +{ + struct rb_node *node = NULL, *sibling, *tmp1, *tmp2; + + while (true) { + /* + * Loop invariants: + * - node is black (or NULL on first iteration) + * - node is not the root (parent is not NULL) + * - All leaf paths going through parent and node have a + * black node count that is 1 lower than other leaf paths. + */ + sibling = parent->rb_right; + if (node != sibling) { /* node == parent->rb_left */ + if (rb_is_red(sibling)) { + /* + * Case 1 - left rotate at parent + * + * P S + * / \ / \ + * N s --> p Sr + * / \ / \ + * Sl Sr N Sl + */ + parent->rb_right = tmp1 = sibling->rb_left; + sibling->rb_left = parent; + rb_set_parent_color(tmp1, parent, RB_BLACK); + __rb_rotate_set_parents(parent, sibling, root, + RB_RED); + augment_rotate(parent, sibling); + sibling = tmp1; + } + tmp1 = sibling->rb_right; + if (!tmp1 || rb_is_black(tmp1)) { + tmp2 = sibling->rb_left; + if (!tmp2 || rb_is_black(tmp2)) { + /* + * Case 2 - sibling color flip + * (p could be either color here) + * + * (p) (p) + * / \ / \ + * N S --> N s + * / \ / \ + * Sl Sr Sl Sr + * + * This leaves us violating 5) which + * can be fixed by flipping p to black + * if it was red, or by recursing at p. + * p is red when coming from Case 1. 
+ */ + rb_set_parent_color(sibling, parent, + RB_RED); + if (rb_is_red(parent)) + rb_set_black(parent); + else { + node = parent; + parent = rb_parent(node); + if (parent) + continue; + } + break; + } + /* + * Case 3 - right rotate at sibling + * (p could be either color here) + * + * (p) (p) + * / \ / \ + * N S --> N Sl + * / \ \ + * sl Sr s + * \ + * Sr + */ + sibling->rb_left = tmp1 = tmp2->rb_right; + tmp2->rb_right = sibling; + parent->rb_right = tmp2; + if (tmp1) + rb_set_parent_color(tmp1, sibling, + RB_BLACK); + augment_rotate(sibling, tmp2); + tmp1 = sibling; + sibling = tmp2; + } + /* + * Case 4 - left rotate at parent + color flips + * (p and sl could be either color here. + * After rotation, p becomes black, s acquires + * p's color, and sl keeps its color) + * + * (p) (s) + * / \ / \ + * N S --> P Sr + * / \ / \ + * (sl) sr N (sl) + */ + parent->rb_right = tmp2 = sibling->rb_left; + sibling->rb_left = parent; + rb_set_parent_color(tmp1, sibling, RB_BLACK); + if (tmp2) + rb_set_parent(tmp2, parent); + __rb_rotate_set_parents(parent, sibling, root, + RB_BLACK); + augment_rotate(parent, sibling); + break; + } else { + sibling = parent->rb_left; + if (rb_is_red(sibling)) { + /* Case 1 - right rotate at parent */ + parent->rb_left = tmp1 = sibling->rb_right; + sibling->rb_right = parent; + rb_set_parent_color(tmp1, parent, RB_BLACK); + __rb_rotate_set_parents(parent, sibling, root, + RB_RED); + augment_rotate(parent, sibling); + sibling = tmp1; + } + tmp1 = sibling->rb_left; + if (!tmp1 || rb_is_black(tmp1)) { + tmp2 = sibling->rb_right; + if (!tmp2 || rb_is_black(tmp2)) { + /* Case 2 - sibling color flip */ + rb_set_parent_color(sibling, parent, + RB_RED); + if (rb_is_red(parent)) + rb_set_black(parent); + else { + node = parent; + parent = rb_parent(node); + if (parent) + continue; + } + break; + } + /* Case 3 - right rotate at sibling */ + sibling->rb_right = tmp1 = tmp2->rb_left; + tmp2->rb_left = sibling; + parent->rb_left = tmp2; + if (tmp1) + rb_set_parent_color(tmp1, sibling, + RB_BLACK); + augment_rotate(sibling, tmp2); + tmp1 = sibling; + sibling = tmp2; + } + /* Case 4 - left rotate at parent + color flips */ + parent->rb_left = tmp2 = sibling->rb_right; + sibling->rb_right = parent; + rb_set_parent_color(tmp1, sibling, RB_BLACK); + if (tmp2) + rb_set_parent(tmp2, parent); + __rb_rotate_set_parents(parent, sibling, root, + RB_BLACK); + augment_rotate(parent, sibling); + break; + } + } +} + +/* Non-inline version for rb_erase_augmented() use */ +void __rb_erase_color(struct rb_node *parent, struct rb_root *root, + void (*augment_rotate)(struct rb_node *old, struct rb_node *new)) +{ + ____rb_erase_color(parent, root, augment_rotate); +} + +/* + * Non-augmented rbtree manipulation functions. + * + * We use dummy augmented callbacks here, and have the compiler optimize them + * out of the rb_insert_color() and rb_erase() function definitions. 
+ */ + +static inline void dummy_propagate(struct rb_node *node, struct rb_node *stop) {} +static inline void dummy_copy(struct rb_node *old, struct rb_node *new) {} +static inline void dummy_rotate(struct rb_node *old, struct rb_node *new) {} + +static const struct rb_augment_callbacks dummy_callbacks = { + dummy_propagate, dummy_copy, dummy_rotate +}; + +void rb_insert_color(struct rb_node *node, struct rb_root *root) +{ + __rb_insert(node, root, dummy_rotate); +} + +void rb_erase(struct rb_node *node, struct rb_root *root) +{ + struct rb_node *rebalance; + rebalance = __rb_erase_augmented(node, root, &dummy_callbacks); + if (rebalance) + ____rb_erase_color(rebalance, root, dummy_rotate); +} + +/* + * Augmented rbtree manipulation functions. + * + * This instantiates the same __always_inline functions as in the non-augmented + * case, but this time with user-defined callbacks. + */ + +void __rb_insert_augmented(struct rb_node *node, struct rb_root *root, + void (*augment_rotate)(struct rb_node *old, struct rb_node *new)) +{ + __rb_insert(node, root, augment_rotate); +} + +/* + * This function returns the first node (in sort order) of the tree. + */ +struct rb_node *rb_first(const struct rb_root *root) +{ + struct rb_node *n; + + n = root->rb_node; + if (!n) + return NULL; + while (n->rb_left) + n = n->rb_left; + return n; +} + +struct rb_node *rb_last(const struct rb_root *root) +{ + struct rb_node *n; + + n = root->rb_node; + if (!n) + return NULL; + while (n->rb_right) + n = n->rb_right; + return n; +} + +struct rb_node *rb_next(const struct rb_node *node) +{ + struct rb_node *parent; + + if (RB_EMPTY_NODE(node)) + return NULL; + + /* + * If we have a right-hand child, go down and then left as far + * as we can. + */ + if (node->rb_right) { + node = node->rb_right; + while (node->rb_left) + node=node->rb_left; + return (struct rb_node *)node; + } + + /* + * No right-hand children. Everything down and left is smaller than us, + * so any 'next' node must be in the general direction of our parent. + * Go up the tree; any time the ancestor is a right-hand child of its + * parent, keep going up. First time it's a left-hand child of its + * parent, said parent is our 'next' node. + */ + while ((parent = rb_parent(node)) && node == parent->rb_right) + node = parent; + + return parent; +} + +struct rb_node *rb_prev(const struct rb_node *node) +{ + struct rb_node *parent; + + if (RB_EMPTY_NODE(node)) + return NULL; + + /* + * If we have a left-hand child, go down and then right as far + * as we can. + */ + if (node->rb_left) { + node = node->rb_left; + while (node->rb_right) + node=node->rb_right; + return (struct rb_node *)node; + } + + /* + * No left-hand children. Go up till we find an ancestor which + * is a right-hand child of its parent. 
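With rb_first()/rb_next() defined above, sorted traversal needs no recursion or auxiliary stack; reusing the my_node type from the insert sketch:

    static unsigned long sum_keys(struct rb_root *root) {
      unsigned long   sum = 0;
      struct rb_node *n;

      /* Leftmost node first, then successors via parent pointers. */
      for (n = rb_first(root); n; n = rb_next(n))
        sum += rb_entry(n, struct my_node, rb)->key;

      return sum;
    }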
+ */ + while ((parent = rb_parent(node)) && node == parent->rb_left) + node = parent; + + return parent; +} + +void rb_replace_node(struct rb_node *victim, struct rb_node *new, + struct rb_root *root) +{ + struct rb_node *parent = rb_parent(victim); + + /* Set the surrounding nodes to point to the replacement */ + __rb_change_child(victim, new, parent, root); + if (victim->rb_left) + rb_set_parent(victim->rb_left, new); + if (victim->rb_right) + rb_set_parent(victim->rb_right, new); + + /* Copy the pointers/colour from the victim to the replacement */ + *new = *victim; +} + +static struct rb_node *rb_left_deepest_node(const struct rb_node *node) +{ + for (;;) { + if (node->rb_left) + node = node->rb_left; + else if (node->rb_right) + node = node->rb_right; + else + return (struct rb_node *)node; + } +} + +struct rb_node *rb_next_postorder(const struct rb_node *node) +{ + const struct rb_node *parent; + if (!node) + return NULL; + parent = rb_parent(node); + + /* If we're sitting on node, we've already seen our children */ + if (parent && node == parent->rb_left && parent->rb_right) { + /* If we are the parent's left node, go to the parent's right + * node then all the way down to the left */ + return rb_left_deepest_node(parent->rb_right); + } else + /* Otherwise we are the parent's right node, and the parent + * should be next */ + return (struct rb_node *)parent; +} + +struct rb_node *rb_first_postorder(const struct rb_root *root) +{ + if (!root->rb_node) + return NULL; + + return rb_left_deepest_node(root->rb_node); +} diff --git a/qemuafl/interval-tree/rbtree_augmented.h b/qemuafl/interval-tree/rbtree_augmented.h new file mode 100644 index 0000000000000..311abb6cde742 --- /dev/null +++ b/qemuafl/interval-tree/rbtree_augmented.h @@ -0,0 +1,245 @@ +/* + Red Black Trees + (C) 1999 Andrea Arcangeli + (C) 2002 David Woodhouse + (C) 2012 Michel Lespinasse + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 2 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program; if not, write to the Free Software + Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + + linux/include/linux/rbtree_augmented.h +*/ + +#ifndef _LINUX_RBTREE_AUGMENTED_H +#define _LINUX_RBTREE_AUGMENTED_H + +#include + +#include "compiler.h" + +#include "rbtree.h" + +/* + * Please note - only struct rb_augment_callbacks and the prototypes for + * rb_insert_augmented() and rb_erase_augmented() are intended to be public. + * The rest are implementation details you are not expected to depend on. + * + * See Documentation/rbtree.txt for documentation and samples. + */ + +struct rb_augment_callbacks { + void (*propagate)(struct rb_node *node, struct rb_node *stop); + void (*copy)(struct rb_node *old, struct rb_node *new); + void (*rotate)(struct rb_node *old, struct rb_node *new); +}; + +extern void __rb_insert_augmented(struct rb_node *node, struct rb_root *root, + void (*augment_rotate)(struct rb_node *old, struct rb_node *new)); +/* + * Fixup the rbtree and update the augmented information when rebalancing. 
+ * + * On insertion, the user must update the augmented information on the path + * leading to the inserted node, then call rb_link_node() as usual and + * rb_augment_inserted() instead of the usual rb_insert_color() call. + * If rb_augment_inserted() rebalances the rbtree, it will callback into + * a user provided function to update the augmented information on the + * affected subtrees. + */ +static inline void +rb_insert_augmented(struct rb_node *node, struct rb_root *root, + const struct rb_augment_callbacks *augment) +{ + __rb_insert_augmented(node, root, augment->rotate); +} + +#define RB_DECLARE_CALLBACKS(rbstatic, rbname, rbstruct, rbfield, \ + rbtype, rbaugmented, rbcompute) \ +static inline void \ +rbname ## _propagate(struct rb_node *rb, struct rb_node *stop) \ +{ \ + while (rb != stop) { \ + rbstruct *node = rb_entry(rb, rbstruct, rbfield); \ + rbtype augmented = rbcompute(node); \ + if (node->rbaugmented == augmented) \ + break; \ + node->rbaugmented = augmented; \ + rb = rb_parent(&node->rbfield); \ + } \ +} \ +static inline void \ +rbname ## _copy(struct rb_node *rb_old, struct rb_node *rb_new) \ +{ \ + rbstruct *old = rb_entry(rb_old, rbstruct, rbfield); \ + rbstruct *new = rb_entry(rb_new, rbstruct, rbfield); \ + new->rbaugmented = old->rbaugmented; \ +} \ +static void \ +rbname ## _rotate(struct rb_node *rb_old, struct rb_node *rb_new) \ +{ \ + rbstruct *old = rb_entry(rb_old, rbstruct, rbfield); \ + rbstruct *new = rb_entry(rb_new, rbstruct, rbfield); \ + new->rbaugmented = old->rbaugmented; \ + old->rbaugmented = rbcompute(old); \ +} \ +rbstatic const struct rb_augment_callbacks rbname = { \ + rbname ## _propagate, rbname ## _copy, rbname ## _rotate \ +}; + + +#define RB_RED 0 +#define RB_BLACK 1 + +#define __rb_parent(pc) ((struct rb_node *)(pc & ~3)) + +#define __rb_color(pc) ((pc) & 1) +#define __rb_is_black(pc) __rb_color(pc) +#define __rb_is_red(pc) (!__rb_color(pc)) +#define rb_color(rb) __rb_color((rb)->__rb_parent_color) +#define rb_is_red(rb) __rb_is_red((rb)->__rb_parent_color) +#define rb_is_black(rb) __rb_is_black((rb)->__rb_parent_color) + +static inline void rb_set_parent(struct rb_node *rb, struct rb_node *p) +{ + rb->__rb_parent_color = rb_color(rb) | (unsigned long)p; +} + +static inline void rb_set_parent_color(struct rb_node *rb, + struct rb_node *p, int color) +{ + rb->__rb_parent_color = (unsigned long)p | color; +} + +static inline void +__rb_change_child(struct rb_node *old, struct rb_node *new, + struct rb_node *parent, struct rb_root *root) +{ + if (parent) { + if (parent->rb_left == old) + parent->rb_left = new; + else + parent->rb_right = new; + } else + root->rb_node = new; +} + +extern void __rb_erase_color(struct rb_node *parent, struct rb_root *root, + void (*augment_rotate)(struct rb_node *old, struct rb_node *new)); + +static inline struct rb_node * +__rb_erase_augmented(struct rb_node *node, struct rb_root *root, + const struct rb_augment_callbacks *augment) +{ + struct rb_node *child = node->rb_right, *tmp = node->rb_left; + struct rb_node *parent, *rebalance; + unsigned long pc; + + if (!tmp) { + /* + * Case 1: node to erase has no more than 1 child (easy!) + * + * Note that if there is one child it must be red due to 5) + * and node must be black due to 4). We adjust colors locally + * so as to bypass __rb_erase_color() later on. 
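For reference, RB_DECLARE_CALLBACKS above derives all three callbacks from one compute function. A minimal augmented setup might look like this (hypothetical node type, mirroring what INTERVAL_TREE_DEFINE generates):

    struct aug_node {
      struct rb_node rb;
      unsigned long  val;
      unsigned long  max_below;  /* rbaugmented: max val in subtree */
    };

    static unsigned long aug_compute(struct aug_node *n) {
      unsigned long m = n->val, c;
      if (n->rb.rb_left) {
        c = rb_entry(n->rb.rb_left, struct aug_node, rb)->max_below;
        if (c > m) m = c;
      }
      if (n->rb.rb_right) {
        c = rb_entry(n->rb.rb_right, struct aug_node, rb)->max_below;
        if (c > m) m = c;
      }
      return m;
    }

    /* Generates aug_cbs_propagate/_copy/_rotate plus the aug_cbs
       struct for rb_insert_augmented()/rb_erase_augmented(). */
    RB_DECLARE_CALLBACKS(static, aug_cbs, struct aug_node, rb,
                         unsigned long, max_below, aug_compute)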
+ */ + pc = node->__rb_parent_color; + parent = __rb_parent(pc); + __rb_change_child(node, child, parent, root); + if (child) { + child->__rb_parent_color = pc; + rebalance = NULL; + } else + rebalance = __rb_is_black(pc) ? parent : NULL; + tmp = parent; + } else if (!child) { + /* Still case 1, but this time the child is node->rb_left */ + tmp->__rb_parent_color = pc = node->__rb_parent_color; + parent = __rb_parent(pc); + __rb_change_child(node, tmp, parent, root); + rebalance = NULL; + tmp = parent; + } else { + struct rb_node *successor = child, *child2; + tmp = child->rb_left; + if (!tmp) { + /* + * Case 2: node's successor is its right child + * + * (n) (s) + * / \ / \ + * (x) (s) -> (x) (c) + * \ + * (c) + */ + parent = successor; + child2 = successor->rb_right; + augment->copy(node, successor); + } else { + /* + * Case 3: node's successor is leftmost under + * node's right child subtree + * + * (n) (s) + * / \ / \ + * (x) (y) -> (x) (y) + * / / + * (p) (p) + * / / + * (s) (c) + * \ + * (c) + */ + do { + parent = successor; + successor = tmp; + tmp = tmp->rb_left; + } while (tmp); + parent->rb_left = child2 = successor->rb_right; + successor->rb_right = child; + rb_set_parent(child, successor); + augment->copy(node, successor); + augment->propagate(parent, successor); + } + + successor->rb_left = tmp = node->rb_left; + rb_set_parent(tmp, successor); + + pc = node->__rb_parent_color; + tmp = __rb_parent(pc); + __rb_change_child(node, successor, tmp, root); + if (child2) { + successor->__rb_parent_color = pc; + rb_set_parent_color(child2, parent, RB_BLACK); + rebalance = NULL; + } else { + unsigned long pc2 = successor->__rb_parent_color; + successor->__rb_parent_color = pc; + rebalance = __rb_is_black(pc2) ? parent : NULL; + } + tmp = successor; + } + + augment->propagate(tmp, NULL); + return rebalance; +} + +static inline void +rb_erase_augmented(struct rb_node *node, struct rb_root *root, + const struct rb_augment_callbacks *augment) +{ + struct rb_node *rebalance = __rb_erase_augmented(node, root, augment); + if (rebalance) + __rb_erase_color(rebalance, root, augment->rotate); +} + +#endif /* _LINUX_RBTREE_AUGMENTED_H */ diff --git a/qemuafl/qasan-qemu.h b/qemuafl/qasan-qemu.h new file mode 100644 index 0000000000000..88e0c8624e7ee --- /dev/null +++ b/qemuafl/qasan-qemu.h @@ -0,0 +1,143 @@ +/******************************************************************************* +Copyright (c) 2019-2021, Andrea Fioraldi + + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + +1. Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. +2. Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND +ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR +ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +*******************************************************************************/ + +#ifndef __QASAN_QEMU_H__ +#define __QASAN_QEMU_H__ + +#define ASAN_GIOVESE + +#ifndef _GNU_SOURCE +#define _GNU_SOURCE +#endif + +#include "qasan.h" +#include "common.h" +#include "tcg/tcg.h" + +// options +extern int qasan_max_call_stack; // QASAN_MAX_CALL_STACK +extern int qasan_symbolize; // QASAN_SYMBOLIZE + +#define SHADOW_BK_SIZE (4096*8) + +struct shadow_stack_block { + + int index; + target_ulong buf[SHADOW_BK_SIZE]; + + struct shadow_stack_block* next; + +}; + +struct shadow_stack { + + int size; + struct shadow_stack_block* first; + +}; + +extern __thread struct shadow_stack qasan_shadow_stack; + +#ifdef ASAN_GIOVESE + +#if defined(TARGET_X86_64) || defined(TARGET_I386) + +#define PC_GET(env) ((env)->eip) +#define BP_GET(env) ((env)->regs[R_EBP]) +#define SP_GET(env) ((env)->regs[R_ESP]) + +#elif defined(TARGET_ARM) && !defined(TARGET_AARCH64) + +#define PC_GET(env) ((env)->regs[15]) +#define BP_GET(env) ((env)->regs[11]) +#define SP_GET(env) ((env)->regs[13]) + +#elif defined(TARGET_AARCH64) + +#define PC_GET(env) ((env)->pc) +#define BP_GET(env) ((env)->aarch64 ? (env)->xregs[29] : (env)->regs[11]) +#define SP_GET(env) ((env)->aarch64 ? (env)->xregs[31] : (env)->regs[13]) + +/* MIPS_PATCH */ +#elif defined(TARGET_MIPS) || defined(TARGET_MIPS64) + +#define PC_GET(env) ((env)->active_tc.PC) +#define BP_GET(env) ((env)->active_tc.gpr[29]) +#define SP_GET(env) ((env)->active_tc.gpr[30]) + +#elif defined(TARGET_PPC) + +#define PC_GET(env) ((env)->nip) +/* + * PPC doesn't really have a frame pointer since stack frames are built into a + * linked list. The BP is used only for display purposes in any case, so we will + * just use the SP here. 
+ */ +#define BP_GET(env) ((env)->gpr[1]) +#define SP_GET(env) ((env)->gpr[1]) + +#else +//#error "Target not supported by asan-giovese" +#define DO_NOT_USE_QASAN 1 +#endif + +#ifndef DO_NOT_USE_QASAN +#define ASAN_NAME_STR "QEMU-AddressSanitizer" +#include "asan-giovese.h" +#endif + +#else + +void __asan_poison_memory_region(void const volatile *addr, size_t size); +void __asan_unpoison_memory_region(void const volatile *addr, size_t size); +void *__asan_region_is_poisoned(void *beg, size_t size); + +void __asan_load1(void*); +void __asan_load2(void*); +void __asan_load4(void*); +void __asan_load8(void*); +void __asan_store1(void*); +void __asan_store2(void*); +void __asan_store4(void*); +void __asan_store8(void*); +void __asan_loadN(void*, size_t); +void __asan_storeN(void*, size_t); + +#endif + +target_long qasan_actions_dispatcher(void *cpu_env, target_long action, + target_long arg1, target_long arg2, + target_long arg3); + +void qasan_gen_load1(TCGv addr, int off); +void qasan_gen_load2(TCGv addr, int off); +void qasan_gen_load4(TCGv addr, int off); +void qasan_gen_load8(TCGv addr, int off); +void qasan_gen_store1(TCGv addr, int off); +void qasan_gen_store2(TCGv addr, int off); +void qasan_gen_store4(TCGv addr, int off); +void qasan_gen_store8(TCGv addr, int off); + +#endif diff --git a/qemuafl/qasan.h b/qemuafl/qasan.h new file mode 100644 index 0000000000000..d001c827cd7fa --- /dev/null +++ b/qemuafl/qasan.h @@ -0,0 +1,264 @@ +/******************************************************************************* +Copyright (c) 2019-2021, Andrea Fioraldi + + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + +1. Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. +2. Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND +ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR +ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+*******************************************************************************/ + +#ifndef __QASAN_H__ +#define __QASAN_H__ + +#define QASAN_VERSTR "0.2" + +#define QASAN_FAKEINSTR_X86 { 0x0f, 0x3a, 0xf2 } + +#define QASAN_FAKESYS_NR 0xa2a4 + +enum { + QASAN_ACTION_CHECK_LOAD, + QASAN_ACTION_CHECK_STORE, + QASAN_ACTION_POISON, + QASAN_ACTION_USER_POISON, + QASAN_ACTION_UNPOISON, + QASAN_ACTION_IS_POISON, + QASAN_ACTION_ALLOC, + QASAN_ACTION_DEALLOC, + QASAN_ACTION_ENABLE, + QASAN_ACTION_DISABLE, + QASAN_ACTION_SWAP_STATE, +}; + +/* shadow map byte values */ +#define ASAN_VALID 0x00 +#define ASAN_PARTIAL1 0x01 +#define ASAN_PARTIAL2 0x02 +#define ASAN_PARTIAL3 0x03 +#define ASAN_PARTIAL4 0x04 +#define ASAN_PARTIAL5 0x05 +#define ASAN_PARTIAL6 0x06 +#define ASAN_PARTIAL7 0x07 +#define ASAN_ARRAY_COOKIE 0xac +#define ASAN_STACK_RZ 0xf0 +#define ASAN_STACK_LEFT_RZ 0xf1 +#define ASAN_STACK_MID_RZ 0xf2 +#define ASAN_STACK_RIGHT_RZ 0xf3 +#define ASAN_STACK_FREED 0xf5 +#define ASAN_STACK_OOSCOPE 0xf8 +#define ASAN_GLOBAL_RZ 0xf9 +#define ASAN_HEAP_RZ 0xe9 +#define ASAN_USER 0xf7 +#define ASAN_HEAP_LEFT_RZ 0xfa +#define ASAN_HEAP_RIGHT_RZ 0xfb +#define ASAN_HEAP_FREED 0xfd + +#define QASAN_ENABLED (0) +#define QASAN_DISABLED (1) + +#if defined(__x86_64__) && __x86_64__ + +// The backdoor is more performant than the fake syscall +#define QASAN_CALL0(action) \ +({ \ + uintptr_t __libqasan__ret__; \ + asm volatile ( \ + "movq %1, %%rax\n" \ + ".byte 0x0f\n" \ + ".byte 0x3a\n" \ + ".byte 0xf2\n" \ + "movq %%rax, %0\n" \ + : "=g"(__libqasan__ret__) \ + : "g"((uintptr_t)(action)) \ + : "%rax", "memory" \ + ); \ + __libqasan__ret__; \ +}) + +#define QASAN_CALL1(action, arg1) \ +({ \ + uintptr_t __libqasan__ret__; \ + asm volatile ( \ + "movq %1, %%rax\n" \ + "movq %2, %%rdi\n" \ + ".byte 0x0f\n" \ + ".byte 0x3a\n" \ + ".byte 0xf2\n" \ + "movq %%rax, %0\n" \ + : "=g"(__libqasan__ret__) \ + : "g"((uintptr_t)(action)), "g"((uintptr_t)(arg1)) \ + : "%rax", "%rdi", "memory" \ + ); \ + __libqasan__ret__; \ +}) + +#define QASAN_CALL2(action, arg1, arg2) \ +({ \ + uintptr_t __libqasan__ret__; \ + asm volatile ( \ + "movq %1, %%rax\n" \ + "movq %2, %%rdi\n" \ + "movq %3, %%rsi\n" \ + ".byte 0x0f\n" \ + ".byte 0x3a\n" \ + ".byte 0xf2\n" \ + "movq %%rax, %0\n" \ + : "=g"(__libqasan__ret__) \ + : "g"((uintptr_t)(action)), "g"((uintptr_t)(arg1)), "g"((uintptr_t)(arg2)) \ + : "%rax", "%rdi", "%rsi", "memory" \ + ); \ + __libqasan__ret__; \ +}) + +#define QASAN_CALL3(action, arg1, arg2, arg3) \ +({ \ + uintptr_t __libqasan__ret__; \ + asm volatile ( \ + "movq %1, %%rax\n" \ + "movq %2, %%rdi\n" \ + "movq %3, %%rsi\n" \ + "movq %4, %%rdx\n" \ + ".byte 0x0f\n" \ + ".byte 0x3a\n" \ + ".byte 0xf2\n" \ + "movq %%rax, %0\n" \ + : "=g"(__libqasan__ret__) \ + : "g"((uintptr_t)(action)), "g"((uintptr_t)(arg1)), "g"((uintptr_t)(arg2)), "g"((uintptr_t)(arg3)) \ + : "%rax", "%rdi", "%rsi", "%rdx", "memory" \ + ); \ + __libqasan__ret__; \ +}) + +/* + +#elif defined(__i386__) && __i386__ + +// The backdoor is more performant than the fake syscall +#define QASAN_CALL0(action) \ +({ \ + uintptr_t __libqasan__ret__; \ + asm volatile ( \ + "movl %1, %%eax\n" \ + ".byte 0x0f\n" \ + ".byte 0x3a\n" \ + ".byte 0xf2\n" \ + "movl %%eax, %0\n" \ + : "=g"(__libqasan__ret__) \ + : "g"((uintptr_t)(action)) \ + : "%eax", "memory" \ + ); \ + __libqasan__ret__; \ +}) + +#define QASAN_CALL1(action, arg1) \ +({ \ + uintptr_t __libqasan__ret__; \ + asm volatile ( \ + "movl %1, %%eax\n" \ + "movl %2, %%edi\n" \ + ".byte 0x0f\n" \ + ".byte 0x3a\n" \ + 
".byte 0xf2\n" \ + "movl %%eax, %0\n" \ + : "=g"(__libqasan__ret__) \ + : "g"((uintptr_t)(action)), "g"((uintptr_t)(arg1)) \ + : "%eax", "%edi", "memory" \ + ); \ + __libqasan__ret__; \ +}) + +#define QASAN_CALL2(action, arg1, arg2) \ +({ \ + uintptr_t __libqasan__ret__; \ + asm volatile ( \ + "movl %1, %%eax\n" \ + "movl %2, %%edi\n" \ + "movl %3, %%esi\n" \ + ".byte 0x0f\n" \ + ".byte 0x3a\n" \ + ".byte 0xf2\n" \ + "movl %%eax, %0\n" \ + : "=g"(__libqasan__ret__) \ + : "g"((uintptr_t)(action)), "g"((uintptr_t)(arg1)), "g"((uintptr_t)(arg2)) \ + : "%eax", "%edi", "%esi", "memory" \ + ); \ + __libqasan__ret__; \ +}) + +#define QASAN_CALL3(action, arg1, arg2, arg3) \ +({ \ + uintptr_t __libqasan__ret__; \ + asm volatile ( \ + "movl %1, %%eax\n" \ + "movl %2, %%edi\n" \ + "movl %3, %%esi\n" \ + "movl %4, %%edx\n" \ + ".byte 0x0f\n" \ + ".byte 0x3a\n" \ + ".byte 0xf2\n" \ + "movl %%eax, %0\n" \ + : "=g"(__libqasan__ret__) \ + : "g"((uintptr_t)(action)), "g"((uintptr_t)(arg1)), "g"((uintptr_t)(arg2)), "g"((uintptr_t)(arg3)) \ + : "%eax", "%edi", "%esi", "%edx", "memory" \ + ); \ + __libqasan__ret__; \ +}) + +*/ + +#else + +// fake syscall, works only for QASan user-mode!!! + +#include + +#define QASAN_CALL0(action) \ + syscall(QASAN_FAKESYS_NR, action, NULL, NULL, NULL) +#define QASAN_CALL1(action, arg1) \ + syscall(QASAN_FAKESYS_NR, action, arg1, NULL, NULL) +#define QASAN_CALL2(action, arg1, arg2) \ + syscall(QASAN_FAKESYS_NR, action, arg1, arg2, NULL) +#define QASAN_CALL3(action, arg1, arg2, arg3) \ + syscall(QASAN_FAKESYS_NR, action, arg1, arg2, arg3) + +#endif + +#define QASAN_LOAD(ptr, len) \ + QASAN_CALL2(QASAN_ACTION_CHECK_LOAD, ptr, len) +#define QASAN_STORE(ptr, len) \ + QASAN_CALL2(QASAN_ACTION_CHECK_STORE, ptr, len) + +#define QASAN_POISON(ptr, len, poison_byte) \ + QASAN_CALL3(QASAN_ACTION_POISON, ptr, len, poison_byte) +#define QASAN_USER_POISON(ptr, len) \ + QASAN_CALL3(QASAN_ACTION_POISON, ptr, len, ASAN_USER) +#define QASAN_UNPOISON(ptr, len) \ + QASAN_CALL2(QASAN_ACTION_UNPOISON, ptr, len) +#define QASAN_IS_POISON(ptr, len) \ + QASAN_CALL2(QASAN_ACTION_IS_POISON, ptr, len) + +#define QASAN_ALLOC(start, end) \ + QASAN_CALL2(QASAN_ACTION_ALLOC, start, end) +#define QASAN_DEALLOC(ptr) \ + QASAN_CALL1(QASAN_ACTION_DEALLOC, ptr) + +#define QASAN_SWAP(state) \ + QASAN_CALL1(QASAN_ACTION_SWAP_STATE, state) + +#endif diff --git a/roms/SLOF b/roms/SLOF index e18ddad8516ff..b7ea243afd391 160000 --- a/roms/SLOF +++ b/roms/SLOF @@ -1 +1 @@ -Subproject commit e18ddad8516ff2cfe36ec130200318f7251aa78c +Subproject commit b7ea243afd3917b9dfc58fda046138fba0aa9b1e diff --git a/target/arm/jmp_env b/target/arm/jmp_env new file mode 100644 index 0000000000000..3fd46de94a4ed --- /dev/null +++ b/target/arm/jmp_env @@ -0,0 +1,1330 @@ +arch_dump.c: note->sve.max_size = cpu_to_dump32(s, sve_size_vq(cpu->sve_max_vq)); +arch_dump.c: note->sve.max_vl = cpu_to_dump16(s, cpu->sve_max_vq * 16); +arch_dump.c: CPUARMState *env = &cpu->env; +arch_dump.c: CPUARMState *env = &cpu->env; +arch_dump.c: env = &cpu->env; +arch_dump.c: note_size += AARCH64_SVE_NOTE_SIZE(&cpu->env); +gdbstub64.c: CPUARMState *env = &cpu->env; +gdbstub64.c: CPUARMState *env = &cpu->env; +translate.h: /* A copy of cpu->dcz_blocksize. 
*/ +kvm_arm.h: cpu->kvm_target = QEMU_KVM_ARM_TARGET_NONE; +kvm_arm.h: cpu->host_cpu_probe_failed = true; +arm-semi.c: CPUARMState *env = &cpu->env; +arm-semi.c: CPUARMState *env = &cpu->env; +arm-semi.c: CPUARMState *env = &cpu->env; +arm-semi.c: CPUARMState *env = &cpu->env; +arm-semi.c: CPUARMState *env = &cpu->env; +arm-semi.c: CPUARMState *env = &cpu->env; +arm-semi.c: CPUARMState *env = &cpu->env; +arm-semi.c: CPUARMState *env = &cpu->env; +arm-semi.c: CPUARMState *env = &cpu->env; +arm-semi.c: CPUARMState *env = &cpu->env; +arm-semi.c: CPUARMState *env = &cpu->env; +arm-semi.c: CPUARMState *env = &cpu->env; +machine.c: return (arm_feature(&cpu->env, ARM_FEATURE_AARCH64) +machine.c: CPUARMState *env = &cpu->env; +machine.c: CPUARMState *env = &cpu->env; +machine.c: CPUARMState *env = &cpu->env; +machine.c: CPUARMState *env = &cpu->env; +machine.c: CPUARMState *env = &cpu->env; +machine.c: return cpu->env.v7m.csselr[M_REG_NS] <= R_V7M_CSSELR_INDEX_MASK +machine.c: && cpu->env.v7m.csselr[M_REG_S] <= R_V7M_CSSELR_INDEX_MASK; +machine.c: CPUARMState *env = &cpu->env; +machine.c: CPUARMState *env = &cpu->env; +machine.c: CPUARMState *env = &cpu->env; +machine.c: return cpu->env.pmsav7.rnr[M_REG_NS] < cpu->pmsav7_dregion; +machine.c: CPUARMState *env = &cpu->env; +machine.c: CPUARMState *env = &cpu->env; +machine.c: return cpu->env.pmsav7.rnr[M_REG_S] < cpu->pmsav7_dregion; +machine.c: return cpu->env.sau.rnr < cpu->sau_sregion; +machine.c: CPUARMState *env = &cpu->env; +machine.c: CPUARMState *env = &cpu->env; +machine.c: CPUARMState *env = &cpu->env; +machine.c: cpu->power_state = powered_off ? PSCI_OFF : PSCI_ON; +machine.c: if (cpu->power_state == PSCI_ON || +machine.c: cpu->power_state == PSCI_OFF) { +machine.c: bool powered_off = (cpu->power_state == PSCI_OFF) ? 
diff --git a/target/arm/translate-a64.c b/target/arm/translate-a64.c
index b23a8975d54c4..2a2fe3942cdb8 100644
--- a/target/arm/translate-a64.c
+++ b/target/arm/translate-a64.c
@@ -39,6 +39,35 @@
 #include "translate-a64.h"
 #include "qemu/atomic128.h"
 
+#include "qemuafl/cpu-translate.h"
+#include "qemuafl/qasan-qemu.h"
+
+// SP = 31, LINK = 30
+
+#define AFL_QEMU_TARGET_ARM64_SNIPPET \
+  if (is_persistent) { \
+ \
+    if (s->pc_curr == afl_persistent_addr) { \
+ \
+      gen_helper_afl_persistent_routine(cpu_env); \
+ \
+      if (afl_persistent_ret_addr == 0 && !persistent_exits) { \
+ \
+        tcg_gen_movi_tl(cpu_X[30], afl_persistent_addr); \
+ \
+      } \
+ \
+      if (!persistent_save_gpr) afl_gen_tcg_plain_call(&afl_persistent_loop); \
+ \
+    } else if (afl_persistent_ret_addr && \
+               s->pc_curr == afl_persistent_ret_addr) { \
+ \
+      gen_goto_tb(s, 0, afl_persistent_addr); \
+ \
+    } \
+ \
+  }
+
 static TCGv_i64 cpu_X[32];
 static TCGv_i64 cpu_pc;
 
@@ -1314,6 +1343,8 @@ static void disas_uncond_b_imm(DisasContext *s, uint32_t insn)
 
     if (insn & (1U << 31)) {
         /* BL Branch with link */
+        if (use_qasan && qasan_max_call_stack)
+          gen_helper_qasan_shadow_stack_push(tcg_const_tl(s->pc_curr + 4));
         tcg_gen_movi_i64(cpu_reg(s, 30), s->base.pc_next);
     }
 
@@ -2218,6 +2249,12 @@ static void disas_uncond_b_reg(DisasContext *s, uint32_t insn)
     default:
         goto do_unallocated;
     }
+    if (use_qasan && qasan_max_call_stack) {
+      if (opc == 2 && rn == 30)
+        gen_helper_qasan_shadow_stack_pop(cpu_reg(s, 30));
+      else if (opc == 1)
+        gen_helper_qasan_shadow_stack_push(tcg_const_tl(s->pc_curr + 4));
+    }
 
     gen_a64_set_pc(s, dst);
     /* BLR also needs to load return address */
     if (opc == 1) {
@@ -4191,6 +4228,12 @@ static void disas_add_sub_imm(DisasContext *s, uint32_t insn)
     if (shift) {
         imm <<= 12;
     }
+
+    if (rd == 31 && sub_op) { // cmp xX, imm
+      TCGv_i64 tcg_imm = tcg_const_i64(imm);
+      afl_gen_compcov(s->pc_curr, tcg_rn, tcg_imm, is_64bit ? MO_64 : MO_32, 1);
+      tcg_temp_free_i64(tcg_imm);
+    }
 
     tcg_result = tcg_temp_new_i64();
     if (!setflags) {
@@ -4853,6 +4896,9 @@ static void disas_add_sub_ext_reg(DisasContext *s, uint32_t insn)
 
     tcg_rm = read_cpu_reg(s, rm, sf);
     ext_and_shift_reg(tcg_rm, tcg_rm, option, imm3);
+
+    if (rd == 31 && sub_op) // cmp xX, xY
+      afl_gen_compcov(s->pc_curr, tcg_rn, tcg_rm, sf ? MO_64 : MO_32, 0);
 
     tcg_result = tcg_temp_new_i64();
 
@@ -4917,6 +4963,9 @@ static void disas_add_sub_reg(DisasContext *s, uint32_t insn)
 
     tcg_rm = read_cpu_reg(s, rm, sf);
     shift_reg_imm(tcg_rm, tcg_rm, sf, shift_type, imm6);
+
+    if (rd == 31 && sub_op) // cmp xX, xY
+      afl_gen_compcov(s->pc_curr, tcg_rn, tcg_rm, sf ? MO_64 : MO_32, 0);
 
     tcg_result = tcg_temp_new_i64();
 
@@ -5200,6 +5249,8 @@ static void disas_cc(DisasContext *s, uint32_t insn)
     }
     tcg_rn = cpu_reg(s, rn);
+    afl_gen_compcov(s->pc_curr, tcg_rn, tcg_y, sf ? MO_64 : MO_32, is_imm);
+
     /* Set the flags for the new comparison.  */
     tcg_tmp = tcg_temp_new_i64();
     if (op) {
@@ -14605,7 +14656,7 @@ static bool btype_destination_ok(uint32_t insn, bool bt, int btype)
 static void disas_a64_insn(CPUARMState *env, DisasContext *s)
 {
     uint32_t insn;
-
+    s->pc_curr = s->base.pc_next;
     insn = arm_ldl_code(env, s->base.pc_next, s->sctlr_b);
     s->insn = insn;
@@ -14614,6 +14665,8 @@ static void disas_a64_insn(CPUARMState *env, DisasContext *s)
     s->fp_access_checked = false;
     s->sve_access_checked = false;
 
+    AFL_QEMU_TARGET_ARM64_SNIPPET
+
     if (dc_isar_feature(aa64_bti, s)) {
         if (s->base.num_insns == 1) {
             /*
diff --git a/target/arm/translate.c b/target/arm/translate.c
index 1653cca1aaafb..35f1747c2e6f0 100644
--- a/target/arm/translate.c
+++ b/target/arm/translate.c
@@ -37,6 +37,195 @@
 #include "trace-tcg.h"
 #include "exec/log.h"
 
+#include "qemuafl/cpu-translate.h"
+
+// TODO QASAN shadow stack
+//#include "qemuafl/qasan-qemu.h"
+
+// SP = 13, LINK = 14
+
+#define AFL_QEMU_TARGET_ARM_SNIPPET \
+  if (is_persistent) { \
+ \
+    if (dc->pc_curr == afl_persistent_addr) { \
+ \
+      gen_helper_afl_persistent_routine(cpu_env); \
+ \
+      if (afl_persistent_ret_addr == 0 && !persistent_exits) { \
+ \
+        tcg_gen_movi_i32(cpu_R[14], afl_persistent_addr); \
+ \
+      } \
+ \
+      if (!persistent_save_gpr) afl_gen_tcg_plain_call(&afl_persistent_loop); \
+ \
+    } else if (afl_persistent_ret_addr && \
+               dc->pc_curr == afl_persistent_ret_addr) { \
+ \
+      TCGv_i32 tmp = tcg_const_i32(afl_persistent_addr); \
+      gen_bx(dc, tmp); \
+      tcg_temp_free_i32(tmp); \
+ \
+    } \
+ \
+  }
+
+#define AFL_QEMU_TARGET_THUMB_SNIPPET \
+  if (is_persistent) { \
+ \
+    if (dc->pc_curr == (afl_persistent_addr & ~1)) { \
+ \
+      gen_helper_afl_persistent_routine(cpu_env); \
+ \
+      if (afl_persistent_ret_addr == 0 && !persistent_exits) { \
+ \
+        tcg_gen_movi_i32(cpu_R[14], afl_persistent_addr | 1); \
+ \
+      } \
+ \
+      if (!persistent_save_gpr) afl_gen_tcg_plain_call(&afl_persistent_loop); \
+ \
+    } else if (afl_persistent_ret_addr && \
+               dc->pc_curr == afl_persistent_ret_addr) { \
+ \
+      TCGv_i32 tmp = tcg_const_i32(afl_persistent_addr | 1); \
+      gen_bx(dc, tmp); \
+      tcg_temp_free_i32(tmp); \
+ \
+    } \
+ \
+  }
+
+void afl_save_regs(struct api_regs* r, CPUArchState* env) {
+
+  int i;
+#ifdef TARGET_AARCH64
+  r->x0 = env->xregs[0];
+  r->x1 = env->xregs[1];
+  r->x2 = env->xregs[2];
+  r->x3 = env->xregs[3];
+  r->x4 = env->xregs[4];
+  r->x5 = env->xregs[5];
+  r->x6 = env->xregs[6];
+  r->x7 = env->xregs[7];
+  r->x8 = env->xregs[8];
+  r->x9 = env->xregs[9];
+  r->x10 = env->xregs[10];
+  r->x11 = env->xregs[11];
+  r->x12 = env->xregs[12];
+  r->x13 = env->xregs[13];
+  r->x14 = env->xregs[14];
+  r->x15 = env->xregs[15];
+  r->x16 = env->xregs[16];
+  r->x17 = env->xregs[17];
+  r->x18 = env->xregs[18];
+  r->x19 = env->xregs[19];
+  r->x20 = env->xregs[20];
+  r->x21 = env->xregs[21];
+  r->x22 = env->xregs[22];
+  r->x23 = env->xregs[23];
+  r->x24 = env->xregs[24];
+  r->x25 = env->xregs[25];
+  r->x26 = env->xregs[26];
+  r->x27 = env->xregs[27];
+  r->x28 = env->xregs[28];
+  r->x29 = env->xregs[29];
+  r->x30 = env->xregs[30];
+  r->x31 = env->xregs[31];
+  for (i = 0; i < 17; ++i)
+    memcpy(r->vfp_pregs[i], &env->vfp.pregs[i], sizeof(r->vfp_pregs[i]));
+#else
+  r->r0 = env->regs[0];
+  r->r1 = env->regs[1];
+  r->r2 = env->regs[2];
+  r->r3 = env->regs[3];
+  r->r4 = env->regs[4];
+  r->r5 = env->regs[5];
+  r->r6 = env->regs[6];
+  r->r7 = env->regs[7];
+  r->r8 = env->regs[8];
+  r->r9 = env->regs[9];
+  r->r10 = env->regs[10];
+  r->r11 = env->regs[11];
+  r->r12 = env->regs[12];
+  r->r13 = env->regs[13];
+  r->r14 = env->regs[14];
+  r->r15 = env->regs[15];
+  // r->r15 = env->pc;
+#endif
+  r->cpsr = cpsr_read(env);
+  for (i = 0; i < 32; ++i)
+    memcpy(r->vfp_zregs[i], &env->vfp.zregs[i], sizeof(r->vfp_zregs[i]));
+  for (i = 0; i < 16; ++i)
+    r->vfp_xregs[i] = env->vfp.xregs[i];
+
+}
+
+void afl_restore_regs(struct api_regs* r, CPUArchState* env) {
+
+  int i;
+#ifdef TARGET_AARCH64
+  env->xregs[0] = r->x0;
+  env->xregs[1] = r->x1;
+  env->xregs[2] = r->x2;
+  env->xregs[3] = r->x3;
+  env->xregs[4] = r->x4;
+  env->xregs[5] = r->x5;
+  env->xregs[6] = r->x6;
+  env->xregs[7] = r->x7;
+  env->xregs[8] = r->x8;
+  env->xregs[9] = r->x9;
+  env->xregs[10] = r->x10;
+  env->xregs[11] = r->x11;
+  env->xregs[12] = r->x12;
+  env->xregs[13] = r->x13;
+  env->xregs[14] = r->x14;
+  env->xregs[15] = r->x15;
+  env->xregs[16] = r->x16;
+  env->xregs[17] = r->x17;
+  env->xregs[18] = r->x18;
+  env->xregs[19] = r->x19;
+  env->xregs[20] = r->x20;
+  env->xregs[21] = r->x21;
+  env->xregs[22] = r->x22;
+  env->xregs[23] = r->x23;
+  env->xregs[24] = r->x24;
+  env->xregs[25] = r->x25;
+  env->xregs[26] = r->x26;
+  env->xregs[27] = r->x27;
+  env->xregs[28] = r->x28;
+  env->xregs[29] = r->x29;
+  env->xregs[30] = r->x30;
+  env->xregs[31] = r->x31;
+  for (i = 0; i < 17; ++i)
+    memcpy(&env->vfp.pregs[i], r->vfp_pregs[i], sizeof(r->vfp_pregs[i]));
+#else
+  env->regs[0] = r->r0;
+  env->regs[1] = r->r1;
+  env->regs[2] = r->r2;
+  env->regs[3] = r->r3;
+  env->regs[4] = r->r4;
+  env->regs[5] = r->r5;
+  env->regs[6] = r->r6;
+  env->regs[7] = r->r7;
+  env->regs[8] = r->r8;
+  env->regs[9] = r->r9;
+  env->regs[10] = r->r10;
+  env->regs[11] = r->r11;
+  env->regs[12] = r->r12;
+  env->regs[13] = r->r13;
+  env->regs[14] = r->r14;
+  env->regs[15] = r->r15;
+#endif
+  env->pc = r->pc;
+  cpsr_write(env, r->cpsr, 0xffffffff, CPSRWriteRaw);
+  for (i = 0; i < 32; ++i)
+    memcpy(&env->vfp.zregs[i], r->vfp_zregs[i], sizeof(r->vfp_zregs[i]));
+  for (i = 0; i < 16; ++i)
+    env->vfp.xregs[i] = r->vfp_xregs[i];
+
+}
+
 #define ENABLE_ARCH_4T    arm_dc_feature(s, ARM_FEATURE_V4T)
 #define ENABLE_ARCH_5     arm_dc_feature(s, ARM_FEATURE_V5)
@@ -5446,6 +5635,20 @@ static bool op_s_rrr_shi(DisasContext *s, arg_s_rrr_shi *a,
     gen_arm_shift_im(tmp2, a->shty, a->shim, logic_cc);
     tmp1 = load_reg(s, a->rn);
 
+    if (gen == gen_sub_CC || /*gen == gen_add_CC ||*/ gen == gen_rsb_CC) {
+#ifdef TARGET_AARCH64
+      TCGv tmp1_64 = tcg_temp_new();
+      TCGv tmp2_64 = tcg_temp_new();
+      tcg_gen_extu_i32_i64(tmp1_64, tmp1);
+      tcg_gen_extu_i32_i64(tmp2_64, tmp2);
+      afl_gen_compcov(s->pc_curr, tmp1_64, tmp2_64, MO_32, 0);
+      tcg_temp_free(tmp1_64);
+      tcg_temp_free(tmp2_64);
+#else
+      afl_gen_compcov(s->pc_curr, tmp1, tmp2, MO_32, 0);
+#endif
+    }
+
     gen(tmp1, tmp1, tmp2);
     tcg_temp_free_i32(tmp2);
 
@@ -5488,6 +5691,20 @@ static bool op_s_rrr_shr(DisasContext *s, arg_s_rrr_shr *a,
     gen_arm_shift_reg(tmp2, a->shty, tmp1, logic_cc);
     tmp1 = load_reg(s, a->rn);
 
+    if (gen == gen_sub_CC || /*gen == gen_add_CC ||*/ gen == gen_rsb_CC) {
+#ifdef TARGET_AARCH64
+      TCGv tmp1_64 = tcg_temp_new();
+      TCGv tmp2_64 = tcg_temp_new();
+      tcg_gen_extu_i32_i64(tmp1_64, tmp1);
+      tcg_gen_extu_i32_i64(tmp2_64, tmp2);
+      afl_gen_compcov(s->pc_curr, tmp1_64, tmp2_64, MO_32, 0);
+      tcg_temp_free(tmp1_64);
+      tcg_temp_free(tmp2_64);
+#else
+      afl_gen_compcov(s->pc_curr, tmp1, tmp2, MO_32, 0);
+#endif
+ } + gen(tmp1, tmp1, tmp2); tcg_temp_free_i32(tmp2); @@ -5538,6 +5755,20 @@ static bool op_s_rri_rot(DisasContext *s, arg_s_rri_rot *a, tmp2 = tcg_const_i32(imm); tmp1 = load_reg(s, a->rn); + if (gen == gen_sub_CC || /*gen == gen_add_CC ||*/ gen == gen_rsb_CC) { +#ifdef TARGET_AARCH64 + TCGv tmp1_64 = tcg_temp_new(); + TCGv tmp2_64 = tcg_temp_new(); + tcg_gen_extu_i32_i64(tmp1_64, tmp1); + tcg_gen_extu_i32_i64(tmp2_64, tmp2); + afl_gen_compcov(s->pc_curr, tmp1_64, tmp2_64, MO_32, 0); + tcg_temp_free(tmp1_64); + tcg_temp_free(tmp2_64); +#else + afl_gen_compcov(s->pc_curr, tmp1, tmp2, MO_32, 0); +#endif + } + gen(tmp1, tmp1, tmp2); tcg_temp_free_i32(tmp2); @@ -8614,7 +8845,7 @@ static void disas_arm_insn(DisasContext *s, unsigned int insn) default_exception_el(s)); return; } - + if (cond == 0xf) { /* In ARMv3 and v4 the NV condition is UNPREDICTABLE; we * choose to UNDEF. In ARMv5 and above the space is used @@ -9064,6 +9295,9 @@ static void arm_tr_translate_insn(DisasContextBase *dcbase, CPUState *cpu) } dc->pc_curr = dc->base.pc_next; + + AFL_QEMU_TARGET_ARM_SNIPPET + insn = arm_ldl_code(env, dc->base.pc_next, dc->sctlr_b); dc->insn = insn; dc->base.pc_next += 4; @@ -9133,6 +9367,9 @@ static void thumb_tr_translate_insn(DisasContextBase *dcbase, CPUState *cpu) } dc->pc_curr = dc->base.pc_next; + + AFL_QEMU_TARGET_THUMB_SNIPPET + insn = arm_lduw_code(env, dc->base.pc_next, dc->sctlr_b); is_16bit = thumb_insn_is_16bit(dc, dc->base.pc_next, insn); dc->base.pc_next += 2; diff --git a/target/hexagon/meson.build b/target/hexagon/meson.build index 15318a6fa7b93..bb0b4fb621df5 100644 --- a/target/hexagon/meson.build +++ b/target/hexagon/meson.build @@ -33,8 +33,7 @@ gen_semantics = executable( semantics_generated = custom_target( 'semantics_generated.pyinc', output: 'semantics_generated.pyinc', - input: gen_semantics, - command: ['@INPUT@', '@OUTPUT@'], + command: [gen_semantics, '@OUTPUT@'], ) hexagon_ss.add(semantics_generated) @@ -54,90 +53,81 @@ hexagon_ss.add(semantics_generated) shortcode_generated = custom_target( 'shortcode_generated.h.inc', output: 'shortcode_generated.h.inc', - input: 'gen_shortcode.py', depends: [semantics_generated], depend_files: [hex_common_py, attribs_def], - command: [python, '@INPUT@', semantics_generated, attribs_def, '@OUTPUT@'], + command: [python, files('gen_shortcode.py'), semantics_generated, attribs_def, '@OUTPUT@'], ) hexagon_ss.add(shortcode_generated) helper_protos_generated = custom_target( 'helper_protos_generated.h.inc', output: 'helper_protos_generated.h.inc', - input: 'gen_helper_protos.py', depends: [semantics_generated], depend_files: [hex_common_py, attribs_def, gen_tcg_h], - command: [python, '@INPUT@', semantics_generated, attribs_def, gen_tcg_h, '@OUTPUT@'], + command: [python, files('gen_helper_protos.py'), semantics_generated, attribs_def, gen_tcg_h, '@OUTPUT@'], ) hexagon_ss.add(helper_protos_generated) tcg_funcs_generated = custom_target( 'tcg_funcs_generated.c.inc', output: 'tcg_funcs_generated.c.inc', - input: 'gen_tcg_funcs.py', depends: [semantics_generated], depend_files: [hex_common_py, attribs_def, gen_tcg_h], - command: [python, '@INPUT@', semantics_generated, attribs_def, gen_tcg_h, '@OUTPUT@'], + command: [python, files('gen_tcg_funcs.py'), semantics_generated, attribs_def, gen_tcg_h, '@OUTPUT@'], ) hexagon_ss.add(tcg_funcs_generated) tcg_func_table_generated = custom_target( 'tcg_func_table_generated.c.inc', output: 'tcg_func_table_generated.c.inc', - input: 'gen_tcg_func_table.py', depends: [semantics_generated], depend_files: 
[hex_common_py, attribs_def], - command: [python, '@INPUT@', semantics_generated, attribs_def, '@OUTPUT@'], + command: [python, files('gen_tcg_func_table.py'), semantics_generated, attribs_def, '@OUTPUT@'], ) hexagon_ss.add(tcg_func_table_generated) helper_funcs_generated = custom_target( 'helper_funcs_generated.c.inc', output: 'helper_funcs_generated.c.inc', - input: 'gen_helper_funcs.py', depends: [semantics_generated], depend_files: [hex_common_py, attribs_def, gen_tcg_h], - command: [python, '@INPUT@', semantics_generated, attribs_def, gen_tcg_h, '@OUTPUT@'], + command: [python, files('gen_helper_funcs.py'), semantics_generated, attribs_def, gen_tcg_h, '@OUTPUT@'], ) hexagon_ss.add(helper_funcs_generated) printinsn_generated = custom_target( 'printinsn_generated.h.inc', output: 'printinsn_generated.h.inc', - input: 'gen_printinsn.py', depends: [semantics_generated], depend_files: [hex_common_py, attribs_def], - command: [python, '@INPUT@', semantics_generated, attribs_def, '@OUTPUT@'], + command: [python, files('gen_printinsn.py'), semantics_generated, attribs_def, '@OUTPUT@'], ) hexagon_ss.add(printinsn_generated) op_regs_generated = custom_target( 'op_regs_generated.h.inc', output: 'op_regs_generated.h.inc', - input: 'gen_op_regs.py', depends: [semantics_generated], depend_files: [hex_common_py, attribs_def], - command: [python, '@INPUT@', semantics_generated, attribs_def, '@OUTPUT@'], + command: [python, files('gen_op_regs.py'), semantics_generated, attribs_def, '@OUTPUT@'], ) hexagon_ss.add(op_regs_generated) op_attribs_generated = custom_target( 'op_attribs_generated.h.inc', output: 'op_attribs_generated.h.inc', - input: 'gen_op_attribs.py', depends: [semantics_generated], depend_files: [hex_common_py, attribs_def], - command: [python, '@INPUT@', semantics_generated, attribs_def, '@OUTPUT@'], + command: [python, files('gen_op_attribs.py'), semantics_generated, attribs_def, '@OUTPUT@'], ) hexagon_ss.add(op_attribs_generated) opcodes_def_generated = custom_target( 'opcodes_def_generated.h.inc', output: 'opcodes_def_generated.h.inc', - input: 'gen_opcodes_def.py', depends: [semantics_generated], depend_files: [hex_common_py, attribs_def], - command: [python, '@INPUT@', semantics_generated, attribs_def, '@OUTPUT@'], + command: [python, files('gen_opcodes_def.py'), semantics_generated, attribs_def, '@OUTPUT@'], ) hexagon_ss.add(opcodes_def_generated) @@ -154,8 +144,7 @@ gen_dectree_import = executable( iset_py = custom_target( 'iset.py', output: 'iset.py', - input: gen_dectree_import, - command: ['@INPUT@', '@OUTPUT@'], + command: [gen_dectree_import, '@OUTPUT@'], ) hexagon_ss.add(iset_py) @@ -166,9 +155,8 @@ hexagon_ss.add(iset_py) dectree_generated = custom_target( 'dectree_generated.h.inc', output: 'dectree_generated.h.inc', - input: 'dectree.py', depends: [iset_py], - command: ['PYTHONPATH=' + meson.current_build_dir(), '@INPUT@', '@OUTPUT@'], + command: ['env', 'PYTHONPATH=' + meson.current_build_dir(), files('dectree.py'), '@OUTPUT@'], ) hexagon_ss.add(dectree_generated) diff --git a/target/i386/tcg/translate.c b/target/i386/tcg/translate.c index af1faf9342ba1..271cbf6455767 100644 --- a/target/i386/tcg/translate.c +++ b/target/i386/tcg/translate.c @@ -33,6 +33,121 @@ #include "trace-tcg.h" #include "exec/log.h" +#include "qemuafl/qasan-qemu.h" +#include "qemuafl/cpu-translate.h" +#include "qemuafl/api.h" + +#define AFL_QEMU_TARGET_I386_SNIPPET \ + if (is_persistent) { \ + \ + if (s->pc == afl_persistent_addr) { \ + \ + restore_sp_for_persistent(cpu_regs[R_ESP]); \ + \ + 
gen_helper_afl_persistent_routine(cpu_env); \ + \ + if (!afl_persistent_ret_addr && !persistent_exits) { \ + \ + TCGv paddr = tcg_const_tl(afl_persistent_addr); \ + tcg_gen_qemu_st_tl(paddr, cpu_regs[R_ESP], persisent_retaddr_offset, \ + _DEFAULT_MO); \ + tcg_temp_free(paddr); \ + \ + } \ + \ + } else if (afl_persistent_ret_addr && s->pc == afl_persistent_ret_addr) { \ + \ + gen_jmp_im(s, afl_persistent_addr); \ + gen_eob(s); \ + \ + } \ + \ + } + +void afl_save_regs(struct api_regs* r, CPUArchState* env) { + +#ifdef TARGET_X86_64 + r->rip = env->eip; + r->rax = env->regs[R_EAX]; + r->rbx = env->regs[R_EBX]; + r->rcx = env->regs[R_ECX]; + r->rdx = env->regs[R_EDX]; + r->rdi = env->regs[R_EDI]; + r->rsi = env->regs[R_ESI]; + r->rbp = env->regs[R_EBP]; + r->rsp = env->regs[R_ESP]; + r->r8 = env->regs[8]; + r->r9 = env->regs[9]; + r->r10 = env->regs[10]; + r->r11 = env->regs[11]; + r->r12 = env->regs[12]; + r->r13 = env->regs[13]; + r->r14 = env->regs[14]; + r->r15 = env->regs[15]; + r->rflags = env->eflags; + int i; + for (i = 0; i < CPU_NB_REGS; ++i) + memcpy(r->zmm_regs[i], &env->xmm_regs[i], sizeof(r->zmm_regs[i])); +#else + r->eip = env->eip; + r->eax = env->regs[R_EAX]; + r->ebx = env->regs[R_EBX]; + r->ecx = env->regs[R_ECX]; + r->edx = env->regs[R_EDX]; + r->edi = env->regs[R_EDI]; + r->esi = env->regs[R_ESI]; + r->ebp = env->regs[R_EBP]; + r->esp = env->regs[R_ESP]; + r->eflags = env->eflags; + int i; + for (i = 0; i < CPU_NB_REGS; ++i) + memcpy(r->xmm_regs[i], &env->xmm_regs[i], sizeof(r->xmm_regs[i])); +#endif + +} + +void afl_restore_regs(struct api_regs* r, CPUArchState* env) { + +#ifdef TARGET_X86_64 + env->eip = r->rip; + env->regs[R_EAX] = r->rax; + env->regs[R_EBX] = r->rbx; + env->regs[R_ECX] = r->rcx; + env->regs[R_EDX] = r->rdx; + env->regs[R_EDI] = r->rdi; + env->regs[R_ESI] = r->rsi; + env->regs[R_EBP] = r->rbp; + env->regs[R_ESP] = r->rsp; + env->regs[8] = r->r8; + env->regs[9] = r->r9; + env->regs[10] = r->r10; + env->regs[11] = r->r11; + env->regs[12] = r->r12; + env->regs[13] = r->r13; + env->regs[14] = r->r14; + env->regs[15] = r->r15; + env->eflags = r->rflags; + int i; + for (i = 0; i < CPU_NB_REGS; ++i) + memcpy(&env->xmm_regs[i], r->zmm_regs[i], sizeof(r->zmm_regs[i])); +#else + env->eip = r->eip; + env->regs[R_EAX] = r->eax; + env->regs[R_EBX] = r->ebx; + env->regs[R_ECX] = r->ecx; + env->regs[R_EDX] = r->edx; + env->regs[R_EDI] = r->edi; + env->regs[R_ESI] = r->esi; + env->regs[R_EBP] = r->ebp; + env->regs[R_ESP] = r->esp; + env->eflags = r->eflags; + int i; + for (i = 0; i < CPU_NB_REGS; ++i) + memcpy(&env->xmm_regs[i], r->xmm_regs[i], sizeof(r->xmm_regs[i])); +#endif + +} + #define PREFIX_REPZ 0x01 #define PREFIX_REPNZ 0x02 #define PREFIX_LOCK 0x04 @@ -1331,9 +1446,11 @@ static void gen_op(DisasContext *s1, int op, MemOp ot, int d) tcg_gen_neg_tl(s1->T0, s1->T1); tcg_gen_atomic_fetch_add_tl(s1->cc_srcT, s1->A0, s1->T0, s1->mem_index, ot | MO_LE); + afl_gen_compcov(s1->pc, s1->cc_srcT, s1->T1, ot, d == OR_EAX); tcg_gen_sub_tl(s1->T0, s1->cc_srcT, s1->T1); } else { tcg_gen_mov_tl(s1->cc_srcT, s1->T0); + afl_gen_compcov(s1->pc, s1->T0, s1->T1, ot, d == OR_EAX); tcg_gen_sub_tl(s1->T0, s1->T0, s1->T1); gen_op_st_rm_T0_A0(s1, ot, d); } @@ -1377,6 +1494,7 @@ static void gen_op(DisasContext *s1, int op, MemOp ot, int d) case OP_CMPL: tcg_gen_mov_tl(cpu_cc_src, s1->T1); tcg_gen_mov_tl(s1->cc_srcT, s1->T0); + afl_gen_compcov(s1->pc, s1->T0, s1->T1, ot, d == OR_EAX); tcg_gen_sub_tl(cpu_cc_dst, s1->T0, s1->T1); set_cc_op(s1, CC_OP_SUBB + ot); break; @@ -4127,6 +4245,14 
@@ static void gen_sse(CPUX86State *env, DisasContext *s, int b, goto unknown_op; } + if (b == 0xf2 && use_qasan) { + /* QASAN backdoor */ + gen_helper_qasan_fake_instr(cpu_regs[R_EAX], cpu_env, + cpu_regs[R_EAX], cpu_regs[R_EDI], + cpu_regs[R_ESI], cpu_regs[R_EDX]); + break; + } + sse_fn_eppi = sse_op_table7[b].op[b1]; if (!sse_fn_eppi) { goto unknown_op; @@ -4506,6 +4632,8 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu) rex_w = -1; rex_r = 0; + AFL_QEMU_TARGET_I386_SNIPPET + next_byte: b = x86_ldub_code(env, s); /* Collect prefixes. */ @@ -5054,6 +5182,10 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu) tcg_gen_ext16u_tl(s->T0, s->T0); } next_eip = s->pc - s->cs_base; + if (__afl_cmp_map && afl_must_instrument(next_eip)) + gen_helper_afl_cmplog_rtn(cpu_env); + if (use_qasan && qasan_max_call_stack) + gen_helper_qasan_shadow_stack_push(tcg_const_tl(s->pc)); tcg_gen_movi_tl(s->T1, next_eip); gen_push_v(s, s->T1); gen_op_jmp_v(s->T0); @@ -5065,6 +5197,8 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu) gen_add_A0_im(s, 1 << ot); gen_op_ld_v(s, MO_16, s->T0, s->A0); do_lcall: + if (use_qasan && qasan_max_call_stack) + gen_helper_qasan_shadow_stack_push(tcg_const_tl(s->pc)); if (s->pe && !s->vm86) { tcg_gen_trunc_tl_i32(s->tmp2_i32, s->T0); gen_helper_lcall_protected(cpu_env, s->tmp2_i32, s->T1, @@ -6509,6 +6643,8 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu) val = x86_ldsw_code(env, s); ot = gen_pop_T0(s); gen_stack_update(s, val + (1 << ot)); + if (use_qasan && qasan_max_call_stack) + gen_helper_qasan_shadow_stack_pop(s->T0); /* Note that gen_pop_T0 uses a zero-extending load. */ gen_op_jmp_v(s->T0); gen_bnd_jmp(s); @@ -6517,6 +6653,8 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu) case 0xc3: /* ret */ ot = gen_pop_T0(s); gen_pop_update(s, ot); + if (use_qasan && qasan_max_call_stack) + gen_helper_qasan_shadow_stack_pop(s->T0); /* Note that gen_pop_T0 uses a zero-extending load. 
*/ gen_op_jmp_v(s->T0); gen_bnd_jmp(s); @@ -6530,10 +6668,13 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu) gen_jmp_im(s, pc_start - s->cs_base); gen_helper_lret_protected(cpu_env, tcg_const_i32(dflag - 1), tcg_const_i32(val)); + // QASAN: TODO } else { gen_stack_A0(s); /* pop offset */ gen_op_ld_v(s, dflag, s->T0, s->A0); + if (use_qasan && qasan_max_call_stack) + gen_helper_qasan_shadow_stack_pop(s->T0); /* NOTE: keeping EIP updated is not a problem in case of exception */ gen_op_jmp_v(s->T0); @@ -6577,6 +6718,10 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu) tval = (int16_t)insn_get(env, s, MO_16); } next_eip = s->pc - s->cs_base; + if (__afl_cmp_map && afl_must_instrument(next_eip)) + gen_helper_afl_cmplog_rtn(cpu_env); + if (use_qasan && qasan_max_call_stack) + gen_helper_qasan_shadow_stack_push(tcg_const_tl(s->pc)); tval += next_eip; if (dflag == MO_16) { tval &= 0xffff; diff --git a/target/mips/translate.c b/target/mips/translate.c index 70891c37cdd4b..84befd62a09fe 100644 --- a/target/mips/translate.c +++ b/target/mips/translate.c @@ -39,6 +39,27 @@ #include "fpu_helper.h" #include "translate.h" +/* MIPS_PATCH */ +#include "qemuafl/cpu-translate.h" + +/* MIPS_PATCH */ +#define AFL_QEMU_TARGET_MIPS_SNIPPET \ + if (is_persistent) { \ + if (ctx->base.pc_next == afl_persistent_addr) { \ + gen_helper_afl_persistent_routine(cpu_env); \ + \ + if (afl_persistent_ret_addr == 0 && !persistent_exits) { \ + tcg_gen_movi_tl(cpu_gpr[31], afl_persistent_addr); \ + } \ + \ + if (!persistent_save_gpr) afl_gen_tcg_plain_call(&afl_persistent_loop); \ + \ + } else if (afl_persistent_ret_addr && \ + ctx->base.pc_next == afl_persistent_ret_addr) { \ + gen_goto_tb(ctx, 0, afl_persistent_addr); \ + } \ + } + enum { /* indirect opcode tables */ OPC_SPECIAL = (0x00 << 26), @@ -2274,6 +2295,128 @@ static const char * const mxuregnames[] = { }; #endif +/* MIPS_PATCH */ +void afl_save_regs(struct api_regs* r, CPUArchState *env) { + int i = 0; + int j = 0; + /* GP registers saving */ + r->r0 = env->active_tc.gpr[0]; + r->at = env->active_tc.gpr[1]; + r->v0 = env->active_tc.gpr[2]; + r->v1 = env->active_tc.gpr[3]; + r->a0 = env->active_tc.gpr[4]; + r->a1 = env->active_tc.gpr[5]; + r->a2 = env->active_tc.gpr[6]; + r->a3 = env->active_tc.gpr[7]; + r->t0 = env->active_tc.gpr[8]; + r->t1 = env->active_tc.gpr[9]; + r->t2 = env->active_tc.gpr[10]; + r->t3 = env->active_tc.gpr[11]; + r->t4 = env->active_tc.gpr[12]; + r->t5 = env->active_tc.gpr[13]; + r->t6 = env->active_tc.gpr[14]; + r->t7 = env->active_tc.gpr[15]; + r->s0 = env->active_tc.gpr[16]; + r->s1 = env->active_tc.gpr[17]; + r->s2 = env->active_tc.gpr[18]; + r->s3 = env->active_tc.gpr[19]; + r->s4 = env->active_tc.gpr[20]; + r->s5 = env->active_tc.gpr[21]; + r->s6 = env->active_tc.gpr[22]; + r->s7 = env->active_tc.gpr[23]; + r->t8 = env->active_tc.gpr[24]; + r->t9 = env->active_tc.gpr[25]; + r->k0 = env->active_tc.gpr[26]; + r->k1 = env->active_tc.gpr[27]; + r->gp = env->active_tc.gpr[28]; + r->sp = env->active_tc.gpr[29]; + r->fp = env->active_tc.gpr[30]; + r->ra = env->active_tc.gpr[31]; + r->PC = env->active_tc.PC; +#if defined(TARGET_MIPS64) + memcpy(r->gpr_hi, env->active_tc.gpr_hi, sizeof(r->gpr_hi)); +#endif + for (i = 0; i < MIPS_DSP_ACC; i++) { + r->HI[i] = env->active_tc.HI[i]; + r->LO[i] = env->active_tc.LO[i]; + } + /* FP registers saving */ + for (i = 0; i < 32; i++) { + r->fpr[i].fd = env->active_fpu.fpr[i].fd; + for (j = 0; j < 2; j++) { + r->fpr[i].fs[j] = env->active_fpu.fpr[i].fs[j]; + } + r->fpr[i].d = 
env->active_fpu.fpr[i].d; + for (j = 0; j < 2; j++) { + r->fpr[i].w[j] = env->active_fpu.fpr[i].w[j]; + } + for (j = 0; j < MSA_WRLEN / 8; j++) { + r->fpr[i].wr.b[j] = env->active_fpu.fpr[i].wr.b[j]; + } + } +} + +/* MIPS_PATCH */ +void afl_restore_regs(struct api_regs* r, CPUArchState *env) { + int i = 0; + int j = 0; + /* GP registers restoring */ + env->active_tc.gpr[0] = r->r0; + env->active_tc.gpr[1] = r->at; + env->active_tc.gpr[2] = r->v0; + env->active_tc.gpr[3] = r->v1; + env->active_tc.gpr[4] = r->a0; + env->active_tc.gpr[5] = r->a1; + env->active_tc.gpr[6] = r->a2; + env->active_tc.gpr[7] = r->a3; + env->active_tc.gpr[8] = r->t0; + env->active_tc.gpr[9] = r->t1; + env->active_tc.gpr[10] = r->t2; + env->active_tc.gpr[11] = r->t3; + env->active_tc.gpr[12] = r->t4; + env->active_tc.gpr[13] = r->t5; + env->active_tc.gpr[14] = r->t6; + env->active_tc.gpr[15] = r->t7; + env->active_tc.gpr[16] = r->s0; + env->active_tc.gpr[17] = r->s1; + env->active_tc.gpr[18] = r->s2; + env->active_tc.gpr[19] = r->s3; + env->active_tc.gpr[20] = r->s4; + env->active_tc.gpr[21] = r->s5; + env->active_tc.gpr[22] = r->s6; + env->active_tc.gpr[23] = r->s7; + env->active_tc.gpr[24] = r->t8; + env->active_tc.gpr[25] = r->t9; + env->active_tc.gpr[26] = r->k0; + env->active_tc.gpr[27] = r->k1; + env->active_tc.gpr[28] = r->gp; + env->active_tc.gpr[29] = r->sp; + env->active_tc.gpr[30] = r->fp; + env->active_tc.gpr[31] = r->ra; + env->active_tc.PC = r->PC; +#if defined(TARGET_MIPS64) + memcpy(env->active_tc.gpr_hi, r->gpr_hi, sizeof(r->gpr_hi)); +#endif + for (i = 0; i < MIPS_DSP_ACC; i++) { + env->active_tc.HI[i] = r->HI[i]; + env->active_tc.LO[i] = r->LO[i]; + } + /* FP registers restoring */ + for (i = 0; i < 32; i++) { + env->active_fpu.fpr[i].fd = r->fpr[i].fd; + for (j = 0; j < 2; j++) { + env->active_fpu.fpr[i].fs[j] = r->fpr[i].fs[j]; + } + env->active_fpu.fpr[i].d = r->fpr[i].d; + for (j = 0; j < 2; j++) { + env->active_fpu.fpr[i].w[j] = r->fpr[i].w[j]; + } + for (j = 0; j < MSA_WRLEN / 8; j++) { + env->active_fpu.fpr[i].wr.b[j] = r->fpr[i].wr.b[j]; + } + } +} + /* General purpose registers moves. 
*/ void gen_load_gpr(TCGv t, int reg) { @@ -29090,6 +29233,9 @@ static void mips_tr_translate_insn(DisasContextBase *dcbase, CPUState *cs) int insn_bytes; int is_slot; + /* MIPS_PATCH */ + AFL_QEMU_TARGET_MIPS_SNIPPET + is_slot = ctx->hflags & MIPS_HFLAG_BMASK; if (ctx->insn_flags & ISA_NANOMIPS32) { ctx->opcode = translator_lduw(env, ctx->base.pc_next); diff --git a/target/ppc/translate.c b/target/ppc/translate.c index 0984ce637be90..7bc75e36a2d66 100644 --- a/target/ppc/translate.c +++ b/target/ppc/translate.c @@ -37,6 +37,47 @@ #include "exec/log.h" #include "qemu/atomic128.h" +#include "qemuafl/cpu-translate.h" + +#define AFL_QEMU_TARGET_PPC_SNIPPET \ + if (is_persistent) { \ + \ + if (ctx->base.pc_next == afl_persistent_addr) { \ + \ + gen_helper_afl_persistent_routine(cpu_env); \ + \ + if (afl_persistent_ret_addr == 0 && !persistent_exits) { \ + \ + tcg_gen_movi_i32(cpu_lr, afl_persistent_addr); \ + \ + } \ + \ + if (!persistent_save_gpr) afl_gen_tcg_plain_call(&afl_persistent_loop); \ + \ + } else if (afl_persistent_ret_addr && \ + ctx->base.pc_next == afl_persistent_ret_addr) { \ + \ + gen_setlr(ctx, afl_persistent_addr); \ + gen_bclr(ctx); \ + \ + } \ + \ + } + +void afl_save_regs(struct api_regs* r, CPUArchState* env) { + memcpy(r->gpr, env->gpr, sizeof(r->gpr)); + r->lr = env->lr; + r->ctr = env->ctr; + memcpy(r->crf, env->crf, sizeof(r->crf)); +} + +void afl_restore_regs(struct api_regs* r, CPUArchState* env) { + memcpy(env->gpr, r->gpr, sizeof(r->gpr)); + env->lr = r->lr; + env->ctr = r->ctr; + memcpy(env->crf, r->crf, sizeof(r->crf)); +} + #define CPU_SINGLE_STEP 0x1 #define CPU_BRANCH_STEP 0x2 @@ -8002,6 +8043,10 @@ static void ppc_tr_translate_insn(DisasContextBase *dcbase, CPUState *cs) LOG_DISAS("nip=" TARGET_FMT_lx " super=%d ir=%d\n", ctx->base.pc_next, ctx->mem_idx, (int)msr_ir); +#if defined(TARGET_PPC) + AFL_QEMU_TARGET_PPC_SNIPPET +#endif + ctx->opcode = translator_ldl_swap(env, ctx->base.pc_next, need_byteswap(ctx)); diff --git a/tcg/tcg-op.c b/tcg/tcg-op.c index 70475773f4574..e42766c20df87 100644 --- a/tcg/tcg-op.c +++ b/tcg/tcg-op.c @@ -32,6 +32,25 @@ #include "trace/mem.h" #include "exec/plugin-gen.h" +#include "qemuafl/qasan-qemu.h" + +#define GEN_QASAN_OP(OP) \ +void qasan_gen_##OP(TCGv addr, int off) { \ + \ + if (use_qasan && cur_block_is_good) \ + gen_helper_qasan_##OP(cpu_env, addr); \ + \ +} + +GEN_QASAN_OP(load1) +GEN_QASAN_OP(load2) +GEN_QASAN_OP(load4) +GEN_QASAN_OP(load8) +GEN_QASAN_OP(store1) +GEN_QASAN_OP(store2) +GEN_QASAN_OP(store4) +GEN_QASAN_OP(store8) + /* Reduce the number of ifdefs below. This assumes that all uses of TCGV_HIGH and TCGV_LOW are properly protected by a conditional that the compiler can eliminate. 
*/ @@ -2838,9 +2857,18 @@ void tcg_gen_qemu_ld_i32(TCGv_i32 val, TCGv addr, TCGArg idx, MemOp memop) } addr = plugin_prep_mem_callbacks(addr); + + switch (memop & MO_SIZE) { + case MO_64: qasan_gen_load8(addr, idx); break; + case MO_32: qasan_gen_load4(addr, idx); break; + case MO_16: qasan_gen_load2(addr, idx); break; + case MO_8: qasan_gen_load1(addr, idx); break; + default: qasan_gen_load4(addr, idx); break; + } + gen_ldst_i32(INDEX_op_qemu_ld_i32, val, addr, memop, idx); plugin_gen_mem_callbacks(addr, info); - + if ((orig_memop ^ memop) & MO_BSWAP) { switch (orig_memop & MO_SIZE) { case MO_16: @@ -2885,6 +2913,15 @@ void tcg_gen_qemu_st_i32(TCGv_i32 val, TCGv addr, TCGArg idx, MemOp memop) } addr = plugin_prep_mem_callbacks(addr); + + switch (memop & MO_SIZE) { + case MO_64: qasan_gen_store8(addr, idx); break; + case MO_32: qasan_gen_store4(addr, idx); break; + case MO_16: qasan_gen_store2(addr, idx); break; + case MO_8: qasan_gen_store1(addr, idx); break; + default: qasan_gen_store4(addr, idx); break; + } + if (TCG_TARGET_HAS_qemu_st8_i32 && (memop & MO_SIZE) == MO_8) { gen_ldst_i32(INDEX_op_qemu_st8_i32, val, addr, memop, idx); } else { @@ -2927,6 +2964,15 @@ void tcg_gen_qemu_ld_i64(TCGv_i64 val, TCGv addr, TCGArg idx, MemOp memop) } addr = plugin_prep_mem_callbacks(addr); + + switch (memop & MO_SIZE) { + case MO_64: qasan_gen_load8(addr, idx); break; + case MO_32: qasan_gen_load4(addr, idx); break; + case MO_16: qasan_gen_load2(addr, idx); break; + case MO_8: qasan_gen_load1(addr, idx); break; + default: qasan_gen_load8(addr, idx); break; + } + gen_ldst_i64(INDEX_op_qemu_ld_i64, val, addr, memop, idx); plugin_gen_mem_callbacks(addr, info); @@ -2990,6 +3036,15 @@ void tcg_gen_qemu_st_i64(TCGv_i64 val, TCGv addr, TCGArg idx, MemOp memop) } addr = plugin_prep_mem_callbacks(addr); + + switch (memop & MO_SIZE) { + case MO_64: qasan_gen_store8(addr, idx); break; + case MO_32: qasan_gen_store4(addr, idx); break; + case MO_16: qasan_gen_store2(addr, idx); break; + case MO_8: qasan_gen_store1(addr, idx); break; + default: qasan_gen_store8(addr, idx); break; + } + gen_ldst_i64(INDEX_op_qemu_st_i64, val, addr, memop, idx); plugin_gen_mem_callbacks(addr, info); diff --git a/tcg/tcg.c b/tcg/tcg.c index 63a12b197bff1..7d1282bd39d24 100644 --- a/tcg/tcg.c +++ b/tcg/tcg.c @@ -66,6 +66,8 @@ #include "exec/log.h" #include "sysemu/sysemu.h" +#include "qemuafl/common.h" + /* Forward declarations for functions declared in tcg-target.c.inc and used here. */ static void tcg_target_init(TCGContext *s); @@ -1932,6 +1934,17 @@ bool tcg_op_supported(TCGOpcode op) } } +void afl_gen_tcg_plain_call(void *func) +{ + TCGOp *op = tcg_emit_op(INDEX_op_call); + + TCGOP_CALLO(op) = 0; + + op->args[0] = (uintptr_t)func; + op->args[1] = 0; + TCGOP_CALLI(op) = 0; +} + /* Note: we convert the 64 bit args to 32 bit and do some alignment and endian swap. Maybe it would be better to do the alignment and endian swap in tcg_reg_alloc_call(). */
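
The AFL_QEMU_TARGET_ARM64_SNIPPET above, and its ARM, Thumb, i386, MIPS and PPC counterparts, all emit the same persistent-mode control flow at translation time; only the link register differs (x30 on AArch64, r14 on ARM/Thumb with the Thumb bit OR'd in, the return slot at *ESP on x86, $ra on MIPS, lr on PPC). The plain-C model below summarizes that flow; it is an illustrative sketch, not code from the patch, and every name in it is a stand-in. The real loop addresses live in afl_persistent_addr / afl_persistent_ret_addr, which the elfload patches populate (in AFL++, from the AFL_QEMU_PERSISTENT_ADDR / AFL_QEMU_PERSISTENT_RET environment variables).

#include <stdint.h>
#include <stdbool.h>

typedef uint64_t guest_addr;          /* stand-in for abi_ulong */

static guest_addr loop_head;          /* afl_persistent_addr     */
static guest_addr loop_end;           /* afl_persistent_ret_addr */
static bool exits_allowed;            /* persistent_exits        */

static void persistent_routine(void) {
    /* first pass: snapshot registers (and optionally memory); later
       passes: restore the snapshot and fetch the next input */
}

static void persistent_step(guest_addr *pc, guest_addr *link_reg) {
    if (*pc == loop_head) {
        persistent_routine();
        if (loop_end == 0 && !exits_allowed) {
            /* no explicit end address: make a plain function return
               land back on the loop head */
            *link_reg = loop_head;
        }
    } else if (loop_end && *pc == loop_end) {
        *pc = loop_head;              /* close the loop */
    }
}

When no end address is given and persistent_exits is unset, simply returning from the target function re-enters the loop, which is why the snippets overwrite the link register right after the persistent routine runs.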
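
The qasan_shadow_stack_push/pop helper calls added around BL/BLR/RET on AArch64 and around call/ret on i386 maintain a shadow call stack, bounded by qasan_max_call_stack, that QASan uses to flag returns to addresses that were never pushed. A minimal model of that bookkeeping, with all names illustrative:

#include <stdint.h>
#include <stdio.h>

#define SHADOW_MAX 4096                  /* capacity; qasan_max_call_stack
                                            plays this role in the patch */
static uint64_t shadow_stack[SHADOW_MAX];
static int shadow_top;

static void shadow_push(uint64_t ret_addr) {
    if (shadow_top < SHADOW_MAX)
        shadow_stack[shadow_top++] = ret_addr;
}

static void shadow_pop(uint64_t actual_target) {
    /* unwind until the matching frame; running out of frames means the
       saved return address no longer matches anything we pushed */
    while (shadow_top > 0)
        if (shadow_stack[--shadow_top] == actual_target)
            return;
    fprintf(stderr, "qasan sketch: suspicious return to %#llx\n",
            (unsigned long long)actual_target);
}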
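
afl_gen_compcov is emitted in front of the flag-setting subtractions and compares the translators touch: the cmp-with-immediate, cmp-with-register and ccmp forms on AArch64, the gen_sub_CC/gen_rsb_CC data-processing paths on ARM, and OP_SUBL/OP_CMPL in the i386 gen_op. At run time it rewards partial operand matches byte by byte, roughly as in this sketch (map indexing is simplified, and the real helper also honours afl_compcov_level and the instrumented-range filters):

#include <stdint.h>

#define MAP_SIZE (1 << 16)              /* assumed; the AFL default */
extern unsigned char *afl_area_ptr;     /* coverage bitmap, as in the patch */

/* score a 32-bit comparison: one extra map hit per extra low-order
   byte on which the operands already agree */
static void compcov_log_32(uint64_t cur_loc, uint32_t a, uint32_t b) {
    if ((a & 0xff) == (b & 0xff)) {
        afl_area_ptr[cur_loc % MAP_SIZE]++;
        if ((a & 0xffff) == (b & 0xffff)) {
            afl_area_ptr[(cur_loc + 1) % MAP_SIZE]++;
            if ((a & 0xffffff) == (b & 0xffffff))
                afl_area_ptr[(cur_loc + 2) % MAP_SIZE]++;
        }
    }
}

Each additional matching low-order byte flips another map entry, so a fuzzer mutating one byte at a time sees new coverage every time another byte of a magic constant falls into place.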
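
Every target contributes an afl_save_regs/afl_restore_regs pair, used by persistent mode (via saved_regs and persistent_save_gpr) to rewind CPU state between iterations. One porting note: the restore side should only read fields the save side wrote. In the ARM pair above, for instance, afl_restore_regs consumes r->pc while afl_save_regs never fills it, which is safe only if the persistent-mode caller sets that field itself. A field-for-field symmetric sketch, with hypothetical structures:

#include <stdint.h>
#include <string.h>

struct regs_snapshot { uint64_t gpr[32]; uint64_t pc; uint64_t flags; };
struct cpu_state     { uint64_t gpr[32]; uint64_t pc; uint64_t flags; };

static void regs_save(struct regs_snapshot *r, const struct cpu_state *env) {
    memcpy(r->gpr, env->gpr, sizeof(r->gpr));
    r->pc    = env->pc;        /* write every field the restore side reads */
    r->flags = env->flags;
}

static void regs_restore(const struct regs_snapshot *r, struct cpu_state *env) {
    memcpy(env->gpr, r->gpr, sizeof(env->gpr));
    env->pc    = r->pc;
    env->flags = r->flags;
}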
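
gen_sse() on i386 also gains a QASan "backdoor": when the 0xf2 sub-opcode is seen and use_qasan is set, gen_helper_qasan_fake_instr is emitted with EAX as an action code and EDI/ESI/EDX as arguments, and the result is written back to EAX. This is how the guest-side QASan runtime asks QEMU to manipulate shadow memory. The dispatcher sketch below invents the action codes purely for illustration; the real ones live in qemuafl/qasan-qemu.h:

#include <stdint.h>

enum { QASAN_ACT_POISON = 1, QASAN_ACT_UNPOISON = 2, QASAN_ACT_IS_POISON = 3 };

static uint64_t qasan_fake_instr(uint64_t action, uint64_t a1, uint64_t a2,
                                 uint64_t a3) {
    (void)a3;
    switch (action) {
    case QASAN_ACT_POISON:    /* mark [a1, a1 + a2) as a redzone */ break;
    case QASAN_ACT_UNPOISON:  /* make [a1, a1 + a2) addressable  */ break;
    case QASAN_ACT_IS_POISON: /* query the shadow state of a1    */ break;
    }
    return 0;                 /* the result lands back in guest EAX */
}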
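
At i386 call sites the patch additionally emits gen_helper_afl_cmplog_rtn whenever __afl_cmp_map is mapped and the target passes afl_must_instrument, letting AFL++'s CMPLOG mode harvest the bytes behind likely pointer arguments as dictionary tokens. A simplified model follows; the entry layout and hashing are illustrative, and the real helper validates both pointers before dereferencing them:

#include <stdint.h>
#include <string.h>

struct rtn_entry { uint8_t v0[32]; uint8_t v1[32]; }; /* simplified layout */
static struct rtn_entry rtn_map[65536];

static void cmplog_rtn(uint64_t call_pc, const void *arg0, const void *arg1) {
    uint32_t k = (uint32_t)(call_pc >> 4) & 0xffff;  /* illustrative hash */
    memcpy(rtn_map[k].v0, arg0, sizeof rtn_map[k].v0);
    memcpy(rtn_map[k].v1, arg1, sizeof rtn_map[k].v1);
}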
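
In tcg/tcg-op.c, every tcg_gen_qemu_ld_*/st_* path is prefixed with a qasan_gen_loadN/storeN call selected by MO_SIZE (with a same-width default case), so each guest access is checked against QASan's shadow memory before the load or store is emitted. The helpers stand for an ASan-style check like the sketch below; the 1:8 shadow scaling is the conventional ASan layout and is assumed here for illustration:

#include <stdint.h>
#include <stdlib.h>

static int8_t *shadow_base;    /* hypothetical shadow mapping */

static void qasan_check(uintptr_t addr, size_t size) {
    int8_t s = shadow_base[addr >> 3];
    /* 0: whole 8-byte granule addressable; 1..7: only the first s bytes;
       negative: poisoned (redzone, freed, ...) */
    if (s != 0 && (s < 0 || (int)((addr & 7) + size) > s)) {
        abort();               /* real QASan raises a proper report */
    }
}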
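
Finally, afl_gen_tcg_plain_call() in tcg/tcg.c hand-builds an INDEX_op_call with zero inputs, zero outputs and no flags, which is what lets the translator snippets splice a bare void(void) host function such as afl_persistent_loop into a translated block. A usage sketch (my_hook is an invented example):

extern void afl_gen_tcg_plain_call(void *func);   /* from tcg/tcg.c above */

static void my_hook(void) {
    /* runs on the host each time the translated block executes */
}

static void emit_hook(void) {
    /* call from inside a disas_* handler while a block is being built */
    afl_gen_tcg_plain_call(&my_hook);
}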