diff -uNr src.git-mirror/sys/arch/amd64/conf/GENERIC_KASAN src.git/sys/arch/amd64/conf/GENERIC_KASAN --- src.git-mirror/sys/arch/amd64/conf/GENERIC_KASAN 1970-01-01 01:00:00.000000000 +0100 +++ src.git/sys/arch/amd64/conf/GENERIC_KASAN 2018-08-17 18:36:27.354216686 +0200 @@ -0,0 +1,6 @@ +# $NetBSD$ + +include "arch/amd64/conf/GENERIC" + +makeoptions KASAN=1 # Kernel Address Sanitizer +options KASAN diff -uNr src.git-mirror/sys/arch/amd64/conf/Makefile.amd64 src.git/sys/arch/amd64/conf/Makefile.amd64 --- src.git-mirror/sys/arch/amd64/conf/Makefile.amd64 2018-08-17 18:51:27.222207262 +0200 +++ src.git/sys/arch/amd64/conf/Makefile.amd64 2018-08-17 18:36:27.385126960 +0200 @@ -45,8 +45,8 @@ # CFLAGS+= -mno-fp-ret-in-387 .if ${SPECTRE_V2_GCC_MITIGATION:U0} > 0 && ${HAVE_GCC:U0} > 0 -CFLAGS+= -mindirect-branch=thunk-inline -CFLAGS+= -mindirect-branch-register +CFLAGS+= -mindirect-branch=thunk-inline +CFLAGS+= -mindirect-branch-register .endif ## diff -uNr src.git-mirror/sys/conf/Makefile.kern.inc src.git/sys/conf/Makefile.kern.inc --- src.git-mirror/sys/conf/Makefile.kern.inc 2018-08-17 18:51:27.994401236 +0200 +++ src.git/sys/conf/Makefile.kern.inc 2018-08-17 18:36:31.320848171 +0200 @@ -108,6 +108,11 @@ CFLAGS+= -fno-common .endif +# Kernel Address Sanitizer options +.if defined(KASAN) && ${ACTIVE_CC} == "gcc" +CFLAGS+= -fsanitize=kernel-address +.endif + # Use the per-source COPTS variables to add -g to just those # files that match the shell patterns given in ${DEBUGLIST} # diff -uNr src.git-mirror/sys/kern/files.kern src.git/sys/kern/files.kern --- src.git-mirror/sys/kern/files.kern 2018-08-17 18:52:37.565862663 +0200 +++ src.git/sys/kern/files.kern 2018-08-17 18:37:26.740065701 +0200 @@ -218,3 +218,10 @@ file miscfs/genfs/layer_vnops.c layerfs file miscfs/specfs/spec_vnops.c vfs + +define kasan +defflag opt_kubsan.h KASAN +file kern/kern_asan.c kasan +file kern/kern_asan_report.c kasan +file kern/kern_asan_quarantine.c kasan +file kern/kern_asan_init.c kasan diff -uNr src.git-mirror/sys/kern/kern_asan.c src.git/sys/kern/kern_asan.c --- src.git-mirror/sys/kern/kern_asan.c 1970-01-01 01:00:00.000000000 +0100 +++ src.git/sys/kern/kern_asan.c 2018-08-17 18:37:26.746266587 +0200 @@ -0,0 +1,1251 @@ +#include +__KERNEL_RCSID(0, "$NetBSD$"); + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include + +#define _RET_IP_ (unsigned long)__builtin_return_address(0) + +//Typedefs for current version +typedef uint8_t u8; +typedef uint16_t u16; +typedef uint32_t u32; +typedef uint64_t u64; + +typedef uint8_t __u8; +typedef uint16_t __u16; +typedef uint32_t __u32; +typedef uint64_t __u64; + +typedef int8_t s8; +typedef int16_t s16; +typedef int32_t s32; +typedef int64_t s64; + +typedef int8_t __s8; +typedef int16_t __s16; +typedef int32_t __s32; +typedef int64_t __s64; + +typedef uint16_t __le16; +typedef uint32_t __le32; +typedef uint64_t __le64; + +typedef uint16_t __be16; +typedef uint32_t __be32; +typedef uint64_t __be64; +//End of typedefs + + +#define IS_ALIGNED(x, a)(((x) & ((typeof(x))(a) - 1)) == 0) +#define __round_mask(x, y) ((__typeof__(x))((y)-1)) +#define round_up(x, y) ((((x)-1) | __round_mask(x, y))+1) +#define round_down(x, y) ((x) & ~__round_mask(x, y)) +#define THREAD_SIZE 4086 + +/* I don't see a __memory_barrier in linux +#ifndef barrier +# define barrier() __memory_barrier() +#endif +*/ +/* Barriers were removed */ +#define __READ_ONCE_SIZE \ +({ \ + switch (size) { \ + case 1: *(__u8 *)res = 
*(__u8 *)p; break; \ + case 2: *(__u16 *)res = *(__u16 *)p; break; \ + case 4: *(__u32 *)res = *(__u32 *)p; break; \ + case 8: *(__u64 *)res = *(__u64 *)p; break; \ + default: \ + __builtin_memcpy((void *)res, (void *)p, size); \ + } \ +}) + +static __always_inline +void __read_once_size(void *p, void *res, int size) +{ + __READ_ONCE_SIZE; +} + +//static __no_kasan_or_inline +static __always_inline +void __read_once_size_nocheck(void *p, void *res, int size) +{ + __READ_ONCE_SIZE; +} + +// smp_read_barrier_depends(); +// Above line is not added. +#define __READ_ONCE(x, check) \ +({ \ + union { typeof(x) __val; char __c[1]; } __u; \ + if (check) \ + __read_once_size(&(x), __u.__c, sizeof(x)); \ + else \ + __read_once_size_nocheck(&(x), __u.__c, sizeof(x)); \ + __u.__val; \ +}) +#define READ_ONCE(x) __READ_ONCE(x, 1) + + +#define SLAB_KASAN 100 //temp +#define SLAB_TYPESAFE_BY_RCU 100 //temp +#define KMALLOC_MAX_SIZE 100 //temp +#define ZERO_SIZE_PTR (void *)100 //temp +#define GFP_NOWAIT 100 //temp +#define VM_KASAN 100 //temp + + +/* Function declarations for KASAN Functions */ +void kasan_check_read(const volatile void *, unsigned int); +void * task_stack_page(struct lwp * ); +void kasan_check_write(const volatile void *, unsigned int); + + +/* End of Function declarations for KASAN Functions */ + +/* +void kasan_enable_current(void) +{ + kasan_depth++; +} + +void kasan_disable_current(void) +{ + kasan_depth--; +} +*/ + +/* + * Dummy functions for timebeing + */ + +bool PageHighMem(struct page *); +bool PageHighMem(struct page *Page) { + return true; +} + +bool PageSlab(struct page *); +bool PageSlab(struct page *Page) { + return true; +} + +void * page_address(struct page *); +void * page_address(struct page *Page) { + return (void *)0; +} + +struct page * virt_to_page(const void *); +struct page * virt_to_page(const void *test) { + return (struct page *)0; +} + +struct page * virt_to_head_page(const void *); +struct page * virt_to_head_page(const void *test) { + return (struct page *)0; +} + +int compound_order(struct page *); +int compound_order(struct page *Page) { + return 0; +} + +void * nearest_obj(struct pool_cache *, struct page *, void *); +void * nearest_obj(struct pool_cache *cache, struct page *Page, void *obj) +{ + return (void *)0; +} +/* + * End of Dummy functions + */ + +/* + * Start of NetBSD kernel alternatives + */ +/* + * Used to return the page mapping the stack of a lwp + */ +void * task_stack_page(struct lwp *task) { + struct pcb *pb = lwp_getpcb(task); + return (void *)pb->pcb_rbp; +} +/* + * End of NetBSD kernel alternatives + */ + + +/* + * Poisons the shadow memory for 'size' bytes starting from 'addr'. + * Memory addresses should be aligned to KASAN_SHADOW_SCALE_SIZE. + */ +static void +kasan_poison_shadow(const void *address, size_t size, u8 value) +{ + void *shadow_start, *shadow_end; + + /* + * Find the shadow offsets of the start and end address + */ + shadow_start = kasan_mem_to_shadow(address); + shadow_end = kasan_mem_to_shadow((void *)((uintptr_t)address + + size)); + + /* + * Use memset to populate the region with the given value + */ + __builtin_memset(shadow_start, value, (char *)shadow_end - + (char *)shadow_start); +} + +/* + * unpoisons the shadow memory for 'size' bytes starting from 'addr'. + * Memory addresses should be aligned to KASAN_SHADOW_SCALE_SIZE. 
+ */ +void +kasan_unpoison_shadow(const void *address, size_t size) +{ + kasan_poison_shadow(address, size, 0); + + if (size & KASAN_SHADOW_MASK) { + u8 *shadow = (u8 *)kasan_mem_to_shadow((void *) + ((uintptr_t)address + size)); + *shadow = size & KASAN_SHADOW_MASK; + } +} + +/* Unpoison the stack from the page with the stack base */ +static void +__kasan_unpoison_stack(struct lwp *task, const void *sp) +{ + void *base = task_stack_page(task); + size_t size = (const char *)sp - (const char *)base; + + kasan_unpoison_shadow(base, size); +} + +/* Unpoison the entire stack for a task. */ +void +kasan_unpoison_task_stack(struct lwp *task) +{ + __kasan_unpoison_stack(task,(void *) + ((uintptr_t)task_stack_page(task) + THREAD_SIZE)); +} + +/* Unpoison the stack for the current task beyond a watermark sp value. */ +void +kasan_unpoison_task_stack_below(const void *watermark) +{ + + /* Calculate the task stack base address. */ + void *base = (void *)((unsigned long)watermark & + ~(THREAD_SIZE - 1)); + + kasan_unpoison_shadow(base, (const char *)watermark - + (char *)base); +} + +/* + * Clear all poison for the region between the current SP and a provided + * watermark value, as is sometimes required prior to hand-crafted + * asm function returns in the middle of functions. + */ +void +kasan_unpoison_stack_above_sp_to(const void *watermark) +{ + const void *sp = __builtin_frame_address(0); + size_t size = (const char *)watermark - (const char *)sp; + + /* Make sure that sp is below the watermark */ +// if (KASSERT(sp <= watermark)) +// if (KASSERT((int64_t)size < 0 )) +// return; + kasan_unpoison_shadow(sp, size); +} + +/* + * All functions below are always inlined so the compiler can + * perform better optimizations in each of __asan_loadX/__asan_storeX + * depending on the memory access size X. + */ + +static __always_inline bool +memory_is_poisoned_1(unsigned long addr) +{ + s8 shadow_value = *(s8 *)kasan_mem_to_shadow((void *)addr); + + if (__predict_false(shadow_value)) { + s8 last_accessible_byte = addr & KASAN_SHADOW_MASK; + return __predict_false(last_accessible_byte >= + shadow_value); + } + + return false; +} + +static __always_inline bool +memory_is_poisoned_2_4_8(unsigned long addr, + unsigned long size) +{ + u8 *shadow_addr = (u8 *)kasan_mem_to_shadow((void *)addr); + + /* + * Access crosses 8(shadow size)-byte boundary. Such access maps + * into 2 shadow bytes, so we need to check them both. + */ + if (__predict_false(((addr + size - 1) & KASAN_SHADOW_MASK) + < size - 1)) + return *shadow_addr || memory_is_poisoned_1(addr + + size - 1); + + return memory_is_poisoned_1(addr + size - 1); +} + +static __always_inline bool +memory_is_poisoned_16(unsigned long addr) +{ + u16 *shadow_addr = (u16 *)kasan_mem_to_shadow((void *)addr); + + /* Unaligned 16-byte access maps into 3 shadow bytes. */ + if (__predict_false(!IS_ALIGNED(addr, KASAN_SHADOW_SCALE_SIZE))) + return *shadow_addr || memory_is_poisoned_1(addr + 15); + + return *shadow_addr; +} + +/* Function to check whether a set of bytes is not zero */ +static __always_inline unsigned long +bytes_is_nonzero(const u8 *start, size_t size) +{ + while (size) { + /* If the byte is not zero, return its address */ + if (__predict_false(*start)) + return (unsigned long)start; + start++; + size--; + } + + return 0; +} + +/* + * Function to check whether a range of memory in the shadow region is + * non-zero, to determine whether the action being performed is legal. 
+ */ +static __always_inline unsigned long +memory_is_nonzero(const void *start, const void *end) +{ + unsigned int words; + unsigned long ret; + unsigned int prefix = (const unsigned long)start % 8; + + /* + * If the size is less that 16 bytes then use bytes_is_nonzero + * since we don't need to care about the allignment at all. + */ + if ((const char *)end - (const char *)start <= 16) + return bytes_is_nonzero(start,(unsigned long) + ((const char *)end - (const char *)start)); + + /* Check the non aligned bytes and check if they are non zero. */ + if (prefix) { + prefix = 8 - prefix; + ret = bytes_is_nonzero(start, prefix); + if (__predict_false(ret)) + return ret; + start =(void *)((uintptr_t)start + (uintptr_t)prefix); + } + + /* Check the memory region by taking chunks of 8 bytes each time */ + words = ((const char *)end - (const char *)start) / 8; + while (words) { + if (__predict_false(*(const u64 *)start)) + return bytes_is_nonzero(start, 8); + start =(void *)((uintptr_t)start + (uintptr_t)8); + words--; + } + + return bytes_is_nonzero(start, (unsigned long)((const char *)end - + (const char *)start) % 8); +} + +/* + * Function to make sure that n bytes of memory in the shadow region are + * poisoned according to the request. + */ +static __always_inline bool +memory_is_poisoned_n(unsigned long addr, size_t size) +{ + unsigned long ret; + + /* Check if the memory region is non zero */ + ret = memory_is_nonzero(kasan_mem_to_shadow((void *)addr), + (char *)kasan_mem_to_shadow((void *)(addr + + size - 1)) + 1); + + /* If the memory seems to be poisoned (non zero) */ + if (__predict_false(ret)) { + /* take the last byte and its corresponding shadow offset */ + unsigned long last_byte = addr + size - 1; + s8 *last_shadow = (s8 *)kasan_mem_to_shadow((void *)last_byte); + + /* if */ + if (__predict_false(ret != (unsigned long)last_shadow || + ((long)(last_byte & KASAN_SHADOW_MASK) + >= *last_shadow))) + return true; + } + return false; +} + +/* + * Function to decide and call the corresponding function to check the + * poisoning based on the size that was given + */ +static __always_inline bool +memory_is_poisoned(unsigned long addr, size_t size) +{ + if (__builtin_constant_p(size)) { + switch (size) { + case 1: + return memory_is_poisoned_1(addr); + case 2: + case 4: + case 8: + return memory_is_poisoned_2_4_8(addr, size); + case 16: + return memory_is_poisoned_16(addr); + default: + KASSERT(0 && "Not reached"); + } + } + + return memory_is_poisoned_n(addr, size); +} + +/* + * Inline function used to check whether the given memory access is + * proper. 
+ */ +static __always_inline void +check_memory_region_inline(unsigned long addr, size_t size, bool write, + unsigned long ret_ip) +{ + + if (__predict_false(size == 0)) + return; + + /* Check if the address is a valid kernel address */ + if (__predict_false((void *)addr < + kasan_shadow_to_mem((void *)KASAN_SHADOW_START))) { + kasan_report(addr, size, write, ret_ip); + return; + } + + /* If the memory is not poisoned then return normally */ + if (__predict_true(!memory_is_poisoned(addr, size))) + return; + + /* Bug found, proceed to report the bug */ + kasan_report(addr, size, write, ret_ip); + +} + + +static void +check_memory_region(unsigned long addr, size_t size, bool write, + unsigned long ret_ip) +{ + check_memory_region_inline(addr, size, write, ret_ip); +} + +void +kasan_check_read(const volatile void *p, unsigned int size) +{ + check_memory_region((unsigned long)p, size, false, _RET_IP_); +} + +void +kasan_check_write(const volatile void *p, unsigned int size) +{ + check_memory_region((unsigned long)p, size, true, _RET_IP_); +} +/* +MULTIPLE DEFINITION errors while linking here +#undef memset +void *memset(void *addr, int c, size_t len) +{ + check_memory_region((unsigned long)addr, len, true, _RET_IP_); + + return __builtin_memset(addr, c, len); +} +#undef memmove +void *memmove(void *dest, const void *src, size_t len) +{ + check_memory_region((unsigned long)src, len, false, _RET_IP_); + check_memory_region((unsigned long)dest, len, true, _RET_IP_); + + return __memmove(dest, src, len); +} + +#undef memcpy +void *memcpy(void *dest, const void *src, size_t len) +{ + check_memory_region((unsigned long)src, len, false, _RET_IP_); + check_memory_region((unsigned long)dest, len, true, _RET_IP_); + + return __builtin_memcpy(dest, src, len); +} +*/ + +/* + * Function to unpoison the shadow offset of a page which is being allocated + * only if it is not a Highmem page (Not applicable for amd64) + */ +void +kasan_alloc_pages(struct page *page, unsigned int order) +{ + if (__predict_true(!PageHighMem(page))) + kasan_unpoison_shadow(page_address(page), PAGE_SIZE << order); +} + +/* + * Function to poison the shadow offset of a page which is being freed + * only if it is not a Highmem page (Not applicable for amd64) + */ +void +kasan_free_pages(struct page *page, unsigned int order) +{ + if (__predict_true(!PageHighMem(page))) + kasan_poison_shadow(page_address(page), PAGE_SIZE << order, + KASAN_FREE_PAGE); +} + +/* + * Adaptive redzone policy taken from the userspace AddressSanitizer runtime. + * For larger allocations larger redzones are used. + */ +static unsigned int +optimal_redzone(unsigned int object_size) +{ + return + object_size <= 64 - 16 ? 16 : + object_size <= 128 - 32 ? 32 : + object_size <= 512 - 64 ? 64 : + object_size <= 4096 - 128 ? 128 : + object_size <= (1 << 14) - 256 ? 256 : + object_size <= (1 << 15) - 512 ? 512 : + object_size <= (1 << 16) - 1024 ? 1024 : 2048; +} + +/* + * Function to initialize the kasan_info struct inside the pool_cache + * struct used by kmem(8) and pool_cache(8) allocators with the details of + * the allocation. + */ +void +kasan_cache_create(struct pool_cache *cache, size_t *size, + unsigned int *flags) +{ + unsigned int orig_size = *size; + int redzone_adjust; + + /* Add alloc meta. */ + cache->kasan_info.alloc_meta_offset = *size; + *size += sizeof(struct kasan_alloc_meta); + + /* Add free meta. 
*/ + if (cache->pc_pool.pr_flags & SLAB_TYPESAFE_BY_RCU || //cache->ctor || + cache->pc_reqsize < sizeof(struct kasan_free_meta)) { + cache->kasan_info.free_meta_offset = *size; + *size += sizeof(struct kasan_free_meta); + } + redzone_adjust = optimal_redzone(cache->pc_reqsize) - + (*size - cache->pc_reqsize); + + if (redzone_adjust > 0) + *size += redzone_adjust; + + *size = min(KMALLOC_MAX_SIZE, + max(*size, cache->pc_reqsize + + optimal_redzone(cache->pc_reqsize))); + + /* + * If the metadata doesn't fit, don't enable KASAN at all. + */ + if (*size <= cache->kasan_info.alloc_meta_offset || + *size <= cache->kasan_info.free_meta_offset) { + cache->kasan_info.alloc_meta_offset = 0; + cache->kasan_info.free_meta_offset = 0; + *size = orig_size; + return; + } + + *flags |= SLAB_KASAN; +} + +/* + * Functions to be called by the Pagedaemon since there are no shrink and + * shutdown functions in the cache allocator. Will add after the quarantine + * list feature is ready + */ + +/* +void +kasan_cache_shrink(struct pool_cache *cache) +{ + quarantine_remove_cache(cache); +} + +void +kasan_cache_shutdown(struct pool_cache *cache) +{ + if (!__pool_cache_empty(cache)) + quarantine_remove_cache(cache); +} +*/ + +/* + * Function to return the total size of the alloc and free meta structure + * Returns 0 if the structres don't exist + */ +size_t +kasan_metadata_size(struct pool_cache *cache) +{ + return (cache->kasan_info.alloc_meta_offset ? + sizeof(struct kasan_alloc_meta) : 0) + + (cache->kasan_info.free_meta_offset ? + sizeof(struct kasan_free_meta) : 0); +} + +/* + * Function to poison a pool of cache memory. The function needs renaming. + */ +void +kasan_poison_slab(struct page *page) +{ + kasan_poison_shadow(page_address(page), + PAGE_SIZE << compound_order(page), + KASAN_KMALLOC_REDZONE); +} + +/* + * Function to unpoison a object of memory that is allocated by the allocator - + * in this case the pool_cache or the kmem allocator + */ +void +kasan_unpoison_object_data(struct pool_cache *cache, void *object) +{ + kasan_unpoison_shadow(object, cache->pc_reqsize); +} + +/* + * Function to poison a object of memory that is allocated by the allocator - + * in this case the pool_cache or the kmem allocator + */ +void +kasan_poison_object_data(struct pool_cache *cache, void *object) +{ + kasan_poison_shadow(object, + round_up(cache->pc_reqsize, KASAN_SHADOW_SCALE_SIZE), + KASAN_KMALLOC_REDZONE); +} + +/* + * Set of Functions to handle irq stacks - will be ported later + */ +/* +static inline int +in_irqentry_text(unsigned long ptr) +{ + return (ptr >= (unsigned long)&__irqentry_text_start && + ptr < (unsigned long)&__irqentry_text_end) || + (ptr >= (unsigned long)&__softirqentry_text_start && + ptr < (unsigned long)&__softirqentry_text_end); +} + +static inline void +filter_irq_stacks(struct stack_trace *trace) +{ + int i; + + if (!trace->nr_entries) + return; + for (i = 0; i < trace->nr_entries; i++) + if (in_irqentry_text(trace->entries[i])) { +*/ /* Include the irqentry function into the stack. 
*/ +/* trace->nr_entries = i + 1; + break; + } +} + +static inline depot_stack_handle_t +save_stack(unsigned int flags) +{ + unsigned long entries[KASAN_STACK_DEPTH]; + struct stack_trace trace = { + .nr_entries = 0, + .entries = entries, + .max_entries = KASAN_STACK_DEPTH, + .skip = 0 + }; + + save_stack_trace(&trace); + filter_irq_stacks(&trace); + if (trace.nr_entries != 0 && + trace.entries[trace.nr_entries-1] == ULONG_MAX) + trace.nr_entries--; + + return depot_save_stack(&trace, flags); +} +*/ + +/* Function to set the pid and stack in the kasan_track structure */ +static inline void +set_track(struct kasan_track *track, unsigned int flags) +{ +/* + * Haven't decided on how to proceed with this yet. + * + * track->pid = current->pid; + * track->stack = save_stack(flags); + */ +} + +/* + * Function to retrieve the address of the structure which contains the details of + * the allocation. + */ +struct kasan_alloc_meta +*get_alloc_info(struct pool_cache *cache, const void *object) +{ + KASSERT(sizeof(struct kasan_alloc_meta) <= 32); + return (void *)((char *)__UNCONST(object) + + cache->kasan_info.alloc_meta_offset); +} + +/* + * Function to retrieve the address of the structure which contains the details of + * the memory which was freed. + */ +struct kasan_free_meta +*get_free_info(struct pool_cache *cache, const void *object) +{ + KASSERT(sizeof(struct kasan_free_meta) <= 32); + return (void *)((char *)__UNCONST(object) + + cache->kasan_info.free_meta_offset); +} + +/* + * Function allocates an alloc info structure at the address pointed to by the + * get_alloc_info function. Requires renaming and possible flag change. + */ +void +kasan_init_slab_obj(struct pool_cache *cache, const void *object) +{ + struct kasan_alloc_meta *alloc_info; + + /* If the cache already has an alloc info struct? */ + if (!(cache->pc_pool.pr_flags & SLAB_KASAN)) + return; + + alloc_info = get_alloc_info(cache, object); + __builtin_memset(alloc_info, 0, sizeof(*alloc_info)); +} + +/* Allocate memory for the slab using kasan_kmalloc */ +void +kasan_slab_alloc(struct pool_cache *cache, void *object, unsigned int flags) +{ + kasan_kmalloc(cache, object, cache->pc_reqsize, flags); +} + +static bool +__kasan_slab_free(struct pool_cache *cache, void *object, + unsigned long ip, bool quarantine) +{ + s8 shadow_byte; + unsigned long rounded_up_size; + + /* + * Check if it was an invalid free by checking whether the object was + * a part of the cache. Will need to rethink this. + */ + if (__predict_false(nearest_obj(cache, virt_to_head_page(object), + object) !=object)) { +// kasan_report_invalid_free(object, ip); + return true; + } + + /* RCU slabs could be legally used after free within the RCU period */ + if (__predict_false(cache->pc_pool.pr_flags & SLAB_TYPESAFE_BY_RCU)) + return false; + + /* + * If the memory wasn't poisoned then it means that it is an invalid + * free of memory since the memory has never been allocated. + */ + shadow_byte = READ_ONCE(*(s8 *)kasan_mem_to_shadow(object)); + if (shadow_byte < 0 || shadow_byte >= KASAN_SHADOW_SCALE_SIZE) { +// kasan_report_invalid_free(object, ip); + return true; + } + + /* + * Poison the object since it is not usable anymore as it has been + * freed. 
Poison is done according to the size rounded up to the KASAN + * shadow scale size (KASAN_SHADOW_SCALE_SIZE). + */ + rounded_up_size = round_up(cache->pc_reqsize, KASAN_SHADOW_SCALE_SIZE); + kasan_poison_shadow(object, rounded_up_size, KASAN_KMALLOC_FREE); + + if (!quarantine || __predict_false(!(cache->pc_pool.pr_flags & + SLAB_KASAN))) + return false; + + /* + * Set the kasan_track structure and proceed to put the object in the + * quarantine list. + */ + set_track(&get_alloc_info(cache, object)->free_track, GFP_NOWAIT); +// quarantine_put(get_free_info(cache, object), cache); + return true; +} + +/* Wrapper function for slab free - subject to change */ +bool +kasan_slab_free(struct pool_cache *cache, void *object, unsigned long ip) +{ + return __kasan_slab_free(cache, object, ip, true); +} + +/* KASAN implementation of kmalloc */ +void +kasan_kmalloc(struct pool_cache *cache, const void *object, size_t size, + unsigned int flags) +{ + unsigned long redzone_start; + unsigned long redzone_end; +/* look at quarantine later + if (gfpflags_allow_blocking(flags)) + quarantine_reduce(); +*/ + if (__predict_false(object == NULL)) + return; + + /* Calculate the redzone to catch overflows */ + redzone_start = round_up(((unsigned long)object + size), + KASAN_SHADOW_SCALE_SIZE); + redzone_end = round_up((unsigned long)object + cache->pc_reqsize, + KASAN_SHADOW_SCALE_SIZE); + + /* Unpoison the memory of the object and poison the redzone region */ + kasan_unpoison_shadow(object, size); + kasan_poison_shadow((void *)redzone_start, redzone_end - redzone_start, + KASAN_KMALLOC_REDZONE); + + /* Need to reimplement the flag here */ + if (cache->pc_pool.pr_flags & SLAB_KASAN) + set_track(&get_alloc_info(cache, object)->alloc_track, flags); +} + +/* + * kmalloc for large allocations - might need to rethink this since the pool + * cache allocator seems to go for a direct allocation with the pool allocator + * for large sizes. + */ +void +kasan_kmalloc_large(const void *ptr, size_t size, unsigned int flags) +{ + struct page *page; + unsigned long redzone_start; + unsigned long redzone_end; +/* + if (gfpflags_allow_blocking(flags)) + quarantine_reduce(); +*/ + if (__predict_false(ptr == NULL)) + return; + + /* Calculate the redzone offsets to catch overflows */ + page = virt_to_page(ptr); + redzone_start = round_up(((unsigned long)ptr + size), + KASAN_SHADOW_SCALE_SIZE); + redzone_end = (unsigned long)ptr + (PAGE_SIZE << compound_order(page)); + + /* Unpoison the object and poison the redzone */ + kasan_unpoison_shadow(ptr, size); + kasan_poison_shadow((void *)redzone_start, redzone_end - redzone_start, + KASAN_PAGE_REDZONE); +} + +/* KASAN implementation of krealloc */ +void +kasan_krealloc(const void *object, size_t size, unsigned int flags) +{ + struct page *page; + + if (__predict_false(object == ZERO_SIZE_PTR)) + return; + + page = virt_to_head_page(object); + + /* Need to decide when to call kmalloc and kmalloc large */ + if (__predict_false(!PageSlab(page))) + kasan_kmalloc_large(object, size, flags); +// else +// kasan_kmalloc(page->slab_cache, object, size, flags); +} + +/* + * NetBSD alternative - free has been deprecated and made a wrapper around + * kmem_free. Will remove if no functions need this. 
+ */ +void +kasan_poison_kfree(void *ptr, unsigned long ip) +{ + struct page *page; + + page = virt_to_head_page(ptr); + + if (__predict_false(!PageSlab(page))) { + if (ptr != page_address(page)) { +// kasan_report_invalid_free(ptr, ip); + return; + } + kasan_poison_shadow(ptr, PAGE_SIZE << compound_order(page), + KASAN_FREE_PAGE); + } else { +// __kasan_slab_free(page->slab_cache, ptr, ip, false); + } +} + +/* + * NetBSD alternative - free has been deprecited and made a wrapper around + * kmem_free. Will remove if no functions need this. + */ +void +kasan_kfree_large(void *ptr, unsigned long ip) +{ + if (ptr != page_address(virt_to_head_page(ptr))) + return ; +// kasan_report_invalid_free(ptr, ip); + /* The object will be poisoned by page_alloc. */ + +} + +/* Module part will be dealt with later +int kasan_module_alloc(void *addr, size_t size) +{ + void *ret; + size_t shadow_size; + unsigned long shadow_start; + + shadow_start = (unsigned long)kasan_mem_to_shadow(addr); + shadow_size = round_up(size >> KASAN_SHADOW_SCALE_SHIFT, + PAGE_SIZE); + + if (WARN_ON(!PAGE_ALIGNED(shadow_start))) + return -EINVAL; + + ret = __vmalloc_node_range(shadow_size, 1, shadow_start, + shadow_start + shadow_size, + GFP_KERNEL | __GFP_ZERO, + PAGE_KERNEL, VM_NO_GUARD, NUMA_NO_NODE, + __builtin_return_address(0)); + + if (ret) { + find_vm_area(addr)->flags |= VM_KASAN; + kmemleak_ignore(ret); + return 0; + } + + return -ENOMEM; +} +*/ + +/* TODO */ +void +kasan_free_shadow(const struct vm_struct *vm) +{ +/* + if (vm->flags & VM_KASAN) + vfree(kasan_mem_to_shadow(vm->addr)); +*/ +} + +/* */ +static void +register_global(struct kasan_global *global) +{ + size_t aligned_size = round_up(global->size, KASAN_SHADOW_SCALE_SIZE); + + /* unposion the global area */ + kasan_unpoison_shadow(global->beg, global->size); + + kasan_poison_shadow((void *)((uintptr_t)global->beg + aligned_size), + global->size_with_redzone - aligned_size, + KASAN_GLOBAL_REDZONE); +} + +void +__asan_register_globals(struct kasan_global *globals, size_t size) +{ + int i; + + for (i = 0; i < size; i++) + register_global(&globals[i]); +} + +void +__asan_unregister_globals(struct kasan_global *globals, size_t size) +{ +} + +#define DEFINE_ASAN_LOAD_STORE(size) \ + void __asan_load##size(unsigned long addr) \ + { \ + check_memory_region_inline(addr, size, false, _RET_IP_);\ + } \ + void __asan_load##size##_noabort(unsigned long); \ + void __asan_load##size##_noabort(unsigned long addr) \ + {\ + check_memory_region_inline(addr, size, false, _RET_IP_);\ + } \ + void __asan_store##size(unsigned long addr) \ + {\ + check_memory_region_inline(addr, size, true, _RET_IP_);\ + } \ + void __asan_store##size##_noabort(unsigned long); \ + void __asan_store##size##_noabort(unsigned long addr) \ + {\ + check_memory_region_inline(addr, size, true, _RET_IP_);\ + } \ + + +DEFINE_ASAN_LOAD_STORE(1); +DEFINE_ASAN_LOAD_STORE(2); +DEFINE_ASAN_LOAD_STORE(4); +DEFINE_ASAN_LOAD_STORE(8); +DEFINE_ASAN_LOAD_STORE(16); + +void +__asan_loadN(unsigned long addr, size_t size) +{ + check_memory_region(addr, size, false, _RET_IP_); +} + +void __asan_loadN_noabort(unsigned long, size_t); +void +__asan_loadN_noabort(unsigned long addr, size_t size) +{ + check_memory_region(addr, size, false, _RET_IP_); +} + +void __asan_storeN(unsigned long addr, size_t size) +{ + check_memory_region(addr, size, true, _RET_IP_); +} + +void __asan_storeN_noabort(unsigned long, size_t); +void +__asan_storeN_noabort(unsigned long addr, size_t size) +{ + check_memory_region(addr, size, true, _RET_IP_); 
+} + +/* to shut up compiler complaints */ + +void __asan_handle_no_return(void) {} + +/* Emitted by compiler to poison large objects when they go out of scope. */ +void +__asan_poison_stack_memory(const void *addr, size_t size) +{ + + /* + * Addr is KASAN_SHADOW_SCALE_SIZE-aligned and the object is surrounded + * by redzones, so we simply round up size to simplify logic. + */ + kasan_poison_shadow(addr, round_up(size, KASAN_SHADOW_SCALE_SIZE), + KASAN_USE_AFTER_SCOPE); +} + +/* Emitted by compiler to unpoison large objects when they go into scope. */ + +void +__asan_unpoison_stack_memory(const void *addr, size_t size) +{ + kasan_unpoison_shadow(addr, size); +} + +/* Emitted by compiler to poison alloca()ed objects. */ +void +__asan_alloca_poison(unsigned long addr, size_t size) +{ + size_t rounded_up_size = round_up(size, KASAN_SHADOW_SCALE_SIZE); + size_t padding_size = round_up(size, KASAN_ALLOCA_REDZONE_SIZE) - + rounded_up_size; + size_t rounded_down_size = round_down(size, KASAN_SHADOW_SCALE_SIZE); + + const void *left_redzone = (const void *)(addr - + KASAN_ALLOCA_REDZONE_SIZE); + const void *right_redzone = (const void *)(addr + rounded_up_size); + + //WARN_ON(!IS_ALIGNED(addr, KASAN_ALLOCA_REDZONE_SIZE)); + + kasan_unpoison_shadow((const void *)(addr + rounded_down_size), + size - rounded_down_size); + kasan_poison_shadow(left_redzone, KASAN_ALLOCA_REDZONE_SIZE, + KASAN_ALLOCA_LEFT); + kasan_poison_shadow(right_redzone, + padding_size + KASAN_ALLOCA_REDZONE_SIZE, + KASAN_ALLOCA_RIGHT); +} + +/* Emitted by compiler to unpoison alloca()ed areas when the stack unwinds. */ +void +__asan_allocas_unpoison(const void *stack_top, const void *stack_bottom) +{ + if (__predict_false(!stack_top || stack_top > stack_bottom)) + return; + + kasan_unpoison_shadow(stack_top, (const char *)stack_bottom - (const char *)stack_top); +} + +/* Emitted by the compiler to [un]poison local variables. */ +#define DEFINE_ASAN_SET_SHADOW(byte) \ + void __asan_set_shadow_##byte(void *addr, size_t size) \ + { \ + __builtin_memset((void *)addr, 0x##byte, size); \ + } \ + +DEFINE_ASAN_SET_SHADOW(00); +DEFINE_ASAN_SET_SHADOW(f1); +DEFINE_ASAN_SET_SHADOW(f2); +DEFINE_ASAN_SET_SHADOW(f3); +DEFINE_ASAN_SET_SHADOW(f5); +DEFINE_ASAN_SET_SHADOW(f8); +/* +#ifdef CONFIG_MEMORY_HOTPLUG +static bool shadow_mapped(unsigned long addr) +{ + pgd_t *pgd = pgd_offset_k(addr); + p4d_t *p4d; + pud_t *pud; + pmd_t *pmd; + pte_t *pte; + + if (pgd_none(*pgd)) + return false; + p4d = p4d_offset(pgd, addr); + if (p4d_none(*p4d)) + return false; + pud = pud_offset(p4d, addr); + if (pud_none(*pud)) + return false; +*/ + /* + * We can't use pud_large() or pud_huge(), the first one is + * arch-specific, the last one depends on HUGETLB_PAGE. So let's abuse + * pud_bad(), if pud is bad then it's bad because it's huge. 
+ */ +/* if (pud_bad(*pud)) + return true; + pmd = pmd_offset(pud, addr); + if (pmd_none(*pmd)) + return false; + + if (pmd_bad(*pmd)) + return true; + pte = pte_offset_kernel(pmd, addr); + return !pte_none(*pte); +} + +static int __meminit kasan_mem_notifier(struct notifier_block *nb, + unsigned long action, void *data) +{ + struct memory_notify *mem_data = data; + unsigned long nr_shadow_pages, start_kaddr, shadow_start; + unsigned long shadow_end, shadow_size; + + nr_shadow_pages = mem_data->nr_pages >> KASAN_SHADOW_SCALE_SHIFT; + start_kaddr = (unsigned long)pfn_to_kaddr(mem_data->start_pfn); + shadow_start = (unsigned long)kasan_mem_to_shadow((void *)start_kaddr); + shadow_size = nr_shadow_pages << PAGE_SHIFT; + shadow_end = shadow_start + shadow_size; + + if (WARN_ON(mem_data->nr_pages % KASAN_SHADOW_SCALE_SIZE) || + WARN_ON(start_kaddr % (KASAN_SHADOW_SCALE_SIZE << PAGE_SHIFT))) + return NOTIFY_BAD; + + switch (action) { + case MEM_GOING_ONLINE: { + void *ret; +*/ + /* + * If shadow is mapped already than it must have been mapped + * during the boot. This could happen if we onlining previously + * offlined memory. + */ +/* if (shadow_mapped(shadow_start)) + return NOTIFY_OK; + + ret = __vmalloc_node_range(shadow_size, PAGE_SIZE, shadow_start, + shadow_end, GFP_KERNEL, + PAGE_KERNEL, VM_NO_GUARD, + pfn_to_nid(mem_data->start_pfn), + __builtin_return_address(0)); + if (!ret) + return NOTIFY_BAD; + + kmemleak_ignore(ret); + return NOTIFY_OK; + } + case MEM_CANCEL_ONLINE: + case MEM_OFFLINE: { + struct vm_struct *vm; +*/ + /* + * shadow_start was either mapped during boot by kasan_init() + * or during memory online by __vmalloc_node_range(). + * In the latter case we can use vfree() to free shadow. + * Non-NULL result of the find_vm_area() will tell us if + * that was the second case. + * + * Currently it's not possible to free shadow mapped + * during boot by kasan_init(). It's because the code + * to do that hasn't been written yet. So we'll just + * leak the memory. 
+ */ +/* vm = find_vm_area((void *)shadow_start); + if (vm) + vfree((void *)shadow_start); + } + } + + return NOTIFY_OK; +} + +static int __init kasan_memhotplug_init(void) +{ + hotplug_memory_notifier(kasan_mem_notifier, 0); + + return 0; +} + +core_initcall(kasan_memhotplug_init); +#endif*/ diff -uNr src.git-mirror/sys/kern/kern_asan_init.c src.git/sys/kern/kern_asan_init.c --- src.git-mirror/sys/kern/kern_asan_init.c 1970-01-01 01:00:00.000000000 +0100 +++ src.git/sys/kern/kern_asan_init.c 2018-08-17 18:37:26.760137486 +0200 @@ -0,0 +1,159 @@ +// SPDX-License-Identifier: GPL-2.0 +//#define DISABLE_BRANCH_PROFILING +//#define pr_fmt(fmt) "kasan: " fmt + +#include +__KERNEL_RCSID(0, "$NetBSD$"); + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include + + +struct seg_details { + unsigned long vaddr; + unsigned long size; +}; + +static struct seg_details kmap[4]; + +void DumpSegments(void); +extern struct bootspace bootspace; + +void +DumpSegments(void) +{ + size_t i; + + /* + * Copy the addresses and sizes of the various regions in the + * bootspace structure into our kernel structure + */ + for (i = 0; i < BTSPACE_NSEGS; i++) { + if (bootspace.segs[i].type == BTSEG_NONE) { + continue; + } + kmap[bootspace.segs[i].type].vaddr = bootspace.segs[i].va; + kmap[bootspace.segs[i].type].size = bootspace.segs[i].sz; + } +} + +unsigned long text_start; +unsigned long text_end; + +/* Need to get a different offset */ +#define CPU_ENTRY_AREA_BASE 0xff0000 //temp value +#define CPU_ENTRY_AREA_END 0xff0000 //temp value +#define MAX_BITS 46 +#define MAX_MEM (1UL << MAX_BITS) + + +/* + * + */ +void +kasan_early_init(void) +{ + return; +} + +/* + * kasan_init is supposed to be called after the pmap(9) and uvm(9) bootstraps + * are done, since we have opted to use a high-level allocator function - + * uvm_km_alloc - to get the shadow region mapped. This is done with the idea + * that the area we are allocating is an unused hole. + */ +void +kasan_init(void) +{ + struct pmap *kernmap; + struct vm_map shadow_map; + // void *shadow_begin, *shadow_end; + + /* Clear the page table entries for the shadow region */ + kernmap = pmap_kernel(); + pmap_remove(kernmap, KASAN_SHADOW_START, KASAN_SHADOW_END); + pmap_update(kernmap); + + + /* Text Section and main shadow offsets */ + DumpSegments(); + text_start = kmap[1].vaddr; + text_end = kmap[1].vaddr + kmap[1].size; + + /* Initialize the kernel map for the unallocated region + + Alternate way to set up the map. 
+ uvm_map_setup(&shadow_map, (vaddr_t)KASAN_SHADOW_START, + (vaddr_t)KASAN_SHADOW_END, VM_MAP_PAGEABLE); + shadow_map.pmap = pmap_kernel(); //Not sure about this + + Might need to add a check to see if everything worked properly + + error = uvm_map_prepare(&shadow_map, + kmembase, kmemsize, + NULL, UVM_UNKNOWN_OFFSET, 0, + UVM_MAPFLAG(UVM_PROT_ALL, UVM_PROT_ALL, UVM_INH_NONE, + UVM_ADV_RANDOM, UVM_FLAG_FIXED), NULL); + if (!error) { + kernel_kmem_mapent_store.flags = + UVM_MAP_KERNEL | UVM_MAP_STATIC | UVM_MAP_NOMERGE; + } + + */ + + uvm_map(&shadow_map, (vaddr_t *)KASAN_SHADOW_START, + (size_t)(KASAN_SHADOW_END - KASAN_SHADOW_START), NULL, + UVM_UNKNOWN_OFFSET, 0, UVM_MAPFLAG(UVM_PROT_ALL, UVM_PROT_ALL, + UVM_INH_NONE, UVM_ADV_RANDOM, UVM_FLAG_FIXED)); + + /* + * Map is ready - now we can allocate the shadow buffer + * Allocate zero pages for the shadow region + * We wll do the sections as in Linux + */ + + /* User space */ + vm_map_setmin(&shadow_map, KASAN_SHADOW_START); + uvm_km_alloc(&shadow_map, ((vsize_t)(kasan_mem_to_shadow(L4_BASE)) + - KASAN_SHADOW_START), 0, UVM_KMF_ZERO); + + /* Second region is L4_BASE+MAX_MEM to start of the cpu entry region */ + vm_map_setmin(&shadow_map, (unsigned long)kasan_mem_to_shadow(L4_BASE + + MAX_MEM)); + uvm_km_alloc(&shadow_map, (vsize_t)kasan_mem_to_shadow((void *)(L4_BASE + + MAX_MEM)) - (vsize_t)kasan_mem_to_shadow((void *) + CPU_ENTRY_AREA_BASE), 0, UVM_KMF_ZERO); + + /* The cpu entry region - nid as 0*/ + uvm_km_alloc(&shadow_map, (vsize_t)kasan_mem_to_shadow((void *) + CPU_ENTRY_AREA_END) - (vsize_t)kasan_mem_to_shadow((void *) + CPU_ENTRY_AREA_BASE), 0, UVM_KMF_ZERO); + + /* Cpu end region to start of kernel map (KERNBASE)*/ + uvm_km_alloc(&shadow_map, (vsize_t)kasan_mem_to_shadow((void *) + CPU_ENTRY_AREA_END) - (vsize_t)kasan_mem_to_shadow((void *) + KERNBASE), 0, UVM_KMF_ZERO); + + /* The text section - nid as something */ + vm_map_setmin(&shadow_map, text_start); + uvm_km_alloc(&shadow_map, (vsize_t)kasan_mem_to_shadow((void *) + text_end) - (vsize_t)kasan_mem_to_shadow((void *) + text_start), 0, UVM_KMF_ZERO); + + /* Avoiding the Module map for now */ +} diff -uNr src.git-mirror/sys/kern/kern_asan_quarantine.c src.git/sys/kern/kern_asan_quarantine.c --- src.git-mirror/sys/kern/kern_asan_quarantine.c 1970-01-01 01:00:00.000000000 +0100 +++ src.git/sys/kern/kern_asan_quarantine.c 2018-08-17 18:37:26.785663933 +0200 @@ -0,0 +1,311 @@ +#include +__KERNEL_RCSID(0, "$NetBSD$"); + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include + +/* Data structure and operations for quarantine queues. */ + +/* + * Each queue is a signle-linked list, which also stores the total size of + * objects inside of it. 
+ */ +/* +struct qlist_head { + struct qlist_node *head; + struct qlist_node *tail; + size_t bytes; +}; + +#define QLIST_INIT { NULL, NULL, 0 } + +static bool qlist_empty(struct qlist_head *q) +{ + return !q->head; +} + +static void qlist_init(struct qlist_head *q) +{ + q->head = q->tail = NULL; + q->bytes = 0; +} + +static void qlist_put(struct qlist_head *q, struct qlist_node *qlink, + size_t size) +{ + if (__predict_false(qlist_empty(q))) + q->head = qlink; + else + q->tail->next = qlink; + q->tail = qlink; + qlink->next = NULL; + q->bytes += size; +} + +static void qlist_move_all(struct qlist_head *from, struct qlist_head *to) +{ + if (__predict_false(qlist_empty(from))) + return; + + if (qlist_empty(to)) { + *to = *from; + qlist_init(from); + return; + } + + to->tail->next = from->head; + to->tail = from->tail; + to->bytes += from->bytes; + + qlist_init(from); +} + +#define QUARANTINE_PERCPU_SIZE (1 << 20) +#define QUARANTINE_BATCHES \ + (1024 > 4 * CONFIG_NR_CPUS ? 1024 : 4 * CONFIG_NR_CPUS) +*/ +/* + * The object quarantine consists of per-cpu queues and a global queue, + * guarded by quarantine_lock. + */ +//static DEFINE_PER_CPU(struct qlist_head, cpu_quarantine); + +/* Round-robin FIFO array of batches. */ +//static struct qlist_head global_quarantine[QUARANTINE_BATCHES]; +//static int quarantine_head; +//static int quarantine_tail; +/* Total size of all objects in global_quarantine across all batches. */ +//static unsigned long quarantine_size; +//static DEFINE_SPINLOCK(quarantine_lock); +//DEFINE_STATIC_SRCU(remove_cache_srcu); + +/* Maximum size of the global queue. */ +//static unsigned long quarantine_max_size; + +/* + * Target size of a batch in global_quarantine. + * Usually equal to QUARANTINE_PERCPU_SIZE unless we have too much RAM. + */ +//static unsigned long quarantine_batch_size; + +/* + * The fraction of physical memory the quarantine is allowed to occupy. + * Quarantine doesn't support memory shrinker with SLAB allocator, so we keep + * the ratio low to avoid OOM. + */ +#define QUARANTINE_FRACTION 32 +/* +static struct pool_cache *qlink_to_cache(struct qlist_node *qlink) +{ + return virt_to_head_page(qlink)->slab_cache; +} + +static void *qlink_to_object(struct qlist_node *qlink, struct pool_cache *cache) +{ + struct kasan_free_meta *free_info = + container_of(qlink, struct kasan_free_meta, + quarantine_link); + + return ((void *)free_info) - cache->kasan_info.free_meta_offset; +} + +static void qlink_free(struct qlist_node *qlink, struct pool_cache *cache) +{ + void *object = qlink_to_object(qlink, cache); + unsigned long flags; + + if (IS_ENABLED(CONFIG_SLAB)) + local_irq_save(flags); + + ___cache_free(cache, object, _THIS_IP_); + + if (IS_ENABLED(CONFIG_SLAB)) + local_irq_restore(flags); +} + +static void qlist_free_all(struct qlist_head *q, struct pool_cache *cache) +{ + struct qlist_node *qlink; + + if (__predict_false(qlist_empty(q))) + return; + + qlink = q->head; + while (qlink) { + struct pool_cache *obj_cache = + cache ? cache : qlink_to_cache(qlink); + struct qlist_node *next = qlink->next; + + qlink_free(qlink, obj_cache); + qlink = next; + } + qlist_init(q); +} + +void quarantine_put(struct kasan_free_meta *info, struct pool_cache *cache) +{ + unsigned long flags; + struct qlist_head *q; + struct qlist_head temp = QLIST_INIT; +*/ /* + * Note: irq must be disabled until after we move the batch to the + * global quarantine. Otherwise quarantine_remove_cache() can miss + * some objects belonging to the cache if they are in our local temp + * list. 
quarantine_remove_cache() executes on_each_cpu() at the + * beginning which ensures that it either sees the objects in per-cpu + * lists or in the global quarantine. + */ +/* local_irq_save(flags); + + q = this_cpu_ptr(&cpu_quarantine); + qlist_put(q, &info->quarantine_link, cache->size); + if (__predict_false(q->bytes > QUARANTINE_PERCPU_SIZE)) { + qlist_move_all(q, &temp); + + spin_lock(&quarantine_lock); + WRITE_ONCE(quarantine_size, quarantine_size + temp.bytes); + qlist_move_all(&temp, &global_quarantine[quarantine_tail]); + if (global_quarantine[quarantine_tail].bytes >= + READ_ONCE(quarantine_batch_size)) { + int new_tail; + + new_tail = quarantine_tail + 1; + if (new_tail == QUARANTINE_BATCHES) + new_tail = 0; + if (new_tail != quarantine_head) + quarantine_tail = new_tail; + } + spin_unlock(&quarantine_lock); + } + + local_irq_restore(flags); +} + +void quarantine_reduce(void) +{ + size_t total_size, new_quarantine_size, percpu_quarantines; + unsigned long flags; + int srcu_idx; + struct qlist_head to_free = QLIST_INIT; + + if (likely(READ_ONCE(quarantine_size) <= + READ_ONCE(quarantine_max_size))) + return; +*/ + /* + * srcu critical section ensures that quarantine_remove_cache() + * will not miss objects belonging to the cache while they are in our + * local to_free list. srcu is chosen because (1) it gives us private + * grace period domain that does not interfere with anything else, + * and (2) it allows synchronize_srcu() to return without waiting + * if there are no pending read critical sections (which is the + * expected case). + */ +/* srcu_idx = srcu_read_lock(&remove_cache_srcu); + spin_lock_irqsave(&quarantine_lock, flags); +*/ + /* + * Update quarantine size in case of hotplug. Allocate a fraction of + * the installed memory to quarantine minus per-cpu queue limits. + */ +/* total_size = (READ_ONCE(totalram_pages) << PAGE_SHIFT) / + QUARANTINE_FRACTION; + percpu_quarantines = QUARANTINE_PERCPU_SIZE * num_online_cpus(); + new_quarantine_size = (total_size < percpu_quarantines) ? + 0 : total_size - percpu_quarantines; + WRITE_ONCE(quarantine_max_size, new_quarantine_size); +*/ /* Aim at consuming at most 1/2 of slots in quarantine. */ +/* WRITE_ONCE(quarantine_batch_size, max((size_t)QUARANTINE_PERCPU_SIZE, + 2 * total_size / QUARANTINE_BATCHES)); + + if (likely(quarantine_size > quarantine_max_size)) { + qlist_move_all(&global_quarantine[quarantine_head], &to_free); + WRITE_ONCE(quarantine_size, quarantine_size - to_free.bytes); + quarantine_head++; + if (quarantine_head == QUARANTINE_BATCHES) + quarantine_head = 0; + } + + spin_unlock_irqrestore(&quarantine_lock, flags); + + qlist_free_all(&to_free, NULL); + srcu_read_unlock(&remove_cache_srcu, srcu_idx); +} + +static void qlist_move_cache(struct qlist_head *from, + struct qlist_head *to, + struct pool_cache *cache) +{ + struct qlist_node *curr; + + if (__predict_false(qlist_empty(from))) + return; + + curr = from->head; + qlist_init(from); + while (curr) { + struct qlist_node *next = curr->next; + struct pool_cache *obj_cache = qlink_to_cache(curr); + + if (obj_cache == cache) + qlist_put(to, curr, obj_cache->size); + else + qlist_put(from, curr, obj_cache->size); + + curr = next; + } +} + +static void per_cpu_remove_cache(void *arg) +{ + struct pool_cache *cache = arg; + struct qlist_head to_free = QLIST_INIT; + struct qlist_head *q; + + q = this_cpu_ptr(&cpu_quarantine); + qlist_move_cache(q, &to_free, cache); + qlist_free_all(&to_free, cache); +} +*/ +/* Free all quarantined objects belonging to cache. 
*/ +void quarantine_remove_cache(struct pool_cache *cache) +{/* + unsigned long flags, i; + struct qlist_head to_free = QLIST_INIT; +*/ + /* + * Must be careful to not miss any objects that are being moved from + * per-cpu list to the global quarantine in quarantine_put(), + * nor objects being freed in quarantine_reduce(). on_each_cpu() + * achieves the first goal, while synchronize_srcu() achieves the + * second. + */ +/* on_each_cpu(per_cpu_remove_cache, cache, 1); + + spin_lock_irqsave(&quarantine_lock, flags); + for (i = 0; i < QUARANTINE_BATCHES; i++) { + if (qlist_empty(&global_quarantine[i])) + continue; + qlist_move_cache(&global_quarantine[i], &to_free, cache); +*/ /* Scanning whole quarantine can take a while. */ +/* spin_unlock_irqrestore(&quarantine_lock, flags); + cond_resched(); + spin_lock_irqsave(&quarantine_lock, flags); + } + spin_unlock_irqrestore(&quarantine_lock, flags); + + qlist_free_all(&to_free, cache); + + synchronize_srcu(&remove_cache_srcu);*/ +} diff -uNr src.git-mirror/sys/kern/kern_asan_report.c src.git/sys/kern/kern_asan_report.c --- src.git-mirror/sys/kern/kern_asan_report.c 1970-01-01 01:00:00.000000000 +0100 +++ src.git/sys/kern/kern_asan_report.c 2018-08-17 18:37:26.797075774 +0200 @@ -0,0 +1,465 @@ +#include +__KERNEL_RCSID(0, "$NetBSD$"); + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include + +/* Shadow layout customization. */ +#define SHADOW_BYTES_PER_BLOCK 1 +#define SHADOW_BLOCKS_PER_ROW 16 +#define SHADOW_BYTES_PER_ROW (SHADOW_BLOCKS_PER_ROW * SHADOW_BYTES_PER_BLOCK) +#define SHADOW_ROWS_AROUND_ADDR 2 +#define _RET_IP_ (unsigned long)__builtin_return_address(0) + +#define TASK_SIZE 30 //Temp + +#define panic printf +//Typedefs for current version +typedef uint8_t u8; +typedef uint16_t u16; +typedef uint32_t u32; +typedef uint64_t u64; + +typedef uint8_t __u8; +typedef uint16_t __u16; +typedef uint32_t __u32; +typedef uint64_t __u64; + +typedef int8_t s8; +typedef int16_t s16; +typedef int32_t s32; +typedef int64_t s64; + +typedef int8_t __s8; +typedef int16_t __s16; +typedef int32_t __s32; +typedef int64_t __s64; + +typedef uint16_t __le16; +typedef uint32_t __le32; +typedef uint64_t __le64; + +typedef uint16_t __be16; +typedef uint32_t __be32; +typedef uint64_t __be64; + +//End of typedefs + +static const void *find_first_bad_addr(const void *addr, size_t size) +{ + //u8 shadow_val = *(u8 *)kasan_mem_to_shadow(addr); + const void *first_bad_addr = addr; + +/* while (!shadow_val && first_bad_addr < (const char *)addr + size) { + (const char *)first_bad_addr += KASAN_SHADOW_SCALE_SIZE; + shadow_val = *(u8 *)kasan_mem_to_shadow(first_bad_addr); + } +*/ return first_bad_addr; +} + +static bool addr_has_shadow(struct kasan_bug_info *info) +{ + return (info->access_addr >= + kasan_shadow_to_mem((void *)KASAN_SHADOW_START)); +} + +static const char *get_shadow_bug_type(struct kasan_bug_info *info) +{ + const char *bug_type = "unknown-crash"; + u8 *shadow_addr; + + info->first_bad_addr = find_first_bad_addr(info->access_addr, + info->access_size); + + shadow_addr = (u8 *)kasan_mem_to_shadow(info->first_bad_addr); + + /* + * If shadow byte value is in [0, KASAN_SHADOW_SCALE_SIZE) we can look + * at the next shadow byte to determine the type of the bad access. + */ + if (*shadow_addr > 0 && *shadow_addr <= KASAN_SHADOW_SCALE_SIZE - 1) + shadow_addr++; + + switch (*shadow_addr) { + case 0 ... 
KASAN_SHADOW_SCALE_SIZE - 1: + /* + * In theory it's still possible to see these shadow values + * due to a data race in the kernel code. + */ + bug_type = "out-of-bounds"; + break; + case KASAN_PAGE_REDZONE: + case KASAN_KMALLOC_REDZONE: + bug_type = "slab-out-of-bounds"; + break; + case KASAN_GLOBAL_REDZONE: + bug_type = "global-out-of-bounds"; + break; + case KASAN_STACK_LEFT: + case KASAN_STACK_MID: + case KASAN_STACK_RIGHT: + case KASAN_STACK_PARTIAL: + bug_type = "stack-out-of-bounds"; + break; + case KASAN_FREE_PAGE: + case KASAN_KMALLOC_FREE: + bug_type = "use-after-free"; + break; + case KASAN_USE_AFTER_SCOPE: + bug_type = "use-after-scope"; + break; + case KASAN_ALLOCA_LEFT: + case KASAN_ALLOCA_RIGHT: + bug_type = "alloca-out-of-bounds"; + break; + } + + return bug_type; +} + +static const char *get_wild_bug_type(struct kasan_bug_info *info) +{ + const char *bug_type = "unknown-crash"; + + if ((unsigned long)info->access_addr < PAGE_SIZE) + bug_type = "null-ptr-deref"; + else if ((unsigned long)info->access_addr < TASK_SIZE) + bug_type = "user-memory-access"; + else + bug_type = "wild-memory-access"; + + return bug_type; +} + +static const char *get_bug_type(struct kasan_bug_info *info) +{ + if (addr_has_shadow(info)) + return get_shadow_bug_type(info); + return get_wild_bug_type(info); +} + +static void print_error_description(struct kasan_bug_info *info) +{ + const char *bug_type = get_bug_type(info); + + snprintf(info->bug_type, 50, "BUG: KASAN: %s in %pS\n", + bug_type, (void *)info->ip); + snprintf(info->bug_info, 60, "%s of size %zu at addr %px\n", + info->is_write ? "Write" : "Read", info->access_size, + info->access_addr); +} + +extern unsigned long text_start ; +extern unsigned long text_end ; + +static inline bool kernel_or_module_addr(const void *addr) +{ + if (addr >= (void *)text_start && addr < (void *)text_end) + return true; +/* if (is_module_address((unsigned long)addr)) + return true; +*/ return false; +} +/* +static inline bool init_task_stack_addr(const void *addr) +{ + return addr >= (void *)&init_thread_union.stack && + (addr <= (void *)&init_thread_union.stack + + sizeof(init_thread_union.stack)); +} + +static DEFINE_SPINLOCK(report_lock); +*/ + +/* + * REMOVED start_report and end report + */ + +/* +static void print_track(struct kasan_track *track, const char *prefix) +{ + pr_err("%s by task %u:\n", prefix, track->pid); + if (track->stack) { + struct stack_trace trace; + + depot_fetch_stack(track->stack, &trace); + print_stack_trace(&trace, 0); + } else { + pr_err("(stack is not available)\n"); + } +} + +static struct page *addr_to_page(const void *addr) +{ + if ((addr >= (void *)PAGE_OFFSET) && + (addr < high_memory)) + return virt_to_head_page(addr); + return NULL; +} + +static void describe_object_addr(struct kmem_cache *cache, void *object, + const void *addr) +{ + unsigned long access_addr = (unsigned long)addr; + unsigned long object_addr = (unsigned long)object; + const char *rel_type; + int rel_bytes; + + pr_err("The buggy address belongs to the object at %px\n" + " which belongs to the cache %s of size %d\n", + object, cache->name, cache->object_size); + + if (!addr) + return; + + if (access_addr < object_addr) { + rel_type = "to the left"; + rel_bytes = object_addr - access_addr; + } else if (access_addr >= object_addr + cache->object_size) { + rel_type = "to the right"; + rel_bytes = access_addr - (object_addr + cache->object_size); + } else { + rel_type = "inside"; + rel_bytes = access_addr - object_addr; + } + + pr_err("The buggy address is 
located %d bytes %s of\n" + " %d-byte region [%px, %px)\n", + rel_bytes, rel_type, cache->object_size, (void *)object_addr, + (void *)(object_addr + cache->object_size)); +} + +static void describe_object(struct kmem_cache *cache, void *object, + const void *addr) +{ + struct kasan_alloc_meta *alloc_info = get_alloc_info(cache, object); + + if (cache->flags & SLAB_KASAN) { + print_track(&alloc_info->alloc_track, "Allocated"); + pr_err("\n"); + print_track(&alloc_info->free_track, "Freed"); + pr_err("\n"); + } + + describe_object_addr(cache, object, addr); +} + +static void print_address_description(void *addr) +{ + struct page *page = addr_to_page(addr); + + dump_stack(); + pr_err("\n"); + + if (page && PageSlab(page)) { + struct kmem_cache *cache = page->slab_cache; + void *object = nearest_obj(cache, page, addr); + + describe_object(cache, object, addr); + } + + if (kernel_or_module_addr(addr) && !init_task_stack_addr(addr)) { + pr_err("The buggy address belongs to the variable:\n"); + pr_err(" %pS\n", addr); + } + + if (page) { + pr_err("The buggy address belongs to the page:\n"); + dump_page(page, "kasan: bad access detected"); + } +} + +static bool row_is_guilty(const void *row, const void *guilty) +{ + return (row <= guilty) && (guilty < row + SHADOW_BYTES_PER_ROW); +} + +static int shadow_pointer_offset(const void *row, const void *shadow) +{ +*/ /* The length of ">ff00ff00ff00ff00: " is + * 3 + (BITS_PER_LONG/8)*2 chars. + */ +/* return 3 + (BITS_PER_LONG/8)*2 + (shadow - row)*2 + + (shadow - row) / SHADOW_BYTES_PER_BLOCK + 1; +} + +static void print_shadow_for_address(const void *addr) +{ + int i; + const void *shadow = kasan_mem_to_shadow(addr); + const void *shadow_row; + + shadow_row = (void *)round_down((unsigned long)shadow, + SHADOW_BYTES_PER_ROW) + - SHADOW_ROWS_AROUND_ADDR * SHADOW_BYTES_PER_ROW; + + pr_err("Memory state around the buggy address:\n"); + + for (i = -SHADOW_ROWS_AROUND_ADDR; i <= SHADOW_ROWS_AROUND_ADDR; i++) { + const void *kaddr = kasan_shadow_to_mem(shadow_row); + char buffer[4 + (BITS_PER_LONG/8)*2]; + char shadow_buf[SHADOW_BYTES_PER_ROW]; + + snprintf(buffer, sizeof(buffer), + (i == 0) ? ">%px: " : " %px: ", kaddr); +*/ /* + * We should not pass a shadow pointer to generic + * function, because generic functions may try to + * access kasan mapping for the passed address. 
+ */ +/* memcpy(shadow_buf, shadow_row, SHADOW_BYTES_PER_ROW); + print_hex_dump(KERN_ERR, buffer, + DUMP_PREFIX_NONE, SHADOW_BYTES_PER_ROW, 1, + shadow_buf, SHADOW_BYTES_PER_ROW, 0); + + if (row_is_guilty(shadow_row, shadow)) + pr_err("%*c\n", + shadow_pointer_offset(shadow_row, shadow), + '^'); + + shadow_row += SHADOW_BYTES_PER_ROW; + } +} + +void kasan_report_invalid_free(void *object, unsigned long ip) +{ + unsigned long flags; + + kasan_start_report(&flags); + pr_err("BUG: KASAN: double-free or invalid-free in %pS\n", (void *)ip); + pr_err("\n"); + print_address_description(object); + pr_err("\n"); + print_shadow_for_address(object); + kasan_end_report(&flags); +} +*/ +static void kasan_report_error(struct kasan_bug_info *info) +{ + const char *delimit = "==================================================================\n"; + + info->start = delimit; + info->end = delimit; + + print_error_description(info); + + if (!addr_has_shadow(info)) { +// dump_stack(); + } else { +// print_address_description((void *)info->access_addr); +// pr_err("\n"); +// print_shadow_for_address(info->first_bad_addr); + } + +} + +static void +kasan_print_report(struct kasan_bug_info *info) +{ + panic("%.67s %s\n %s\n %.67s", info->start, info->bug_type, info->bug_info, info->end); +} + +//static unsigned long kasan_flags; + +#define KASAN_BIT_REPORTED 0 +#define KASAN_BIT_MULTI_SHOT 1 + +bool kasan_save_enable_multi_shot(void); +bool kasan_save_enable_multi_shot(void) +{ +// return test_and_set_bit(KASAN_BIT_MULTI_SHOT, &kasan_flags); + return true; +} + +void kasan_restore_multi_shot(bool); +void kasan_restore_multi_shot(bool enabled) +{ +// if (!enabled) +// clear_bit(KASAN_BIT_MULTI_SHOT, &kasan_flags); +} +/* +static int __init kasan_set_multi_shot(char *str) +{ + set_bit(KASAN_BIT_MULTI_SHOT, &kasan_flags); + return 1; +} +__setup("kasan_multi_shot", kasan_set_multi_shot); + +static inline bool kasan_report_enabled(void) +{ + if (kasan_depth) + return false; + if (test_bit(KASAN_BIT_MULTI_SHOT, &kasan_flags)) + return true; + return !test_and_set_bit(KASAN_BIT_REPORTED, &kasan_flags); +} +*/ +void kasan_report(unsigned long addr, size_t size, + bool is_write, unsigned long ip) +{ + struct kasan_bug_info info; + +/* if (likely(!kasan_report_enabled())) + return; +Not necessary maybe +*/ +// disable_trace_on_warning(); + + info.access_addr = (void *)addr; + info.first_bad_addr = (void *)addr; + info.access_size = size; + info.is_write = is_write; + info.ip = ip; + + kasan_report_error(&info); + kasan_print_report(&info); +} + + +#define DEFINE_ASAN_REPORT_LOAD(size) \ +void __asan_report_load##size##_noabort(unsigned long); \ +void __asan_report_load##size##_noabort(unsigned long addr) \ +{ \ + kasan_report(addr, size, false, _RET_IP_); \ +} \ + +#define DEFINE_ASAN_REPORT_STORE(size) \ +void __asan_report_store##size##_noabort(unsigned long ); \ +void __asan_report_store##size##_noabort(unsigned long addr) \ +{ \ + kasan_report(addr, size, true, _RET_IP_); \ +} \ + +DEFINE_ASAN_REPORT_LOAD(1); +DEFINE_ASAN_REPORT_LOAD(2); +DEFINE_ASAN_REPORT_LOAD(4); +DEFINE_ASAN_REPORT_LOAD(8); +DEFINE_ASAN_REPORT_LOAD(16); +DEFINE_ASAN_REPORT_STORE(1); +DEFINE_ASAN_REPORT_STORE(2); +DEFINE_ASAN_REPORT_STORE(4); +DEFINE_ASAN_REPORT_STORE(8); +DEFINE_ASAN_REPORT_STORE(16); + +void __asan_report_load_n_noabort(unsigned long , size_t ); +void __asan_report_load_n_noabort(unsigned long addr, size_t size) +{ + kasan_report(addr, size, false, _RET_IP_); +} + +void __asan_report_store_n_noabort(unsigned long, size_t ); +void 
diff -uNr src.git-mirror/sys/kern/subr_pool.c src.git/sys/kern/subr_pool.c
--- src.git-mirror/sys/kern/subr_pool.c 2018-08-17 18:51:35.192586067 +0200
+++ src.git/sys/kern/subr_pool.c 2018-08-17 18:37:26.818689319 +0200
@@ -55,6 +55,7 @@
 #include
 #include
 #include
+#include <sys/kasan.h>
 #include
@@ -126,6 +127,7 @@
 typedef uint32_t pool_item_bitmap_t;
 #define BITMAP_SIZE (CHAR_BIT * sizeof(pool_item_bitmap_t))
 #define BITMAP_MASK (BITMAP_SIZE - 1)
+#define _RET_IP_ (unsigned long)__builtin_return_address(0)
 
 struct pool_item_header {
     /* Page headers */
@@ -1757,7 +1759,7 @@
     pc = pool_get(&cache_pool, PR_WAITOK);
     if (pc == NULL)
         return NULL;
-
+    kasan_cache_create(pc, &size, &flags);
     pool_cache_bootstrap(pc, size, align, align_offset, flags, wchan,
         palloc, ipl, ctor, dtor, arg);
@@ -2318,6 +2320,7 @@
             break;
         }
+        kasan_slab_alloc(pc, &object, flags);
     /*
      * We would like to KASSERT(object || (flags & PR_NOWAIT)), but
      * pool_cache_get can fail even in the PR_WAITOK case, if the
@@ -2445,6 +2448,8 @@
     pool_redzone_check(&pc->pc_pool, object);
     FREECHECK_IN(&pc->pc_freecheck, object);
 
+    if (kasan_slab_free(pc, object, _RET_IP_))
+        return;
     /* Lock out interrupts and disable preemption. */
     s = splvm();
     while (/* CONSTCOND */ true) {
@@ -2480,6 +2485,7 @@
         if (!pool_cache_put_slow(cc, s, object))
             break;
     }
+
 }
 
 /*
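The subr_pool.c hunks above are the only integration points with the allocator: kasan_cache_create() gets a chance to grow the object size for redzones and metadata, kasan_slab_alloc() is called on the way out of the pool_cache get path, and kasan_slab_free() may take ownership of the object on the put path, which is why its return value short-circuits the free. A minimal sketch of that contract (not the patch's actual kern_asan.c; kasan_poison_shadow() is assumed here as the internal helper that writes shadow markers):

bool
kasan_slab_free(struct pool_cache *pc, void *object, unsigned long ip)
{
    /* Mark the whole object as freed so any later access trips the shadow check. */
    kasan_poison_shadow(object, pc->pc_reqsize, KASAN_KMALLOC_FREE);

    /* Defer the real release: park the object in the quarantine instead. */
    quarantine_put(get_free_info(pc, object), pc);

    /* true == KASAN took ownership; the caller must not free the object now. */
    return true;
}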
diff -uNr src.git-mirror/sys/modules/examples/Makefile src.git/sys/modules/examples/Makefile
--- src.git-mirror/sys/modules/examples/Makefile 2018-08-17 18:51:35.434597663 +0200
+++ src.git/sys/modules/examples/Makefile 2018-08-17 18:36:37.338412331 +0200
@@ -4,6 +4,7 @@
 SUBDIR+= executor
 SUBDIR+= hello
+SUBDIR+= kernel_map
 #SUBDIR+= luahello # Nothing to build here, only text files
 SUBDIR+= luareadhappy # Needs an additional Lua script
 SUBDIR+= panic_string # Crashes the system
diff -uNr src.git-mirror/sys/modules/examples/kernel_map/Makefile src.git/sys/modules/examples/kernel_map/Makefile
--- src.git-mirror/sys/modules/examples/kernel_map/Makefile 1970-01-01 01:00:00.000000000 +0100
+++ src.git/sys/modules/examples/kernel_map/Makefile 2018-08-17 18:36:37.360811457 +0200
@@ -0,0 +1,10 @@
+# $NetBSD: Makefile,v 1.1 2018/04/13 01:20:28 kamil Exp $
+
+.include "../Makefile.inc"
+
+#S?= /usr/src/sys
+
+KMOD= kernel_map
+SRCS= kernel_map.c
+
+.include <bsd.kmodule.mk>
diff -uNr src.git-mirror/sys/modules/examples/kernel_map/kernel_map.c src.git/sys/modules/examples/kernel_map/kernel_map.c
--- src.git-mirror/sys/modules/examples/kernel_map/kernel_map.c 1970-01-01 01:00:00.000000000 +0100
+++ src.git/sys/modules/examples/kernel_map/kernel_map.c 2018-08-17 18:36:37.378356824 +0200
@@ -0,0 +1,224 @@
+/* $NetBSD$ */
+
+/*-
+ * Copyright (c) 2015 The NetBSD Foundation, Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
+ * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
+ * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <sys/cdefs.h>
+__KERNEL_RCSID(0, "$NetBSD$");
+
+#include
+#include
+#include
+#include
+#include
+#include
+//#include
+
+#include
+
+#include
+#include
+
+#define BTSEG_HEAP 4
+#define BTSEG_STACK 5
+
+#define NO_OF_SEGS 6
+
+#define MINKERN 0
+#define MAXKERN 1
+#define MINVM 2
+#define MAXVM 3
+
+/*
+ * To use this device you need to do:
+ *     mknod /dev/kernel_map c 210 0
+ */
+
+dev_type_open(kernel_map_open);
+dev_type_close(kernel_map_close);
+dev_type_read(kernel_map_read);
+
+static struct cdevsw kernel_map_cdevsw = {
+    .d_open = kernel_map_open,
+    .d_close = kernel_map_close,
+    .d_read = kernel_map_read,
+    .d_write = nowrite,
+    .d_ioctl = noioctl,
+    .d_stop = nostop,
+    .d_tty = notty,
+    .d_poll = nopoll,
+    .d_mmap = nommap,
+    .d_kqfilter = nokqfilter,
+    .d_discard = nodiscard,
+    .d_flag = D_OTHER
+};
+
+struct seg_details {
+    const char *name;
+    int64_t *vaddr;
+    int64_t size;
+};
+
+static struct seg_details kmap[6];
+
+struct const_details {
+    const char *name;
+    int64_t *vaddr;
+};
+
+static struct const_details consts[6];
+
+void DumpSegments(void);
+void DumpConstants(void);
+void initialize_names(void);
+
+extern struct bootspace bootspace;
+
+void
+DumpSegments(void)
+{
+    size_t i;
+
+    // Copy the addresses from the bootspace structure into our table.
+
+    for (i = 0; i < BTSPACE_NSEGS; i++) {
+        if (bootspace.segs[i].type == BTSEG_NONE) {
+            continue;
+        }
+        kmap[bootspace.segs[i].type].vaddr = (void *)bootspace.segs[i].va;
+        kmap[bootspace.segs[i].type].size = bootspace.segs[i].sz;
+    }
+
+    // Find the address and size of the heap and the stack and copy them.
+
+    kmap[BTSEG_HEAP].vaddr = (void *)VM_MIN_KERNEL_ADDRESS;
+    kmap[BTSEG_HEAP].size = VM_MAX_KERNEL_ADDRESS - VM_MIN_KERNEL_ADDRESS;
+    kmap[BTSEG_STACK].vaddr = (void *)VM_MIN_KERNEL_ADDRESS;
+    kmap[BTSEG_STACK].size = VM_MAX_KERNEL_ADDRESS - VM_MIN_KERNEL_ADDRESS;
+}
+
+void
+DumpConstants(void)
+{
+    consts[MINKERN].vaddr = (void *)VM_MIN_KERNEL_ADDRESS;
+    consts[MAXKERN].vaddr = (void *)VM_MAX_KERNEL_ADDRESS;
+    consts[MINVM].vaddr = (void *)VM_MIN_ADDRESS;
+    consts[MAXVM].vaddr = (void *)VM_MAX_ADDRESS;
+}
+
+
+void
+initialize_names(void)
+{
+
+    kmap[BTSEG_NONE].name = "none";
+    kmap[BTSEG_TEXT].name = "text";
+    kmap[BTSEG_RODATA].name = "rodata";
+    kmap[BTSEG_DATA].name = "data";
+    kmap[BTSEG_HEAP].name = "heap";
+    kmap[BTSEG_STACK].name = "stack";
+
+    consts[MINKERN].name = "Minimum Kernel address";
+    consts[MAXKERN].name = "Maximum Kernel address";
+    consts[MINVM].name = "Minimum User address";
+    consts[MAXVM].name = "Maximum User address";
+}
+
+
+int
+kernel_map_open(dev_t self __unused, int flag __unused, int mode __unused,
+    struct lwp *l __unused)
+{
+
+    // Initialize the names of all the regions.
+
+    initialize_names();
+
+    // Dump all the regions into the structure.
+
+    DumpSegments();
+
+    // Dump all the constants into the structure.
+
+    DumpConstants();
+
+    return 0;
+}
+
+int
+kernel_map_close(dev_t self __unused, int flag __unused, int mode __unused,
+    struct lwp *l __unused)
+{
+    return 0;
+}
+
+int
+kernel_map_read(dev_t self __unused, struct uio *uio, int flags __unused)
+{
+    size_t i;
+
+    printf("\n------ Segments ------\n");
+
+    for (i = 1; i < NO_OF_SEGS; i++) {
+        printf("Segment %zu (%s): va=%p size=%zu\n", i,
+            kmap[i].name, kmap[i].vaddr, (size_t)kmap[i].size);
+    }
+
+    printf("\n------ Kernel and User Map Constants ------\n");
+
+    for (i = 0; i < NO_OF_SEGS - 2; i++) {
+        printf("Constant %zu (%s): va=%p\n", i, consts[i].name,
+            consts[i].vaddr);
+    }
+
+    return 0;
+}
+
+MODULE(MODULE_CLASS_MISC, kernel_map, NULL);
+
+static int
+kernel_map_modcmd(modcmd_t cmd, void *arg __unused)
+{
+    /* The major should be verified and changed if needed to avoid
+     * conflicts with other devices. */
+    int cmajor = 210, bmajor = -1;
+
+    switch (cmd) {
+    case MODULE_CMD_INIT:
+        if (devsw_attach("kernel_map", NULL, &bmajor, &kernel_map_cdevsw,
+            &cmajor))
+            return ENXIO;
+        return 0;
+    case MODULE_CMD_FINI:
+        devsw_detach(NULL, &kernel_map_cdevsw);
+        return 0;
+    default:
+        return ENOTTY;
+    }
+}
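For completeness, a user-space usage sketch for the example module above (not part of the patch). It assumes the device node was created as described in the driver comment (mknod /dev/kernel_map c 210 0); open() fills the tables and read() dumps them with the kernel printf(), so the output lands on the console/dmesg rather than in the read buffer:

#include <fcntl.h>
#include <unistd.h>
#include <err.h>

int
main(void)
{
    char buf[1];
    int fd;

    if ((fd = open("/dev/kernel_map", O_RDONLY)) == -1)
        err(1, "open /dev/kernel_map");
    if (read(fd, buf, sizeof(buf)) == -1)   /* triggers the console dump */
        err(1, "read");
    close(fd);
    return 0;
}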
diff -uNr src.git-mirror/sys/sys/kasan.h src.git/sys/sys/kasan.h
--- src.git-mirror/sys/sys/kasan.h 1970-01-01 01:00:00.000000000 +0100
+++ src.git/sys/sys/kasan.h 2018-08-17 18:37:26.835621232 +0200
@@ -0,0 +1,278 @@
+#ifndef _SYS_KASAN_H
+#define _SYS_KASAN_H
+
+#include
+#include
+#include
+
+struct kmem_cache;
+struct page;
+struct vm_struct;
+struct task_struct;
+
+typedef uint32_t u32;
+
+
+/*
+ * Start of necessary Macros
+ */
+
+#define CONFIG_KASAN_SHADOW_OFFSET 0
+#define KASAN_SHADOW_OFFSET 0UL
+#define KASAN_SHADOW_SCALE_SHIFT 3
+#define __VIRTUAL_MASK_SHIFT 47
+
+/*
+ * The compiler uses the shadow offset assuming that addresses start
+ * from 0.  Kernel addresses don't start from 0, so the shadow for the
+ * kernel really starts from the compiler's shadow offset +
+ * ('kernel address space start' >> KASAN_SHADOW_SCALE_SHIFT).
+ */
+#define KASAN_SHADOW_START (KASAN_SHADOW_OFFSET + VM_MAX_KERNEL_ADDRESS)
+#define KASAN_SHADOW_END (KASAN_SHADOW_START + \
+    (1ULL << (__VIRTUAL_MASK_SHIFT - \
+    KASAN_SHADOW_SCALE_SHIFT)))
+
+#define KASAN_SHADOW_SCALE_SIZE (1UL << KASAN_SHADOW_SCALE_SHIFT)
+#define KASAN_SHADOW_MASK (KASAN_SHADOW_SCALE_SIZE - 1)
+
+#define KASAN_FREE_PAGE 0xFF /* page was freed */
+#define KASAN_PAGE_REDZONE 0xFE /* redzone for kmalloc_large allocations */
+#define KASAN_KMALLOC_REDZONE 0xFC /* redzone inside slub object */
+#define KASAN_KMALLOC_FREE 0xFB /* object was freed (kmem_cache_free/kfree) */
+#define KASAN_GLOBAL_REDZONE 0xFA /* redzone for global variable */
+
+/*
+ * Stack redzone shadow values
+ * (Those are the compiler's ABI, don't change them)
+ */
+#define KASAN_STACK_LEFT 0xF1
+#define KASAN_STACK_MID 0xF2
+#define KASAN_STACK_RIGHT 0xF3
+#define KASAN_STACK_PARTIAL 0xF4
+#define KASAN_USE_AFTER_SCOPE 0xF8
+
+/*
+ * alloca redzone shadow values
+ */
+#define KASAN_ALLOCA_LEFT 0xCA
+#define KASAN_ALLOCA_RIGHT 0xCB
+
+#define KASAN_ALLOCA_REDZONE_SIZE 32
+
+/* Don't break randconfig/all*config builds */
+#ifndef KASAN_ABI_VERSION
+#define KASAN_ABI_VERSION 1
+#endif
+
+/*
+ * End of Macros
+ */
+
+/*
+ * Start of Structure definitions
+ */
+
+//int kasan_depth;
+
+struct kasan_bug_info {
+    /* Buffers to store the report parts */
+    const char *start;
+    const char *end;
+    char *bug_type;
+    char *bug_info;
+
+    /* Variables to store the important details */
+    const void *access_addr;
+    const void *first_bad_addr;
+    size_t access_size;
+    bool is_write;
+    unsigned long ip;
+};
+
+
+/* The layout of this struct is dictated by the compiler */
+struct kasan_source_location {
+    const char *filename;
+    int line_no;
+    int column_no;
+};
+
+/* The layout of this struct is dictated by the compiler */
+struct kasan_global {
+    const void *beg;                /* Address of the beginning of the global variable. */
+    size_t size;                    /* Size of the global variable. */
+    size_t size_with_redzone;       /* Size of the variable + size of the red zone, 32-byte aligned. */
+    const void *name;
+    const void *module_name;        /* Name of the module where the global variable is declared. */
+    unsigned long has_dynamic_init; /* This is needed for C++. */
+#if KASAN_ABI_VERSION >= 4
+    struct kasan_source_location *location;
+#endif
+#if KASAN_ABI_VERSION >= 5
+    char *odr_indicator;
+#endif
+};
+
+/*
+ * Structures to keep the alloc and free tracks
+ */
+
+#define KASAN_STACK_DEPTH 64
+
+struct kasan_track {
+    u32 pid;
+//  depot_stack_handle_t stack;
+};
+
+struct kasan_alloc_meta {
+    struct kasan_track alloc_track;
+    struct kasan_track free_track;
+};
+
+struct qlist_node {
+    struct qlist_node *next;
+};
+
+struct kasan_free_meta {
+    /* This field is used while the object is in the quarantine.
+     * Otherwise it might be used for the allocator freelist.
+     */
+    struct qlist_node quarantine_link;
+};
+
+/*
+ * End of Structure definitions
+ */
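Before the translation helpers below, a worked example of the 1:8 shadow encoding that the macros above define (orientation only, not code from the patch). A 13-byte object followed by its redzone is described by the shadow bytes

    object memory:  [ 8 valid bytes ][ 5 valid + 3 unusable ][ redzone ... ]
    shadow memory:  [      0x00     ][         0x05         ][  0xFC ...  ]

that is, 0x00 marks a fully addressable 8-byte granule, 1..7 give the number of valid leading bytes, and markers such as KASAN_KMALLOC_REDZONE/KASAN_KMALLOC_FREE flag red zones and freed memory. With KASAN_SHADOW_OFFSET == 0 the lookup performed by kasan_mem_to_shadow() reduces to a plain shift:

static inline int8_t
kasan_shadow_byte_of(const void *addr)
{
    /* One shadow byte per KASAN_SHADOW_SCALE_SIZE (8) bytes of memory. */
    return *(const int8_t *)(((unsigned long)addr >>
        KASAN_SHADOW_SCALE_SHIFT) + KASAN_SHADOW_OFFSET);
}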
+
+/*
+ * Start of Shadow translation functions
+ */
+
+static inline const void *kasan_shadow_to_mem(const void *shadow_addr)
+{
+    return (void *)(((unsigned long)shadow_addr - KASAN_SHADOW_OFFSET)
+        << KASAN_SHADOW_SCALE_SHIFT);
+}
+
+static inline void *kasan_mem_to_shadow(const void *addr)
+{
+    return (void *)(((unsigned long)addr >> KASAN_SHADOW_SCALE_SHIFT)
+        + KASAN_SHADOW_OFFSET);
+}
+
+/*
+ * End of Shadow translation functions
+ */
+
+/*
+ * Start of Function prototypes
+ */
+
+/* kasan_init.c */
+
+/* All the kasan init functions for page traversal have been removed */
+void kasan_early_init(void);
+void kasan_init(void);
+
+/* kern_asan.c */
+extern void kasan_enable_current(void);
+extern void kasan_disable_current(void);
+void kasan_unpoison_shadow(const void *address, size_t size);
+
+void kasan_unpoison_task_stack(struct lwp *task);
+void kasan_unpoison_stack_above_sp_to(const void *watermark);
+
+void kasan_alloc_pages(struct page *page, unsigned int order);
+void kasan_free_pages(struct page *page, unsigned int order);
+
+void kasan_cache_create(struct pool_cache *cache, size_t *size,
+    unsigned int *flags);
+
+void kasan_cache_shrink(struct pool_cache *cache);
+void kasan_cache_shutdown(struct pool_cache *cache);
+
+void kasan_poison_slab(struct page *page);
+void kasan_unpoison_object_data(struct pool_cache *cache, void *object);
+void kasan_poison_object_data(struct pool_cache *cache, void *object);
+void kasan_init_slab_obj(struct pool_cache *cache, const void *object);
+
+void kasan_kmalloc_large(const void *ptr, size_t size, unsigned int flags);
+void kasan_kfree_large(void *ptr, unsigned long ip);
+void kasan_poison_kfree(void *ptr, unsigned long ip);
+void kasan_kmalloc(struct pool_cache *s, const void *object, size_t size,
+    unsigned int flags);
+void kasan_krealloc(const void *object, size_t new_size, unsigned int flags);
+
+void kasan_slab_alloc(struct pool_cache *s, void *object, unsigned int flags);
+bool kasan_slab_free(struct pool_cache *s, void *object, unsigned long ip);
+
+
+int kasan_module_alloc(void *addr, size_t size);
+void kasan_free_shadow(const struct vm_struct *vm);
+
+size_t ksize(const void *);
+static inline void kasan_unpoison_slab(const void *ptr) { ksize(ptr); }
+size_t kasan_metadata_size(struct pool_cache *cache);
+
+bool kasan_save_enable_multi_shot(void);
+void kasan_restore_multi_shot(bool enabled);
+
+struct kasan_alloc_meta *get_alloc_info(struct pool_cache *cache,
+    const void *object);
+struct kasan_free_meta *get_free_info(struct pool_cache *cache,
+    const void *object);
+
+void kasan_unpoison_task_stack_below(const void *watermark);
+void __asan_register_globals(struct kasan_global *globals, size_t size);
+void __asan_unregister_globals(struct kasan_global *globals, size_t size);
+void __asan_loadN(unsigned long addr, size_t size);
+void __asan_storeN(unsigned long addr, size_t size);
+void __asan_handle_no_return(void);
+void __asan_poison_stack_memory(const void *addr, size_t size);
+void __asan_unpoison_stack_memory(const void *addr, size_t size);
+void __asan_alloca_poison(unsigned long addr, size_t size);
+void __asan_allocas_unpoison(const void *stack_top, const void *stack_bottom);
+
+void __asan_load1(unsigned long addr);
+void __asan_store1(unsigned long addr);
+void __asan_load2(unsigned long addr);
+void __asan_store2(unsigned long addr);
+void __asan_load4(unsigned long addr);
+void __asan_store4(unsigned long addr);
+void __asan_load8(unsigned long addr);
+void __asan_store8(unsigned long addr);
+void __asan_load16(unsigned long addr);
+void __asan_store16(unsigned long addr);
+
+void __asan_load1_noabort(unsigned long addr);
+void __asan_store1_noabort(unsigned long addr);
+void __asan_load2_noabort(unsigned long addr);
+void __asan_store2_noabort(unsigned long addr);
+void __asan_load4_noabort(unsigned long addr);
+void __asan_store4_noabort(unsigned long addr);
+void __asan_load8_noabort(unsigned long addr);
+void __asan_store8_noabort(unsigned long addr);
+void __asan_load16_noabort(unsigned long addr);
+void __asan_store16_noabort(unsigned long addr);
+
+void __asan_set_shadow_00(void *addr, size_t size);
+void __asan_set_shadow_f1(void *addr, size_t size);
+void __asan_set_shadow_f2(void *addr, size_t size);
+void __asan_set_shadow_f3(void *addr, size_t size);
+void __asan_set_shadow_f5(void *addr, size_t size);
+void __asan_set_shadow_f8(void *addr, size_t size);
+
+/* kern_asan_report.c */
+
+void kasan_report(unsigned long addr, size_t size,
+    bool is_write, unsigned long ip);
+void kasan_report_invalid_free(void *object, unsigned long ip);
+
+/* kern_asan_quarantine.c */
+
+void quarantine_put(struct kasan_free_meta *info, struct pool_cache *cache);
+void quarantine_reduce(void);
+void quarantine_remove_cache(struct pool_cache *cache);
+
+#endif
diff -uNr src.git-mirror/sys/sys/pool.h src.git/sys/sys/pool.h
--- src.git-mirror/sys/sys/pool.h 2018-08-17 18:51:35.816284105 +0200
+++ src.git/sys/sys/pool.h 2018-08-17 18:37:26.851893942 +0200
@@ -235,6 +235,12 @@
 #endif
 } pool_cache_cpu_t;
 
+struct kasan_cache {
+    int alloc_meta_offset;
+    int free_meta_offset;
+};
+
+
 struct pool_cache {
     /* Pool layer. */
     struct pool pc_pool;
@@ -265,6 +271,7 @@
     bool pc_redzone;
     size_t pc_reqsize;
 
+    struct kasan_cache kasan_info;
     /* CPU layer. */
     pool_cache_cpu_t pc_cpu0 __aligned(CACHE_LINE_SIZE);
     void *pc_cpus[MAXCPUS] __aligned(CACHE_LINE_SIZE);
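A sketch of how the offsets added to struct pool_cache above are meant to be consumed (this mirrors the Linux design the port is based on; the actual kern_asan.c may differ): kasan_cache_create() enlarges the object and records where the allocation and free metadata live inside it, and get_alloc_info()/get_free_info() simply add those offsets back:

struct kasan_alloc_meta *
get_alloc_info(struct pool_cache *cache, const void *object)
{
    /* The alloc metadata lives inside the enlarged object. */
    return (struct kasan_alloc_meta *)((uintptr_t)object +
        cache->kasan_info.alloc_meta_offset);
}

struct kasan_free_meta *
get_free_info(struct pool_cache *cache, const void *object)
{
    /* The free metadata (quarantine link) follows at its own offset. */
    return (struct kasan_free_meta *)((uintptr_t)object +
        cache->kasan_info.free_meta_offset);
}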