$ git remote -v
origin  https://fuchsia.googlesource.com/fuchsia/ (fetch)
origin  https://fuchsia.googlesource.com/fuchsia/ (push)
$ git log -1
commit c389f7e588807f0a263c71eb1f1742b6405bcabc (HEAD -> master, origin/master, origin/HEAD)
Author: Jeremy Manson 
Date:   Thu Aug 29 17:39:18 2019 +0000

    [fidl][build] Create a file list of FIDL JSON for each SDK.
    
    This identifies and lists all JSON IR that will be generated for a given
    SDK.
    
    An existing artifact, //build/fidl:all_fidl_json, gives you all
    of the FIDL JSON provided by the build, but this depends on the
    entire build, so it can't be depended upon by any individual target
    without introducing a circularity.
    
    This lets tools that are built in-tree and need a collection of FIDL
    IR use all of the IR needed for a specific SDK.
    
    Change-Id: I81c6c31c29e98a8cc2c2d64971cc89cc4efdad4d
$ pwd
/public/fuchsia/zircon/kernel
$ git grep --color=always -C20 -i -E 'safe.?stack' |aha
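
A quick note before the output: the hits below revolve around two Clang SafeStack hooks, the __has_feature(safe_stack) preprocessor check and the __NO_SAFESTACK function annotation. Under SafeStack, Clang moves address-taken locals onto a separate "unsafe" stack whose pointer lives at a fixed offset from the thread pointer, which is why the listing keeps referring to ZX_TLS_UNSAFE_SP_OFFSET (and the stack-protector's ZX_TLS_STACK_GUARD_OFFSET) relative to TPIDR_EL1 on arm64 or %gs on x86. The snippet that follows is my own minimal sketch of that annotation pattern; the NO_SAFESTACK macro name and the helper function are hypothetical, not taken from the Fuchsia tree.

/* Hypothetical sketch, not from the Fuchsia tree: roughly how a
 * __NO_SAFESTACK-style annotation is defined for Clang's SafeStack.
 * Compilers without the Clang extension fall back to a no-op. */
#ifndef __has_feature
#define __has_feature(x) 0
#endif

#if __has_feature(safe_stack)
/* Clang splits each frame: address-taken locals go to the unsafe stack,
 * reached via the thread pointer; return addresses and register spills
 * stay on the normal stack. */
#define NO_SAFESTACK __attribute__((no_sanitize("safe-stack")))
#else
#define NO_SAFESTACK
#endif

/* Code that runs before the unsafe stack pointer is wired up (early boot,
 * the tail end of a context switch) must opt out of the instrumentation: */
NO_SAFESTACK static void early_boot_helper(void) {
  /* must not rely on the unsafe stack being valid here */
}

That is the pattern behind the __NO_SAFESTACK markings and the #if __has_feature(safe_stack) guards scattered through the grep hits that follow.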

The output of the above command is shown below:

arch/arm64/BUILD.gn-    ]
arch/arm64/BUILD.gn-
arch/arm64/BUILD.gn-    cflags = [
arch/arm64/BUILD.gn-      "-fpie",
arch/arm64/BUILD.gn-
arch/arm64/BUILD.gn-      # With the MMU disabled, alignment checking is always enabled.  So make
arch/arm64/BUILD.gn-      # sure the compiler doesn't use any unaligned memory accesses.
arch/arm64/BUILD.gn-      "-mstrict-align",
arch/arm64/BUILD.gn-    ]
arch/arm64/BUILD.gn-
arch/arm64/BUILD.gn-    if (!is_gcc) {
arch/arm64/BUILD.gn-      # TODO(TC-237): cflags += [ "-mcmodel=tiny" ]
arch/arm64/BUILD.gn-    }
arch/arm64/BUILD.gn-
arch/arm64/BUILD.gn-    include_dirs = [ "include" ]
arch/arm64/BUILD.gn-  }
arch/arm64/BUILD.gn-
arch/arm64/BUILD.gn-  # This is used pervasively throughout the kernel on arm64.
arch/arm64/BUILD.gn-  config("kernel") {
arch/arm64/BUILD.gn-    if (!is_gcc) {
arch/arm64/BUILD.gn:      # Clang needs -mcmodel=kernel to tell it to use the right safe-stack
arch/arm64/BUILD.gn-      # ABI for the kernel.
arch/arm64/BUILD.gn-      cflags = [ "-mcmodel=kernel" ]
arch/arm64/BUILD.gn-    }
arch/arm64/BUILD.gn-
arch/arm64/BUILD.gn-    defines = [
arch/arm64/BUILD.gn-      "ARCH_ARM64",
arch/arm64/BUILD.gn-      "KERNEL_ASPACE_BASE=$kernel_aspace_base",
arch/arm64/BUILD.gn-      "KERNEL_ASPACE_SIZE=0x0001000000000000",
arch/arm64/BUILD.gn-      "USER_ASPACE_BASE=0x0000000001000000",
arch/arm64/BUILD.gn-      "USER_ASPACE_SIZE=0x0000fffffe000000",
arch/arm64/BUILD.gn-    ]
arch/arm64/BUILD.gn-
arch/arm64/BUILD.gn-    # For #include <arch/foo.h>.
arch/arm64/BUILD.gn-    include_dirs = [ "include" ]
arch/arm64/BUILD.gn-  }
arch/arm64/BUILD.gn-
arch/arm64/BUILD.gn-  source_set("arm64") {
arch/arm64/BUILD.gn-    sources = [
arch/arm64/BUILD.gn-      "arch.cc",
arch/arm64/BUILD.gn-      "asm.S",
--
arch/arm64/arch.cc-// Performance Monitor Control Register, EL0.
arch/arm64/arch.cc-static constexpr uint64_t PMCR_EL0_ENABLE_BIT = 1 << 0;
arch/arm64/arch.cc-static constexpr uint64_t PMCR_EL0_LONG_COUNTER_BIT = 1 << 6;
arch/arm64/arch.cc-
arch/arm64/arch.cc-// Performance Monitors User Enable Register, EL0.
arch/arm64/arch.cc-static constexpr uint64_t PMUSERENR_EL0_ENABLE = 1 << 0;  // Enable EL0 access to cycle counter.
arch/arm64/arch.cc-
arch/arm64/arch.cc-// System Control Register, EL1.
arch/arm64/arch.cc-static constexpr uint64_t SCTLR_EL1_UCI = 1 << 26;  // Allow certain cache ops in EL0.
arch/arm64/arch.cc-static constexpr uint64_t SCTLR_EL1_UCT = 1 << 15;  // Allow EL0 access to CTR register.
arch/arm64/arch.cc-static constexpr uint64_t SCTLR_EL1_DZE = 1 << 14;  // Allow EL0 to use DC ZVA.
arch/arm64/arch.cc-static constexpr uint64_t SCTLR_EL1_SA0 = 1 << 4;   // Enable Stack Alignment Check EL0.
arch/arm64/arch.cc-static constexpr uint64_t SCTLR_EL1_SA = 1 << 3;    // Enable Stack Alignment Check EL1.
arch/arm64/arch.cc-static constexpr uint64_t SCTLR_EL1_AC = 1 << 1;    // Enable Alignment Checking for EL1 EL0.
arch/arm64/arch.cc-
arch/arm64/arch.cc-struct arm64_sp_info_t {
arch/arm64/arch.cc-  uint64_t mpid;
arch/arm64/arch.cc-  void* sp;
arch/arm64/arch.cc-
arch/arm64/arch.cc-  // This part of the struct itself will serve temporarily as the
arch/arm64/arch.cc:  // fake arch_thread in the thread pointer, so that safe-stack
arch/arm64/arch.cc-  // and stack-protector code can work early.  The thread pointer
arch/arm64/arch.cc-  // (TPIDR_EL1) points just past arm64_sp_info_t.
arch/arm64/arch.cc-  uintptr_t stack_guard;
arch/arm64/arch.cc-  void* unsafe_sp;
arch/arm64/arch.cc-};
arch/arm64/arch.cc-
arch/arm64/arch.cc-static_assert(sizeof(arm64_sp_info_t) == 32, "check arm64_get_secondary_sp assembly");
arch/arm64/arch.cc-static_assert(offsetof(arm64_sp_info_t, sp) == 8, "check arm64_get_secondary_sp assembly");
arch/arm64/arch.cc-static_assert(offsetof(arm64_sp_info_t, mpid) == 0, "check arm64_get_secondary_sp assembly");
arch/arm64/arch.cc-
arch/arm64/arch.cc-#define TP_OFFSET(field) ((int)offsetof(arm64_sp_info_t, field) - (int)sizeof(arm64_sp_info_t))
arch/arm64/arch.cc-static_assert(TP_OFFSET(stack_guard) == ZX_TLS_STACK_GUARD_OFFSET, "");
arch/arm64/arch.cc-static_assert(TP_OFFSET(unsafe_sp) == ZX_TLS_UNSAFE_SP_OFFSET, "");
arch/arm64/arch.cc-#undef TP_OFFSET
arch/arm64/arch.cc-
arch/arm64/arch.cc-// Used to hold up the boot sequence on secondary CPUs until signaled by the primary.
arch/arm64/arch.cc-static int secondaries_released = 0;
arch/arm64/arch.cc-
arch/arm64/arch.cc-static volatile int secondaries_to_init = 0;
arch/arm64/arch.cc-
--
arch/arm64/arch.cc-// one for each CPU
arch/arm64/arch.cc-arm64_sp_info_t arm64_secondary_sp_list[SMP_MAX_CPUS];
arch/arm64/arch.cc-
arch/arm64/arch.cc-extern uint64_t arch_boot_el;  // Defined in start.S.
arch/arm64/arch.cc-
arch/arm64/arch.cc-uint64_t arm64_get_boot_el() { return arch_boot_el >> 2; }
arch/arm64/arch.cc-
arch/arm64/arch.cc-zx_status_t arm64_create_secondary_stack(uint cpu_num, uint64_t mpid) {
arch/arm64/arch.cc-  // Allocate a stack, indexed by CPU num so that |arm64_secondary_entry| can find it.
arch/arm64/arch.cc-  DEBUG_ASSERT_MSG(cpu_num > 0 && cpu_num < SMP_MAX_CPUS, "cpu_num: %u", cpu_num);
arch/arm64/arch.cc-  kstack_t* stack = &_init_thread[cpu_num - 1].stack;
arch/arm64/arch.cc-  DEBUG_ASSERT(stack->base == 0);
arch/arm64/arch.cc-  zx_status_t status = vm_allocate_kstack(stack);
arch/arm64/arch.cc-  if (status != ZX_OK) {
arch/arm64/arch.cc-    return status;
arch/arm64/arch.cc-  }
arch/arm64/arch.cc-
arch/arm64/arch.cc-  // Get the stack pointers.
arch/arm64/arch.cc-  void* sp = reinterpret_cast<void*>(stack->top);
arch/arm64/arch.cc-  void* unsafe_sp = nullptr;
arch/arm64/arch.cc:#if __has_feature(safe_stack)
arch/arm64/arch.cc-  DEBUG_ASSERT(stack->unsafe_base != 0);
arch/arm64/arch.cc-  unsafe_sp = reinterpret_cast<void*>(stack->unsafe_base + stack->size);
arch/arm64/arch.cc-#endif
arch/arm64/arch.cc-
arch/arm64/arch.cc-  // Find an empty slot for the low-level stack info.
arch/arm64/arch.cc-  uint32_t i = 0;
arch/arm64/arch.cc-  while ((i < SMP_MAX_CPUS) && (arm64_secondary_sp_list[i].mpid != 0)) {
arch/arm64/arch.cc-    i++;
arch/arm64/arch.cc-  }
arch/arm64/arch.cc-  if (i == SMP_MAX_CPUS) {
arch/arm64/arch.cc-    return ZX_ERR_NO_RESOURCES;
arch/arm64/arch.cc-  }
arch/arm64/arch.cc-
arch/arm64/arch.cc-  // Store it.
arch/arm64/arch.cc-  LTRACEF("set mpid 0x%lx sp to %p\n", mpid, sp);
arch/arm64/arch.cc:#if __has_feature(safe_stack)
arch/arm64/arch.cc-  LTRACEF("set mpid 0x%lx unsafe-sp to %p\n", mpid, unsafe_sp);
arch/arm64/arch.cc-#endif
arch/arm64/arch.cc-  arm64_secondary_sp_list[i].mpid = mpid;
arch/arm64/arch.cc-  arm64_secondary_sp_list[i].sp = sp;
arch/arm64/arch.cc-  arm64_secondary_sp_list[i].stack_guard = get_current_thread()->arch.stack_guard;
arch/arm64/arch.cc-  arm64_secondary_sp_list[i].unsafe_sp = unsafe_sp;
arch/arm64/arch.cc-
arch/arm64/arch.cc-  return ZX_OK;
arch/arm64/arch.cc-}
arch/arm64/arch.cc-
arch/arm64/arch.cc-zx_status_t arm64_free_secondary_stack(uint cpu_num) {
arch/arm64/arch.cc-  DEBUG_ASSERT(cpu_num > 0 && cpu_num < SMP_MAX_CPUS);
arch/arm64/arch.cc-  kstack_t* stack = &_init_thread[cpu_num - 1].stack;
arch/arm64/arch.cc-  zx_status_t status = vm_free_kstack(stack);
arch/arm64/arch.cc-  return status;
arch/arm64/arch.cc-}
arch/arm64/arch.cc-
arch/arm64/arch.cc-static void arm64_cpu_early_init() {
arch/arm64/arch.cc-  // Make sure the per cpu pointer is set up.
arch/arm64/arch.cc-  arm64_init_percpu_early();
--
arch/arm64/boot-mmu.cc-const uintptr_t l2_large_page_size = 1UL << MMU_LX_X(MMU_KERNEL_PAGE_SIZE_SHIFT, 2);
arch/arm64/boot-mmu.cc-const uintptr_t l2_large_page_size_mask = l2_large_page_size - 1;
arch/arm64/boot-mmu.cc-
arch/arm64/boot-mmu.cc-static size_t vaddr_to_l0_index(uintptr_t addr) {
arch/arm64/boot-mmu.cc-  return (addr >> MMU_KERNEL_TOP_SHIFT) & (MMU_KERNEL_PAGE_TABLE_ENTRIES_TOP - 1);
arch/arm64/boot-mmu.cc-}
arch/arm64/boot-mmu.cc-
arch/arm64/boot-mmu.cc-static size_t vaddr_to_l1_index(uintptr_t addr) {
arch/arm64/boot-mmu.cc-  return (addr >> MMU_LX_X(MMU_KERNEL_PAGE_SIZE_SHIFT, 1)) & (MMU_KERNEL_PAGE_TABLE_ENTRIES - 1);
arch/arm64/boot-mmu.cc-}
arch/arm64/boot-mmu.cc-
arch/arm64/boot-mmu.cc-static size_t vaddr_to_l2_index(uintptr_t addr) {
arch/arm64/boot-mmu.cc-  return (addr >> MMU_LX_X(MMU_KERNEL_PAGE_SIZE_SHIFT, 2)) & (MMU_KERNEL_PAGE_TABLE_ENTRIES - 1);
arch/arm64/boot-mmu.cc-}
arch/arm64/boot-mmu.cc-
arch/arm64/boot-mmu.cc-static size_t vaddr_to_l3_index(uintptr_t addr) {
arch/arm64/boot-mmu.cc-  return (addr >> MMU_LX_X(MMU_KERNEL_PAGE_SIZE_SHIFT, 3)) & (MMU_KERNEL_PAGE_TABLE_ENTRIES - 1);
arch/arm64/boot-mmu.cc-}
arch/arm64/boot-mmu.cc-
arch/arm64/boot-mmu.cc-// called from start.S to grab another page to back a page table from the boot allocator
arch/arm64/boot-mmu.cc:__NO_SAFESTACK
arch/arm64/boot-mmu.cc-extern "C" pte_t* boot_alloc_ptable() {
arch/arm64/boot-mmu.cc-  // allocate a page out of the boot allocator, asking for a physical address
arch/arm64/boot-mmu.cc-  pte_t* ptr = reinterpret_cast<pte_t*>(boot_alloc_page_phys());
arch/arm64/boot-mmu.cc-
arch/arm64/boot-mmu.cc-  // avoid using memset, since this relies on dc zva instruction, which isn't set up at
arch/arm64/boot-mmu.cc-  // this point in the boot process
arch/arm64/boot-mmu.cc-  // use a volatile pointer to make sure the compiler doesn't emit a memset call
arch/arm64/boot-mmu.cc-  volatile pte_t* vptr = ptr;
arch/arm64/boot-mmu.cc-  for (auto i = 0; i < MMU_KERNEL_PAGE_TABLE_ENTRIES; i++) {
arch/arm64/boot-mmu.cc-    vptr[i] = 0;
arch/arm64/boot-mmu.cc-  }
arch/arm64/boot-mmu.cc-
arch/arm64/boot-mmu.cc-  return ptr;
arch/arm64/boot-mmu.cc-}
arch/arm64/boot-mmu.cc-
arch/arm64/boot-mmu.cc-// inner mapping routine passed two helper routines
arch/arm64/boot-mmu.cc:__NO_SAFESTACK
arch/arm64/boot-mmu.cc-static inline zx_status_t _arm64_boot_map(pte_t* kernel_table0, const vaddr_t vaddr,
arch/arm64/boot-mmu.cc-                                          const paddr_t paddr, const size_t len, const pte_t flags,
arch/arm64/boot-mmu.cc-                                          paddr_t (*alloc_func)(), pte_t* phys_to_virt(paddr_t)) {
arch/arm64/boot-mmu.cc-  // loop through the virtual range and map each physical page, using the largest
arch/arm64/boot-mmu.cc-  // page size supported. Allocates necessary page tables along the way.
arch/arm64/boot-mmu.cc-  size_t off = 0;
arch/arm64/boot-mmu.cc-  while (off < len) {
arch/arm64/boot-mmu.cc-    // make sure the level 1 pointer is valid
arch/arm64/boot-mmu.cc-    size_t index0 = vaddr_to_l0_index(vaddr + off);
arch/arm64/boot-mmu.cc-    pte_t* kernel_table1 = nullptr;
arch/arm64/boot-mmu.cc-    switch (kernel_table0[index0] & MMU_PTE_DESCRIPTOR_MASK) {
arch/arm64/boot-mmu.cc-      default: {  // invalid/unused entry
arch/arm64/boot-mmu.cc-        paddr_t pa = alloc_func();
arch/arm64/boot-mmu.cc-
arch/arm64/boot-mmu.cc-        kernel_table0[index0] = (pa & MMU_PTE_OUTPUT_ADDR_MASK) | MMU_PTE_L012_DESCRIPTOR_TABLE;
arch/arm64/boot-mmu.cc-        __FALLTHROUGH;
arch/arm64/boot-mmu.cc-      }
arch/arm64/boot-mmu.cc-      case MMU_PTE_L012_DESCRIPTOR_TABLE:
arch/arm64/boot-mmu.cc-        kernel_table1 = phys_to_virt(kernel_table0[index0] & MMU_PTE_OUTPUT_ADDR_MASK);
arch/arm64/boot-mmu.cc-        break;
--
arch/arm64/boot-mmu.cc-        kernel_table3 = phys_to_virt(kernel_table2[index2] & MMU_PTE_OUTPUT_ADDR_MASK);
arch/arm64/boot-mmu.cc-        break;
arch/arm64/boot-mmu.cc-      case MMU_PTE_L012_DESCRIPTOR_BLOCK:
arch/arm64/boot-mmu.cc-        // not legal to have a block pointer at this level
arch/arm64/boot-mmu.cc-        return ZX_ERR_BAD_STATE;
arch/arm64/boot-mmu.cc-    }
arch/arm64/boot-mmu.cc-
arch/arm64/boot-mmu.cc-    // generate a standard page mapping
arch/arm64/boot-mmu.cc-    size_t index3 = vaddr_to_l3_index(vaddr + off);
arch/arm64/boot-mmu.cc-    kernel_table3[index3] =
arch/arm64/boot-mmu.cc-        ((paddr + off) & MMU_PTE_OUTPUT_ADDR_MASK) | flags | MMU_PTE_L3_DESCRIPTOR_PAGE;
arch/arm64/boot-mmu.cc-
arch/arm64/boot-mmu.cc-    off += PAGE_SIZE;
arch/arm64/boot-mmu.cc-  }
arch/arm64/boot-mmu.cc-
arch/arm64/boot-mmu.cc-  return ZX_OK;
arch/arm64/boot-mmu.cc-}
arch/arm64/boot-mmu.cc-
arch/arm64/boot-mmu.cc-// called from start.S to configure level 1-3 page tables to map the kernel wherever it is located
arch/arm64/boot-mmu.cc-// physically to KERNEL_BASE
arch/arm64/boot-mmu.cc:__NO_SAFESTACK
arch/arm64/boot-mmu.cc-extern "C" zx_status_t arm64_boot_map(pte_t* kernel_table0, const vaddr_t vaddr,
arch/arm64/boot-mmu.cc-                                      const paddr_t paddr, const size_t len, const pte_t flags) {
arch/arm64/boot-mmu.cc-  // the following helper routines assume that code is running in physical addressing mode (mmu
arch/arm64/boot-mmu.cc-  // off). any physical addresses calculated are assumed to be the same as virtual
arch/arm64/boot-mmu.cc-  auto alloc = []() -> paddr_t {
arch/arm64/boot-mmu.cc-    // allocate a page out of the boot allocator, asking for a physical address
arch/arm64/boot-mmu.cc-    paddr_t pa = boot_alloc_page_phys();
arch/arm64/boot-mmu.cc-
arch/arm64/boot-mmu.cc-    // avoid using memset, since this relies on dc zva instruction, which isn't set up at
arch/arm64/boot-mmu.cc-    // this point in the boot process
arch/arm64/boot-mmu.cc-    // use a volatile pointer to make sure the compiler doesn't emit a memset call
arch/arm64/boot-mmu.cc-    volatile pte_t* vptr = reinterpret_cast<volatile pte_t*>(pa);
arch/arm64/boot-mmu.cc-    for (auto i = 0; i < MMU_KERNEL_PAGE_TABLE_ENTRIES; i++) {
arch/arm64/boot-mmu.cc-      vptr[i] = 0;
arch/arm64/boot-mmu.cc-    }
arch/arm64/boot-mmu.cc-
arch/arm64/boot-mmu.cc-    return pa;
arch/arm64/boot-mmu.cc-  };
arch/arm64/boot-mmu.cc-
arch/arm64/boot-mmu.cc-  auto phys_to_virt = [](paddr_t pa) -> pte_t* { return reinterpret_cast<pte_t*>(pa); };
--
arch/arm64/fpu.cc-      "ldp     q2, q3, [%0, #(1 * 32)]\n"
arch/arm64/fpu.cc-      "ldp     q4, q5, [%0, #(2 * 32)]\n"
arch/arm64/fpu.cc-      "ldp     q6, q7, [%0, #(3 * 32)]\n"
arch/arm64/fpu.cc-      "ldp     q8, q9, [%0, #(4 * 32)]\n"
arch/arm64/fpu.cc-      "ldp     q10, q11, [%0, #(5 * 32)]\n"
arch/arm64/fpu.cc-      "ldp     q12, q13, [%0, #(6 * 32)]\n"
arch/arm64/fpu.cc-      "ldp     q14, q15, [%0, #(7 * 32)]\n"
arch/arm64/fpu.cc-      "ldp     q16, q17, [%0, #(8 * 32)]\n"
arch/arm64/fpu.cc-      "ldp     q18, q19, [%0, #(9 * 32)]\n"
arch/arm64/fpu.cc-      "ldp     q20, q21, [%0, #(10 * 32)]\n"
arch/arm64/fpu.cc-      "ldp     q22, q23, [%0, #(11 * 32)]\n"
arch/arm64/fpu.cc-      "ldp     q24, q25, [%0, #(12 * 32)]\n"
arch/arm64/fpu.cc-      "ldp     q26, q27, [%0, #(13 * 32)]\n"
arch/arm64/fpu.cc-      "ldp     q28, q29, [%0, #(14 * 32)]\n"
arch/arm64/fpu.cc-      "ldp     q30, q31, [%0, #(15 * 32)]\n"
arch/arm64/fpu.cc-      "msr     fpcr, %1\n"
arch/arm64/fpu.cc-      "msr     fpsr, %2\n" ::"r"(fpstate->regs),
arch/arm64/fpu.cc-      "r"((uint64_t)fpstate->fpcr), "r"((uint64_t)fpstate->fpsr));
arch/arm64/fpu.cc-}
arch/arm64/fpu.cc-
arch/arm64/fpu.cc:__NO_SAFESTACK static void arm64_fpu_save_state(thread_t* t) {
arch/arm64/fpu.cc-  struct fpstate* fpstate = &t->arch.fpstate;
arch/arm64/fpu.cc-
arch/arm64/fpu.cc-  LTRACEF("cpu %u, thread %s, save fpstate %p\n", arch_curr_cpu_num(), t->name, fpstate);
arch/arm64/fpu.cc-
arch/arm64/fpu.cc-  __asm__ volatile(
arch/arm64/fpu.cc-      "stp     q0, q1, [%0, #(0 * 32)]\n"
arch/arm64/fpu.cc-      "stp     q2, q3, [%0, #(1 * 32)]\n"
arch/arm64/fpu.cc-      "stp     q4, q5, [%0, #(2 * 32)]\n"
arch/arm64/fpu.cc-      "stp     q6, q7, [%0, #(3 * 32)]\n"
arch/arm64/fpu.cc-      "stp     q8, q9, [%0, #(4 * 32)]\n"
arch/arm64/fpu.cc-      "stp     q10, q11, [%0, #(5 * 32)]\n"
arch/arm64/fpu.cc-      "stp     q12, q13, [%0, #(6 * 32)]\n"
arch/arm64/fpu.cc-      "stp     q14, q15, [%0, #(7 * 32)]\n"
arch/arm64/fpu.cc-      "stp     q16, q17, [%0, #(8 * 32)]\n"
arch/arm64/fpu.cc-      "stp     q18, q19, [%0, #(9 * 32)]\n"
arch/arm64/fpu.cc-      "stp     q20, q21, [%0, #(10 * 32)]\n"
arch/arm64/fpu.cc-      "stp     q22, q23, [%0, #(11 * 32)]\n"
arch/arm64/fpu.cc-      "stp     q24, q25, [%0, #(12 * 32)]\n"
arch/arm64/fpu.cc-      "stp     q26, q27, [%0, #(13 * 32)]\n"
arch/arm64/fpu.cc-      "stp     q28, q29, [%0, #(14 * 32)]\n"
arch/arm64/fpu.cc-      "stp     q30, q31, [%0, #(15 * 32)]\n" ::"r"(fpstate->regs));
arch/arm64/fpu.cc-
arch/arm64/fpu.cc-  // These are 32-bit values, but the msr instruction always uses a
arch/arm64/fpu.cc-  // 64-bit destination register.
arch/arm64/fpu.cc-  uint64_t fpcr, fpsr;
arch/arm64/fpu.cc-  __asm__("mrs %0, fpcr\n" : "=r"(fpcr));
arch/arm64/fpu.cc-  __asm__("mrs %0, fpsr\n" : "=r"(fpsr));
arch/arm64/fpu.cc-  fpstate->fpcr = (uint32_t)fpcr;
arch/arm64/fpu.cc-  fpstate->fpsr = (uint32_t)fpsr;
arch/arm64/fpu.cc-
arch/arm64/fpu.cc-  LTRACEF("thread %s, fpcr %x, fpsr %x\n", t->name, fpstate->fpcr, fpstate->fpsr);
arch/arm64/fpu.cc-}
arch/arm64/fpu.cc-
arch/arm64/fpu.cc-/* save fpu state if the thread had dirtied it and disable the fpu */
arch/arm64/fpu.cc:__NO_SAFESTACK void arm64_fpu_context_switch(thread_t* oldthread, thread_t* newthread) {
arch/arm64/fpu.cc-  uint64_t cpacr = __arm_rsr64("cpacr_el1");
arch/arm64/fpu.cc-  if (is_fpu_enabled((uint32_t)cpacr)) {
arch/arm64/fpu.cc-    LTRACEF("saving state on thread %s\n", oldthread->name);
arch/arm64/fpu.cc-
arch/arm64/fpu.cc-    /* save the state */
arch/arm64/fpu.cc-    arm64_fpu_save_state(oldthread);
arch/arm64/fpu.cc-
arch/arm64/fpu.cc-    /* disable the fpu again */
arch/arm64/fpu.cc-    __arm_wsr64("cpacr_el1", cpacr & ~FPU_ENABLE_MASK);
arch/arm64/fpu.cc-    __isb(ARM_MB_SY);
arch/arm64/fpu.cc-  }
arch/arm64/fpu.cc-}
arch/arm64/fpu.cc-
arch/arm64/fpu.cc-/* called because of a fpu instruction used exception */
arch/arm64/fpu.cc-void arm64_fpu_exception(arm64_iframe_t* iframe, uint exception_flags) {
arch/arm64/fpu.cc-  LTRACEF("cpu %u, thread %s, flags 0x%x\n", arch_curr_cpu_num(), get_current_thread()->name,
arch/arm64/fpu.cc-          exception_flags);
arch/arm64/fpu.cc-
arch/arm64/fpu.cc-  /* only valid to be called if exception came from lower level */
arch/arm64/fpu.cc-  DEBUG_ASSERT(exception_flags & ARM64_EXCEPTION_FLAG_LOWER_EL);
--
arch/arm64/start.S-.Lmmu_on_vaddr:
arch/arm64/start.S-    /* Disable trampoline page-table in ttbr0 */
arch/arm64/start.S-    movlit  tmp, MMU_TCR_FLAGS_KERNEL
arch/arm64/start.S-    msr     tcr_el1, tmp
arch/arm64/start.S-    isb
arch/arm64/start.S-
arch/arm64/start.S-    /* Invalidate TLB */
arch/arm64/start.S-    tlbi    vmalle1
arch/arm64/start.S-    isb
arch/arm64/start.S-
arch/arm64/start.S-    cbnz    cpuid, .Lsecondary_boot
arch/arm64/start.S-
arch/arm64/start.S-    // set up the boot stack for real
arch/arm64/start.S-    adr_global tmp, boot_cpu_kstack_end
arch/arm64/start.S-    mov     sp, tmp
arch/arm64/start.S-
arch/arm64/start.S-    // Set the thread pointer early so compiler-generated references
arch/arm64/start.S-    // to the stack-guard and unsafe-sp slots work.  This is not a
arch/arm64/start.S-    // real 'struct thread' yet, just a pointer to (past, actually)
arch/arm64/start.S-    // the two slots used by the ABI known to the compiler.  This avoids
arch/arm64/start.S:    // having to compile-time disable safe-stack and stack-protector
arch/arm64/start.S-    // code generation features for all the C code in the bootstrap
arch/arm64/start.S-    // path, which (unlike on x86, e.g.) is enough to get annoying.
arch/arm64/start.S-    adr_global tmp, boot_cpu_fake_thread_pointer_location
arch/arm64/start.S-    msr     tpidr_el1, tmp
arch/arm64/start.S-
arch/arm64/start.S-    // set the per cpu pointer for cpu 0
arch/arm64/start.S-    adr_global x18, arm64_percpu_array
arch/arm64/start.S-
arch/arm64/start.S-    // Choose a good (ideally random) stack-guard value as early as possible.
arch/arm64/start.S-    bl      choose_stack_guard
arch/arm64/start.S-    mrs     tmp, tpidr_el1
arch/arm64/start.S-    str     x0, [tmp, #ZX_TLS_STACK_GUARD_OFFSET]
arch/arm64/start.S-    // Don't leak the value to other code.
arch/arm64/start.S-    mov     x0, xzr
arch/arm64/start.S-
arch/arm64/start.S-    bl  lk_main
arch/arm64/start.S-    b   .
arch/arm64/start.S-
arch/arm64/start.S-.Lsecondary_boot:
arch/arm64/start.S-    bl      arm64_get_secondary_sp
--
arch/arm64/start.S-// clearing the .bss, so put them in .data so they don't get zeroed.
arch/arm64/start.S-.data
arch/arm64/start.S-    .balign 64
arch/arm64/start.S-DATA(arch_boot_el)
arch/arm64/start.S-    .quad 0xdeadbeef00ff00ff
arch/arm64/start.S-END_DATA(arch_boot_el)
arch/arm64/start.S-DATA(zbi_paddr)
arch/arm64/start.S-    .quad -1
arch/arm64/start.S-END_DATA(zbi_paddr)
arch/arm64/start.S-DATA(kernel_entry_paddr)
arch/arm64/start.S-    .quad -1
arch/arm64/start.S-END_DATA(kernel_entry_paddr)
arch/arm64/start.S-
arch/arm64/start.S-DATA(page_tables_not_ready)
arch/arm64/start.S-    .long       1
arch/arm64/start.S-END_DATA(page_tables_not_ready)
arch/arm64/start.S-
arch/arm64/start.S-    .balign 8
arch/arm64/start.S-LOCAL_DATA(boot_cpu_fake_arch_thread)
arch/arm64/start.S-    .quad 0xdeadbeef1ee2d00d // stack_guard
arch/arm64/start.S:#if __has_feature(safe_stack)
arch/arm64/start.S-    .quad boot_cpu_unsafe_kstack_end
arch/arm64/start.S-#else
arch/arm64/start.S-    .quad 0
arch/arm64/start.S-#endif
arch/arm64/start.S-LOCAL_DATA(boot_cpu_fake_thread_pointer_location)
arch/arm64/start.S-END_DATA(boot_cpu_fake_arch_thread)
arch/arm64/start.S-
arch/arm64/start.S-.bss
arch/arm64/start.S-LOCAL_DATA(boot_cpu_kstack)
arch/arm64/start.S-    .skip ARCH_DEFAULT_STACK_SIZE
arch/arm64/start.S-    .balign 16
arch/arm64/start.S-LOCAL_DATA(boot_cpu_kstack_end)
arch/arm64/start.S-END_DATA(boot_cpu_kstack)
arch/arm64/start.S-
arch/arm64/start.S:#if __has_feature(safe_stack)
arch/arm64/start.S-LOCAL_DATA(boot_cpu_unsafe_kstack)
arch/arm64/start.S-    .skip ARCH_DEFAULT_STACK_SIZE
arch/arm64/start.S-    .balign 16
arch/arm64/start.S-LOCAL_DATA(boot_cpu_unsafe_kstack_end)
arch/arm64/start.S-END_DATA(boot_cpu_unsafe_kstack)
arch/arm64/start.S-#endif
arch/arm64/start.S-
arch/arm64/start.S-.section .bss.prebss.translation_table, "aw", @nobits
arch/arm64/start.S-.align 3 + MMU_PAGE_TABLE_ENTRIES_IDENT_SHIFT
arch/arm64/start.S-DATA(tt_trampoline)
arch/arm64/start.S-    .skip 8 * MMU_PAGE_TABLE_ENTRIES_IDENT
arch/arm64/start.S-END_DATA(tt_trampoline)
arch/arm64/start.S-
arch/arm64/start.S-// This symbol is used by image.S
arch/arm64/start.S-.global IMAGE_ELF_ENTRY
arch/arm64/start.S-IMAGE_ELF_ENTRY = _start
arch/arm64/start.S-
arch/arm64/start.S-// This symbol is used by gdb python to know the base of the kernel module
arch/arm64/start.S-.global KERNEL_BASE_ADDRESS
arch/arm64/start.S-KERNEL_BASE_ADDRESS = KERNEL_BASE
--
arch/arm64/thread.cc-  // create a default stack frame on the stack
arch/arm64/thread.cc-  vaddr_t stack_top = t->stack.top;
arch/arm64/thread.cc-
arch/arm64/thread.cc-  // make sure the top of the stack is 16 byte aligned for EABI compliance
arch/arm64/thread.cc-  stack_top = ROUNDDOWN(stack_top, 16);
arch/arm64/thread.cc-  t->stack.top = stack_top;
arch/arm64/thread.cc-
arch/arm64/thread.cc-  struct arm64_context_switch_frame* frame = (struct arm64_context_switch_frame*)(stack_top);
arch/arm64/thread.cc-  frame--;
arch/arm64/thread.cc-
arch/arm64/thread.cc-  // fill in the entry point
arch/arm64/thread.cc-  frame->lr = entry_point;
arch/arm64/thread.cc-
arch/arm64/thread.cc-  // This is really a global (boot-time) constant value.
arch/arm64/thread.cc-  // But it's stored in each thread struct to satisfy the
arch/arm64/thread.cc-  // compiler ABI (TPIDR_EL1 + ZX_TLS_STACK_GUARD_OFFSET).
arch/arm64/thread.cc-  t->arch.stack_guard = get_current_thread()->arch.stack_guard;
arch/arm64/thread.cc-
arch/arm64/thread.cc-  // set the stack pointer
arch/arm64/thread.cc-  t->arch.sp = (vaddr_t)frame;
arch/arm64/thread.cc:#if __has_feature(safe_stack)
arch/arm64/thread.cc-  t->arch.unsafe_sp = ROUNDDOWN(t->stack.unsafe_base + t->stack.size, 16);
arch/arm64/thread.cc-#endif
arch/arm64/thread.cc-
arch/arm64/thread.cc-  // Initialize the debug state to a valid initial state.
arch/arm64/thread.cc-  for (size_t i = 0; i < ARM64_MAX_HW_BREAKPOINTS; i++) {
arch/arm64/thread.cc-    t->arch.debug_state.hw_bps[i].dbgbcr = 0;
arch/arm64/thread.cc-    t->arch.debug_state.hw_bps[i].dbgbvr = 0;
arch/arm64/thread.cc-  }
arch/arm64/thread.cc-}
arch/arm64/thread.cc-
arch/arm64/thread.cc:__NO_SAFESTACK void arch_thread_construct_first(thread_t* t) {
arch/arm64/thread.cc-  // Propagate the values from the fake arch_thread that the thread
arch/arm64/thread.cc-  // pointer points to now (set up in start.S) into the real thread
arch/arm64/thread.cc-  // structure being set up now.
arch/arm64/thread.cc-  thread_t* fake = get_current_thread();
arch/arm64/thread.cc-  t->arch.stack_guard = fake->arch.stack_guard;
arch/arm64/thread.cc-  t->arch.unsafe_sp = fake->arch.unsafe_sp;
arch/arm64/thread.cc-
arch/arm64/thread.cc-  // make sure the thread saves a copy of the current cpu pointer
arch/arm64/thread.cc-  t->arch.current_percpu_ptr = arm64_read_percpu_ptr();
arch/arm64/thread.cc-
arch/arm64/thread.cc-  // Force the thread pointer immediately to the real struct.  This way
arch/arm64/thread.cc:  // our callers don't have to avoid safe-stack code or risk losing track
arch/arm64/thread.cc-  // of the unsafe_sp value.  The caller's unsafe_sp value is visible at
arch/arm64/thread.cc-  // TPIDR_EL1 + ZX_TLS_UNSAFE_SP_OFFSET as expected, though TPIDR_EL1
arch/arm64/thread.cc-  // happens to have changed.  (We're assuming that the compiler doesn't
arch/arm64/thread.cc-  // decide to cache the TPIDR_EL1 value across this function call, which
arch/arm64/thread.cc-  // would be pointless since it's just one instruction to fetch it afresh.)
arch/arm64/thread.cc-  set_current_thread(t);
arch/arm64/thread.cc-}
arch/arm64/thread.cc-
arch/arm64/thread.cc:__NO_SAFESTACK void arch_context_switch(thread_t* oldthread, thread_t* newthread) {
arch/arm64/thread.cc-  LTRACEF("old %p (%s), new %p (%s)\n", oldthread, oldthread->name, newthread, newthread->name);
arch/arm64/thread.cc-  __dsb(ARM_MB_SY); /* broadcast tlb operations in case the thread moves to another cpu */
arch/arm64/thread.cc-
arch/arm64/thread.cc-  /* set the current cpu pointer in the new thread's structure so it can be
arch/arm64/thread.cc-   * restored on exception entry.
arch/arm64/thread.cc-   */
arch/arm64/thread.cc-  newthread->arch.current_percpu_ptr = arm64_read_percpu_ptr();
arch/arm64/thread.cc-
arch/arm64/thread.cc-  arm64_fpu_context_switch(oldthread, newthread);
arch/arm64/thread.cc-  arm64_debug_state_context_switch(oldthread, newthread);
arch/arm64/thread.cc-  arm64_context_switch(&oldthread->arch.sp, newthread->arch.sp);
arch/arm64/thread.cc-}
arch/arm64/thread.cc-
arch/arm64/thread.cc-void arch_dump_thread(thread_t* t) {
arch/arm64/thread.cc-  if (t->state != THREAD_RUNNING) {
arch/arm64/thread.cc-    dprintf(INFO, "\tarch: ");
arch/arm64/thread.cc-    dprintf(INFO, "sp 0x%lx\n", t->arch.sp);
arch/arm64/thread.cc-  }
arch/arm64/thread.cc-}
arch/arm64/thread.cc-
--
arch/x86/BUILD.gn-    "-msoft-float",
arch/x86/BUILD.gn-    "-mno-mmx",
arch/x86/BUILD.gn-    "-mno-sse",
arch/x86/BUILD.gn-    "-mno-sse2",
arch/x86/BUILD.gn-    "-mno-3dnow",
arch/x86/BUILD.gn-    "-mno-avx",
arch/x86/BUILD.gn-    "-mno-avx2",
arch/x86/BUILD.gn-  ]
arch/x86/BUILD.gn-
arch/x86/BUILD.gn-  if (is_gcc) {
arch/x86/BUILD.gn-    cflags += [
arch/x86/BUILD.gn-      "-falign-jumps=1",
arch/x86/BUILD.gn-      "-falign-loops=1",
arch/x86/BUILD.gn-      "-falign-functions=4",
arch/x86/BUILD.gn-
arch/x86/BUILD.gn-      # Optimization: Since FPU is disabled, do not pass flag in %rax to
arch/x86/BUILD.gn-      # varargs routines that floating point args are in use.
arch/x86/BUILD.gn-      "-mskip-rax-setup",
arch/x86/BUILD.gn-    ]
arch/x86/BUILD.gn-  } else {
arch/x86/BUILD.gn:    # Clang needs -mcmodel=kernel to tell it to use the right safe-stack
arch/x86/BUILD.gn-    # ABI for the kernel.
arch/x86/BUILD.gn-    cflags += [ "-mcmodel=kernel" ]
arch/x86/BUILD.gn-  }
arch/x86/BUILD.gn-
arch/x86/BUILD.gn-  defines = [
arch/x86/BUILD.gn-    "ARCH_X86",
arch/x86/BUILD.gn-    "KERNEL_ASPACE_BASE=$kernel_aspace_base",
arch/x86/BUILD.gn-    "KERNEL_ASPACE_SIZE=0x0000008000000000UL",
arch/x86/BUILD.gn-
arch/x86/BUILD.gn-    "USER_ASPACE_BASE=0x0000000001000000UL",  # 16MB
arch/x86/BUILD.gn-
arch/x86/BUILD.gn-    # We set the top of user address space to be (1 << 47) - 4k.  See
arch/x86/BUILD.gn-    # docs/sysret_problem.md for why we subtract 4k here.  Subtracting
arch/x86/BUILD.gn-    # USER_ASPACE_BASE from that value gives the value for USER_ASPACE_SIZE
arch/x86/BUILD.gn-    # below.
arch/x86/BUILD.gn-    "USER_ASPACE_SIZE=0x00007ffffefff000UL",
arch/x86/BUILD.gn-
arch/x86/BUILD.gn-    "KERNEL_LOAD_OFFSET=0x00100000",  # 1MB
arch/x86/BUILD.gn-  ]
arch/x86/BUILD.gn-
--
arch/x86/arch.cc-  // The thread stacks and struct are from a single allocation, free it
arch/x86/arch.cc-  // when we exit into the scheduler.
arch/x86/arch.cc-  thread->flags |= THREAD_FLAG_FREE_STRUCT;
arch/x86/arch.cc-
arch/x86/arch.cc-  lk_secondary_cpu_entry();
arch/x86/arch.cc-
arch/x86/arch.cc-// lk_secondary_cpu_entry only returns on an error, halt the core in this
arch/x86/arch.cc-// case.
arch/x86/arch.cc-fail:
arch/x86/arch.cc-  arch_disable_ints();
arch/x86/arch.cc-  while (1) {
arch/x86/arch.cc-    x86_hlt();
arch/x86/arch.cc-  }
arch/x86/arch.cc-}
arch/x86/arch.cc-
arch/x86/arch.cc-// This is called from assembly, before any other C code.
arch/x86/arch.cc-// The %gs.base is not set up yet, so we have to trust that
arch/x86/arch.cc-// this function is simple enough that the compiler won't
arch/x86/arch.cc-// want to generate stack-protector prologue/epilogue code,
arch/x86/arch.cc-// which would use %gs.
arch/x86/arch.cc:__NO_SAFESTACK __NO_RETURN void x86_secondary_entry(volatile int* aps_still_booting,
arch/x86/arch.cc-                                                    thread_t* thread) {
arch/x86/arch.cc-  // Would prefer this to be in init_percpu, but there is a dependency on a
arch/x86/arch.cc-  // page mapping existing, and the BP calls that before the VM subsystem is
arch/x86/arch.cc-  // initialized.
arch/x86/arch.cc-  apic_local_init();
arch/x86/arch.cc-
arch/x86/arch.cc-  uint32_t local_apic_id = apic_local_id();
arch/x86/arch.cc-  int cpu_num = x86_apic_id_to_cpu_num(local_apic_id);
arch/x86/arch.cc-  if (cpu_num < 0) {
arch/x86/arch.cc-    // If we could not find our CPU number, do not proceed further
arch/x86/arch.cc-    arch_disable_ints();
arch/x86/arch.cc-    while (1) {
arch/x86/arch.cc-      x86_hlt();
arch/x86/arch.cc-    }
arch/x86/arch.cc-  }
arch/x86/arch.cc-
arch/x86/arch.cc-  DEBUG_ASSERT(cpu_num > 0);
arch/x86/arch.cc-
arch/x86/arch.cc-  // Set %gs.base to our percpu struct.  This has to be done before
arch/x86/arch.cc-  // calling x86_init_percpu, which initializes most of that struct, so
arch/x86/arch.cc:  // that x86_init_percpu can use safe-stack and/or stack-protector code.
arch/x86/arch.cc-  struct x86_percpu* const percpu = &ap_percpus[cpu_num - 1];
arch/x86/arch.cc-  write_msr(X86_MSR_IA32_GS_BASE, (uintptr_t)percpu);
arch/x86/arch.cc-
arch/x86/arch.cc-  // Copy the stack-guard value from the boot CPU's percpu.
arch/x86/arch.cc-  percpu->stack_guard = bp_percpu.stack_guard;
arch/x86/arch.cc-
arch/x86/arch.cc:#if __has_feature(safe_stack)
arch/x86/arch.cc:  // Set up the initial unsafe stack pointer.
arch/x86/arch.cc-  x86_write_gs_offset64(ZX_TLS_UNSAFE_SP_OFFSET,
arch/x86/arch.cc-                        ROUNDDOWN(thread->stack.unsafe_base + thread->stack.size, 16));
arch/x86/arch.cc-#endif
arch/x86/arch.cc-
arch/x86/arch.cc-  x86_init_percpu((uint)cpu_num);
arch/x86/arch.cc-
arch/x86/arch.cc-  // Now do the rest of the work, in a function that is free to
arch/x86/arch.cc-  // use %gs in its code.
arch/x86/arch.cc-  finish_secondary_entry(aps_still_booting, thread, cpu_num);
arch/x86/arch.cc-}
arch/x86/arch.cc-
arch/x86/arch.cc-static int cmd_cpu(int argc, const cmd_args* argv, uint32_t flags) {
arch/x86/arch.cc-  if (argc < 2) {
arch/x86/arch.cc-    printf("not enough arguments\n");
arch/x86/arch.cc-  usage:
arch/x86/arch.cc-    printf("usage:\n");
arch/x86/arch.cc-    printf("%s features\n", argv[0].str);
arch/x86/arch.cc-    printf("%s unplug <cpu_id>\n", argv[0].str);
arch/x86/arch.cc-    printf("%s hotplug <cpu_id>\n", argv[0].str);
arch/x86/arch.cc-    printf("%s rdmsr <cpu_id> <msr_id>\n", argv[0].str);
--
arch/x86/include/arch/arch_thread.h-// Copyright (c) 2009 Corey Tabaka
arch/x86/include/arch/arch_thread.h-// Copyright (c) 2015 Intel Corporation
arch/x86/include/arch/arch_thread.h-//
arch/x86/include/arch/arch_thread.h-// Use of this source code is governed by a MIT-style
arch/x86/include/arch/arch_thread.h-// license that can be found in the LICENSE file or at
arch/x86/include/arch/arch_thread.h-// https://opensource.org/licenses/MIT
arch/x86/include/arch/arch_thread.h-
arch/x86/include/arch/arch_thread.h-#ifndef ZIRCON_KERNEL_ARCH_X86_INCLUDE_ARCH_ARCH_THREAD_H_
arch/x86/include/arch/arch_thread.h-#define ZIRCON_KERNEL_ARCH_X86_INCLUDE_ARCH_ARCH_THREAD_H_
arch/x86/include/arch/arch_thread.h-
arch/x86/include/arch/arch_thread.h-#include <assert.h>
arch/x86/include/arch/arch_thread.h-#include <sys/types.h>
arch/x86/include/arch/arch_thread.h-#include <zircon/compiler.h>
arch/x86/include/arch/arch_thread.h-
arch/x86/include/arch/arch_thread.h-#include <arch/x86/registers.h>
arch/x86/include/arch/arch_thread.h-
arch/x86/include/arch/arch_thread.h-__BEGIN_CDECLS
arch/x86/include/arch/arch_thread.h-
arch/x86/include/arch/arch_thread.h-struct arch_thread {
arch/x86/include/arch/arch_thread.h-  vaddr_t sp;
arch/x86/include/arch/arch_thread.h:#if __has_feature(safe_stack)
arch/x86/include/arch/arch_thread.h-  vaddr_t unsafe_sp;
arch/x86/include/arch/arch_thread.h-#endif
arch/x86/include/arch/arch_thread.h-  vaddr_t fs_base;
arch/x86/include/arch/arch_thread.h-  vaddr_t gs_base;
arch/x86/include/arch/arch_thread.h-
arch/x86/include/arch/arch_thread.h-  // Which entry of |suspended_general_regs| to use.
arch/x86/include/arch/arch_thread.h-  // One of X86_GENERAL_REGS_*.
arch/x86/include/arch/arch_thread.h-  uint32_t general_regs_source;
arch/x86/include/arch/arch_thread.h-
arch/x86/include/arch/arch_thread.h-  // Debugger access to userspace general regs while suspended or stopped
arch/x86/include/arch/arch_thread.h-  // in an exception. See the description of X86_GENERAL_REGS_* for usage.
arch/x86/include/arch/arch_thread.h-  // The regs are saved on the stack and then a pointer is stored here.
arch/x86/include/arch/arch_thread.h-  // Nullptr if not suspended or not stopped in an exception.
arch/x86/include/arch/arch_thread.h-  // TODO(ZX-563): Also nullptr for synthetic exceptions that don't provide
arch/x86/include/arch/arch_thread.h-  // them yet.
arch/x86/include/arch/arch_thread.h-  union {
arch/x86/include/arch/arch_thread.h-    void *gregs;
arch/x86/include/arch/arch_thread.h-    x86_syscall_general_regs_t *syscall;
arch/x86/include/arch/arch_thread.h-    x86_iframe_t *iframe;
arch/x86/include/arch/arch_thread.h-  } suspended_general_regs;
--
arch/x86/mp.cc-#include <arch/x86/tsc.h>
arch/x86/mp.cc-#include <dev/hw_rng.h>
arch/x86/mp.cc-#include <dev/interrupt.h>
arch/x86/mp.cc-#include <kernel/event.h>
arch/x86/mp.cc-#include <kernel/timer.h>
arch/x86/mp.cc-
arch/x86/mp.cc-#define LOCAL_TRACE 0
arch/x86/mp.cc-
arch/x86/mp.cc-// Enable/disable ktraces local to this file.
arch/x86/mp.cc-#define LOCAL_KTRACE_ENABLE 0 || LOCAL_TRACE
arch/x86/mp.cc-
arch/x86/mp.cc-using LocalTraceDuration =
arch/x86/mp.cc-    TraceDuration<TraceEnabled<LOCAL_KTRACE_ENABLE>, KTRACE_GRP_SCHEDULER, TraceContext::Cpu>;
arch/x86/mp.cc-
arch/x86/mp.cc-struct x86_percpu* ap_percpus;
arch/x86/mp.cc-uint8_t x86_num_cpus = 1;
arch/x86/mp.cc-static bool use_monitor = false;
arch/x86/mp.cc-
arch/x86/mp.cc-extern struct idt _idt;
arch/x86/mp.cc-
arch/x86/mp.cc:#if __has_feature(safe_stack)
arch/x86/mp.cc-static uint8_t unsafe_kstack[PAGE_SIZE] __ALIGNED(16);
arch/x86/mp.cc-#define unsafe_kstack_end (&unsafe_kstack[sizeof(unsafe_kstack)])
arch/x86/mp.cc-#else
arch/x86/mp.cc-#define unsafe_kstack_end nullptr
arch/x86/mp.cc-#endif
arch/x86/mp.cc-
arch/x86/mp.cc-// Fake monitor to use until smp is initialized. The size of
arch/x86/mp.cc-// the memory range doesn't matter, since it won't actually get
arch/x86/mp.cc-// used in a non-smp environment.
arch/x86/mp.cc-volatile uint8_t fake_monitor;
arch/x86/mp.cc-
arch/x86/mp.cc-// Also set up a fake table of idle states.
arch/x86/mp.cc-x86_idle_states_t fake_supported_idle_states = {.states = {X86_CSTATE_C1(0)}};
arch/x86/mp.cc-X86IdleStates fake_idle_states = X86IdleStates(&fake_supported_idle_states);
arch/x86/mp.cc-
arch/x86/mp.cc-// Pre-initialize the per cpu structure for the boot cpu. Referenced by
arch/x86/mp.cc-// early boot code prior to being able to initialize via code.
arch/x86/mp.cc-struct x86_percpu bp_percpu = {
arch/x86/mp.cc-    .direct = &bp_percpu,
arch/x86/mp.cc-    .current_thread = {},
--
arch/x86/mp.cc-    if (apic_idx == (uint)cpu_count - 1) {
arch/x86/mp.cc-      /* Never found bootstrap CPU in apic id list */
arch/x86/mp.cc-      return ZX_ERR_BAD_STATE;
arch/x86/mp.cc-    }
arch/x86/mp.cc-    ap_percpus[apic_idx].cpu_num = apic_idx + 1;
arch/x86/mp.cc-    ap_percpus[apic_idx].apic_id = apic_ids[i];
arch/x86/mp.cc-    ap_percpus[apic_idx].direct = &ap_percpus[apic_idx];
arch/x86/mp.cc-    apic_idx++;
arch/x86/mp.cc-  }
arch/x86/mp.cc-
arch/x86/mp.cc-  x86_num_cpus = cpu_count;
arch/x86/mp.cc-  return ZX_OK;
arch/x86/mp.cc-}
arch/x86/mp.cc-
arch/x86/mp.cc-void x86_init_percpu(cpu_num_t cpu_num) {
arch/x86/mp.cc-  struct x86_percpu* const percpu = cpu_num == 0 ? &bp_percpu : &ap_percpus[cpu_num - 1];
arch/x86/mp.cc-  DEBUG_ASSERT(percpu->cpu_num == cpu_num);
arch/x86/mp.cc-  DEBUG_ASSERT(percpu->direct == percpu);
arch/x86/mp.cc-
arch/x86/mp.cc-  // Assembly code has already set up %gs.base so that this function's
arch/x86/mp.cc:  // own code can use it implicitly for stack-protector or safe-stack.
arch/x86/mp.cc-  DEBUG_ASSERT(read_msr(X86_MSR_IA32_GS_BASE) == (uintptr_t)percpu);
arch/x86/mp.cc-
arch/x86/mp.cc-  /* set the KERNEL_GS_BASE MSR to 0 */
arch/x86/mp.cc-  /* when we enter user space, this will be populated via a swapgs */
arch/x86/mp.cc-  write_msr(X86_MSR_IA32_KERNEL_GS_BASE, 0);
arch/x86/mp.cc-
arch/x86/mp.cc-  x86_feature_init();
arch/x86/mp.cc-
arch/x86/mp.cc-  x86_extended_register_init();
arch/x86/mp.cc-  x86_extended_register_enable_feature(X86_EXTENDED_REGISTER_SSE);
arch/x86/mp.cc-  x86_extended_register_enable_feature(X86_EXTENDED_REGISTER_AVX);
arch/x86/mp.cc-
arch/x86/mp.cc-  // This can be turned on/off later by the user. Turn it on here so that
arch/x86/mp.cc-  // the buffer size assumes it's on.
arch/x86/mp.cc-  x86_extended_register_enable_feature(X86_EXTENDED_REGISTER_PT);
arch/x86/mp.cc-  // But then set the default mode to off.
arch/x86/mp.cc-  x86_set_extended_register_pt_state(false);
arch/x86/mp.cc-
arch/x86/mp.cc-  gdt_load(gdt_get());
arch/x86/mp.cc-
--
arch/x86/start.S-    mov %eax, %ds
arch/x86/start.S-    mov %eax, %es
arch/x86/start.S-    mov %eax, %fs
arch/x86/start.S-    mov %eax, %gs
arch/x86/start.S-    mov %eax, %ss
arch/x86/start.S-
arch/x86/start.S-    /* load the high kernel stack */
arch/x86/start.S-    lea _kstack_end(%rip), %rsp
arch/x86/start.S-
arch/x86/start.S-    // move_fixups_and_zero_bss copied the fixup code to _end.
arch/x86/start.S-    // It expects %rdi to contain the actual runtime address of __code_start.
arch/x86/start.S-    lea __code_start(%rip), %rdi
arch/x86/start.S-    call _end
arch/x86/start.S-    // The fixup code won't be used again, so the memory can be reused now.
arch/x86/start.S-
arch/x86/start.S-    /* reload the gdtr after relocations as it relies on relocated VAs */
arch/x86/start.S-    lgdt _temp_gdtr(%rip)
arch/x86/start.S-
arch/x86/start.S-    // Set %gs.base to &bp_percpu.  It's statically initialized
arch/x86/start.S-    // with kernel_unsafe_sp set, so after this it's safe to call
arch/x86/start.S:    // into C code that might use safe-stack and/or stack-protector.
arch/x86/start.S-    lea bp_percpu(%rip), %rax
arch/x86/start.S-    mov %rax, %rdx
arch/x86/start.S-    shr $32, %rdx
arch/x86/start.S-    mov $X86_MSR_IA32_GS_BASE, %ecx
arch/x86/start.S-    wrmsr
arch/x86/start.S-
arch/x86/start.S-    /* set up the idt */
arch/x86/start.S-    lea _idt_startup(%rip), %rdi
arch/x86/start.S-    call idt_setup
arch/x86/start.S-    call load_startup_idt
arch/x86/start.S-
arch/x86/start.S-    /* assign this core CPU# 0 and initialize its per cpu state */
arch/x86/start.S-    xor %edi, %edi
arch/x86/start.S-    call x86_init_percpu
arch/x86/start.S-
arch/x86/start.S-    // Fill the stack canary with a random value as early as possible.
arch/x86/start.S-    // This isn't done in x86_init_percpu because the hw_rng_get_entropy
arch/x86/start.S-    // call would make it eligible for stack-guard checking itself.  But
arch/x86/start.S-    // %gs is not set up yet in the prologue of the function, so it would
arch/x86/start.S-    // crash if it tried to use the stack-guard.
--
arch/x86/start16.S-    // version of it
arch/x86/start16.S-    movabs $.Lwakeup_highaddr, %rbx
arch/x86/start16.S-    jmp  *%rbx
arch/x86/start16.S-.Lwakeup_highaddr:
arch/x86/start16.S-    // Switch to the kernel's PML4
arch/x86/start16.S-    mov %rcx, %cr3
arch/x86/start16.S-    // As of this point, %esi is invalid
arch/x86/start16.S-
arch/x86/start16.S-    // Reload the GDT with one based off of non-identity mapping
arch/x86/start16.S-    lgdt _temp_gdtr(%rip)
arch/x86/start16.S-
arch/x86/start16.S-    // Zero our data segments
arch/x86/start16.S-    xor %eax, %eax
arch/x86/start16.S-    mov %eax, %ds
arch/x86/start16.S-    mov %eax, %es
arch/x86/start16.S-    mov %eax, %fs
arch/x86/start16.S-    mov %eax, %gs
arch/x86/start16.S-    mov %eax, %ss
arch/x86/start16.S-
arch/x86/start16.S-    // Restore %gs.base to &bp_percpu.  We need to do this before
arch/x86/start16.S:    // returning to C code, since the C code might use safe-stack
arch/x86/start16.S-    // and/or stack-protector.
arch/x86/start16.S-    // TODO(teisenbe):  There is a small performance gain that could be made here
arch/x86/start16.S-    // by switching from wrmsr to wrgsbase, if wrgsbase is supported.  Currently
arch/x86/start16.S-    // this is omitted for simplicity.
arch/x86/start16.S-    lea bp_percpu(%rip), %rax
arch/x86/start16.S-    mov %rax, %rdx
arch/x86/start16.S-    shr $32, %rdx
arch/x86/start16.S-    mov $X86_MSR_IA32_GS_BASE, %ecx
arch/x86/start16.S-    wrmsr
arch/x86/start16.S-
arch/x86/start16.S-    // Restore the stack pointer first, so we can use the stack right now.
arch/x86/start16.S-    mov 120(%rdi), %rsp
arch/x86/start16.S-
arch/x86/start16.S-    // Load the IDT.  Note this uses the stack and clobbers %rax, but not %rdi.
arch/x86/start16.S-    call load_startup_idt
arch/x86/start16.S-
arch/x86/start16.S-    mov 8(%rdi), %rsi
arch/x86/start16.S-    mov 16(%rdi), %rbp
arch/x86/start16.S-    mov 24(%rdi), %rbx
arch/x86/start16.S-    mov 32(%rdi), %rdx
--
arch/x86/thread.cc-  // Record a zero return address so that backtraces will stop here.
arch/x86/thread.cc-  // Otherwise if heap debugging is on, and say there is 99..99 here,
arch/x86/thread.cc-  // then the debugger could try to continue the backtrace from there.
arch/x86/thread.cc-  memset((void*)stack_top, 0, 8);
arch/x86/thread.cc-
arch/x86/thread.cc-  // move down a frame size and zero it out
arch/x86/thread.cc-  frame--;
arch/x86/thread.cc-  memset(frame, 0, sizeof(*frame));
arch/x86/thread.cc-
arch/x86/thread.cc-  frame->rip = entry_point;
arch/x86/thread.cc-
arch/x86/thread.cc-  // initialize the saved extended register state
arch/x86/thread.cc-  vaddr_t buf = ROUNDUP(((vaddr_t)t->arch.extended_register_buffer), 64);
arch/x86/thread.cc-  __UNUSED size_t overhead = buf - (vaddr_t)t->arch.extended_register_buffer;
arch/x86/thread.cc-  DEBUG_ASSERT(sizeof(t->arch.extended_register_buffer) - overhead >= x86_extended_register_size());
arch/x86/thread.cc-  t->arch.extended_register_state = (vaddr_t*)buf;
arch/x86/thread.cc-  x86_extended_register_init_state(t->arch.extended_register_state);
arch/x86/thread.cc-
arch/x86/thread.cc-  // set the stack pointer
arch/x86/thread.cc-  t->arch.sp = (vaddr_t)frame;
arch/x86/thread.cc:#if __has_feature(safe_stack)
arch/x86/thread.cc-  t->arch.unsafe_sp = ROUNDDOWN(t->stack.unsafe_base + t->stack.size, 16);
arch/x86/thread.cc-#endif
arch/x86/thread.cc-
arch/x86/thread.cc-  // initialize the fs, gs and kernel bases to 0.
arch/x86/thread.cc-  t->arch.fs_base = 0;
arch/x86/thread.cc-  t->arch.gs_base = 0;
arch/x86/thread.cc-
arch/x86/thread.cc-  // Initialize the debug registers to a valid initial state.
arch/x86/thread.cc-  t->arch.track_debug_state = false;
arch/x86/thread.cc-  for (size_t i = 0; i < 4; i++) {
arch/x86/thread.cc-    t->arch.debug_state.dr[i] = 0;
arch/x86/thread.cc-  }
arch/x86/thread.cc-  t->arch.debug_state.dr6 = ~X86_DR6_USER_MASK;
arch/x86/thread.cc-  t->arch.debug_state.dr7 = ~X86_DR7_USER_MASK;
arch/x86/thread.cc-}
arch/x86/thread.cc-
arch/x86/thread.cc-void arch_thread_construct_first(thread_t* t) {}
arch/x86/thread.cc-
arch/x86/thread.cc-void arch_dump_thread(thread_t* t) {
arch/x86/thread.cc-  if (t->state != THREAD_RUNNING) {
arch/x86/thread.cc-    dprintf(INFO, "\tarch: ");
arch/x86/thread.cc-    dprintf(INFO, "sp %#" PRIxPTR "\n", t->arch.sp);
arch/x86/thread.cc-  }
arch/x86/thread.cc-}
arch/x86/thread.cc-
arch/x86/thread.cc-void* arch_thread_get_blocked_fp(thread_t* t) {
arch/x86/thread.cc-  if (!WITH_FRAME_POINTERS)
arch/x86/thread.cc-    return nullptr;
arch/x86/thread.cc-
arch/x86/thread.cc-  struct x86_64_context_switch_frame* frame = (struct x86_64_context_switch_frame*)t->arch.sp;
arch/x86/thread.cc-
arch/x86/thread.cc-  return (void*)frame->rbp;
arch/x86/thread.cc-}
arch/x86/thread.cc-
arch/x86/thread.cc:__NO_SAFESTACK __attribute__((target("fsgsbase"))) void arch_context_switch(thread_t* oldthread,
arch/x86/thread.cc-                                                                            thread_t* newthread) {
arch/x86/thread.cc-  x86_extended_register_context_switch(oldthread, newthread);
arch/x86/thread.cc-
arch/x86/thread.cc-  x86_debug_state_context_switch(oldthread, newthread);
arch/x86/thread.cc-
arch/x86/thread.cc-  // printf("cs 0x%llx\n", kstack_top);
arch/x86/thread.cc-
arch/x86/thread.cc-  /* set the tss SP0 value to point at the top of our stack */
arch/x86/thread.cc-  x86_set_tss_sp(newthread->stack.top);
arch/x86/thread.cc-
arch/x86/thread.cc-  /* Save the user fs_base register value.  The new rdfsbase instruction
arch/x86/thread.cc-   * is much faster than reading the MSR, so use the former in
arch/x86/thread.cc-   * preference. */
arch/x86/thread.cc-  if (likely(g_x86_feature_fsgsbase)) {
arch/x86/thread.cc-    oldthread->arch.fs_base = _readfsbase_u64();
arch/x86/thread.cc-  } else {
arch/x86/thread.cc-    oldthread->arch.fs_base = read_msr(X86_MSR_IA32_FS_BASE);
arch/x86/thread.cc-  }
arch/x86/thread.cc-
arch/x86/thread.cc-  /* The segment selector registers can't be preserved across context
--
arch/x86/thread.cc-    /* There is no variant of the {rd,wr}gsbase instructions for
arch/x86/thread.cc-     * accessing KERNEL_GS_BASE, so we wrap those in two swapgs
arch/x86/thread.cc-     * instructions to get the same effect.  This is a little
arch/x86/thread.cc-     * convoluted, but still faster than using the KERNEL_GS_BASE
arch/x86/thread.cc-     * MSRs. */
arch/x86/thread.cc-    __asm__ __volatile__(
arch/x86/thread.cc-        "swapgs\n"
arch/x86/thread.cc-        "rdgsbase %[old_value]\n"
arch/x86/thread.cc-        "wrgsbase %[new_value]\n"
arch/x86/thread.cc-        "swapgs\n"
arch/x86/thread.cc-        : [ old_value ] "=&r"(oldthread->arch.gs_base)
arch/x86/thread.cc-        : [ new_value ] "r"(newthread->arch.gs_base));
arch/x86/thread.cc-
arch/x86/thread.cc-    _writefsbase_u64(newthread->arch.fs_base);
arch/x86/thread.cc-  } else {
arch/x86/thread.cc-    oldthread->arch.gs_base = read_msr(X86_MSR_IA32_KERNEL_GS_BASE);
arch/x86/thread.cc-    write_msr(X86_MSR_IA32_FS_BASE, newthread->arch.fs_base);
arch/x86/thread.cc-    write_msr(X86_MSR_IA32_KERNEL_GS_BASE, newthread->arch.gs_base);
arch/x86/thread.cc-  }
arch/x86/thread.cc-
arch/x86/thread.cc:#if __has_feature(safe_stack)
arch/x86/thread.cc-  oldthread->arch.unsafe_sp = x86_read_gs_offset64(ZX_TLS_UNSAFE_SP_OFFSET);
arch/x86/thread.cc-  x86_write_gs_offset64(ZX_TLS_UNSAFE_SP_OFFSET, newthread->arch.unsafe_sp);
arch/x86/thread.cc-#endif
arch/x86/thread.cc-
arch/x86/thread.cc-  x86_64_context_switch(&oldthread->arch.sp, newthread->arch.sp);
arch/x86/thread.cc-}
arch/x86/thread.cc-
arch/x86/thread.cc-void x86_debug_state_context_switch(thread_t* old_thread, thread_t* new_thread) {
arch/x86/thread.cc-  // If the new thread has debug state, then install it, replacing the current contents.
arch/x86/thread.cc-  if (unlikely(new_thread->arch.track_debug_state)) {
arch/x86/thread.cc-    // NOTE: There is no enable debug state call, as x86 doesn't have a global enable/disable
arch/x86/thread.cc-    //       switch, but rather enables particular registers through DR7. These registers are
arch/x86/thread.cc-    //       selected by userspace (and filtered by zircon) in the thread_write_state state
arch/x86/thread.cc-    //       syscall.
arch/x86/thread.cc-    //
arch/x86/thread.cc-    //       This means that just writing the thread debug state into the CPU is enough to
arch/x86/thread.cc-    //       activate the debug functionality.
arch/x86/thread.cc-    x86_write_hw_debug_regs(&new_thread->arch.debug_state);
arch/x86/thread.cc-    return;
arch/x86/thread.cc-  }
--
kernel/legacy_scheduler.cc-  DEBUG_ASSERT(now > current_thread->last_started_running);
kernel/legacy_scheduler.cc-  zx_duration_t delta = zx_time_sub_time(now, current_thread->last_started_running);
kernel/legacy_scheduler.cc-  if (delta >= current_thread->remaining_time_slice) {
kernel/legacy_scheduler.cc-    // we completed the time slice, do not restart it and let the scheduler run
kernel/legacy_scheduler.cc-    current_thread->remaining_time_slice = 0;
kernel/legacy_scheduler.cc-
kernel/legacy_scheduler.cc-    // set a timer to go off on the time slice interval from now
kernel/legacy_scheduler.cc-    timer_preempt_reset(zx_time_add_duration(now, THREAD_INITIAL_TIME_SLICE));
kernel/legacy_scheduler.cc-
kernel/legacy_scheduler.cc-    // Mark a reschedule as pending.  The irq handler will call back
kernel/legacy_scheduler.cc-    // into us with sched_preempt().
kernel/legacy_scheduler.cc-    thread_preempt_set_pending();
kernel/legacy_scheduler.cc-  } else {
kernel/legacy_scheduler.cc-    // the timer tick must have fired early, reschedule and continue
kernel/legacy_scheduler.cc-    zx_time_t deadline = zx_time_add_duration(current_thread->last_started_running,
kernel/legacy_scheduler.cc-                                              current_thread->remaining_time_slice);
kernel/legacy_scheduler.cc-    timer_preempt_reset(deadline);
kernel/legacy_scheduler.cc-  }
kernel/legacy_scheduler.cc-}
kernel/legacy_scheduler.cc-
kernel/legacy_scheduler.cc:// On ARM64 with safe-stack, it's no longer possible to use the unsafe-sp
kernel/legacy_scheduler.cc-// after set_current_thread (we'd now see newthread's unsafe-sp instead!).
kernel/legacy_scheduler.cc-// Hence this function and everything it calls between this point and
kernel/legacy_scheduler.cc:// the low-level context switch must be marked with __NO_SAFESTACK.
kernel/legacy_scheduler.cc:__NO_SAFESTACK static void final_context_switch(thread_t* oldthread, thread_t* newthread) {
kernel/legacy_scheduler.cc-  set_current_thread(newthread);
kernel/legacy_scheduler.cc-  arch_context_switch(oldthread, newthread);
kernel/legacy_scheduler.cc-}
kernel/legacy_scheduler.cc-
kernel/legacy_scheduler.cc-// Internal reschedule routine. The current thread needs to already be in whatever
kernel/legacy_scheduler.cc-// state and queues it needs to be in. This routine simply picks the next thread and
kernel/legacy_scheduler.cc-// switches to it.
kernel/legacy_scheduler.cc-void sched_resched_internal() {
kernel/legacy_scheduler.cc-  thread_t* current_thread = get_current_thread();
kernel/legacy_scheduler.cc-  uint cpu = arch_curr_cpu_num();
kernel/legacy_scheduler.cc-
kernel/legacy_scheduler.cc-  DEBUG_ASSERT(arch_ints_disabled());
kernel/legacy_scheduler.cc-  DEBUG_ASSERT(spin_lock_held(&thread_lock));
kernel/legacy_scheduler.cc-  // Aside from the thread_lock, spinlocks should never be held over a reschedule.
kernel/legacy_scheduler.cc-  DEBUG_ASSERT(arch_num_spinlocks_held() == 1);
kernel/legacy_scheduler.cc-  DEBUG_ASSERT_MSG(current_thread->state != THREAD_RUNNING, "state %d\n", current_thread->state);
kernel/legacy_scheduler.cc-  DEBUG_ASSERT(!arch_blocking_disallowed());
kernel/legacy_scheduler.cc-
kernel/legacy_scheduler.cc-  CPU_STATS_INC(reschedules);
kernel/legacy_scheduler.cc-
--
kernel/scheduler.cc-    121,   149,   182,   223,   273,   335,   410,   503,   616,   754,  924,
kernel/scheduler.cc-    1132,  1386,  1698,  2080,  2549,  3122,  3825,  4685,  5739,  7030, 8612,
kernel/scheduler.cc-    10550, 12924, 15832, 19394, 23757, 29103, 35651, 43672, 53499, 65536};
kernel/scheduler.cc-
kernel/scheduler.cc-// Converts from kernel priority value in the interval [0, 31] to weight in the
kernel/scheduler.cc-// interval (0.0, 1.0]. See the definition of SchedWeight for an explanation of
kernel/scheduler.cc-// the weight distribution.
kernel/scheduler.cc-constexpr SchedWeight PriorityToWeight(int priority) { return kPriorityToWeightTable[priority]; }
kernel/scheduler.cc-
kernel/scheduler.cc-// The minimum possible weight and its reciprocal.
kernel/scheduler.cc-constexpr SchedWeight kMinWeight = PriorityToWeight(LOWEST_PRIORITY);
kernel/scheduler.cc-constexpr SchedWeight kReciprocalMinWeight = 1 / kMinWeight;
kernel/scheduler.cc-
kernel/scheduler.cc-// Utility operator to make expressions more succinct that update thread times
kernel/scheduler.cc-// and durations of basic types using the fixed-point counterparts.
kernel/scheduler.cc-constexpr zx_time_t& operator+=(zx_time_t& value, SchedDuration delta) {
kernel/scheduler.cc-  value += delta.raw_value();
kernel/scheduler.cc-  return value;
kernel/scheduler.cc-}
kernel/scheduler.cc-
kernel/scheduler.cc:// On ARM64 with safe-stack, it's no longer possible to use the unsafe-sp
kernel/scheduler.cc-// after set_current_thread (we'd now see newthread's unsafe-sp instead!).
kernel/scheduler.cc-// Hence this function and everything it calls between this point and
kernel/scheduler.cc:// the low-level context switch must be marked with __NO_SAFESTACK.
kernel/scheduler.cc:__NO_SAFESTACK void FinalContextSwitch(thread_t* oldthread, thread_t* newthread) {
kernel/scheduler.cc-  set_current_thread(newthread);
kernel/scheduler.cc-  arch_context_switch(oldthread, newthread);
kernel/scheduler.cc-}
kernel/scheduler.cc-
kernel/scheduler.cc-inline void TraceContextSwitch(const thread_t* current_thread, const thread_t* next_thread,
kernel/scheduler.cc-                               cpu_num_t current_cpu) {
kernel/scheduler.cc-  const uintptr_t raw_current = reinterpret_cast<uintptr_t>(current_thread);
kernel/scheduler.cc-  const uintptr_t raw_next = reinterpret_cast<uintptr_t>(next_thread);
kernel/scheduler.cc-  const uint32_t current = static_cast<uint32_t>(raw_current);
kernel/scheduler.cc-  const uint32_t next = static_cast<uint32_t>(raw_next);
kernel/scheduler.cc-  const uint32_t user_tid = static_cast<uint32_t>(next_thread->user_tid);
kernel/scheduler.cc-  const uint32_t context = current_cpu | (current_thread->state << 8) |
kernel/scheduler.cc-                           (current_thread->base_priority << 16) |
kernel/scheduler.cc-                           (next_thread->base_priority << 24);
kernel/scheduler.cc-
kernel/scheduler.cc-  ktrace(TAG_CONTEXT_SWITCH, user_tid, context, current, next);
kernel/scheduler.cc-}
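
The context argument packs four 8-bit fields into one 32-bit trace word. A
hypothetical decoder mirroring the shifts above makes the layout explicit:

    // Hypothetical decoder for the packed trace word; field widths follow the
    // shifts used in TraceContextSwitch.
    struct ContextSwitchRecord {
      uint8_t cpu;                // bits 0-7:   current_cpu
      uint8_t old_state;          // bits 8-15:  current_thread->state
      uint8_t old_base_priority;  // bits 16-23: current_thread->base_priority
      uint8_t new_base_priority;  // bits 24-31: next_thread->base_priority
    };

    constexpr ContextSwitchRecord UnpackContext(uint32_t context) {
      return ContextSwitchRecord{
          static_cast<uint8_t>(context & 0xff),
          static_cast<uint8_t>((context >> 8) & 0xff),
          static_cast<uint8_t>((context >> 16) & 0xff),
          static_cast<uint8_t>((context >> 24) & 0xff),
      };
    }
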
kernel/scheduler.cc-
kernel/scheduler.cc-// Returns a sufficiently unique flow id for a thread based on the thread id and
kernel/scheduler.cc-// queue generation count. This flow id cannot be used across enqueues because
--
kernel/thread.cc-
kernel/thread.cc-  // set up the initial stack frame
kernel/thread.cc-  arch_thread_initialize(t, (vaddr_t)alt_trampoline);
kernel/thread.cc-
kernel/thread.cc-  // add it to the global thread list
kernel/thread.cc-  {
kernel/thread.cc-    Guard<spin_lock_t, IrqSave> guard{ThreadLock::Get()};
kernel/thread.cc-    list_add_head(&thread_list, &t->thread_list_node);
kernel/thread.cc-  }
kernel/thread.cc-
kernel/thread.cc-  kcounter_add(thread_create_count, 1);
kernel/thread.cc-  return t;
kernel/thread.cc-}
kernel/thread.cc-
kernel/thread.cc-thread_t* thread_create(const char* name, thread_start_routine entry, void* arg, int priority) {
kernel/thread.cc-  return thread_create_etc(NULL, name, entry, arg, priority, NULL);
kernel/thread.cc-}
kernel/thread.cc-
kernel/thread.cc-static void free_thread_resources(thread_t* t) {
kernel/thread.cc-  if (t->stack.vmar != nullptr) {
kernel/thread.cc:#if __has_feature(safe_stack)
kernel/thread.cc-    DEBUG_ASSERT(t->stack.unsafe_vmar != nullptr);
kernel/thread.cc-#endif
kernel/thread.cc-    zx_status_t status = vm_free_kstack(&t->stack);
kernel/thread.cc-    DEBUG_ASSERT(status == ZX_OK);
kernel/thread.cc-  }
kernel/thread.cc-
kernel/thread.cc-  // call the tls callback for each slot as long as there is one
kernel/thread.cc-  for (uint ix = 0; ix != THREAD_MAX_TLS_ENTRY; ++ix) {
kernel/thread.cc-    if (t->tls_callback[ix]) {
kernel/thread.cc-      t->tls_callback[ix](t->tls[ix]);
kernel/thread.cc-    }
kernel/thread.cc-  }
kernel/thread.cc-
kernel/thread.cc-  // free the thread structure itself.  Manually trigger the struct's
kernel/thread.cc-  // destructor so that DEBUG_ASSERTs present in the owned_wait_queues member
kernel/thread.cc-  // get triggered.
kernel/thread.cc-  bool thread_needs_free = (t->flags & THREAD_FLAG_FREE_STRUCT) != 0;
kernel/thread.cc-  t->magic = 0;
kernel/thread.cc-  t->~thread_t();
kernel/thread.cc-  if (thread_needs_free) {
--
kernel/thread.cc-  if (t->state == THREAD_RUNNING) {
kernel/thread.cc-    zx_duration_t recent = zx_time_sub_time(current_time(), t->last_started_running);
kernel/thread.cc-    runtime = zx_duration_add_duration(runtime, recent);
kernel/thread.cc-  }
kernel/thread.cc-
kernel/thread.cc-  char oname[THREAD_NAME_LENGTH];
kernel/thread.cc-  thread_owner_name(t, oname);
kernel/thread.cc-
kernel/thread.cc-  if (full_dump) {
kernel/thread.cc-    dprintf(INFO, "dump_thread: t %p (%s:%s)\n", t, oname, t->name);
kernel/thread.cc-    dprintf(INFO,
kernel/thread.cc-            "\tstate %s, curr/last cpu %d/%d, hard_affinity %#x, soft_cpu_affinty %#x, "
kernel/thread.cc-            "priority %d [%d:%d,%d], remaining time slice %" PRIi64 "\n",
kernel/thread.cc-            thread_state_to_str(t->state), (int)t->curr_cpu, (int)t->last_cpu, t->hard_affinity,
kernel/thread.cc-            t->soft_affinity, t->effec_priority, t->base_priority, t->priority_boost,
kernel/thread.cc-            t->inherited_priority, t->remaining_time_slice);
kernel/thread.cc-    dprintf(INFO, "\truntime_ns %" PRIi64 ", runtime_s %" PRIi64 "\n", runtime,
kernel/thread.cc-            runtime / 1000000000);
kernel/thread.cc-    dprintf(INFO, "\tstack.base 0x%lx, stack.vmar %p, stack.size %zu\n", t->stack.base,
kernel/thread.cc-            t->stack.vmar, t->stack.size);
kernel/thread.cc:#if __has_feature(safe_stack)
kernel/thread.cc-    dprintf(INFO, "\tstack.unsafe_base 0x%lx, stack.unsafe_vmar %p\n", t->stack.unsafe_base,
kernel/thread.cc-            t->stack.unsafe_vmar);
kernel/thread.cc-#endif
kernel/thread.cc-    dprintf(INFO, "\tentry %p, arg %p, flags 0x%x %s%s%s%s\n", t->entry, t->arg, t->flags,
kernel/thread.cc-            (t->flags & THREAD_FLAG_DETACHED) ? "Dt" : "",
kernel/thread.cc-            (t->flags & THREAD_FLAG_FREE_STRUCT) ? "Ft" : "",
kernel/thread.cc-            (t->flags & THREAD_FLAG_REAL_TIME) ? "Rt" : "",
kernel/thread.cc-            (t->flags & THREAD_FLAG_IDLE) ? "Id" : "");
kernel/thread.cc-
kernel/thread.cc-    dprintf(INFO, "\twait queue %p, blocked_status %d, interruptable %d, wait queues owned %s\n",
kernel/thread.cc-            t->blocking_wait_queue, t->blocked_status, t->interruptable,
kernel/thread.cc-            t->owned_wait_queues.is_empty() ? "no" : "yes");
kernel/thread.cc-
kernel/thread.cc-    dprintf(INFO, "\taspace %p\n", t->aspace);
kernel/thread.cc-    dprintf(INFO, "\tuser_thread %p, pid %" PRIu64 ", tid %" PRIu64 "\n", t->user_thread,
kernel/thread.cc-            t->user_pid, t->user_tid);
kernel/thread.cc-    arch_dump_thread(t);
kernel/thread.cc-  } else {
kernel/thread.cc-    printf("thr %p st %4s owq %d pri %2d [%d:%d,%d] pid %" PRIu64 " tid %" PRIu64 " (%s:%s)\n", t,
kernel/thread.cc-           thread_state_to_str(t->state), !t->owned_wait_queues.is_empty(), t->effec_priority,
--
vm/bootalloc.cc-#include <vm/physmap.h>
vm/bootalloc.cc-#include <vm/pmm.h>
vm/bootalloc.cc-#include <vm/vm.h>
vm/bootalloc.cc-
vm/bootalloc.cc-#include "vm_priv.h"
vm/bootalloc.cc-
vm/bootalloc.cc-#define LOCAL_TRACE MAX(VM_GLOBAL_TRACE, 0)
vm/bootalloc.cc-
vm/bootalloc.cc-// Simple boot time allocator that starts by allocating physical memory off
vm/bootalloc.cc-// the end of wherever the kernel is loaded in physical space.
vm/bootalloc.cc-//
vm/bootalloc.cc-// Pointers are returned from the kernel's physmap
vm/bootalloc.cc-
vm/bootalloc.cc-// store the boot allocator's start and current end pointers as physical addresses
vm/bootalloc.cc-paddr_t boot_alloc_start;
vm/bootalloc.cc-paddr_t boot_alloc_end;
vm/bootalloc.cc-
vm/bootalloc.cc-// This runs in physical space without the MMU set up, so by computing the address of _end
vm/bootalloc.cc-// and saving it, we've effectively computed the physical address of the end of the
vm/bootalloc.cc-// kernel.
vm/bootalloc.cc:__NO_SAFESTACK
vm/bootalloc.cc-void boot_alloc_init() {
vm/bootalloc.cc-  boot_alloc_start = reinterpret_cast<paddr_t>(_end);
vm/bootalloc.cc-  // TODO(ZX-2563): This is a compile-time no-op that defeats any compiler
vm/bootalloc.cc-  // optimizations based on its knowledge/assumption that `&_end` is a
vm/bootalloc.cc-  // constant here that equals the `&_end` constant as computed elsewhere.
vm/bootalloc.cc-  // Without this, the compiler can see that boot_alloc_start is never set to
vm/bootalloc.cc-  // any other value and replace code that uses the boot_alloc_start value
vm/bootalloc.cc-  // with code that computes `&_end` on the spot.  What the compiler doesn't
vm/bootalloc.cc-  // know is that this `&_end` is crucially a PC-relative computation when
vm/bootalloc.cc-  // the PC is a (low) physical address.  Later code that uses
vm/bootalloc.cc-  // boot_alloc_start will be running at a kernel (high) virtual address and
vm/bootalloc.cc-  // so its `&_end` will be nowhere near the same value.  The compiler isn't
vm/bootalloc.cc-  // wrong to do this sort of optimization when it can and other such cases
vm/bootalloc.cc-  // will eventually arise.  So long-term we'll need more thorough
vm/bootalloc.cc-  // compile-time separation of the early boot code that runs in physical
vm/bootalloc.cc-  // space from normal kernel code.  For now, this asm generates no
vm/bootalloc.cc-  // additional code but tells the compiler that it has no idea what value
vm/bootalloc.cc-  // boot_alloc_start might take, so it has to compute the `&_end` value now.
vm/bootalloc.cc-  __asm__("" : "=g"(boot_alloc_start) : "0"(boot_alloc_start));
vm/bootalloc.cc-  boot_alloc_end = reinterpret_cast<paddr_t>(_end);
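
The empty __asm__ above is a general idiom for laundering a value past the
optimizer. A minimal standalone sketch of the same trick (the function name is
hypothetical):

    // The asm emits no instructions, but the compiler must assume it may have
    // rewritten `value`, so later uses can no longer be folded back into the
    // constant expression that produced it.
    uintptr_t launder_value(uintptr_t value) {
      __asm__("" : "=r"(value) : "0"(value));
      return value;
    }
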
--
vm/bootalloc.cc-      // (gigabytes) and there may not be space after it...
vm/bootalloc.cc-      return;
vm/bootalloc.cc-    }
vm/bootalloc.cc-    boot_alloc_start = boot_alloc_end = end;
vm/bootalloc.cc-  }
vm/bootalloc.cc-}
vm/bootalloc.cc-
vm/bootalloc.cc-void* boot_alloc_mem(size_t len) {
vm/bootalloc.cc-  uintptr_t ptr;
vm/bootalloc.cc-
vm/bootalloc.cc-  ptr = ALIGN(boot_alloc_end, 8);
vm/bootalloc.cc-  boot_alloc_end = (ptr + ALIGN(len, 8));
vm/bootalloc.cc-
vm/bootalloc.cc-  LTRACEF("len %zu, phys ptr %#" PRIxPTR " ptr %p\n", len, ptr, paddr_to_physmap(ptr));
vm/bootalloc.cc-
vm/bootalloc.cc-  return paddr_to_physmap(ptr);
vm/bootalloc.cc-}
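
boot_alloc_mem is a plain bump allocator: round the current end up to 8 bytes,
reserve len bytes, and return a virtual pointer through the physmap. A hedged
usage sketch (the struct and call site are hypothetical; boot allocations are
not individually freed):

    // Hypothetical early-boot call site: carve out a fixed table before the
    // normal allocators are available. The returned pointer is a physmap
    // (virtual) address backed by memory just past the kernel image.
    struct EarlyTable {
      uint64_t entries[128];
    };

    EarlyTable* table = static_cast<EarlyTable*>(boot_alloc_mem(sizeof(EarlyTable)));
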
vm/bootalloc.cc-
vm/bootalloc.cc-// called from arch start.S
vm/bootalloc.cc-// run in physical space without the mmu set up, so stick to basic, relocatable code
vm/bootalloc.cc:__NO_SAFESTACK
vm/bootalloc.cc-paddr_t boot_alloc_page_phys() {
vm/bootalloc.cc-  paddr_t ptr = ALIGN(boot_alloc_end, PAGE_SIZE);
vm/bootalloc.cc-  boot_alloc_end = ptr + PAGE_SIZE;
vm/bootalloc.cc-
vm/bootalloc.cc-  return ptr;
vm/bootalloc.cc-}
--
vm/include/vm/kstack.h-//
vm/include/vm/kstack.h-// Use of this source code is governed by a MIT-style
vm/include/vm/kstack.h-// license that can be found in the LICENSE file or at
vm/include/vm/kstack.h-// https://opensource.org/licenses/MIT
vm/include/vm/kstack.h-#ifndef ZIRCON_KERNEL_VM_INCLUDE_VM_KSTACK_H_
vm/include/vm/kstack.h-#define ZIRCON_KERNEL_VM_INCLUDE_VM_KSTACK_H_
vm/include/vm/kstack.h-
vm/include/vm/kstack.h-#include <err.h>
vm/include/vm/kstack.h-#include <sys/types.h>
vm/include/vm/kstack.h-
vm/include/vm/kstack.h-__BEGIN_CDECLS
vm/include/vm/kstack.h-
vm/include/vm/kstack.h-// kstack encapsulates a kernel stack.
vm/include/vm/kstack.h-//
vm/include/vm/kstack.h-// kstack must be a C struct because it is embedded in thread_t.
vm/include/vm/kstack.h-typedef struct kstack {
vm/include/vm/kstack.h-  vaddr_t base;
vm/include/vm/kstack.h-  size_t size;
vm/include/vm/kstack.h-  vaddr_t top;
vm/include/vm/kstack.h-
vm/include/vm/kstack.h:  // When non-null, |vmar| (and, if safe-stack is enabled, |unsafe_vmar|) points to a ref-counted
vm/include/vm/kstack.h-  // VmAddressRegion that must be freed via |vm_free_kstack|.
vm/include/vm/kstack.h-  //
vm/include/vm/kstack.h-  // Note, the type is void* rather than |fbl::RefPtr| because this struct is used by C code.
vm/include/vm/kstack.h-  void* vmar;
vm/include/vm/kstack.h:#if __has_feature(safe_stack)
vm/include/vm/kstack.h-  vaddr_t unsafe_base;
vm/include/vm/kstack.h-  // See comment for |vmar|.
vm/include/vm/kstack.h-  void* unsafe_vmar;
vm/include/vm/kstack.h-#endif
vm/include/vm/kstack.h-} kstack_t;
vm/include/vm/kstack.h-
vm/include/vm/kstack.h-// Allocates a kernel stack with appropriate overrun padding.
vm/include/vm/kstack.h-//
vm/include/vm/kstack.h-// Assumes stack has been zero-initialized.
vm/include/vm/kstack.h-zx_status_t vm_allocate_kstack(kstack_t* stack);
vm/include/vm/kstack.h-
vm/include/vm/kstack.h-// Frees a stack allocated by |vm_allocate_kstack|.
vm/include/vm/kstack.h-zx_status_t vm_free_kstack(kstack_t* stack);
vm/include/vm/kstack.h-
vm/include/vm/kstack.h-__END_CDECLS
vm/include/vm/kstack.h-
vm/include/vm/kstack.h-#endif  // ZIRCON_KERNEL_VM_INCLUDE_VM_KSTACK_H_
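
A hedged sketch of the lifecycle this header implies (the wrapper function is
hypothetical): zero-initialize the struct, allocate, hand stack.top to the
architecture's thread setup, then free, which drops the ref-counted VMARs
stashed in |vmar| and, when safe-stack is enabled, |unsafe_vmar|.

    // Hypothetical call sequence, not from the kernel sources.
    zx_status_t make_and_release_kstack() {
      kstack_t stack = {};  // must start zero-initialized, per vm_allocate_kstack
      zx_status_t status = vm_allocate_kstack(&stack);
      if (status != ZX_OK) {
        return status;
      }
      // ... use stack.top as a new thread's initial stack pointer ...
      return vm_free_kstack(&stack);
    }
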
--
vm/kstack.cc-
vm/kstack.cc-#define LOCAL_TRACE 0
vm/kstack.cc-
vm/kstack.cc-// Allocates and maps a kernel stack with one page of padding before and after the mapping.
vm/kstack.cc-static zx_status_t allocate_vmar(bool unsafe, fbl::RefPtr<VmMapping>* out_kstack_mapping,
vm/kstack.cc-                                 fbl::RefPtr<VmAddressRegion>* out_kstack_vmar) {
vm/kstack.cc-  LTRACEF("allocating %s stack\n", unsafe ? "unsafe" : "safe");
vm/kstack.cc-
vm/kstack.cc-  // get a handle to the root vmar
vm/kstack.cc-  auto vmar = VmAspace::kernel_aspace()->RootVmar()->as_vm_address_region();
vm/kstack.cc-  DEBUG_ASSERT(!!vmar);
vm/kstack.cc-
vm/kstack.cc-  // Create a VMO for our stack
vm/kstack.cc-  fbl::RefPtr<VmObject> stack_vmo;
vm/kstack.cc-  zx_status_t status =
vm/kstack.cc-      VmObjectPaged::Create(PMM_ALLOC_FLAG_ANY, 0u, DEFAULT_STACK_SIZE, &stack_vmo);
vm/kstack.cc-  if (status != ZX_OK) {
vm/kstack.cc-    TRACEF("error allocating %s stack for thread\n", unsafe ? "unsafe" : "safe");
vm/kstack.cc-    return status;
vm/kstack.cc-  }
vm/kstack.cc:  const char* name = unsafe ? "unsafe-stack" : "safe-stack";
vm/kstack.cc-  stack_vmo->set_name(name, strlen(name));
vm/kstack.cc-
vm/kstack.cc-  // create a vmar with enough padding for a page before and after the stack
vm/kstack.cc-  const size_t padding_size = PAGE_SIZE;
vm/kstack.cc-
vm/kstack.cc-  fbl::RefPtr<VmAddressRegion> kstack_vmar;
vm/kstack.cc-  status = vmar->CreateSubVmar(
vm/kstack.cc-      0, 2 * padding_size + DEFAULT_STACK_SIZE, 0,
vm/kstack.cc-      VMAR_FLAG_CAN_MAP_SPECIFIC | VMAR_FLAG_CAN_MAP_READ | VMAR_FLAG_CAN_MAP_WRITE,
vm/kstack.cc-      unsafe ? "unsafe_kstack_vmar" : "kstack_vmar", &kstack_vmar);
vm/kstack.cc-  if (status != ZX_OK) {
vm/kstack.cc-    return status;
vm/kstack.cc-  }
vm/kstack.cc-
vm/kstack.cc-  // destroy the vmar if we abort early
vm/kstack.cc-  // this will also clean up any mappings that may have been placed in the vmar
vm/kstack.cc-  auto vmar_cleanup = fbl::MakeAutoCall([&kstack_vmar]() { kstack_vmar->Destroy(); });
vm/kstack.cc-
vm/kstack.cc-  LTRACEF("%s stack vmar at %#" PRIxPTR "\n", unsafe ? "unsafe" : "safe", kstack_vmar->base());
vm/kstack.cc-
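
Per the comment at the top of allocate_vmar, the sub-VMAR is sized as
2 * padding_size + DEFAULT_STACK_SIZE, which implies the following layout
(illustrative; the mapping call itself is elided from this excerpt):

    //  |<- PAGE_SIZE ->|<------ DEFAULT_STACK_SIZE ------>|<- PAGE_SIZE ->|
    //  |  guard page   |       stack mapping (R/W)        |  guard page   |
    //  ^ kstack_vmar->base()
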
--
vm/kstack.cc-  // fault in all the pages so we don't demand-fault in the stack
vm/kstack.cc-  status = kstack_mapping->MapRange(0, DEFAULT_STACK_SIZE, true);
vm/kstack.cc-  if (status != ZX_OK) {
vm/kstack.cc-    return status;
vm/kstack.cc-  }
vm/kstack.cc-
vm/kstack.cc-  // Cancel the cleanup handler on the vmar since we're about to save a
vm/kstack.cc-  // reference to it.
vm/kstack.cc-  vmar_cleanup.cancel();
vm/kstack.cc-  *out_kstack_mapping = ktl::move(kstack_mapping);
vm/kstack.cc-  *out_kstack_vmar = ktl::move(kstack_vmar);
vm/kstack.cc-
vm/kstack.cc-  return ZX_OK;
vm/kstack.cc-}
vm/kstack.cc-
vm/kstack.cc-zx_status_t vm_allocate_kstack(kstack_t* stack) {
vm/kstack.cc-  DEBUG_ASSERT(stack->base == 0);
vm/kstack.cc-  DEBUG_ASSERT(stack->size == 0);
vm/kstack.cc-  DEBUG_ASSERT(stack->top == 0);
vm/kstack.cc-  DEBUG_ASSERT(stack->vmar == nullptr);
vm/kstack.cc:#if __has_feature(safe_stack)
vm/kstack.cc-  DEBUG_ASSERT(stack->unsafe_base == 0);
vm/kstack.cc-  DEBUG_ASSERT(stack->unsafe_vmar == nullptr);
vm/kstack.cc-#endif
vm/kstack.cc-
vm/kstack.cc-  fbl::RefPtr<VmMapping> mapping;
vm/kstack.cc-  fbl::RefPtr<VmAddressRegion> vmar;
vm/kstack.cc-  zx_status_t status = allocate_vmar(false, &mapping, &vmar);
vm/kstack.cc-  if (status != ZX_OK) {
vm/kstack.cc-    return status;
vm/kstack.cc-  }
vm/kstack.cc-  stack->size = mapping->size();
vm/kstack.cc-  stack->base = mapping->base();
vm/kstack.cc-  stack->top = mapping->base() + DEFAULT_STACK_SIZE;
vm/kstack.cc-
vm/kstack.cc-  // Stash address of VMAR so we can later free it in |vm_free_kstack|.
vm/kstack.cc-  stack->vmar = vmar.leak_ref();
vm/kstack.cc-
vm/kstack.cc:#if __has_feature(safe_stack)
vm/kstack.cc-  status = allocate_vmar(true, &mapping, &vmar);
vm/kstack.cc-  if (status != ZX_OK) {
vm/kstack.cc-    vm_free_kstack(stack);
vm/kstack.cc-    return status;
vm/kstack.cc-  }
vm/kstack.cc-  stack->size = mapping->size();
vm/kstack.cc-  stack->unsafe_base = mapping->base();
vm/kstack.cc-
vm/kstack.cc-  // Stash address of VMAR so we can later free it in |vm_free_kstack|.
vm/kstack.cc-  stack->unsafe_vmar = vmar.leak_ref();
vm/kstack.cc-#endif
vm/kstack.cc-
vm/kstack.cc-  return ZX_OK;
vm/kstack.cc-}
vm/kstack.cc-
vm/kstack.cc-zx_status_t vm_free_kstack(kstack_t* stack) {
vm/kstack.cc-  stack->base = 0;
vm/kstack.cc-  stack->size = 0;
vm/kstack.cc-  stack->top = 0;
vm/kstack.cc-
vm/kstack.cc-  if (stack->vmar != nullptr) {
vm/kstack.cc-    fbl::RefPtr<VmAddressRegion> vmar =
vm/kstack.cc-        fbl::internal::MakeRefPtrNoAdopt(static_cast<VmAddressRegion*>(stack->vmar));
vm/kstack.cc-    zx_status_t status = vmar->Destroy();
vm/kstack.cc-    if (status != ZX_OK) {
vm/kstack.cc-      return status;
vm/kstack.cc-    }
vm/kstack.cc-    stack->vmar = nullptr;
vm/kstack.cc-  }
vm/kstack.cc-
vm/kstack.cc:#if __has_feature(safe_stack)
vm/kstack.cc-  stack->unsafe_base = 0;
vm/kstack.cc-
vm/kstack.cc-  if (stack->unsafe_vmar != nullptr) {
vm/kstack.cc-    fbl::RefPtr<VmAddressRegion> vmar =
vm/kstack.cc-        fbl::internal::MakeRefPtrNoAdopt(static_cast<VmAddressRegion*>(stack->unsafe_vmar));
vm/kstack.cc-    zx_status_t status = vmar->Destroy();
vm/kstack.cc-    if (status != ZX_OK) {
vm/kstack.cc-      return status;
vm/kstack.cc-    }
vm/kstack.cc-    stack->unsafe_vmar = nullptr;
vm/kstack.cc-  }
vm/kstack.cc-#endif
vm/kstack.cc-
vm/kstack.cc-  return ZX_OK;
vm/kstack.cc-}