#include <pexpert/arm64/board_config.h>
#include <kern/kalloc.h>
#include <kern/machine.h>
#include <kern/cpu_number.h>
#include <kern/thread.h>
#include <kern/timer_queue.h>
#include <arm/cpu_data.h>
#include <arm/cpuid.h>
#include <arm/caches_internal.h>
#include <arm/cpu_data_internal.h>
#include <arm/cpu_internal.h>
#include <arm/misc_protos.h>
#include <arm/machine_cpu.h>
#include <arm/rtclock.h>
#include <arm64/proc_reg.h>
#include <mach/processor_info.h>
#include <vm/pmap.h>
#include <vm/vm_kern.h>
#include <vm/vm_map.h>
#include <pexpert/arm/protos.h>
#include <pexpert/device_tree.h>
#include <sys/kdebug.h>
#include <arm/machine_routines.h>
#include <machine/atomic.h>
#include <san/kasan.h>
#if KPC
#include <kern/kpc.h>
#endif
#if MONOTONIC
#include <kern/monotonic.h>
#endif
extern boolean_t idle_enable;
extern uint64_t wake_abstime;
#if WITH_CLASSIC_S2R
void sleep_token_buffer_init(void);
#endif
extern uintptr_t resume_idle_cpu;
extern uintptr_t start_cpu;
#if __ARM_KERNEL_PROTECT__
extern void exc_vectors_table;
#endif
extern void __attribute__((noreturn)) arm64_prepare_for_sleep(void);
extern void arm64_force_wfi_clock_gate(void);
#if (defined(APPLECYCLONE) || defined(APPLETYPHOON))
extern void cyclone_typhoon_prepare_for_wfi(void);
extern void cyclone_typhoon_return_from_wfi(void);
#endif
vm_address_t start_cpu_paddr;
sysreg_restore_t sysreg_restore __attribute__((section("__DATA, __const"))) = {
.tcr_el1 = TCR_EL1_BOOT,
};
static int wfi = 1;
#if DEVELOPMENT || DEBUG
static int wfi_flags = 0;
static uint64_t wfi_delay = 0;
#endif
#if __ARM_GLOBAL_SLEEP_BIT__
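/* Secondary CPUs spin on this flag in ml_arm_sleep() until the boot CPU clears it. */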
volatile boolean_t arm64_stall_sleep = TRUE;
#endif
#if WITH_CLASSIC_S2R
static const uint8_t __attribute__ ((aligned(8))) suspend_signature[] = {'X', 'S', 'O', 'M', 'P', 'S', 'U', 'S'};
static const uint8_t __attribute__ ((aligned(8))) running_signature[] = {'X', 'S', 'O', 'M', 'N', 'N', 'U', 'R'};
#endif
#if WITH_CLASSIC_S2R
static vm_offset_t sleepTokenBuffer = (vm_offset_t)NULL;
#endif
static boolean_t coresight_debug_enabled = FALSE;
#if defined(CONFIG_XNUPOST)
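/* XNUPOST IPI test: cross-call every CPU and verify that each one writes its cpu number back within the timeout. */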
void arm64_ipi_test_callback(void *);
void arm64_ipi_test_callback(void *parm) {
volatile uint64_t *ipi_test_data = parm;
cpu_data_t *cpu_data;
cpu_data = getCpuDatap();
*ipi_test_data = cpu_data->cpu_number;
}
uint64_t arm64_ipi_test_data[MAX_CPUS];
void arm64_ipi_test(void) {
volatile uint64_t *ipi_test_data;
uint32_t timeout_ms = 100;
uint64_t then, now, delta;
int current_cpu_number = getCpuDatap()->cpu_number;
if (real_ncpus == 1) {
return;
}
for (unsigned int i = 0; i < MAX_CPUS; ++i) {
ipi_test_data = &arm64_ipi_test_data[i];
*ipi_test_data = ~i;
kern_return_t error = cpu_xcall((int)i, (void *)arm64_ipi_test_callback, (void *)(uintptr_t)ipi_test_data);
if (error != KERN_SUCCESS)
panic("CPU %d was unable to IPI CPU %u: error %d", current_cpu_number, i, error);
then = mach_absolute_time();
while (*ipi_test_data != i) {
now = mach_absolute_time();
absolutetime_to_nanoseconds(now-then, &delta);
if ((delta / NSEC_PER_MSEC) > timeout_ms) {
panic("CPU %d tried to IPI CPU %d but didn't get correct response within %dms, respose: %llx", current_cpu_number, i, timeout_ms, *ipi_test_data);
}
}
}
}
#endif
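/*
 * Map this CPU's CoreSight debug regions (skipping CTI, and ED/UTT unless CoreSight
 * debug is enabled) and unlock them by writing the lock access key to DBGLAR.
 */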
static void
configure_coresight_registers(cpu_data_t *cdp)
{
uint64_t addr;
int i;
assert(cdp);
if (cdp->cpu_regmap_paddr) {
for (i = 0; i < CORESIGHT_REGIONS; ++i) {
if (i == CORESIGHT_CTI)
continue;
if (((i == CORESIGHT_ED) || (i == CORESIGHT_UTT)) && !coresight_debug_enabled)
continue;
if (!cdp->coresight_base[i]) {
addr = cdp->cpu_regmap_paddr + CORESIGHT_OFFSET(i);
cdp->coresight_base[i] = (vm_offset_t)ml_io_map(addr, CORESIGHT_SIZE);
if (!cdp->coresight_base[i]) {
panic("unable to ml_io_map coresight regions");
}
}
if (i != CORESIGHT_UTT)
*(volatile uint32_t *)(cdp->coresight_base[i] + ARM_DEBUG_OFFSET_DBGLAR) = ARM_DBG_LOCK_ACCESS_KEY;
}
}
}
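/* Early CPU bootstrap hook; nothing to do on arm64. */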
void
cpu_bootstrap(void)
{
}
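/*
 * Prepare the calling CPU for sleep: switch to the kernel pmap, point the reset
 * handler at start_cpu, mark the sleep state, quiesce the performance counters,
 * clean the D-cache, and hand off to the platform expert.
 */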
void
cpu_sleep(void)
{
cpu_data_t *cpu_data_ptr = getCpuDatap();
pmap_switch_user_ttb(kernel_pmap);
cpu_data_ptr->cpu_active_thread = current_thread();
cpu_data_ptr->cpu_reset_handler = (uintptr_t) start_cpu_paddr;
cpu_data_ptr->cpu_flags |= SleepState;
cpu_data_ptr->cpu_user_debug = NULL;
#if KPC
kpc_idle();
#endif
#if MONOTONIC
mt_cpu_down(cpu_data_ptr);
#endif
CleanPoC_Dcache();
PE_cpu_machine_quiesce(cpu_data_ptr->cpu_id);
}
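/*
 * Processor idle loop: notify the idle handler, reprogram the idle timer, then
 * wait in WFI until the next interrupt or timer pop.  Does not return; control
 * resumes through cpu_idle_exit() and Idle_load_context().
 */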
void __attribute__((noreturn))
cpu_idle(void)
{
cpu_data_t *cpu_data_ptr = getCpuDatap();
uint64_t new_idle_timeout_ticks = 0x0ULL, lastPop;
if ((!idle_enable) || (cpu_data_ptr->cpu_signal & SIGPdisabled))
Idle_load_context();
if (!SetIdlePop())
Idle_load_context();
lastPop = cpu_data_ptr->rtcPop;
pmap_switch_user_ttb(kernel_pmap);
cpu_data_ptr->cpu_active_thread = current_thread();
if (cpu_data_ptr->cpu_user_debug)
arm_debug_set(NULL);
cpu_data_ptr->cpu_user_debug = NULL;
if (cpu_data_ptr->cpu_idle_notify)
((processor_idle_t) cpu_data_ptr->cpu_idle_notify) (cpu_data_ptr->cpu_id, TRUE, &new_idle_timeout_ticks);
if (cpu_data_ptr->idle_timer_notify != 0) {
if (new_idle_timeout_ticks == 0x0ULL) {
cpu_data_ptr->idle_timer_deadline = 0x0ULL;
} else {
clock_absolutetime_interval_to_deadline(new_idle_timeout_ticks, &cpu_data_ptr->idle_timer_deadline);
}
timer_resync_deadlines();
if (cpu_data_ptr->rtcPop != lastPop)
SetIdlePop();
}
#if KPC
kpc_idle();
#endif
#if MONOTONIC
mt_cpu_idle(cpu_data_ptr);
#endif
if (wfi) {
platform_cache_idle_enter();
#if DEVELOPMENT || DEBUG
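/* When simulating WFI overhead (wfi=2), force WFI to clock gating only. */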
if (wfi == 2) {
arm64_force_wfi_clock_gate();
}
#endif
#if defined(APPLECYCLONE) || defined(APPLETYPHOON)
cyclone_typhoon_prepare_for_wfi();
#endif
__builtin_arm_dsb(DSB_SY);
__builtin_arm_wfi();
#if defined(APPLECYCLONE) || defined(APPLETYPHOON)
cyclone_typhoon_return_from_wfi();
#endif
#if DEVELOPMENT || DEBUG
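/* Simulated WFI exit: optionally flush caches and the TLB, then spin until the configured delay elapses. */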
if (wfi == 2) {
uint64_t deadline;
clock_absolutetime_interval_to_deadline(wfi_delay, &deadline);
if ((wfi_flags & 1) != 0) {
InvalidatePoU_Icache();
FlushPoC_Dcache();
}
if ((wfi_flags & 2) != 0) {
flush_core_tlb();
}
clock_delay_until(deadline);
}
#endif
platform_cache_idle_exit();
}
ClearIdlePop(TRUE);
cpu_idle_exit(FALSE);
}
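/*
 * Leave the idle loop (or resume after a core reset): reconfigure CoreSight when
 * coming from reset, resume the performance counters, restore the active thread's
 * pmap, notify the idle handler, and reload the idle context.
 */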
void
cpu_idle_exit(boolean_t from_reset)
{
uint64_t new_idle_timeout_ticks = 0x0ULL;
cpu_data_t *cpu_data_ptr = getCpuDatap();
assert(exception_stack_pointer() != 0);
if (from_reset)
configure_coresight_registers(cpu_data_ptr);
#if KPC
kpc_idle_exit();
#endif
#if MONOTONIC
mt_cpu_run(cpu_data_ptr);
#endif
pmap_switch_user_ttb(cpu_data_ptr->cpu_active_thread->map->pmap);
if (cpu_data_ptr->cpu_idle_notify)
((processor_idle_t) cpu_data_ptr->cpu_idle_notify) (cpu_data_ptr->cpu_id, FALSE, &new_idle_timeout_ticks);
if (cpu_data_ptr->idle_timer_notify != 0) {
if (new_idle_timeout_ticks == 0x0ULL) {
cpu_data_ptr->idle_timer_deadline = 0x0ULL;
} else {
clock_absolutetime_interval_to_deadline(new_idle_timeout_ticks, &cpu_data_ptr->idle_timer_deadline);
}
timer_resync_deadlines();
}
Idle_load_context();
}
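/*
 * Per-CPU initialization, run at boot and on every wake: on the first pass set the
 * CPU type/subtype and the rtclock timer queue, gather CPUID/cache/debug info, then
 * reset the wake statistics and mark the CPU running.
 */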
void
cpu_init(void)
{
cpu_data_t *cdp = getCpuDatap();
arm_cpu_info_t *cpu_info_p;
assert(exception_stack_pointer() != 0);
if (cdp->cpu_type != CPU_TYPE_ARM64) {
cdp->cpu_type = CPU_TYPE_ARM64;
timer_call_queue_init(&cdp->rtclock_timer.queue);
cdp->rtclock_timer.deadline = EndOfAllTime;
if (cdp == &BootCpuData) {
do_cpuid();
do_cacheid();
do_mvfpid();
} else {
pmap_cpu_data_init();
}
do_debugid();
cpu_info_p = cpuid_info();
switch (cpu_info_p->arm_info.arm_arch) {
case CPU_ARCH_ARMv8:
cdp->cpu_subtype = CPU_SUBTYPE_ARM64_V8;
break;
default:
panic("Unknown CPU subtype...");
break;
}
cdp->cpu_threadtype = CPU_THREADTYPE_NONE;
}
cdp->cpu_stat.irq_ex_cnt_wake = 0;
cdp->cpu_stat.ipi_cnt_wake = 0;
cdp->cpu_stat.timer_cnt_wake = 0;
cdp->cpu_running = TRUE;
cdp->cpu_sleep_token_last = cdp->cpu_sleep_token;
cdp->cpu_sleep_token = 0x0UL;
#if KPC
kpc_idle_exit();
#endif
#if MONOTONIC
mt_cpu_up(cdp);
#endif
}
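/* Allocate this CPU's interrupt and exception stacks, each bracketed by guard pages. */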
void
cpu_stack_alloc(cpu_data_t *cpu_data_ptr)
{
vm_offset_t irq_stack = 0;
vm_offset_t exc_stack = 0;
kern_return_t kr = kernel_memory_allocate(kernel_map, &irq_stack,
INTSTACK_SIZE + (2 * PAGE_SIZE),
PAGE_MASK,
KMA_GUARD_FIRST | KMA_GUARD_LAST | KMA_KSTACK | KMA_KOBJECT,
VM_KERN_MEMORY_STACK);
if (kr != KERN_SUCCESS)
panic("Unable to allocate cpu interrupt stack\n");
cpu_data_ptr->intstack_top = irq_stack + PAGE_SIZE + INTSTACK_SIZE;
cpu_data_ptr->istackptr = cpu_data_ptr->intstack_top;
kr = kernel_memory_allocate(kernel_map, &exc_stack,
EXCEPSTACK_SIZE + (2 * PAGE_SIZE),
PAGE_MASK,
KMA_GUARD_FIRST | KMA_GUARD_LAST | KMA_KSTACK | KMA_KOBJECT,
VM_KERN_MEMORY_STACK);
if (kr != KERN_SUCCESS)
panic("Unable to allocate cpu exception stack\n");
cpu_data_ptr->excepstack_top = exc_stack + PAGE_SIZE + EXCEPSTACK_SIZE;
cpu_data_ptr->excepstackptr = cpu_data_ptr->excepstack_top;
}
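/* Release a secondary CPU's processor structure, stacks, and cpu_data; the boot CPU's data is never freed. */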
void
cpu_data_free(cpu_data_t *cpu_data_ptr)
{
if (cpu_data_ptr == &BootCpuData)
return;
cpu_processor_free( cpu_data_ptr->cpu_processor);
kfree( (void *)(cpu_data_ptr->intstack_top - INTSTACK_SIZE), INTSTACK_SIZE);
kfree( (void *)(cpu_data_ptr->excepstack_top - EXCEPSTACK_SIZE), EXCEPSTACK_SIZE);
kmem_free(kernel_map, (vm_offset_t)cpu_data_ptr, sizeof(cpu_data_t));
}
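/* Set a cpu_data_t to its default state before the CPU is brought up. */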
void
cpu_data_init(cpu_data_t *cpu_data_ptr)
{
uint32_t i;
cpu_data_ptr->cpu_flags = 0;
cpu_data_ptr->interrupts_enabled = 0;
cpu_data_ptr->cpu_int_state = 0;
cpu_data_ptr->cpu_pending_ast = AST_NONE;
cpu_data_ptr->cpu_cache_dispatch = (void *) 0;
cpu_data_ptr->rtcPop = EndOfAllTime;
cpu_data_ptr->rtclock_datap = &RTClockData;
cpu_data_ptr->cpu_user_debug = NULL;
cpu_data_ptr->cpu_base_timebase = 0;
cpu_data_ptr->cpu_idle_notify = (void *) 0;
cpu_data_ptr->cpu_idle_latency = 0x0ULL;
cpu_data_ptr->cpu_idle_pop = 0x0ULL;
cpu_data_ptr->cpu_reset_type = 0x0UL;
cpu_data_ptr->cpu_reset_handler = 0x0UL;
cpu_data_ptr->cpu_reset_assist = 0x0UL;
cpu_data_ptr->cpu_regmap_paddr = 0x0ULL;
cpu_data_ptr->cpu_phys_id = 0x0UL;
cpu_data_ptr->cpu_l2_access_penalty = 0;
cpu_data_ptr->cpu_cluster_type = CLUSTER_TYPE_SMP;
cpu_data_ptr->cpu_cluster_id = 0;
cpu_data_ptr->cpu_l2_id = 0;
cpu_data_ptr->cpu_l2_size = 0;
cpu_data_ptr->cpu_l3_id = 0;
cpu_data_ptr->cpu_l3_size = 0;
cpu_data_ptr->cpu_signal = SIGPdisabled;
#if DEBUG || DEVELOPMENT
cpu_data_ptr->failed_xcall = NULL;
cpu_data_ptr->failed_signal = 0;
cpu_data_ptr->failed_signal_count = 0;
#endif
cpu_data_ptr->cpu_get_fiq_handler = NULL;
cpu_data_ptr->cpu_tbd_hardware_addr = NULL;
cpu_data_ptr->cpu_tbd_hardware_val = NULL;
cpu_data_ptr->cpu_get_decrementer_func = NULL;
cpu_data_ptr->cpu_set_decrementer_func = NULL;
cpu_data_ptr->cpu_sleep_token = ARM_CPU_ON_SLEEP_PATH;
cpu_data_ptr->cpu_sleep_token_last = 0x00000000UL;
cpu_data_ptr->cpu_xcall_p0 = NULL;
cpu_data_ptr->cpu_xcall_p1 = NULL;
for (i = 0; i < CORESIGHT_REGIONS; ++i) {
cpu_data_ptr->coresight_base[i] = 0;
}
pmap_cpu_data_t * pmap_cpu_data_ptr = &cpu_data_ptr->cpu_pmap_cpu_data;
pmap_cpu_data_ptr->cpu_nested_pmap = (struct pmap *) NULL;
pmap_cpu_data_ptr->cpu_number = PMAP_INVALID_CPU_NUM;
for (i = 0; i < (sizeof(pmap_cpu_data_ptr->cpu_asid_high_bits) / sizeof(*pmap_cpu_data_ptr->cpu_asid_high_bits)); i++) {
pmap_cpu_data_ptr->cpu_asid_high_bits[i] = 0;
}
cpu_data_ptr->halt_status = CPU_NOT_HALTED;
#if __ARM_KERNEL_PROTECT__
cpu_data_ptr->cpu_exc_vectors = (vm_offset_t)&exc_vectors_table;
#endif
}
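/* Publish this CPU's cpu_data virtual and physical addresses in CpuDataEntries (and tell KASan not to poison its pmap copy windows). */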
kern_return_t
cpu_data_register(cpu_data_t *cpu_data_ptr)
{
int cpu = cpu_data_ptr->cpu_number;
#if KASAN
for (int i = 0; i < CPUWINDOWS_MAX; i++) {
kasan_notify_address_nopoison(pmap_cpu_windows_copy_addr(cpu, i), PAGE_SIZE);
}
#endif
CpuDataEntries[cpu].cpu_data_vaddr = cpu_data_ptr;
CpuDataEntries[cpu].cpu_data_paddr = (void *)ml_vtophys( (vm_offset_t)cpu_data_ptr);
return KERN_SUCCESS;
}
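/*
 * Start a CPU.  The calling CPU only needs machine init and CoreSight setup; a
 * secondary CPU also gets its reset handler, first thread, and flushed cpu_data
 * before PE_cpu_start() kicks it.
 */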
kern_return_t
cpu_start(int cpu)
{
cpu_data_t *cpu_data_ptr = CpuDataEntries[cpu].cpu_data_vaddr;
kprintf("cpu_start() cpu: %d\n", cpu);
if (cpu == cpu_number()) {
cpu_machine_init();
configure_coresight_registers(cpu_data_ptr);
} else {
thread_t first_thread;
cpu_data_ptr->cpu_reset_handler = (vm_offset_t) start_cpu_paddr;
cpu_data_ptr->cpu_pmap_cpu_data.cpu_nested_pmap = NULL;
if (cpu_data_ptr->cpu_processor->next_thread != THREAD_NULL)
first_thread = cpu_data_ptr->cpu_processor->next_thread;
else
first_thread = cpu_data_ptr->cpu_processor->idle_thread;
cpu_data_ptr->cpu_active_thread = first_thread;
first_thread->machine.CpuDatap = cpu_data_ptr;
configure_coresight_registers(cpu_data_ptr);
flush_dcache((vm_offset_t)&CpuDataEntries[cpu], sizeof(cpu_data_entry_t), FALSE);
flush_dcache((vm_offset_t)cpu_data_ptr, sizeof(cpu_data_t), FALSE);
(void) PE_cpu_start(cpu_data_ptr->cpu_id, (vm_offset_t)NULL, (vm_offset_t)NULL);
}
return KERN_SUCCESS;
}
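/*
 * Wire up the rtclock timebase callbacks on first use and, when the boot CPU wakes,
 * rebase rtclock_base_abstime so the timebase stays continuous across sleep.
 */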
void
cpu_timebase_init(boolean_t from_boot)
{
cpu_data_t *cdp = getCpuDatap();
if (cdp->cpu_get_fiq_handler == NULL) {
cdp->cpu_get_fiq_handler = rtclock_timebase_func.tbd_fiq_handler;
cdp->cpu_get_decrementer_func = rtclock_timebase_func.tbd_get_decrementer;
cdp->cpu_set_decrementer_func = rtclock_timebase_func.tbd_set_decrementer;
cdp->cpu_tbd_hardware_addr = (void *)rtclock_timebase_addr;
cdp->cpu_tbd_hardware_val = (void *)rtclock_timebase_val;
}
if (!from_boot && (cdp == &BootCpuData)) {
rtclock_base_abstime = wake_abstime - ml_get_hwclock();
}
cdp->cpu_decrementer = 0x7FFFFFFFUL;
cdp->cpu_timebase = 0x0UL;
cdp->cpu_base_timebase = rtclock_base_abstime;
}
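/* Return the cluster ID of the calling CPU. */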
int
cpu_cluster_id(void)
{
return (getCpuDatap()->cpu_cluster_id);
}
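/*
 * Put the calling CPU onto the sleep path.  The boot CPU waits until every other
 * CPU has reached the sleep path, records wake_abstime, and releases the global
 * sleep stall; all paths end in arm64_prepare_for_sleep(), which does not return.
 */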
__attribute__((noreturn))
void
ml_arm_sleep(void)
{
cpu_data_t *cpu_data_ptr = getCpuDatap();
if (cpu_data_ptr == &BootCpuData) {
cpu_data_t *target_cdp;
int cpu;
int max_cpu;
max_cpu = ml_get_max_cpu_number();
for (cpu=0; cpu <= max_cpu; cpu++) {
target_cdp = (cpu_data_t *)CpuDataEntries[cpu].cpu_data_vaddr;
if ((target_cdp == NULL) || (target_cdp == cpu_data_ptr))
continue;
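/* Wait for this secondary CPU to reach the sleep path before the boot CPU proceeds. */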
while (target_cdp->cpu_sleep_token != ARM_CPU_ON_SLEEP_PATH);
}
wake_abstime = ml_get_timebase();
} else {
CleanPoU_Dcache();
}
cpu_data_ptr->cpu_sleep_token = ARM_CPU_ON_SLEEP_PATH;
if (cpu_data_ptr == &BootCpuData) {
#if WITH_CLASSIC_S2R
if(sleepTokenBuffer != (vm_offset_t) NULL) {
platform_cache_shutdown();
bcopy((const void *)suspend_signature, (void *)sleepTokenBuffer, sizeof(SleepToken));
}
else {
panic("No sleep token buffer");
}
#endif
#if __ARM_GLOBAL_SLEEP_BIT__
arm64_stall_sleep = FALSE;
__builtin_arm_dmb(DMB_ISH);
#endif
if (cpu_data_ptr->coresight_base[CORESIGHT_ED]) {
*(volatile uint32_t *)(cpu_data_ptr->coresight_base[CORESIGHT_ED] + ARM_DEBUG_OFFSET_DBGLAR) = ARM_DBG_LOCK_ACCESS_KEY;
*(volatile uint32_t *)(cpu_data_ptr->coresight_base[CORESIGHT_ED] + ARM_DEBUG_OFFSET_DBGPRCR) = 0;
}
#if MONOTONIC
mt_sleep();
#endif
arm64_prepare_for_sleep();
} else {
#if __ARM_GLOBAL_SLEEP_BIT__
while (arm64_stall_sleep) {
__builtin_arm_wfe();
}
#endif
CleanPoU_DcacheRegion((vm_offset_t) cpu_data_ptr, sizeof(cpu_data_t));
if (cpu_data_ptr->coresight_base[CORESIGHT_ED]) {
*(volatile uint32_t *)(cpu_data_ptr->coresight_base[CORESIGHT_ED] + ARM_DEBUG_OFFSET_DBGLAR) = ARM_DBG_LOCK_ACCESS_KEY;
*(volatile uint32_t *)(cpu_data_ptr->coresight_base[CORESIGHT_ED] + ARM_DEBUG_OFFSET_DBGPRCR) = 0;
}
arm64_prepare_for_sleep();
}
}
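/*
 * Idle/sleep bootstrap.  At boot, parse the "jtag" and "wfi" boot-args, install the
 * reset handler data and the physical addresses of start_cpu/resume_idle_cpu, and
 * read the production status from the device tree; on every call, point this CPU's
 * reset handler at the idle-resume path.
 */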
void
cpu_machine_idle_init(boolean_t from_boot)
{
static vm_address_t resume_idle_cpu_paddr = (vm_address_t)NULL;
cpu_data_t *cpu_data_ptr = getCpuDatap();
if (from_boot) {
unsigned long jtag = 0;
int wfi_tmp = 1;
uint32_t production = 1;
DTEntry entry;
if (PE_parse_boot_argn("jtag", &jtag, sizeof (jtag))) {
if (jtag != 0)
idle_enable = FALSE;
else
idle_enable = TRUE;
} else
idle_enable = TRUE;
PE_parse_boot_argn("wfi", &wfi_tmp, sizeof (wfi_tmp));
switch (wfi_tmp & 0xff) {
case 0 :
wfi = 0;
break;
#if DEVELOPMENT || DEBUG
case 2 :
wfi = 2;
wfi_flags = (wfi_tmp >> 8) & 0xFF;
nanoseconds_to_absolutetime(((wfi_tmp >> 16) & 0xFFFF) * NSEC_PER_MSEC, &wfi_delay);
break;
#endif
case 1 :
default :
break;
}
ResetHandlerData.assist_reset_handler = 0;
ResetHandlerData.cpu_data_entries = ml_static_vtop((vm_offset_t)CpuDataEntries);
#ifdef MONITOR
monitor_call(MONITOR_SET_ENTRY, (uintptr_t)ml_static_vtop((vm_offset_t)&LowResetVectorBase), 0, 0);
#elif !defined(NO_MONITOR)
#error MONITOR undefined, WFI power gating may not operate correctly
#endif
if (kSuccess == DTLookupEntry(NULL, "/chosen", &entry)) {
unsigned int size;
void *prop;
if (kSuccess == DTGetProperty(entry, "effective-production-status-ap", &prop, &size))
if (size == 4)
bcopy(prop, &production, size);
}
if (!production) {
#if defined(APPLE_ARM64_ARCH_FAMILY)
coresight_debug_enabled = TRUE;
#endif
}
start_cpu_paddr = ml_static_vtop((vm_offset_t)&start_cpu);
resume_idle_cpu_paddr = ml_static_vtop((vm_offset_t)&resume_idle_cpu);
}
#if WITH_CLASSIC_S2R
if (cpu_data_ptr == &BootCpuData) {
static addr64_t SleepToken_low_paddr = (addr64_t)NULL;
if (sleepTokenBuffer != (vm_offset_t) NULL) {
SleepToken_low_paddr = ml_vtophys(sleepTokenBuffer);
}
else {
panic("No sleep token buffer");
}
bcopy_phys((addr64_t)ml_static_vtop((vm_offset_t)running_signature),
SleepToken_low_paddr, sizeof(SleepToken));
flush_dcache((vm_offset_t)SleepToken, sizeof(SleepToken), TRUE);
}
#endif
cpu_data_ptr->cpu_reset_handler = resume_idle_cpu_paddr;
clean_dcache((vm_offset_t)cpu_data_ptr, sizeof(cpu_data_t), FALSE);
}
_Atomic uint32_t cpu_idle_count = 0;
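/* Track how many CPUs are currently in platform idle. */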
void
machine_track_platform_idle(boolean_t entry)
{
if (entry)
(void)__c11_atomic_fetch_add(&cpu_idle_count, 1, __ATOMIC_RELAXED);
else
(void)__c11_atomic_fetch_sub(&cpu_idle_count, 1, __ATOMIC_RELAXED);
}
#if WITH_CLASSIC_S2R
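/* Map the classic S2R sleep token region described by the "stram" device tree node; done once, on the boot CPU. */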
void
sleep_token_buffer_init(void)
{
cpu_data_t *cpu_data_ptr = getCpuDatap();
DTEntry entry;
unsigned int size;
void **prop;
if ((cpu_data_ptr == &BootCpuData) && (sleepTokenBuffer == (vm_offset_t) NULL)) {
if (kSuccess != DTLookupEntry(0, "stram", &entry))
return;
if (kSuccess != DTGetProperty(entry, "reg", (void **)&prop, (unsigned int *)&size))
return;
sleepTokenBuffer = ml_io_map(((vm_offset_t *)prop)[0], ((vm_size_t *)prop)[1]);
}
}
#endif