Mirror of https://git.suyu.dev/suyu/suyu, synced 2025-11-04 00:49:02 -06:00
	kernel: remove kernel_
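This commit applies the codebase's `m_` member-naming convention to the kernel reference held by `KAutoObject` and related classes: the member formerly named `kernel` becomes `m_kernel`, and constructor parameters drop the disambiguating trailing underscore (`kernel_` becomes `kernel`). A minimal, self-contained sketch of the before/after pattern follows; the `KernelCore` stub and the `Before`/`After` class names are illustrative stand-ins, not types taken from this diff:

    // Stand-in for the real Kernel::KernelCore class used throughout the diff below.
    class KernelCore {};

    // Before: the member is `kernel`, so the parameter needs a trailing underscore.
    class KAutoObjectBefore {
    public:
        explicit KAutoObjectBefore(KernelCore& kernel_) : kernel(kernel_) {}
    protected:
        KernelCore& kernel;
    };

    // After: the member is `m_kernel`, so the parameter can simply be `kernel`.
    class KAutoObjectAfter {
    public:
        explicit KAutoObjectAfter(KernelCore& kernel) : m_kernel(kernel) {}
    protected:
        KernelCore& m_kernel;
    };

Derived classes then refer to `m_kernel` in their method bodies, which is exactly the mechanical substitution the hunks below perform.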
@@ -12,11 +12,11 @@ KAutoObject* KAutoObject::Create(KAutoObject* obj) {
 }

 void KAutoObject::RegisterWithKernel() {
-    kernel.RegisterKernelObject(this);
+    m_kernel.RegisterKernelObject(this);
 }

 void KAutoObject::UnregisterWithKernel() {
-    kernel.UnregisterKernelObject(this);
+    m_kernel.UnregisterKernelObject(this);
 }

 } // namespace Kernel

@@ -80,7 +80,7 @@ private:
     KERNEL_AUTOOBJECT_TRAITS_IMPL(KAutoObject, KAutoObject, const);

 public:
-    explicit KAutoObject(KernelCore& kernel_) : kernel(kernel_) {
+    explicit KAutoObject(KernelCore& kernel) : m_kernel(kernel) {
         RegisterWithKernel();
     }
     virtual ~KAutoObject() = default;
@@ -169,7 +169,7 @@ private:
     void UnregisterWithKernel();

 protected:
-    KernelCore& kernel;
+    KernelCore& m_kernel;

 private:
     std::atomic<u32> m_ref_count{};
@@ -179,7 +179,7 @@ class KAutoObjectWithListContainer;

 class KAutoObjectWithList : public KAutoObject, public boost::intrusive::set_base_hook<> {
 public:
-    explicit KAutoObjectWithList(KernelCore& kernel_) : KAutoObject(kernel_) {}
+    explicit KAutoObjectWithList(KernelCore& kernel) : KAutoObject(kernel) {}

     static int Compare(const KAutoObjectWithList& lhs, const KAutoObjectWithList& rhs) {
         const u64 lid = lhs.GetId();

@@ -11,7 +11,7 @@

 namespace Kernel {

-KClientPort::KClientPort(KernelCore& kernel_) : KSynchronizationObject{kernel_} {}
+KClientPort::KClientPort(KernelCore& kernel) : KSynchronizationObject{kernel} {}
 KClientPort::~KClientPort() = default;

 void KClientPort::Initialize(KPort* parent, s32 max_sessions) {
@@ -23,7 +23,7 @@ void KClientPort::Initialize(KPort* parent, s32 max_sessions) {
 }

 void KClientPort::OnSessionFinalized() {
-    KScopedSchedulerLock sl{kernel};
+    KScopedSchedulerLock sl{m_kernel};

     if (const auto prev = m_num_sessions--; prev == m_max_sessions) {
         this->NotifyAvailable();
@@ -58,12 +58,12 @@ Result KClientPort::CreateSession(KClientSession** out) {

     // Reserve a new session from the resource limit.
     //! FIXME: we are reserving this from the wrong resource limit!
-    KScopedResourceReservation session_reservation(kernel.ApplicationProcess()->GetResourceLimit(),
-                                                   LimitableResource::SessionCountMax);
+    KScopedResourceReservation session_reservation(
+        m_kernel.ApplicationProcess()->GetResourceLimit(), LimitableResource::SessionCountMax);
     R_UNLESS(session_reservation.Succeeded(), ResultLimitReached);

     // Allocate a session normally.
-    session = KSession::Create(kernel);
+    session = KSession::Create(m_kernel);

     // Check that we successfully created a session.
     R_UNLESS(session != nullptr, ResultOutOfResource);
@@ -105,7 +105,7 @@ Result KClientPort::CreateSession(KClientSession** out) {
     session_reservation.Commit();

     // Register the session.
-    KSession::Register(kernel, session);
+    KSession::Register(m_kernel, session);
     ON_RESULT_FAILURE {
         session->GetClientSession().Close();
         session->GetServerSession().Close();

@@ -12,8 +12,7 @@ namespace Kernel {

 static constexpr u32 MessageBufferSize = 0x100;

-KClientSession::KClientSession(KernelCore& kernel_)
-    : KAutoObjectWithSlabHeapAndContainer{kernel_} {}
+KClientSession::KClientSession(KernelCore& kernel) : KAutoObjectWithSlabHeapAndContainer{kernel} {}
 KClientSession::~KClientSession() = default;

 void KClientSession::Destroy() {
@@ -25,12 +24,12 @@ void KClientSession::OnServerClosed() {}

 Result KClientSession::SendSyncRequest() {
     // Create a session request.
-    KSessionRequest* request = KSessionRequest::Create(kernel);
+    KSessionRequest* request = KSessionRequest::Create(m_kernel);
     R_UNLESS(request != nullptr, ResultOutOfResource);
     SCOPE_EXIT({ request->Close(); });

     // Initialize the request.
-    request->Initialize(nullptr, GetCurrentThread(kernel).GetTLSAddress(), MessageBufferSize);
+    request->Initialize(nullptr, GetCurrentThread(m_kernel).GetTLSAddress(), MessageBufferSize);

     // Send the request.
     R_RETURN(m_parent->GetServerSession().OnRequest(request));

@@ -30,7 +30,7 @@ class KClientSession final
     KERNEL_AUTOOBJECT_TRAITS(KClientSession, KAutoObject);

 public:
-    explicit KClientSession(KernelCore& kernel_);
+    explicit KClientSession(KernelCore& kernel);
     ~KClientSession() override;

     void Initialize(KSession* parent) {

@@ -16,18 +16,18 @@

 namespace Kernel {

-KCodeMemory::KCodeMemory(KernelCore& kernel_)
-    : KAutoObjectWithSlabHeapAndContainer{kernel_}, m_lock(kernel_) {}
+KCodeMemory::KCodeMemory(KernelCore& kernel)
+    : KAutoObjectWithSlabHeapAndContainer{kernel}, m_lock(kernel) {}

 Result KCodeMemory::Initialize(Core::DeviceMemory& device_memory, VAddr addr, size_t size) {
     // Set members.
-    m_owner = GetCurrentProcessPointer(kernel);
+    m_owner = GetCurrentProcessPointer(m_kernel);

     // Get the owner page table.
     auto& page_table = m_owner->PageTable();

     // Construct the page group.
-    m_page_group.emplace(kernel, page_table.GetBlockInfoManager());
+    m_page_group.emplace(m_kernel, page_table.GetBlockInfoManager());

     // Lock the memory.
     R_TRY(page_table.LockForCodeMemory(std::addressof(*m_page_group), addr, size))
@@ -74,7 +74,7 @@ Result KCodeMemory::Map(VAddr address, size_t size) {
     R_UNLESS(!m_is_mapped, ResultInvalidState);

     // Map the memory.
-    R_TRY(GetCurrentProcess(kernel).PageTable().MapPageGroup(
+    R_TRY(GetCurrentProcess(m_kernel).PageTable().MapPageGroup(
         address, *m_page_group, KMemoryState::CodeOut, KMemoryPermission::UserReadWrite));

     // Mark ourselves as mapped.
@@ -91,8 +91,8 @@ Result KCodeMemory::Unmap(VAddr address, size_t size) {
     KScopedLightLock lk(m_lock);

     // Unmap the memory.
-    R_TRY(GetCurrentProcess(kernel).PageTable().UnmapPageGroup(address, *m_page_group,
-                                                               KMemoryState::CodeOut));
+    R_TRY(GetCurrentProcess(m_kernel).PageTable().UnmapPageGroup(address, *m_page_group,
+                                                                 KMemoryState::CodeOut));

     // Mark ourselves as unmapped.
     m_is_mapped = false;

@@ -29,7 +29,7 @@ class KCodeMemory final
     KERNEL_AUTOOBJECT_TRAITS(KCodeMemory, KAutoObject);

 public:
-    explicit KCodeMemory(KernelCore& kernel_);
+    explicit KCodeMemory(KernelCore& kernel);

     Result Initialize(Core::DeviceMemory& device_memory, VAddr address, size_t size);
     void Finalize() override;

@@ -57,8 +57,8 @@ bool UpdateLockAtomic(Core::System& system, u32* out, VAddr address, u32 if_zero

 class ThreadQueueImplForKConditionVariableWaitForAddress final : public KThreadQueue {
 public:
-    explicit ThreadQueueImplForKConditionVariableWaitForAddress(KernelCore& kernel_)
-        : KThreadQueue(kernel_) {}
+    explicit ThreadQueueImplForKConditionVariableWaitForAddress(KernelCore& kernel)
+        : KThreadQueue(kernel) {}

     void CancelWait(KThread* waiting_thread, Result wait_result, bool cancel_timer_task) override {
         // Remove the thread as a waiter from its owner.
@@ -75,8 +75,8 @@ private:

 public:
     explicit ThreadQueueImplForKConditionVariableWaitConditionVariable(
-        KernelCore& kernel_, KConditionVariable::ThreadTree* t)
-        : KThreadQueue(kernel_), m_tree(t) {}
+        KernelCore& kernel, KConditionVariable::ThreadTree* t)
+        : KThreadQueue(kernel), m_tree(t) {}

     void CancelWait(KThread* waiting_thread, Result wait_result, bool cancel_timer_task) override {
         // Remove the thread as a waiter from its owner.

@@ -12,7 +12,7 @@ class KDebug final : public KAutoObjectWithSlabHeapAndContainer<KDebug, KAutoObj
     KERNEL_AUTOOBJECT_TRAITS(KDebug, KAutoObject);

 public:
-    explicit KDebug(KernelCore& kernel_) : KAutoObjectWithSlabHeapAndContainer{kernel_} {}
+    explicit KDebug(KernelCore& kernel) : KAutoObjectWithSlabHeapAndContainer{kernel} {}

     static void PostDestroy(uintptr_t arg) {}
 };

@@ -9,8 +9,8 @@

 namespace Kernel {

-KDeviceAddressSpace::KDeviceAddressSpace(KernelCore& kernel_)
-    : KAutoObjectWithSlabHeapAndContainer(kernel_), m_lock(kernel_), m_is_initialized(false) {}
+KDeviceAddressSpace::KDeviceAddressSpace(KernelCore& kernel)
+    : KAutoObjectWithSlabHeapAndContainer(kernel), m_lock(kernel), m_is_initialized(false) {}
 KDeviceAddressSpace::~KDeviceAddressSpace() = default;

 void KDeviceAddressSpace::Initialize() {

@@ -7,8 +7,8 @@

 namespace Kernel {

-KEvent::KEvent(KernelCore& kernel_)
-    : KAutoObjectWithSlabHeapAndContainer{kernel_}, m_readable_event{kernel_} {}
+KEvent::KEvent(KernelCore& kernel)
+    : KAutoObjectWithSlabHeapAndContainer{kernel}, m_readable_event{kernel} {}

 KEvent::~KEvent() = default;

@@ -36,7 +36,7 @@ void KEvent::Finalize() {
 }

 Result KEvent::Signal() {
-    KScopedSchedulerLock sl{kernel};
+    KScopedSchedulerLock sl{m_kernel};

     R_SUCCEED_IF(m_readable_event_destroyed);

@@ -44,7 +44,7 @@ Result KEvent::Signal() {
 }

 Result KEvent::Clear() {
-    KScopedSchedulerLock sl{kernel};
+    KScopedSchedulerLock sl{m_kernel};

     R_SUCCEED_IF(m_readable_event_destroyed);

@@ -16,7 +16,7 @@ class KEvent final : public KAutoObjectWithSlabHeapAndContainer<KEvent, KAutoObj
     KERNEL_AUTOOBJECT_TRAITS(KEvent, KAutoObject);

 public:
-    explicit KEvent(KernelCore& kernel_);
+    explicit KEvent(KernelCore& kernel);
     ~KEvent() override;

     void Initialize(KProcess* owner);

@@ -7,8 +7,8 @@

 namespace Kernel {

-KPort::KPort(KernelCore& kernel_)
-    : KAutoObjectWithSlabHeapAndContainer{kernel_}, m_server{kernel_}, m_client{kernel_} {}
+KPort::KPort(KernelCore& kernel)
+    : KAutoObjectWithSlabHeapAndContainer{kernel}, m_server{kernel}, m_client{kernel} {}

 KPort::~KPort() = default;

@@ -29,7 +29,7 @@ void KPort::Initialize(s32 max_sessions, bool is_light, uintptr_t name) {
 }

 void KPort::OnClientClosed() {
-    KScopedSchedulerLock sl{kernel};
+    KScopedSchedulerLock sl{m_kernel};

     if (m_state == State::Normal) {
         m_state = State::ClientClosed;
@@ -37,7 +37,7 @@ void KPort::OnClientClosed() {
 }

 void KPort::OnServerClosed() {
-    KScopedSchedulerLock sl{kernel};
+    KScopedSchedulerLock sl{m_kernel};

     if (m_state == State::Normal) {
         m_state = State::ServerClosed;
@@ -45,12 +45,12 @@ void KPort::OnServerClosed() {
 }

 bool KPort::IsServerClosed() const {
-    KScopedSchedulerLock sl{kernel};
+    KScopedSchedulerLock sl{m_kernel};
     return m_state == State::ServerClosed;
 }

 Result KPort::EnqueueSession(KServerSession* session) {
-    KScopedSchedulerLock sl{kernel};
+    KScopedSchedulerLock sl{m_kernel};

     R_UNLESS(m_state == State::Normal, ResultPortClosed);

@@ -19,7 +19,7 @@ class KPort final : public KAutoObjectWithSlabHeapAndContainer<KPort, KAutoObjec
     KERNEL_AUTOOBJECT_TRAITS(KPort, KAutoObject);

 public:
-    explicit KPort(KernelCore& kernel_);
+    explicit KPort(KernelCore& kernel);
     ~KPort() override;

     static void PostDestroy(uintptr_t arg) {}

@@ -126,7 +126,7 @@ u64 KProcess::GetTotalPhysicalMemoryAvailable() {
     const u64 capacity{resource_limit->GetFreeValue(LimitableResource::PhysicalMemoryMax) +
                        page_table.GetNormalMemorySize() + GetSystemResourceSize() + image_size +
                        main_thread_stack_size};
-    if (const auto pool_size = kernel.MemoryManager().GetSize(KMemoryManager::Pool::Application);
+    if (const auto pool_size = m_kernel.MemoryManager().GetSize(KMemoryManager::Pool::Application);
         capacity != pool_size) {
         LOG_WARNING(Kernel, "capacity {} != application pool size {}", capacity, pool_size);
     }
@@ -150,7 +150,7 @@ u64 KProcess::GetTotalPhysicalMemoryUsedWithoutSystemResource() {
 }

 bool KProcess::ReleaseUserException(KThread* thread) {
-    KScopedSchedulerLock sl{kernel};
+    KScopedSchedulerLock sl{m_kernel};

     if (exception_thread == thread) {
         exception_thread = nullptr;
@@ -164,7 +164,7 @@ bool KProcess::ReleaseUserException(KThread* thread) {
             next->EndWait(ResultSuccess);
         }

-        KScheduler::SetSchedulerUpdateNeeded(kernel);
+        KScheduler::SetSchedulerUpdateNeeded(m_kernel);

         return true;
     } else {
@@ -173,11 +173,11 @@ bool KProcess::ReleaseUserException(KThread* thread) {
 }

 void KProcess::PinCurrentThread(s32 core_id) {
-    ASSERT(kernel.GlobalSchedulerContext().IsLocked());
+    ASSERT(m_kernel.GlobalSchedulerContext().IsLocked());

     // Get the current thread.
     KThread* cur_thread =
-        kernel.Scheduler(static_cast<std::size_t>(core_id)).GetSchedulerCurrentThread();
+        m_kernel.Scheduler(static_cast<std::size_t>(core_id)).GetSchedulerCurrentThread();

     // If the thread isn't terminated, pin it.
     if (!cur_thread->IsTerminationRequested()) {
@@ -186,27 +186,27 @@ void KProcess::PinCurrentThread(s32 core_id) {
         cur_thread->Pin(core_id);

         // An update is needed.
-        KScheduler::SetSchedulerUpdateNeeded(kernel);
+        KScheduler::SetSchedulerUpdateNeeded(m_kernel);
     }
 }

 void KProcess::UnpinCurrentThread(s32 core_id) {
-    ASSERT(kernel.GlobalSchedulerContext().IsLocked());
+    ASSERT(m_kernel.GlobalSchedulerContext().IsLocked());

     // Get the current thread.
     KThread* cur_thread =
-        kernel.Scheduler(static_cast<std::size_t>(core_id)).GetSchedulerCurrentThread();
+        m_kernel.Scheduler(static_cast<std::size_t>(core_id)).GetSchedulerCurrentThread();

     // Unpin it.
     cur_thread->Unpin();
     UnpinThread(core_id, cur_thread);

     // An update is needed.
-    KScheduler::SetSchedulerUpdateNeeded(kernel);
+    KScheduler::SetSchedulerUpdateNeeded(m_kernel);
 }

 void KProcess::UnpinThread(KThread* thread) {
-    ASSERT(kernel.GlobalSchedulerContext().IsLocked());
+    ASSERT(m_kernel.GlobalSchedulerContext().IsLocked());

     // Get the thread's core id.
     const auto core_id = thread->GetActiveCore();
@@ -216,7 +216,7 @@ void KProcess::UnpinThread(KThread* thread) {
     thread->Unpin();

     // An update is needed.
-    KScheduler::SetSchedulerUpdateNeeded(kernel);
+    KScheduler::SetSchedulerUpdateNeeded(m_kernel);
 }

 Result KProcess::AddSharedMemory(KSharedMemory* shmem, [[maybe_unused]] VAddr address,
@@ -234,7 +234,7 @@ Result KProcess::AddSharedMemory(KSharedMemory* shmem, [[maybe_unused]] VAddr ad
     }

     if (shemen_info == nullptr) {
-        shemen_info = KSharedMemoryInfo::Allocate(kernel);
+        shemen_info = KSharedMemoryInfo::Allocate(m_kernel);
         R_UNLESS(shemen_info != nullptr, ResultOutOfMemory);

         shemen_info->Initialize(shmem);
@@ -265,7 +265,7 @@ void KProcess::RemoveSharedMemory(KSharedMemory* shmem, [[maybe_unused]] VAddr a

     if (shemen_info->Close()) {
         shared_memory_list.erase(iter);
-        KSharedMemoryInfo::Free(kernel, shemen_info);
+        KSharedMemoryInfo::Free(m_kernel, shemen_info);
     }

     // Close a reference to the shared memory.
@@ -298,7 +298,7 @@ u64 KProcess::GetFreeThreadCount() const {
 Result KProcess::Reset() {
     // Lock the process and the scheduler.
     KScopedLightLock lk(state_lock);
-    KScopedSchedulerLock sl{kernel};
+    KScopedSchedulerLock sl{m_kernel};

     // Validate that we're in a state that we can reset.
     R_UNLESS(state != State::Terminated, ResultInvalidState);
@@ -313,7 +313,7 @@ Result KProcess::SetActivity(ProcessActivity activity) {
     // Lock ourselves and the scheduler.
     KScopedLightLock lk{state_lock};
     KScopedLightLock list_lk{list_lock};
-    KScopedSchedulerLock sl{kernel};
+    KScopedSchedulerLock sl{m_kernel};

     // Validate our state.
     R_UNLESS(state != State::Terminating, ResultInvalidState);
@@ -366,7 +366,7 @@ Result KProcess::LoadFromMetadata(const FileSys::ProgramMetadata& metadata, std:
     // Initialize process address space
     if (const Result result{page_table.InitializeForProcess(
             metadata.GetAddressSpaceType(), false, false, false, KMemoryManager::Pool::Application,
-            0x8000000, code_size, &kernel.GetAppSystemResource(), resource_limit)};
+            0x8000000, code_size, &m_kernel.GetAppSystemResource(), resource_limit)};
         result.IsError()) {
         R_RETURN(result);
     }
@@ -421,7 +421,7 @@ void KProcess::Run(s32 main_thread_priority, u64 stack_size) {

     ChangeState(State::Running);

-    SetupMainThread(kernel.System(), *this, main_thread_priority, main_thread_stack_top);
+    SetupMainThread(m_kernel.System(), *this, main_thread_priority, main_thread_stack_top);
 }

 void KProcess::PrepareForTermination() {
@@ -432,7 +432,7 @@ void KProcess::PrepareForTermination() {
             if (thread->GetOwnerProcess() != this)
                 continue;

-            if (thread == GetCurrentThreadPointer(kernel))
+            if (thread == GetCurrentThreadPointer(m_kernel))
                 continue;

             // TODO(Subv): When are the other running/ready threads terminated?
@@ -443,7 +443,7 @@ void KProcess::PrepareForTermination() {
         }
     };

-    stop_threads(kernel.System().GlobalSchedulerContext().GetThreadList());
+    stop_threads(m_kernel.System().GlobalSchedulerContext().GetThreadList());

     this->DeleteThreadLocalRegion(plr_address);
     plr_address = 0;
@@ -471,7 +471,7 @@ void KProcess::Finalize() {
             shmem->Close();

             it = shared_memory_list.erase(it);
-            KSharedMemoryInfo::Free(kernel, info);
+            KSharedMemoryInfo::Free(m_kernel, info);
         }
     }

@@ -494,7 +494,7 @@ Result KProcess::CreateThreadLocalRegion(VAddr* out) {

     // See if we can get a region from a partially used TLP.
     {
-        KScopedSchedulerLock sl{kernel};
+        KScopedSchedulerLock sl{m_kernel};

         if (auto it = partially_used_tlp_tree.begin(); it != partially_used_tlp_tree.end()) {
             tlr = it->Reserve();
@@ -512,12 +512,12 @@ Result KProcess::CreateThreadLocalRegion(VAddr* out) {
     }

     // Allocate a new page.
-    tlp = KThreadLocalPage::Allocate(kernel);
+    tlp = KThreadLocalPage::Allocate(m_kernel);
     R_UNLESS(tlp != nullptr, ResultOutOfMemory);
-    auto tlp_guard = SCOPE_GUARD({ KThreadLocalPage::Free(kernel, tlp); });
+    auto tlp_guard = SCOPE_GUARD({ KThreadLocalPage::Free(m_kernel, tlp); });

     // Initialize the new page.
-    R_TRY(tlp->Initialize(kernel, this));
+    R_TRY(tlp->Initialize(m_kernel, this));

     // Reserve a TLR.
     tlr = tlp->Reserve();
@@ -525,7 +525,7 @@ Result KProcess::CreateThreadLocalRegion(VAddr* out) {

     // Insert into our tree.
     {
-        KScopedSchedulerLock sl{kernel};
+        KScopedSchedulerLock sl{m_kernel};
         if (tlp->IsAllUsed()) {
             fully_used_tlp_tree.insert(*tlp);
         } else {
@@ -544,7 +544,7 @@ Result KProcess::DeleteThreadLocalRegion(VAddr addr) {

     // Release the region.
     {
-        KScopedSchedulerLock sl{kernel};
+        KScopedSchedulerLock sl{m_kernel};

         // Try to find the page in the partially used list.
         auto it = partially_used_tlp_tree.find_key(Common::AlignDown(addr, PageSize));
@@ -581,7 +581,7 @@ Result KProcess::DeleteThreadLocalRegion(VAddr addr) {
     if (page_to_free != nullptr) {
         page_to_free->Finalize();

-        KThreadLocalPage::Free(kernel, page_to_free);
+        KThreadLocalPage::Free(m_kernel, page_to_free);
     }

     R_SUCCEED();
@@ -639,8 +639,8 @@ void KProcess::LoadModule(CodeSet code_set, VAddr base_addr) {
         page_table.SetProcessMemoryPermission(segment.addr + base_addr, segment.size, permission);
     };

-    kernel.System().Memory().WriteBlock(*this, base_addr, code_set.memory.data(),
-                                        code_set.memory.size());
+    m_kernel.System().Memory().WriteBlock(*this, base_addr, code_set.memory.data(),
+                                          code_set.memory.size());

     ReprotectSegment(code_set.CodeSegment(), Svc::MemoryPermission::ReadExecute);
     ReprotectSegment(code_set.RODataSegment(), Svc::MemoryPermission::Read);
@@ -648,14 +648,14 @@ void KProcess::LoadModule(CodeSet code_set, VAddr base_addr) {
 }

 bool KProcess::IsSignaled() const {
-    ASSERT(kernel.GlobalSchedulerContext().IsLocked());
+    ASSERT(m_kernel.GlobalSchedulerContext().IsLocked());
     return is_signaled;
 }

-KProcess::KProcess(KernelCore& kernel_)
-    : KAutoObjectWithSlabHeapAndContainer{kernel_}, page_table{kernel_.System()},
-      handle_table{kernel_}, address_arbiter{kernel_.System()}, condition_var{kernel_.System()},
-      state_lock{kernel_}, list_lock{kernel_} {}
+KProcess::KProcess(KernelCore& kernel)
+    : KAutoObjectWithSlabHeapAndContainer{kernel}, page_table{m_kernel.System()},
+      handle_table{m_kernel}, address_arbiter{m_kernel.System()}, condition_var{m_kernel.System()},
+      state_lock{m_kernel}, list_lock{m_kernel} {}

 KProcess::~KProcess() = default;

@@ -68,7 +68,7 @@ class KProcess final : public KAutoObjectWithSlabHeapAndContainer<KProcess, KWor
     KERNEL_AUTOOBJECT_TRAITS(KProcess, KSynchronizationObject);

 public:
-    explicit KProcess(KernelCore& kernel_);
+    explicit KProcess(KernelCore& kernel);
     ~KProcess() override;

     enum class State {

@@ -11,7 +11,7 @@

 namespace Kernel {

-KReadableEvent::KReadableEvent(KernelCore& kernel_) : KSynchronizationObject{kernel_} {}
+KReadableEvent::KReadableEvent(KernelCore& kernel) : KSynchronizationObject{kernel} {}

 KReadableEvent::~KReadableEvent() = default;

@@ -25,7 +25,7 @@ void KReadableEvent::Initialize(KEvent* parent) {
 }

 bool KReadableEvent::IsSignaled() const {
-    ASSERT(KScheduler::IsSchedulerLockedByCurrentThread(kernel));
+    ASSERT(KScheduler::IsSchedulerLockedByCurrentThread(m_kernel));

     return m_is_signaled;
 }
@@ -33,7 +33,7 @@ bool KReadableEvent::IsSignaled() const {
 void KReadableEvent::Destroy() {
     if (m_parent) {
         {
-            KScopedSchedulerLock sl{kernel};
+            KScopedSchedulerLock sl{m_kernel};
             m_parent->OnReadableEventDestroyed();
         }
         m_parent->Close();
@@ -41,7 +41,7 @@ void KReadableEvent::Destroy() {
 }

 Result KReadableEvent::Signal() {
-    KScopedSchedulerLock lk{kernel};
+    KScopedSchedulerLock lk{m_kernel};

     if (!m_is_signaled) {
         m_is_signaled = true;
@@ -58,7 +58,7 @@ Result KReadableEvent::Clear() {
 }

 Result KReadableEvent::Reset() {
-    KScopedSchedulerLock lk{kernel};
+    KScopedSchedulerLock lk{m_kernel};

     R_UNLESS(m_is_signaled, ResultInvalidState);

@@ -17,7 +17,7 @@ class KReadableEvent : public KSynchronizationObject {
     KERNEL_AUTOOBJECT_TRAITS(KReadableEvent, KSynchronizationObject);

 public:
-    explicit KReadableEvent(KernelCore& kernel_);
+    explicit KReadableEvent(KernelCore& kernel);
     ~KReadableEvent() override;

     void Initialize(KEvent* parent);

@@ -11,8 +11,8 @@
 namespace Kernel {
 constexpr s64 DefaultTimeout = 10000000000; // 10 seconds

-KResourceLimit::KResourceLimit(KernelCore& kernel_)
-    : KAutoObjectWithSlabHeapAndContainer{kernel_}, lock{kernel_}, cond_var{kernel_} {}
+KResourceLimit::KResourceLimit(KernelCore& kernel)
+    : KAutoObjectWithSlabHeapAndContainer{kernel}, lock{kernel}, cond_var{kernel} {}
 KResourceLimit::~KResourceLimit() = default;

 void KResourceLimit::Initialize(const Core::Timing::CoreTiming* core_timing_) {

@@ -27,7 +27,7 @@ static void IncrementScheduledCount(Kernel::KThread* thread) {
     }
 }

-KScheduler::KScheduler(KernelCore& kernel_) : kernel{kernel_} {
+KScheduler::KScheduler(KernelCore& kernel) : m_kernel{kernel} {
     m_switch_fiber = std::make_shared<Common::Fiber>([this] {
         while (true) {
             ScheduleImplFiber();
@@ -47,7 +47,7 @@ void KScheduler::SetInterruptTaskRunnable() {
 void KScheduler::RequestScheduleOnInterrupt() {
     m_state.needs_scheduling = true;

-    if (CanSchedule(kernel)) {
+    if (CanSchedule(m_kernel)) {
         ScheduleOnInterrupt();
     }
 }
@@ -97,50 +97,50 @@ u64 KScheduler::UpdateHighestPriorityThreads(KernelCore& kernel) {
 }

 void KScheduler::Schedule() {
-    ASSERT(GetCurrentThread(kernel).GetDisableDispatchCount() == 1);
-    ASSERT(m_core_id == GetCurrentCoreId(kernel));
+    ASSERT(GetCurrentThread(m_kernel).GetDisableDispatchCount() == 1);
+    ASSERT(m_core_id == GetCurrentCoreId(m_kernel));

     ScheduleImpl();
 }

 void KScheduler::ScheduleOnInterrupt() {
-    GetCurrentThread(kernel).DisableDispatch();
+    GetCurrentThread(m_kernel).DisableDispatch();
     Schedule();
-    GetCurrentThread(kernel).EnableDispatch();
+    GetCurrentThread(m_kernel).EnableDispatch();
 }

 void KScheduler::PreemptSingleCore() {
-    GetCurrentThread(kernel).DisableDispatch();
+    GetCurrentThread(m_kernel).DisableDispatch();

-    auto* thread = GetCurrentThreadPointer(kernel);
-    auto& previous_scheduler = kernel.Scheduler(thread->GetCurrentCore());
+    auto* thread = GetCurrentThreadPointer(m_kernel);
+    auto& previous_scheduler = m_kernel.Scheduler(thread->GetCurrentCore());
     previous_scheduler.Unload(thread);

     Common::Fiber::YieldTo(thread->GetHostContext(), *m_switch_fiber);

-    GetCurrentThread(kernel).EnableDispatch();
+    GetCurrentThread(m_kernel).EnableDispatch();
 }

 void KScheduler::RescheduleCurrentCore() {
-    ASSERT(!kernel.IsPhantomModeForSingleCore());
-    ASSERT(GetCurrentThread(kernel).GetDisableDispatchCount() == 1);
+    ASSERT(!m_kernel.IsPhantomModeForSingleCore());
+    ASSERT(GetCurrentThread(m_kernel).GetDisableDispatchCount() == 1);

-    GetCurrentThread(kernel).EnableDispatch();
+    GetCurrentThread(m_kernel).EnableDispatch();

     if (m_state.needs_scheduling.load()) {
         // Disable interrupts, and then check again if rescheduling is needed.
         // KScopedInterruptDisable intr_disable;

-        kernel.CurrentScheduler()->RescheduleCurrentCoreImpl();
+        m_kernel.CurrentScheduler()->RescheduleCurrentCoreImpl();
     }
 }

 void KScheduler::RescheduleCurrentCoreImpl() {
     // Check that scheduling is needed.
     if (m_state.needs_scheduling.load()) [[likely]] {
-        GetCurrentThread(kernel).DisableDispatch();
+        GetCurrentThread(m_kernel).DisableDispatch();
         Schedule();
-        GetCurrentThread(kernel).EnableDispatch();
+        GetCurrentThread(m_kernel).EnableDispatch();
     }
 }

@@ -153,14 +153,14 @@ void KScheduler::Initialize(KThread* main_thread, KThread* idle_thread, s32 core

     // Insert the main thread into the priority queue.
     // {
-    //     KScopedSchedulerLock lk{kernel};
-    //     GetPriorityQueue(kernel).PushBack(GetCurrentThreadPointer(kernel));
-    //     SetSchedulerUpdateNeeded(kernel);
+    //     KScopedSchedulerLock lk{m_kernel};
+    //     GetPriorityQueue(m_kernel).PushBack(GetCurrentThreadPointer(m_kernel));
+    //     SetSchedulerUpdateNeeded(m_kernel);
     // }

     // Bind interrupt handler.
     // kernel.GetInterruptManager().BindHandler(
-    //     GetSchedulerInterruptHandler(kernel), KInterruptName::Scheduler, m_core_id,
+    //     GetSchedulerInterruptHandler(m_kernel), KInterruptName::Scheduler, m_core_id,
     //     KInterruptController::PriorityLevel::Scheduler, false, false);

     // Set the current thread.
@@ -168,7 +168,7 @@ void KScheduler::Initialize(KThread* main_thread, KThread* idle_thread, s32 core
 }

 void KScheduler::Activate() {
-    ASSERT(GetCurrentThread(kernel).GetDisableDispatchCount() == 1);
+    ASSERT(GetCurrentThread(m_kernel).GetDisableDispatchCount() == 1);

     // m_state.should_count_idle = KTargetSystem::IsDebugMode();
     m_is_active = true;
@@ -176,7 +176,7 @@ void KScheduler::Activate() {
 }

 void KScheduler::OnThreadStart() {
-    GetCurrentThread(kernel).EnableDispatch();
+    GetCurrentThread(m_kernel).EnableDispatch();
 }

 u64 KScheduler::UpdateHighestPriorityThread(KThread* highest_thread) {
@@ -184,7 +184,7 @@ u64 KScheduler::UpdateHighestPriorityThread(KThread* highest_thread) {
         prev_highest_thread != highest_thread) [[likely]] {
         if (prev_highest_thread != nullptr) [[likely]] {
             IncrementScheduledCount(prev_highest_thread);
-            prev_highest_thread->SetLastScheduledTick(kernel.System().CoreTiming().GetCPUTicks());
+            prev_highest_thread->SetLastScheduledTick(m_kernel.System().CoreTiming().GetCPUTicks());
         }
         if (m_state.should_count_idle) {
             if (highest_thread != nullptr) [[likely]] {
@@ -328,8 +328,8 @@ u64 KScheduler::UpdateHighestPriorityThreadsImpl(KernelCore& kernel) {
 }

 void KScheduler::SwitchThread(KThread* next_thread) {
-    KProcess* const cur_process = GetCurrentProcessPointer(kernel);
-    KThread* const cur_thread = GetCurrentThreadPointer(kernel);
+    KProcess* const cur_process = GetCurrentProcessPointer(m_kernel);
+    KThread* const cur_thread = GetCurrentThreadPointer(m_kernel);

     // We never want to schedule a null thread, so use the idle thread if we don't have a next.
     if (next_thread == nullptr) {
@@ -351,7 +351,7 @@ void KScheduler::SwitchThread(KThread* next_thread) {

     // Update the CPU time tracking variables.
     const s64 prev_tick = m_last_context_switch_time;
-    const s64 cur_tick = kernel.System().CoreTiming().GetCPUTicks();
+    const s64 cur_tick = m_kernel.System().CoreTiming().GetCPUTicks();
     const s64 tick_diff = cur_tick - prev_tick;
     cur_thread->AddCpuTime(m_core_id, tick_diff);
     if (cur_process != nullptr) {
@@ -375,7 +375,7 @@ void KScheduler::SwitchThread(KThread* next_thread) {
     // }

     // Set the new thread.
-    SetCurrentThread(kernel, next_thread);
+    SetCurrentThread(m_kernel, next_thread);
     m_current_thread = next_thread;

     // Set the new Thread Local region.
@@ -388,7 +388,7 @@ void KScheduler::ScheduleImpl() {
     std::atomic_thread_fence(std::memory_order_seq_cst);

     // Load the appropriate thread pointers for scheduling.
-    KThread* const cur_thread{GetCurrentThreadPointer(kernel)};
+    KThread* const cur_thread{GetCurrentThreadPointer(m_kernel)};
     KThread* highest_priority_thread{m_state.highest_priority_thread};

     // Check whether there are runnable interrupt tasks.
@@ -493,7 +493,7 @@ void KScheduler::ScheduleImplFiber() {
 }

 void KScheduler::Unload(KThread* thread) {
-    auto& cpu_core = kernel.System().ArmInterface(m_core_id);
+    auto& cpu_core = m_kernel.System().ArmInterface(m_core_id);
     cpu_core.SaveContext(thread->GetContext32());
     cpu_core.SaveContext(thread->GetContext64());
     // Save the TPIDR_EL0 system register in case it was modified.
@@ -508,7 +508,7 @@ void KScheduler::Unload(KThread* thread) {
 }

 void KScheduler::Reload(KThread* thread) {
-    auto& cpu_core = kernel.System().ArmInterface(m_core_id);
+    auto& cpu_core = m_kernel.System().ArmInterface(m_core_id);
     cpu_core.LoadContext(thread->GetContext32());
     cpu_core.LoadContext(thread->GetContext64());
     cpu_core.SetTlsAddress(thread->GetTLSAddress());
@@ -891,7 +891,7 @@ void KScheduler::YieldToAnyThread(KernelCore& kernel) {

 void KScheduler::RescheduleOtherCores(u64 cores_needing_scheduling) {
     if (const u64 core_mask = cores_needing_scheduling & ~(1ULL << m_core_id); core_mask != 0) {
-        RescheduleCores(kernel, core_mask);
+        RescheduleCores(m_kernel, core_mask);
     }
 }

@@ -149,7 +149,7 @@ private:
         KInterruptTaskManager* interrupt_task_manager{nullptr};
     };

-    KernelCore& kernel;
+    KernelCore& m_kernel;
     SchedulingState m_state;
     bool m_is_active{false};
     s32 m_core_id{0};

@@ -12,7 +12,7 @@

 namespace Kernel {

-KServerPort::KServerPort(KernelCore& kernel_) : KSynchronizationObject{kernel_} {}
+KServerPort::KServerPort(KernelCore& kernel) : KSynchronizationObject{kernel} {}
 KServerPort::~KServerPort() = default;

 void KServerPort::Initialize(KPort* parent) {
@@ -35,7 +35,7 @@ void KServerPort::CleanupSessions() {
         // Get the last session in the list
         KServerSession* session = nullptr;
         {
-            KScopedSchedulerLock sl{kernel};
+            KScopedSchedulerLock sl{m_kernel};
             if (!m_session_list.empty()) {
                 session = std::addressof(m_session_list.front());
                 m_session_list.pop_front();
@@ -74,7 +74,7 @@ bool KServerPort::IsSignaled() const {
 void KServerPort::EnqueueSession(KServerSession* session) {
     ASSERT(!this->IsLight());

-    KScopedSchedulerLock sl{kernel};
+    KScopedSchedulerLock sl{m_kernel};

     // Add the session to our queue.
     m_session_list.push_back(*session);
@@ -86,7 +86,7 @@ void KServerPort::EnqueueSession(KServerSession* session) {
 KServerSession* KServerPort::AcceptSession() {
     ASSERT(!this->IsLight());

-    KScopedSchedulerLock sl{kernel};
+    KScopedSchedulerLock sl{m_kernel};

     // Return the first session in the list.
     if (m_session_list.empty()) {

@@ -22,7 +22,7 @@ class KServerPort final : public KSynchronizationObject {
     KERNEL_AUTOOBJECT_TRAITS(KServerPort, KSynchronizationObject);

 public:
-    explicit KServerPort(KernelCore& kernel_);
+    explicit KServerPort(KernelCore& kernel);
     ~KServerPort() override;

     void Initialize(KPort* parent);

@@ -28,8 +28,8 @@ namespace Kernel {

 using ThreadQueueImplForKServerSessionRequest = KThreadQueue;

-KServerSession::KServerSession(KernelCore& kernel_)
-    : KSynchronizationObject{kernel_}, m_lock{kernel_} {}
+KServerSession::KServerSession(KernelCore& kernel)
+    : KSynchronizationObject{kernel}, m_lock{m_kernel} {}

 KServerSession::~KServerSession() = default;

@@ -56,7 +56,7 @@ void KServerSession::OnClientClosed() {

         // Get the next request.
         {
-            KScopedSchedulerLock sl{kernel};
+            KScopedSchedulerLock sl{m_kernel};

             if (m_current_request != nullptr && m_current_request != prev_request) {
                 // Set the request, open a reference as we process it.
@@ -135,7 +135,7 @@ void KServerSession::OnClientClosed() {
 }

 bool KServerSession::IsSignaled() const {
-    ASSERT(KScheduler::IsSchedulerLockedByCurrentThread(kernel));
+    ASSERT(KScheduler::IsSchedulerLockedByCurrentThread(m_kernel));

     // If the client is closed, we're always signaled.
     if (m_parent->IsClientClosed()) {
@@ -148,17 +148,17 @@ bool KServerSession::IsSignaled() const {

 Result KServerSession::OnRequest(KSessionRequest* request) {
     // Create the wait queue.
-    ThreadQueueImplForKServerSessionRequest wait_queue{kernel};
+    ThreadQueueImplForKServerSessionRequest wait_queue{m_kernel};

     {
         // Lock the scheduler.
-        KScopedSchedulerLock sl{kernel};
+        KScopedSchedulerLock sl{m_kernel};

         // Ensure that we can handle new requests.
         R_UNLESS(!m_parent->IsServerClosed(), ResultSessionClosed);

         // Check that we're not terminating.
-        R_UNLESS(!GetCurrentThread(kernel).IsTerminationRequested(), ResultTerminationRequested);
+        R_UNLESS(!GetCurrentThread(m_kernel).IsTerminationRequested(), ResultTerminationRequested);

         // Get whether we're empty.
         const bool was_empty = m_request_list.empty();
@@ -176,11 +176,11 @@ Result KServerSession::OnRequest(KSessionRequest* request) {
         R_SUCCEED_IF(request->GetEvent() != nullptr);

         // This is a synchronous request, so we should wait for our request to complete.
-        GetCurrentThread(kernel).SetWaitReasonForDebugging(ThreadWaitReasonForDebugging::IPC);
-        GetCurrentThread(kernel).BeginWait(&wait_queue);
+        GetCurrentThread(m_kernel).SetWaitReasonForDebugging(ThreadWaitReasonForDebugging::IPC);
+        GetCurrentThread(m_kernel).BeginWait(&wait_queue);
     }

-    return GetCurrentThread(kernel).GetWaitResult();
+    return GetCurrentThread(m_kernel).GetWaitResult();
 }

 Result KServerSession::SendReply(bool is_hle) {
@@ -190,7 +190,7 @@ Result KServerSession::SendReply(bool is_hle) {
     // Get the request.
     KSessionRequest* request;
     {
-        KScopedSchedulerLock sl{kernel};
+        KScopedSchedulerLock sl{m_kernel};

         // Get the current request.
         request = m_current_request;
@@ -222,8 +222,8 @@ Result KServerSession::SendReply(bool is_hle) {
             // HLE servers write directly to a pointer to the thread command buffer. Therefore
             // the reply has already been written in this case.
         } else {
-            Core::Memory::Memory& memory{kernel.System().Memory()};
-            KThread* server_thread{GetCurrentThreadPointer(kernel)};
+            Core::Memory::Memory& memory{m_kernel.System().Memory()};
+            KThread* server_thread{GetCurrentThreadPointer(m_kernel)};
             UNIMPLEMENTED_IF(server_thread->GetOwnerProcess() != client_thread->GetOwnerProcess());

             auto* src_msg_buffer = memory.GetPointer(server_thread->GetTLSAddress());
@@ -264,7 +264,7 @@ Result KServerSession::SendReply(bool is_hle) {
             event->Signal();
         } else {
             // End the client thread's wait.
-            KScopedSchedulerLock sl{kernel};
+            KScopedSchedulerLock sl{m_kernel};

             if (!client_thread->IsTerminationRequested()) {
                 client_thread->EndWait(client_result);
@@ -285,7 +285,7 @@ Result KServerSession::ReceiveRequest(std::shared_ptr<Service::HLERequestContext
     KThread* client_thread;

     {
-        KScopedSchedulerLock sl{kernel};
+        KScopedSchedulerLock sl{m_kernel};

         // Ensure that we can service the request.
         R_UNLESS(!m_parent->IsClientClosed(), ResultSessionClosed);
@@ -319,18 +319,18 @@ Result KServerSession::ReceiveRequest(std::shared_ptr<Service::HLERequestContext
     // bool recv_list_broken = false;

     // Receive the message.
-    Core::Memory::Memory& memory{kernel.System().Memory()};
+    Core::Memory::Memory& memory{m_kernel.System().Memory()};
     if (out_context != nullptr) {
         // HLE request.
         u32* cmd_buf{reinterpret_cast<u32*>(memory.GetPointer(client_message))};
         *out_context =
-            std::make_shared<Service::HLERequestContext>(kernel, memory, this, client_thread);
+            std::make_shared<Service::HLERequestContext>(m_kernel, memory, this, client_thread);
         (*out_context)->SetSessionRequestManager(manager);
         (*out_context)
             ->PopulateFromIncomingCommandBuffer(client_thread->GetOwnerProcess()->GetHandleTable(),
                                                 cmd_buf);
     } else {
-        KThread* server_thread{GetCurrentThreadPointer(kernel)};
+        KThread* server_thread{GetCurrentThreadPointer(m_kernel)};
         UNIMPLEMENTED_IF(server_thread->GetOwnerProcess() != client_thread->GetOwnerProcess());

         auto* src_msg_buffer = memory.GetPointer(client_message);
@@ -350,7 +350,7 @@ void KServerSession::CleanupRequests() {
         // Get the next request.
         KSessionRequest* request = nullptr;
         {
-            KScopedSchedulerLock sl{kernel};
+            KScopedSchedulerLock sl{m_kernel};

             if (m_current_request) {
                 // Choose the current request if we have one.
@@ -401,7 +401,7 @@ void KServerSession::CleanupRequests() {
                 event->Signal();
             } else {
                 // End the client thread's wait.
-                KScopedSchedulerLock sl{kernel};
+                KScopedSchedulerLock sl{m_kernel};

                 if (!client_thread->IsTerminationRequested()) {
                     client_thread->EndWait(ResultSessionClosed);

@@ -33,7 +33,7 @@ class KServerSession final : public KSynchronizationObject,
     friend class ServiceThread;

 public:
-    explicit KServerSession(KernelCore& kernel_);
+    explicit KServerSession(KernelCore& kernel);
     ~KServerSession() override;

     void Destroy() override;

@@ -9,8 +9,8 @@

 namespace Kernel {

-KSession::KSession(KernelCore& kernel_)
-    : KAutoObjectWithSlabHeapAndContainer{kernel_}, m_server{kernel_}, m_client{kernel_} {}
+KSession::KSession(KernelCore& kernel)
+    : KAutoObjectWithSlabHeapAndContainer{kernel}, m_server{kernel}, m_client{kernel} {}
 KSession::~KSession() = default;

 void KSession::Initialize(KClientPort* client_port, uintptr_t name) {
@@ -34,7 +34,7 @@ void KSession::Initialize(KClientPort* client_port, uintptr_t name) {

     // Set our owner process.
     //! FIXME: this is the wrong process!
-    m_process = kernel.ApplicationProcess();
+    m_process = m_kernel.ApplicationProcess();
     m_process->Open();

     // Set our port.

@@ -18,7 +18,7 @@ class KSession final : public KAutoObjectWithSlabHeapAndContainer<KSession, KAut
     KERNEL_AUTOOBJECT_TRAITS(KSession, KAutoObject);

 public:
-    explicit KSession(KernelCore& kernel_);
+    explicit KSession(KernelCore& kernel);
     ~KSession() override;

     void Initialize(KClientPort* port, uintptr_t name);

@@ -158,7 +158,7 @@ public:
     };

 public:
-    explicit KSessionRequest(KernelCore& kernel_) : KAutoObject(kernel_), m_mappings(kernel_) {}
+    explicit KSessionRequest(KernelCore& kernel) : KAutoObject(kernel), m_mappings(kernel) {}

     static KSessionRequest* Create(KernelCore& kernel) {
         KSessionRequest* req = KSessionRequest::Allocate(kernel);
@@ -170,13 +170,13 @@ public:

     void Destroy() override {
         this->Finalize();
-        KSessionRequest::Free(kernel, this);
+        KSessionRequest::Free(m_kernel, this);
     }

     void Initialize(KEvent* event, uintptr_t address, size_t size) {
         m_mappings.Initialize();

-        m_thread = GetCurrentThreadPointer(kernel);
+        m_thread = GetCurrentThreadPointer(m_kernel);
         m_event = event;
         m_address = address;
         m_size = size;

@@ -12,7 +12,7 @@

namespace Kernel {

KSharedMemory::KSharedMemory(KernelCore& kernel_) : KAutoObjectWithSlabHeapAndContainer{kernel_} {}
KSharedMemory::KSharedMemory(KernelCore& kernel) : KAutoObjectWithSlabHeapAndContainer{kernel} {}
KSharedMemory::~KSharedMemory() = default;

Result KSharedMemory::Initialize(Core::DeviceMemory& device_memory, KProcess* owner_process,
@@ -28,7 +28,7 @@ Result KSharedMemory::Initialize(Core::DeviceMemory& device_memory, KProcess* ow
    const size_t num_pages = Common::DivideUp(size, PageSize);

    // Get the resource limit.
    KResourceLimit* reslimit = kernel.GetSystemResourceLimit();
    KResourceLimit* reslimit = m_kernel.GetSystemResourceLimit();

    // Reserve memory for ourselves.
    KScopedResourceReservation memory_reservation(reslimit, LimitableResource::PhysicalMemoryMax,
@@ -40,11 +40,11 @@ Result KSharedMemory::Initialize(Core::DeviceMemory& device_memory, KProcess* ow
    //! HACK: Open continuous mapping from sysmodule pool.
    auto option = KMemoryManager::EncodeOption(KMemoryManager::Pool::Secure,
                                               KMemoryManager::Direction::FromBack);
    m_physical_address = kernel.MemoryManager().AllocateAndOpenContinuous(num_pages, 1, option);
    m_physical_address = m_kernel.MemoryManager().AllocateAndOpenContinuous(num_pages, 1, option);
    R_UNLESS(m_physical_address != 0, ResultOutOfMemory);

    //! Insert the result into our page group.
    m_page_group.emplace(kernel, &kernel.GetSystemSystemResource().GetBlockInfoManager());
    m_page_group.emplace(m_kernel, &m_kernel.GetSystemSystemResource().GetBlockInfoManager());
    m_page_group->AddBlock(m_physical_address, num_pages);

    // Commit our reservation.
 
@@ -23,7 +23,7 @@ class KSharedMemory final
    KERNEL_AUTOOBJECT_TRAITS(KSharedMemory, KAutoObject);

public:
    explicit KSharedMemory(KernelCore& kernel_);
    explicit KSharedMemory(KernelCore& kernel);
    ~KSharedMemory() override;

    Result Initialize(Core::DeviceMemory& device_memory_, KProcess* owner_process_,
 
@@ -17,9 +17,9 @@ namespace {

class ThreadQueueImplForKSynchronizationObjectWait final : public KThreadQueueWithoutEndWait {
public:
    ThreadQueueImplForKSynchronizationObjectWait(KernelCore& kernel_, KSynchronizationObject** o,
    ThreadQueueImplForKSynchronizationObjectWait(KernelCore& kernel, KSynchronizationObject** o,
                                                 KSynchronizationObject::ThreadListNode* n, s32 c)
        : KThreadQueueWithoutEndWait(kernel_), m_objects(o), m_nodes(n), m_count(c) {}
        : KThreadQueueWithoutEndWait(kernel), m_objects(o), m_nodes(n), m_count(c) {}

    void NotifyAvailable(KThread* waiting_thread, KSynchronizationObject* signaled_object,
                         Result wait_result) override {
@@ -144,13 +144,12 @@ Result KSynchronizationObject::Wait(KernelCore& kernel, s32* out_index,
    R_RETURN(thread->GetWaitResult());
}

KSynchronizationObject::KSynchronizationObject(KernelCore& kernel_)
    : KAutoObjectWithList{kernel_} {}
KSynchronizationObject::KSynchronizationObject(KernelCore& kernel) : KAutoObjectWithList{kernel} {}

KSynchronizationObject::~KSynchronizationObject() = default;

void KSynchronizationObject::NotifyAvailable(Result result) {
    KScopedSchedulerLock sl(kernel);
    KScopedSchedulerLock sl(m_kernel);

    // If we're not signaled, we've nothing to notify.
    if (!this->IsSignaled()) {
@@ -168,7 +167,7 @@ std::vector<KThread*> KSynchronizationObject::GetWaitingThreadsForDebugging() co

    // If debugging, dump the list of waiters.
    {
        KScopedSchedulerLock lock(kernel);
        KScopedSchedulerLock lock(m_kernel);
        for (auto* cur_node = m_thread_list_head; cur_node != nullptr; cur_node = cur_node->next) {
            threads.emplace_back(cur_node->thread);
        }
 
@@ -21,7 +21,7 @@ class KSystemResource : public KAutoObject {
    KERNEL_AUTOOBJECT_TRAITS(KSystemResource, KAutoObject);

public:
    explicit KSystemResource(KernelCore& kernel_) : KAutoObject(kernel_) {}
    explicit KSystemResource(KernelCore& kernel) : KAutoObject(kernel) {}

protected:
    void SetSecureResource() {
@@ -87,8 +87,8 @@ private:
class KSecureSystemResource final
    : public KAutoObjectWithSlabHeap<KSecureSystemResource, KSystemResource> {
public:
    explicit KSecureSystemResource(KernelCore& kernel_)
        : KAutoObjectWithSlabHeap<KSecureSystemResource, KSystemResource>(kernel_) {
    explicit KSecureSystemResource(KernelCore& kernel)
        : KAutoObjectWithSlabHeap<KSecureSystemResource, KSystemResource>(kernel) {
        // Mark ourselves as being a secure resource.
        this->SetSecureResource();
    }
 
@@ -77,14 +77,14 @@ struct ThreadLocalRegion {
 | 
			
		||||
 | 
			
		||||
class ThreadQueueImplForKThreadSleep final : public KThreadQueueWithoutEndWait {
 | 
			
		||||
public:
 | 
			
		||||
    explicit ThreadQueueImplForKThreadSleep(KernelCore& kernel_)
 | 
			
		||||
        : KThreadQueueWithoutEndWait(kernel_) {}
 | 
			
		||||
    explicit ThreadQueueImplForKThreadSleep(KernelCore& kernel)
 | 
			
		||||
        : KThreadQueueWithoutEndWait(kernel) {}
 | 
			
		||||
};
 | 
			
		||||
 | 
			
		||||
class ThreadQueueImplForKThreadSetProperty final : public KThreadQueue {
 | 
			
		||||
public:
 | 
			
		||||
    explicit ThreadQueueImplForKThreadSetProperty(KernelCore& kernel_, KThread::WaiterList* wl)
 | 
			
		||||
        : KThreadQueue(kernel_), m_wait_list(wl) {}
 | 
			
		||||
    explicit ThreadQueueImplForKThreadSetProperty(KernelCore& kernel, KThread::WaiterList* wl)
 | 
			
		||||
        : KThreadQueue(kernel), m_wait_list(wl) {}
 | 
			
		||||
 | 
			
		||||
    void CancelWait(KThread* waiting_thread, Result wait_result, bool cancel_timer_task) override {
 | 
			
		||||
        // Remove the thread from the wait list.
 | 
			
		||||
@@ -100,8 +100,8 @@ private:
 | 
			
		||||
 | 
			
		||||
} // namespace
 | 
			
		||||
 | 
			
		||||
KThread::KThread(KernelCore& kernel_)
 | 
			
		||||
    : KAutoObjectWithSlabHeapAndContainer{kernel_}, activity_pause_lock{kernel_} {}
 | 
			
		||||
KThread::KThread(KernelCore& kernel)
 | 
			
		||||
    : KAutoObjectWithSlabHeapAndContainer{kernel}, activity_pause_lock{kernel} {}
 | 
			
		||||
KThread::~KThread() = default;
 | 
			
		||||
 | 
			
		||||
Result KThread::Initialize(KThreadFunction func, uintptr_t arg, VAddr user_stack_top, s32 prio,
 | 
			
		||||
@@ -236,7 +236,7 @@ Result KThread::Initialize(KThreadFunction func, uintptr_t arg, VAddr user_stack
 | 
			
		||||
    SetInExceptionHandler();
 | 
			
		||||
 | 
			
		||||
    // Set thread ID.
 | 
			
		||||
    thread_id = kernel.CreateNewThreadID();
 | 
			
		||||
    thread_id = m_kernel.CreateNewThreadID();
 | 
			
		||||
 | 
			
		||||
    // We initialized!
 | 
			
		||||
    initialized = true;
 | 
			
		||||
@@ -343,7 +343,7 @@ void KThread::Finalize() {
 | 
			
		||||
    // Release any waiters.
 | 
			
		||||
    {
 | 
			
		||||
        ASSERT(waiting_lock_info == nullptr);
 | 
			
		||||
        KScopedSchedulerLock sl{kernel};
 | 
			
		||||
        KScopedSchedulerLock sl{m_kernel};
 | 
			
		||||
 | 
			
		||||
        // Check that we have no kernel waiters.
 | 
			
		||||
        ASSERT(num_kernel_waiters == 0);
 | 
			
		||||
@@ -374,7 +374,7 @@ void KThread::Finalize() {
 | 
			
		||||
            it = held_lock_info_list.erase(it);
 | 
			
		||||
 | 
			
		||||
            // Free the lock info.
 | 
			
		||||
            LockWithPriorityInheritanceInfo::Free(kernel, lock_info);
 | 
			
		||||
            LockWithPriorityInheritanceInfo::Free(m_kernel, lock_info);
 | 
			
		||||
        }
 | 
			
		||||
    }
 | 
			
		||||
 | 
			
		||||
@@ -390,7 +390,7 @@ bool KThread::IsSignaled() const {
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
void KThread::OnTimer() {
 | 
			
		||||
    ASSERT(kernel.GlobalSchedulerContext().IsLocked());
 | 
			
		||||
    ASSERT(m_kernel.GlobalSchedulerContext().IsLocked());
 | 
			
		||||
 | 
			
		||||
    // If we're waiting, cancel the wait.
 | 
			
		||||
    if (GetState() == ThreadState::Waiting) {
 | 
			
		||||
@@ -399,12 +399,12 @@ void KThread::OnTimer() {
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
void KThread::StartTermination() {
 | 
			
		||||
    ASSERT(kernel.GlobalSchedulerContext().IsLocked());
 | 
			
		||||
    ASSERT(m_kernel.GlobalSchedulerContext().IsLocked());
 | 
			
		||||
 | 
			
		||||
    // Release user exception and unpin, if relevant.
 | 
			
		||||
    if (parent != nullptr) {
 | 
			
		||||
        parent->ReleaseUserException(this);
 | 
			
		||||
        if (parent->GetPinnedThread(GetCurrentCoreId(kernel)) == this) {
 | 
			
		||||
        if (parent->GetPinnedThread(GetCurrentCoreId(m_kernel)) == this) {
 | 
			
		||||
            parent->UnpinCurrentThread(core_id);
 | 
			
		||||
        }
 | 
			
		||||
    }
 | 
			
		||||
@@ -422,7 +422,7 @@ void KThread::StartTermination() {
 | 
			
		||||
    KSynchronizationObject::NotifyAvailable();
 | 
			
		||||
 | 
			
		||||
    // Clear previous thread in KScheduler.
 | 
			
		||||
    KScheduler::ClearPreviousThread(kernel, this);
 | 
			
		||||
    KScheduler::ClearPreviousThread(m_kernel, this);
 | 
			
		||||
 | 
			
		||||
    // Register terminated dpc flag.
 | 
			
		||||
    RegisterDpc(DpcFlag::Terminated);
 | 
			
		||||
@@ -434,7 +434,7 @@ void KThread::FinishTermination() {
 | 
			
		||||
        for (std::size_t i = 0; i < static_cast<std::size_t>(Core::Hardware::NUM_CPU_CORES); ++i) {
 | 
			
		||||
            KThread* core_thread{};
 | 
			
		||||
            do {
 | 
			
		||||
                core_thread = kernel.Scheduler(i).GetSchedulerCurrentThread();
 | 
			
		||||
                core_thread = m_kernel.Scheduler(i).GetSchedulerCurrentThread();
 | 
			
		||||
            } while (core_thread == this);
 | 
			
		||||
        }
 | 
			
		||||
    }
 | 
			
		||||
@@ -449,7 +449,7 @@ void KThread::DoWorkerTaskImpl() {
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
void KThread::Pin(s32 current_core) {
 | 
			
		||||
    ASSERT(kernel.GlobalSchedulerContext().IsLocked());
 | 
			
		||||
    ASSERT(m_kernel.GlobalSchedulerContext().IsLocked());
 | 
			
		||||
 | 
			
		||||
    // Set ourselves as pinned.
 | 
			
		||||
    GetStackParameters().is_pinned = true;
 | 
			
		||||
@@ -472,7 +472,7 @@ void KThread::Pin(s32 current_core) {
 | 
			
		||||
 | 
			
		||||
        if (active_core != current_core || physical_affinity_mask.GetAffinityMask() !=
 | 
			
		||||
                                               original_physical_affinity_mask.GetAffinityMask()) {
 | 
			
		||||
            KScheduler::OnThreadAffinityMaskChanged(kernel, this, original_physical_affinity_mask,
 | 
			
		||||
            KScheduler::OnThreadAffinityMaskChanged(m_kernel, this, original_physical_affinity_mask,
 | 
			
		||||
                                                    active_core);
 | 
			
		||||
        }
 | 
			
		||||
    }
 | 
			
		||||
@@ -492,7 +492,7 @@ void KThread::Pin(s32 current_core) {
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
void KThread::Unpin() {
 | 
			
		||||
    ASSERT(kernel.GlobalSchedulerContext().IsLocked());
 | 
			
		||||
    ASSERT(m_kernel.GlobalSchedulerContext().IsLocked());
 | 
			
		||||
 | 
			
		||||
    // Set ourselves as unpinned.
 | 
			
		||||
    GetStackParameters().is_pinned = false;
 | 
			
		||||
@@ -520,7 +520,7 @@ void KThread::Unpin() {
 | 
			
		||||
                        std::countl_zero(physical_affinity_mask.GetAffinityMask())));
 | 
			
		||||
                }
 | 
			
		||||
            }
 | 
			
		||||
            KScheduler::OnThreadAffinityMaskChanged(kernel, this, old_mask, active_core);
 | 
			
		||||
            KScheduler::OnThreadAffinityMaskChanged(m_kernel, this, old_mask, active_core);
 | 
			
		||||
        }
 | 
			
		||||
    }
 | 
			
		||||
 | 
			
		||||
@@ -549,7 +549,7 @@ u16 KThread::GetUserDisableCount() const {
 | 
			
		||||
        return {};
 | 
			
		||||
    }
 | 
			
		||||
 | 
			
		||||
    auto& memory = kernel.System().Memory();
 | 
			
		||||
    auto& memory = m_kernel.System().Memory();
 | 
			
		||||
    return memory.Read16(tls_address + offsetof(ThreadLocalRegion, disable_count));
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
@@ -559,7 +559,7 @@ void KThread::SetInterruptFlag() {
 | 
			
		||||
        return;
 | 
			
		||||
    }
 | 
			
		||||
 | 
			
		||||
    auto& memory = kernel.System().Memory();
 | 
			
		||||
    auto& memory = m_kernel.System().Memory();
 | 
			
		||||
    memory.Write16(tls_address + offsetof(ThreadLocalRegion, interrupt_flag), 1);
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
@@ -569,12 +569,12 @@ void KThread::ClearInterruptFlag() {
 | 
			
		||||
        return;
 | 
			
		||||
    }
 | 
			
		||||
 | 
			
		||||
    auto& memory = kernel.System().Memory();
 | 
			
		||||
    auto& memory = m_kernel.System().Memory();
 | 
			
		||||
    memory.Write16(tls_address + offsetof(ThreadLocalRegion, interrupt_flag), 0);
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
Result KThread::GetCoreMask(s32* out_ideal_core, u64* out_affinity_mask) {
 | 
			
		||||
    KScopedSchedulerLock sl{kernel};
 | 
			
		||||
    KScopedSchedulerLock sl{m_kernel};
 | 
			
		||||
 | 
			
		||||
    // Get the virtual mask.
 | 
			
		||||
    *out_ideal_core = virtual_ideal_core_id;
 | 
			
		||||
@@ -584,7 +584,7 @@ Result KThread::GetCoreMask(s32* out_ideal_core, u64* out_affinity_mask) {
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
Result KThread::GetPhysicalCoreMask(s32* out_ideal_core, u64* out_affinity_mask) {
 | 
			
		||||
    KScopedSchedulerLock sl{kernel};
 | 
			
		||||
    KScopedSchedulerLock sl{m_kernel};
 | 
			
		||||
    ASSERT(num_core_migration_disables >= 0);
 | 
			
		||||
 | 
			
		||||
    // Select between core mask and original core mask.
 | 
			
		||||
@@ -607,7 +607,7 @@ Result KThread::SetCoreMask(s32 core_id_, u64 v_affinity_mask) {
 | 
			
		||||
    // Set the core mask.
 | 
			
		||||
    u64 p_affinity_mask = 0;
 | 
			
		||||
    {
 | 
			
		||||
        KScopedSchedulerLock sl(kernel);
 | 
			
		||||
        KScopedSchedulerLock sl(m_kernel);
 | 
			
		||||
        ASSERT(num_core_migration_disables >= 0);
 | 
			
		||||
 | 
			
		||||
        // If we're updating, set our ideal virtual core.
 | 
			
		||||
@@ -653,7 +653,7 @@ Result KThread::SetCoreMask(s32 core_id_, u64 v_affinity_mask) {
 | 
			
		||||
                                  std::countl_zero(physical_affinity_mask.GetAffinityMask()));
 | 
			
		||||
                    SetActiveCore(new_core);
 | 
			
		||||
                }
 | 
			
		||||
                KScheduler::OnThreadAffinityMaskChanged(kernel, this, old_mask, active_core);
 | 
			
		||||
                KScheduler::OnThreadAffinityMaskChanged(m_kernel, this, old_mask, active_core);
 | 
			
		||||
            }
 | 
			
		||||
        } else {
 | 
			
		||||
            // Otherwise, we edit the original affinity for restoration later.
 | 
			
		||||
@@ -663,12 +663,12 @@ Result KThread::SetCoreMask(s32 core_id_, u64 v_affinity_mask) {
 | 
			
		||||
    }
 | 
			
		||||
 | 
			
		||||
    // Update the pinned waiter list.
 | 
			
		||||
    ThreadQueueImplForKThreadSetProperty wait_queue_(kernel, std::addressof(pinned_waiter_list));
 | 
			
		||||
    ThreadQueueImplForKThreadSetProperty wait_queue_(m_kernel, std::addressof(pinned_waiter_list));
 | 
			
		||||
    {
 | 
			
		||||
        bool retry_update{};
 | 
			
		||||
        do {
 | 
			
		||||
            // Lock the scheduler.
 | 
			
		||||
            KScopedSchedulerLock sl(kernel);
 | 
			
		||||
            KScopedSchedulerLock sl(m_kernel);
 | 
			
		||||
 | 
			
		||||
            // Don't do any further management if our termination has been requested.
 | 
			
		||||
            R_SUCCEED_IF(IsTerminationRequested());
 | 
			
		||||
@@ -681,7 +681,7 @@ Result KThread::SetCoreMask(s32 core_id_, u64 v_affinity_mask) {
 | 
			
		||||
            s32 thread_core;
 | 
			
		||||
            for (thread_core = 0; thread_core < static_cast<s32>(Core::Hardware::NUM_CPU_CORES);
 | 
			
		||||
                 ++thread_core) {
 | 
			
		||||
                if (kernel.Scheduler(thread_core).GetSchedulerCurrentThread() == this) {
 | 
			
		||||
                if (m_kernel.Scheduler(thread_core).GetSchedulerCurrentThread() == this) {
 | 
			
		||||
                    thread_is_current = true;
 | 
			
		||||
                    break;
 | 
			
		||||
                }
 | 
			
		||||
@@ -693,12 +693,12 @@ Result KThread::SetCoreMask(s32 core_id_, u64 v_affinity_mask) {
 | 
			
		||||
                // If the thread is pinned, we want to wait until it's not pinned.
 | 
			
		||||
                if (GetStackParameters().is_pinned) {
 | 
			
		||||
                    // Verify that the current thread isn't terminating.
 | 
			
		||||
                    R_UNLESS(!GetCurrentThread(kernel).IsTerminationRequested(),
 | 
			
		||||
                    R_UNLESS(!GetCurrentThread(m_kernel).IsTerminationRequested(),
 | 
			
		||||
                             ResultTerminationRequested);
 | 
			
		||||
 | 
			
		||||
                    // Wait until the thread isn't pinned any more.
 | 
			
		||||
                    pinned_waiter_list.push_back(GetCurrentThread(kernel));
 | 
			
		||||
                    GetCurrentThread(kernel).BeginWait(std::addressof(wait_queue_));
 | 
			
		||||
                    pinned_waiter_list.push_back(GetCurrentThread(m_kernel));
 | 
			
		||||
                    GetCurrentThread(m_kernel).BeginWait(std::addressof(wait_queue_));
 | 
			
		||||
                } else {
 | 
			
		||||
                    // If the thread isn't pinned, release the scheduler lock and retry until it's
 | 
			
		||||
                    // not current.
 | 
			
		||||
@@ -714,13 +714,13 @@ Result KThread::SetCoreMask(s32 core_id_, u64 v_affinity_mask) {
 | 
			
		||||
void KThread::SetBasePriority(s32 value) {
 | 
			
		||||
    ASSERT(Svc::HighestThreadPriority <= value && value <= Svc::LowestThreadPriority);
 | 
			
		||||
 | 
			
		||||
    KScopedSchedulerLock sl{kernel};
 | 
			
		||||
    KScopedSchedulerLock sl{m_kernel};
 | 
			
		||||
 | 
			
		||||
    // Change our base priority.
 | 
			
		||||
    base_priority = value;
 | 
			
		||||
 | 
			
		||||
    // Perform a priority restoration.
 | 
			
		||||
    RestorePriority(kernel, this);
 | 
			
		||||
    RestorePriority(m_kernel, this);
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
KThread* KThread::GetLockOwner() const {
 | 
			
		||||
@@ -729,7 +729,7 @@ KThread* KThread::GetLockOwner() const {
 | 
			
		||||
 | 
			
		||||
void KThread::IncreaseBasePriority(s32 priority_) {
 | 
			
		||||
    ASSERT(Svc::HighestThreadPriority <= priority_ && priority_ <= Svc::LowestThreadPriority);
 | 
			
		||||
    ASSERT(KScheduler::IsSchedulerLockedByCurrentThread(kernel));
 | 
			
		||||
    ASSERT(KScheduler::IsSchedulerLockedByCurrentThread(m_kernel));
 | 
			
		||||
    ASSERT(!this->GetStackParameters().is_pinned);
 | 
			
		||||
 | 
			
		||||
    // Set our base priority.
 | 
			
		||||
@@ -737,12 +737,12 @@ void KThread::IncreaseBasePriority(s32 priority_) {
 | 
			
		||||
        base_priority = priority_;
 | 
			
		||||
 | 
			
		||||
        // Perform a priority restoration.
 | 
			
		||||
        RestorePriority(kernel, this);
 | 
			
		||||
        RestorePriority(m_kernel, this);
 | 
			
		||||
    }
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
void KThread::RequestSuspend(SuspendType type) {
 | 
			
		||||
    KScopedSchedulerLock sl{kernel};
 | 
			
		||||
    KScopedSchedulerLock sl{m_kernel};
 | 
			
		||||
 | 
			
		||||
    // Note the request in our flags.
 | 
			
		||||
    suspend_request_flags |=
 | 
			
		||||
@@ -753,7 +753,7 @@ void KThread::RequestSuspend(SuspendType type) {
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
void KThread::Resume(SuspendType type) {
 | 
			
		||||
    KScopedSchedulerLock sl{kernel};
 | 
			
		||||
    KScopedSchedulerLock sl{m_kernel};
 | 
			
		||||
 | 
			
		||||
    // Clear the request in our flags.
 | 
			
		||||
    suspend_request_flags &=
 | 
			
		||||
@@ -764,7 +764,7 @@ void KThread::Resume(SuspendType type) {
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
void KThread::WaitCancel() {
 | 
			
		||||
    KScopedSchedulerLock sl{kernel};
 | 
			
		||||
    KScopedSchedulerLock sl{m_kernel};
 | 
			
		||||
 | 
			
		||||
    // Check if we're waiting and cancellable.
 | 
			
		||||
    if (this->GetState() == ThreadState::Waiting && cancellable) {
 | 
			
		||||
@@ -777,7 +777,7 @@ void KThread::WaitCancel() {
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
void KThread::TrySuspend() {
 | 
			
		||||
    ASSERT(kernel.GlobalSchedulerContext().IsLocked());
 | 
			
		||||
    ASSERT(m_kernel.GlobalSchedulerContext().IsLocked());
 | 
			
		||||
    ASSERT(IsSuspendRequested());
 | 
			
		||||
 | 
			
		||||
    // Ensure that we have no waiters.
 | 
			
		||||
@@ -791,7 +791,7 @@ void KThread::TrySuspend() {
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
void KThread::UpdateState() {
 | 
			
		||||
    ASSERT(kernel.GlobalSchedulerContext().IsLocked());
 | 
			
		||||
    ASSERT(m_kernel.GlobalSchedulerContext().IsLocked());
 | 
			
		||||
 | 
			
		||||
    // Set our suspend flags in state.
 | 
			
		||||
    const ThreadState old_state = thread_state.load(std::memory_order_relaxed);
 | 
			
		||||
@@ -801,37 +801,37 @@ void KThread::UpdateState() {
 | 
			
		||||
 | 
			
		||||
    // Note the state change in scheduler.
 | 
			
		||||
    if (new_state != old_state) {
 | 
			
		||||
        KScheduler::OnThreadStateChanged(kernel, this, old_state);
 | 
			
		||||
        KScheduler::OnThreadStateChanged(m_kernel, this, old_state);
 | 
			
		||||
    }
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
void KThread::Continue() {
 | 
			
		||||
    ASSERT(kernel.GlobalSchedulerContext().IsLocked());
 | 
			
		||||
    ASSERT(m_kernel.GlobalSchedulerContext().IsLocked());
 | 
			
		||||
 | 
			
		||||
    // Clear our suspend flags in state.
 | 
			
		||||
    const ThreadState old_state = thread_state.load(std::memory_order_relaxed);
 | 
			
		||||
    thread_state.store(old_state & ThreadState::Mask, std::memory_order_relaxed);
 | 
			
		||||
 | 
			
		||||
    // Note the state change in scheduler.
 | 
			
		||||
    KScheduler::OnThreadStateChanged(kernel, this, old_state);
 | 
			
		||||
    KScheduler::OnThreadStateChanged(m_kernel, this, old_state);
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
void KThread::CloneFpuStatus() {
 | 
			
		||||
    // We shouldn't reach here when starting kernel threads.
 | 
			
		||||
    ASSERT(this->GetOwnerProcess() != nullptr);
 | 
			
		||||
    ASSERT(this->GetOwnerProcess() == GetCurrentProcessPointer(kernel));
 | 
			
		||||
    ASSERT(this->GetOwnerProcess() == GetCurrentProcessPointer(m_kernel));
 | 
			
		||||
 | 
			
		||||
    if (this->GetOwnerProcess()->Is64BitProcess()) {
 | 
			
		||||
        // Clone FPSR and FPCR.
 | 
			
		||||
        ThreadContext64 cur_ctx{};
 | 
			
		||||
        kernel.System().CurrentArmInterface().SaveContext(cur_ctx);
 | 
			
		||||
        m_kernel.System().CurrentArmInterface().SaveContext(cur_ctx);
 | 
			
		||||
 | 
			
		||||
        this->GetContext64().fpcr = cur_ctx.fpcr;
 | 
			
		||||
        this->GetContext64().fpsr = cur_ctx.fpsr;
 | 
			
		||||
    } else {
 | 
			
		||||
        // Clone FPSCR.
 | 
			
		||||
        ThreadContext32 cur_ctx{};
 | 
			
		||||
        kernel.System().CurrentArmInterface().SaveContext(cur_ctx);
 | 
			
		||||
        m_kernel.System().CurrentArmInterface().SaveContext(cur_ctx);
 | 
			
		||||
 | 
			
		||||
        this->GetContext32().fpscr = cur_ctx.fpscr;
 | 
			
		||||
    }
 | 
			
		||||
@@ -844,7 +844,7 @@ Result KThread::SetActivity(Svc::ThreadActivity activity) {
 | 
			
		||||
    // Set the activity.
 | 
			
		||||
    {
 | 
			
		||||
        // Lock the scheduler.
 | 
			
		||||
        KScopedSchedulerLock sl(kernel);
 | 
			
		||||
        KScopedSchedulerLock sl(m_kernel);
 | 
			
		||||
 | 
			
		||||
        // Verify our state.
 | 
			
		||||
        const auto cur_state = this->GetState();
 | 
			
		||||
@@ -871,13 +871,13 @@ Result KThread::SetActivity(Svc::ThreadActivity activity) {
 | 
			
		||||
 | 
			
		||||
    // If the thread is now paused, update the pinned waiter list.
 | 
			
		||||
    if (activity == Svc::ThreadActivity::Paused) {
 | 
			
		||||
        ThreadQueueImplForKThreadSetProperty wait_queue_(kernel,
 | 
			
		||||
        ThreadQueueImplForKThreadSetProperty wait_queue_(m_kernel,
 | 
			
		||||
                                                         std::addressof(pinned_waiter_list));
 | 
			
		||||
 | 
			
		||||
        bool thread_is_current;
 | 
			
		||||
        do {
 | 
			
		||||
            // Lock the scheduler.
 | 
			
		||||
            KScopedSchedulerLock sl(kernel);
 | 
			
		||||
            KScopedSchedulerLock sl(m_kernel);
 | 
			
		||||
 | 
			
		||||
            // Don't do any further management if our termination has been requested.
 | 
			
		||||
            R_SUCCEED_IF(this->IsTerminationRequested());
 | 
			
		||||
@@ -888,17 +888,17 @@ Result KThread::SetActivity(Svc::ThreadActivity activity) {
 | 
			
		||||
            // Check whether the thread is pinned.
 | 
			
		||||
            if (this->GetStackParameters().is_pinned) {
 | 
			
		||||
                // Verify that the current thread isn't terminating.
 | 
			
		||||
                R_UNLESS(!GetCurrentThread(kernel).IsTerminationRequested(),
 | 
			
		||||
                R_UNLESS(!GetCurrentThread(m_kernel).IsTerminationRequested(),
 | 
			
		||||
                         ResultTerminationRequested);
 | 
			
		||||
 | 
			
		||||
                // Wait until the thread isn't pinned any more.
 | 
			
		||||
                pinned_waiter_list.push_back(GetCurrentThread(kernel));
 | 
			
		||||
                GetCurrentThread(kernel).BeginWait(std::addressof(wait_queue_));
 | 
			
		||||
                pinned_waiter_list.push_back(GetCurrentThread(m_kernel));
 | 
			
		||||
                GetCurrentThread(m_kernel).BeginWait(std::addressof(wait_queue_));
 | 
			
		||||
            } else {
 | 
			
		||||
                // Check if the thread is currently running.
 | 
			
		||||
                // If it is, we'll need to retry.
 | 
			
		||||
                for (auto i = 0; i < static_cast<s32>(Core::Hardware::NUM_CPU_CORES); ++i) {
 | 
			
		||||
                    if (kernel.Scheduler(i).GetSchedulerCurrentThread() == this) {
 | 
			
		||||
                    if (m_kernel.Scheduler(i).GetSchedulerCurrentThread() == this) {
 | 
			
		||||
                        thread_is_current = true;
 | 
			
		||||
                        break;
 | 
			
		||||
                    }
 | 
			
		||||
@@ -917,7 +917,7 @@ Result KThread::GetThreadContext3(std::vector<u8>& out) {
 | 
			
		||||
    // Get the context.
 | 
			
		||||
    {
 | 
			
		||||
        // Lock the scheduler.
 | 
			
		||||
        KScopedSchedulerLock sl{kernel};
 | 
			
		||||
        KScopedSchedulerLock sl{m_kernel};
 | 
			
		||||
 | 
			
		||||
        // Verify that we're suspended.
 | 
			
		||||
        R_UNLESS(IsSuspendRequested(SuspendType::Thread), ResultInvalidState);
 | 
			
		||||
@@ -946,7 +946,7 @@ Result KThread::GetThreadContext3(std::vector<u8>& out) {
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
void KThread::AddHeldLock(LockWithPriorityInheritanceInfo* lock_info) {
 | 
			
		||||
    ASSERT(KScheduler::IsSchedulerLockedByCurrentThread(kernel));
 | 
			
		||||
    ASSERT(KScheduler::IsSchedulerLockedByCurrentThread(m_kernel));
 | 
			
		||||
 | 
			
		||||
    // Set ourselves as the lock's owner.
 | 
			
		||||
    lock_info->SetOwner(this);
 | 
			
		||||
@@ -957,7 +957,7 @@ void KThread::AddHeldLock(LockWithPriorityInheritanceInfo* lock_info) {
 | 
			
		||||
 | 
			
		||||
KThread::LockWithPriorityInheritanceInfo* KThread::FindHeldLock(VAddr address_key_,
 | 
			
		||||
                                                                bool is_kernel_address_key_) {
 | 
			
		||||
    ASSERT(KScheduler::IsSchedulerLockedByCurrentThread(kernel));
 | 
			
		||||
    ASSERT(KScheduler::IsSchedulerLockedByCurrentThread(m_kernel));
 | 
			
		||||
 | 
			
		||||
    // Try to find an existing held lock.
 | 
			
		||||
    for (auto& held_lock : held_lock_info_list) {
 | 
			
		||||
@@ -971,7 +971,7 @@ KThread::LockWithPriorityInheritanceInfo* KThread::FindHeldLock(VAddr address_ke
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
void KThread::AddWaiterImpl(KThread* thread) {
 | 
			
		||||
    ASSERT(KScheduler::IsSchedulerLockedByCurrentThread(kernel));
 | 
			
		||||
    ASSERT(KScheduler::IsSchedulerLockedByCurrentThread(m_kernel));
 | 
			
		||||
    ASSERT(thread->GetConditionVariableTree() == nullptr);
 | 
			
		||||
 | 
			
		||||
    // Get the thread's address key.
 | 
			
		||||
@@ -981,7 +981,7 @@ void KThread::AddWaiterImpl(KThread* thread) {
 | 
			
		||||
    // Keep track of how many kernel waiters we have.
 | 
			
		||||
    if (is_kernel_address_key_) {
 | 
			
		||||
        ASSERT((num_kernel_waiters++) >= 0);
 | 
			
		||||
        KScheduler::SetSchedulerUpdateNeeded(kernel);
 | 
			
		||||
        KScheduler::SetSchedulerUpdateNeeded(m_kernel);
 | 
			
		||||
    }
 | 
			
		||||
 | 
			
		||||
    // Get the relevant lock info.
 | 
			
		||||
@@ -989,7 +989,7 @@ void KThread::AddWaiterImpl(KThread* thread) {
 | 
			
		||||
    if (lock_info == nullptr) {
 | 
			
		||||
        // Create a new lock for the address key.
 | 
			
		||||
        lock_info =
 | 
			
		||||
            LockWithPriorityInheritanceInfo::Create(kernel, address_key_, is_kernel_address_key_);
 | 
			
		||||
            LockWithPriorityInheritanceInfo::Create(m_kernel, address_key_, is_kernel_address_key_);
 | 
			
		||||
 | 
			
		||||
        // Add the new lock to our list.
 | 
			
		||||
        this->AddHeldLock(lock_info);
 | 
			
		||||
@@ -1000,12 +1000,12 @@ void KThread::AddWaiterImpl(KThread* thread) {
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
void KThread::RemoveWaiterImpl(KThread* thread) {
 | 
			
		||||
    ASSERT(KScheduler::IsSchedulerLockedByCurrentThread(kernel));
 | 
			
		||||
    ASSERT(KScheduler::IsSchedulerLockedByCurrentThread(m_kernel));
 | 
			
		||||
 | 
			
		||||
    // Keep track of how many kernel waiters we have.
 | 
			
		||||
    if (thread->GetIsKernelAddressKey()) {
 | 
			
		||||
        ASSERT((num_kernel_waiters--) > 0);
 | 
			
		||||
        KScheduler::SetSchedulerUpdateNeeded(kernel);
 | 
			
		||||
        KScheduler::SetSchedulerUpdateNeeded(m_kernel);
 | 
			
		||||
    }
 | 
			
		||||
 | 
			
		||||
    // Get the info for the lock the thread is waiting on.
 | 
			
		||||
@@ -1015,7 +1015,7 @@ void KThread::RemoveWaiterImpl(KThread* thread) {
 | 
			
		||||
    // Remove the waiter.
 | 
			
		||||
    if (lock_info->RemoveWaiter(thread)) {
 | 
			
		||||
        held_lock_info_list.erase(held_lock_info_list.iterator_to(*lock_info));
 | 
			
		||||
        LockWithPriorityInheritanceInfo::Free(kernel, lock_info);
 | 
			
		||||
        LockWithPriorityInheritanceInfo::Free(m_kernel, lock_info);
 | 
			
		||||
    }
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
@@ -1076,7 +1076,7 @@ void KThread::AddWaiter(KThread* thread) {
 | 
			
		||||
 | 
			
		||||
    // If the thread has a higher priority than us, we should inherit.
 | 
			
		||||
    if (thread->GetPriority() < this->GetPriority()) {
 | 
			
		||||
        RestorePriority(kernel, this);
 | 
			
		||||
        RestorePriority(m_kernel, this);
 | 
			
		||||
    }
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
@@ -1087,12 +1087,12 @@ void KThread::RemoveWaiter(KThread* thread) {
 | 
			
		||||
    // lower priority.
 | 
			
		||||
    if (this->GetPriority() == thread->GetPriority() &&
 | 
			
		||||
        this->GetPriority() < this->GetBasePriority()) {
 | 
			
		||||
        RestorePriority(kernel, this);
 | 
			
		||||
        RestorePriority(m_kernel, this);
 | 
			
		||||
    }
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
KThread* KThread::RemoveWaiterByKey(bool* out_has_waiters, VAddr key, bool is_kernel_address_key_) {
 | 
			
		||||
    ASSERT(KScheduler::IsSchedulerLockedByCurrentThread(kernel));
 | 
			
		||||
    ASSERT(KScheduler::IsSchedulerLockedByCurrentThread(m_kernel));
 | 
			
		||||
 | 
			
		||||
    // Get the relevant lock info.
 | 
			
		||||
    auto* lock_info = this->FindHeldLock(key, is_kernel_address_key_);
 | 
			
		||||
@@ -1108,7 +1108,7 @@ KThread* KThread::RemoveWaiterByKey(bool* out_has_waiters, VAddr key, bool is_ke
 | 
			
		||||
    if (lock_info->GetIsKernelAddressKey()) {
 | 
			
		||||
        num_kernel_waiters -= lock_info->GetWaiterCount();
 | 
			
		||||
        ASSERT(num_kernel_waiters >= 0);
 | 
			
		||||
        KScheduler::SetSchedulerUpdateNeeded(kernel);
 | 
			
		||||
        KScheduler::SetSchedulerUpdateNeeded(m_kernel);
 | 
			
		||||
    }
 | 
			
		||||
 | 
			
		||||
    ASSERT(lock_info->GetWaiterCount() > 0);
 | 
			
		||||
@@ -1120,7 +1120,7 @@ KThread* KThread::RemoveWaiterByKey(bool* out_has_waiters, VAddr key, bool is_ke
 | 
			
		||||
        *out_has_waiters = false;
 | 
			
		||||
 | 
			
		||||
        // Free the lock info, since it has no waiters.
 | 
			
		||||
        LockWithPriorityInheritanceInfo::Free(kernel, lock_info);
 | 
			
		||||
        LockWithPriorityInheritanceInfo::Free(m_kernel, lock_info);
 | 
			
		||||
    } else {
 | 
			
		||||
        // There are additional waiters on the lock.
 | 
			
		||||
        *out_has_waiters = true;
 | 
			
		||||
@@ -1142,7 +1142,7 @@ KThread* KThread::RemoveWaiterByKey(bool* out_has_waiters, VAddr key, bool is_ke
 | 
			
		||||
    // to lower priority.
 | 
			
		||||
    if (this->GetPriority() == next_lock_owner->GetPriority() &&
 | 
			
		||||
        this->GetPriority() < this->GetBasePriority()) {
 | 
			
		||||
        RestorePriority(kernel, this);
 | 
			
		||||
        RestorePriority(m_kernel, this);
 | 
			
		||||
        // NOTE: No need to restore priority on the next lock owner, because it was already the
 | 
			
		||||
        // highest priority waiter on the lock.
 | 
			
		||||
    }
 | 
			
		||||
@@ -1153,18 +1153,18 @@ KThread* KThread::RemoveWaiterByKey(bool* out_has_waiters, VAddr key, bool is_ke
 | 
			
		||||
 | 
			
		||||
Result KThread::Run() {
 | 
			
		||||
    while (true) {
 | 
			
		||||
        KScopedSchedulerLock lk{kernel};
 | 
			
		||||
        KScopedSchedulerLock lk{m_kernel};
 | 
			
		||||
 | 
			
		||||
        // If either this thread or the current thread are requesting termination, note it.
 | 
			
		||||
        R_UNLESS(!IsTerminationRequested(), ResultTerminationRequested);
 | 
			
		||||
        R_UNLESS(!GetCurrentThread(kernel).IsTerminationRequested(), ResultTerminationRequested);
 | 
			
		||||
        R_UNLESS(!GetCurrentThread(m_kernel).IsTerminationRequested(), ResultTerminationRequested);
 | 
			
		||||
 | 
			
		||||
        // Ensure our thread state is correct.
 | 
			
		||||
        R_UNLESS(GetState() == ThreadState::Initialized, ResultInvalidState);
 | 
			
		||||
 | 
			
		||||
        // If the current thread has been asked to suspend, suspend it and retry.
 | 
			
		||||
        if (GetCurrentThread(kernel).IsSuspended()) {
 | 
			
		||||
            GetCurrentThread(kernel).UpdateState();
 | 
			
		||||
        if (GetCurrentThread(m_kernel).IsSuspended()) {
 | 
			
		||||
            GetCurrentThread(m_kernel).UpdateState();
 | 
			
		||||
            continue;
 | 
			
		||||
        }
 | 
			
		||||
 | 
			
		||||
@@ -1184,7 +1184,7 @@ Result KThread::Run() {
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
void KThread::Exit() {
 | 
			
		||||
    ASSERT(this == GetCurrentThreadPointer(kernel));
 | 
			
		||||
    ASSERT(this == GetCurrentThreadPointer(m_kernel));
 | 
			
		||||
 | 
			
		||||
    // Release the thread resource hint, running thread count from parent.
 | 
			
		||||
    if (parent != nullptr) {
 | 
			
		||||
@@ -1195,7 +1195,7 @@ void KThread::Exit() {
 | 
			
		||||
 | 
			
		||||
    // Perform termination.
 | 
			
		||||
    {
 | 
			
		||||
        KScopedSchedulerLock sl{kernel};
 | 
			
		||||
        KScopedSchedulerLock sl{m_kernel};
 | 
			
		||||
 | 
			
		||||
        // Disallow all suspension.
 | 
			
		||||
        suspend_allowed_flags = 0;
 | 
			
		||||
@@ -1208,21 +1208,21 @@ void KThread::Exit() {
 | 
			
		||||
        StartTermination();
 | 
			
		||||
 | 
			
		||||
        // Register the thread as a work task.
 | 
			
		||||
        KWorkerTaskManager::AddTask(kernel, KWorkerTaskManager::WorkerType::Exit, this);
 | 
			
		||||
        KWorkerTaskManager::AddTask(m_kernel, KWorkerTaskManager::WorkerType::Exit, this);
 | 
			
		||||
    }
 | 
			
		||||
 | 
			
		||||
    UNREACHABLE_MSG("KThread::Exit() would return");
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
Result KThread::Terminate() {
 | 
			
		||||
    ASSERT(this != GetCurrentThreadPointer(kernel));
 | 
			
		||||
    ASSERT(this != GetCurrentThreadPointer(m_kernel));
 | 
			
		||||
 | 
			
		||||
    // Request the thread terminate if it hasn't already.
 | 
			
		||||
    if (const auto new_state = this->RequestTerminate(); new_state != ThreadState::Terminated) {
 | 
			
		||||
        // If the thread isn't terminated, wait for it to terminate.
 | 
			
		||||
        s32 index;
 | 
			
		||||
        KSynchronizationObject* objects[] = {this};
 | 
			
		||||
        R_TRY(KSynchronizationObject::Wait(kernel, std::addressof(index), objects, 1,
 | 
			
		||||
        R_TRY(KSynchronizationObject::Wait(m_kernel, std::addressof(index), objects, 1,
 | 
			
		||||
                                           Svc::WaitInfinite));
 | 
			
		||||
    }
 | 
			
		||||
 | 
			
		||||
@@ -1230,9 +1230,9 @@ Result KThread::Terminate() {
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
ThreadState KThread::RequestTerminate() {
 | 
			
		||||
    ASSERT(this != GetCurrentThreadPointer(kernel));
 | 
			
		||||
    ASSERT(this != GetCurrentThreadPointer(m_kernel));
 | 
			
		||||
 | 
			
		||||
    KScopedSchedulerLock sl{kernel};
 | 
			
		||||
    KScopedSchedulerLock sl{m_kernel};
 | 
			
		||||
 | 
			
		||||
    // Determine if this is the first termination request.
 | 
			
		||||
    const bool first_request = [&]() -> bool {
 | 
			
		||||
@@ -1268,10 +1268,10 @@ ThreadState KThread::RequestTerminate() {
 | 
			
		||||
 | 
			
		||||
        // If the thread is runnable, send a termination interrupt to other cores.
 | 
			
		||||
        if (this->GetState() == ThreadState::Runnable) {
 | 
			
		||||
            if (const u64 core_mask =
 | 
			
		||||
                    physical_affinity_mask.GetAffinityMask() & ~(1ULL << GetCurrentCoreId(kernel));
 | 
			
		||||
            if (const u64 core_mask = physical_affinity_mask.GetAffinityMask() &
 | 
			
		||||
                                      ~(1ULL << GetCurrentCoreId(m_kernel));
 | 
			
		||||
                core_mask != 0) {
 | 
			
		||||
                Kernel::KInterruptManager::SendInterProcessorInterrupt(kernel, core_mask);
 | 
			
		||||
                Kernel::KInterruptManager::SendInterProcessorInterrupt(m_kernel, core_mask);
 | 
			
		||||
            }
 | 
			
		||||
        }
 | 
			
		||||
 | 
			
		||||
@@ -1285,15 +1285,15 @@ ThreadState KThread::RequestTerminate() {
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
Result KThread::Sleep(s64 timeout) {
 | 
			
		||||
    ASSERT(!kernel.GlobalSchedulerContext().IsLocked());
 | 
			
		||||
    ASSERT(this == GetCurrentThreadPointer(kernel));
 | 
			
		||||
    ASSERT(!m_kernel.GlobalSchedulerContext().IsLocked());
 | 
			
		||||
    ASSERT(this == GetCurrentThreadPointer(m_kernel));
 | 
			
		||||
    ASSERT(timeout > 0);
 | 
			
		||||
 | 
			
		||||
    ThreadQueueImplForKThreadSleep wait_queue_(kernel);
 | 
			
		||||
    ThreadQueueImplForKThreadSleep wait_queue_(m_kernel);
 | 
			
		||||
    KHardwareTimer* timer{};
 | 
			
		||||
    {
 | 
			
		||||
        // Setup the scheduling lock and sleep.
 | 
			
		||||
        KScopedSchedulerLockAndSleep slp(kernel, std::addressof(timer), this, timeout);
 | 
			
		||||
        KScopedSchedulerLockAndSleep slp(m_kernel, std::addressof(timer), this, timeout);
 | 
			
		||||
 | 
			
		||||
        // Check if the thread should terminate.
 | 
			
		||||
        if (this->IsTerminationRequested()) {
 | 
			
		||||
@@ -1311,7 +1311,7 @@ Result KThread::Sleep(s64 timeout) {
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
void KThread::RequestDummyThreadWait() {
 | 
			
		||||
    ASSERT(KScheduler::IsSchedulerLockedByCurrentThread(kernel));
 | 
			
		||||
    ASSERT(KScheduler::IsSchedulerLockedByCurrentThread(m_kernel));
 | 
			
		||||
    ASSERT(this->IsDummyThread());
 | 
			
		||||
 | 
			
		||||
    // We will block when the scheduler lock is released.
 | 
			
		||||
@@ -1319,7 +1319,7 @@ void KThread::RequestDummyThreadWait() {
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
void KThread::DummyThreadBeginWait() {
 | 
			
		||||
    if (!this->IsDummyThread() || kernel.IsPhantomModeForSingleCore()) {
 | 
			
		||||
    if (!this->IsDummyThread() || m_kernel.IsPhantomModeForSingleCore()) {
 | 
			
		||||
        // Occurs in single core mode.
 | 
			
		||||
        return;
 | 
			
		||||
    }
 | 
			
		||||
@@ -1329,7 +1329,7 @@ void KThread::DummyThreadBeginWait() {
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
void KThread::DummyThreadEndWait() {
 | 
			
		||||
    ASSERT(KScheduler::IsSchedulerLockedByCurrentThread(kernel));
 | 
			
		||||
    ASSERT(KScheduler::IsSchedulerLockedByCurrentThread(m_kernel));
 | 
			
		||||
    ASSERT(this->IsDummyThread());
 | 
			
		||||
 | 
			
		||||
    // Wake up the waiting thread.
 | 
			
		||||
@@ -1347,7 +1347,7 @@ void KThread::BeginWait(KThreadQueue* queue) {
 | 
			
		||||
 | 
			
		||||
void KThread::NotifyAvailable(KSynchronizationObject* signaled_object, Result wait_result_) {
 | 
			
		||||
    // Lock the scheduler.
 | 
			
		||||
    KScopedSchedulerLock sl(kernel);
 | 
			
		||||
    KScopedSchedulerLock sl(m_kernel);
 | 
			
		||||
 | 
			
		||||
    // If we're waiting, notify our queue that we're available.
 | 
			
		||||
    if (GetState() == ThreadState::Waiting) {
 | 
			
		||||
@@ -1357,7 +1357,7 @@ void KThread::NotifyAvailable(KSynchronizationObject* signaled_object, Result wa
 | 
			
		||||
 | 
			
		||||
void KThread::EndWait(Result wait_result_) {
 | 
			
		||||
    // Lock the scheduler.
 | 
			
		||||
    KScopedSchedulerLock sl(kernel);
 | 
			
		||||
    KScopedSchedulerLock sl(m_kernel);
 | 
			
		||||
 | 
			
		||||
    // If we're waiting, notify our queue that we're available.
 | 
			
		||||
    if (GetState() == ThreadState::Waiting) {
 | 
			
		||||
@@ -1373,7 +1373,7 @@ void KThread::EndWait(Result wait_result_) {
 | 
			
		||||
 | 
			
		||||
void KThread::CancelWait(Result wait_result_, bool cancel_timer_task) {
 | 
			
		||||
    // Lock the scheduler.
 | 
			
		||||
    KScopedSchedulerLock sl(kernel);
 | 
			
		||||
    KScopedSchedulerLock sl(m_kernel);
 | 
			
		||||
 | 
			
		||||
    // If we're waiting, notify our queue that we're available.
 | 
			
		||||
    if (GetState() == ThreadState::Waiting) {
 | 
			
		||||
@@ -1382,7 +1382,7 @@ void KThread::CancelWait(Result wait_result_, bool cancel_timer_task) {
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
void KThread::SetState(ThreadState state) {
 | 
			
		||||
    KScopedSchedulerLock sl{kernel};
 | 
			
		||||
    KScopedSchedulerLock sl{m_kernel};
 | 
			
		||||
 | 
			
		||||
    // Clear debugging state
 | 
			
		||||
    SetMutexWaitAddressForDebugging({});
 | 
			
		||||
@@ -1393,7 +1393,7 @@ void KThread::SetState(ThreadState state) {
 | 
			
		||||
        static_cast<ThreadState>((old_state & ~ThreadState::Mask) | (state & ThreadState::Mask)),
 | 
			
		||||
        std::memory_order_relaxed);
 | 
			
		||||
    if (thread_state.load(std::memory_order_relaxed) != old_state) {
 | 
			
		||||
        KScheduler::OnThreadStateChanged(kernel, this, old_state);
 | 
			
		||||
        KScheduler::OnThreadStateChanged(m_kernel, this, old_state);
 | 
			
		||||
    }
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
@@ -1427,20 +1427,20 @@ s32 GetCurrentCoreId(KernelCore& kernel) {

KScopedDisableDispatch::~KScopedDisableDispatch() {
    // If we are shutting down the kernel, none of this is relevant anymore.
    if (kernel.IsShuttingDown()) {
    if (m_kernel.IsShuttingDown()) {
        return;
    }

    if (GetCurrentThread(kernel).GetDisableDispatchCount() <= 1) {
        auto* scheduler = kernel.CurrentScheduler();
    if (GetCurrentThread(m_kernel).GetDisableDispatchCount() <= 1) {
        auto* scheduler = m_kernel.CurrentScheduler();

        if (scheduler && !kernel.IsPhantomModeForSingleCore()) {
        if (scheduler && !m_kernel.IsPhantomModeForSingleCore()) {
            scheduler->RescheduleCurrentCore();
        } else {
            KScheduler::RescheduleCurrentHLEThread(kernel);
            KScheduler::RescheduleCurrentHLEThread(m_kernel);
        }
    } else {
        GetCurrentThread(kernel).EnableDispatch();
        GetCurrentThread(m_kernel).EnableDispatch();
    }
}
 
@@ -128,7 +128,7 @@ public:
    static constexpr s32 IdleThreadPriority = Svc::LowestThreadPriority + 1;
    static constexpr s32 DummyThreadPriority = Svc::LowestThreadPriority + 2;

    explicit KThread(KernelCore& kernel_);
    explicit KThread(KernelCore& kernel);
    ~KThread() override;

public:
@@ -494,12 +494,12 @@ public:
    }

    void DisableDispatch() {
        ASSERT(GetCurrentThread(kernel).GetDisableDispatchCount() >= 0);
        ASSERT(GetCurrentThread(m_kernel).GetDisableDispatchCount() >= 0);
        this->GetStackParameters().disable_count++;
    }

    void EnableDispatch() {
        ASSERT(GetCurrentThread(kernel).GetDisableDispatchCount() > 0);
        ASSERT(GetCurrentThread(m_kernel).GetDisableDispatchCount() > 0);
        this->GetStackParameters().disable_count--;
    }

@@ -970,9 +970,9 @@ public:

class KScopedDisableDispatch {
public:
    [[nodiscard]] explicit KScopedDisableDispatch(KernelCore& kernel_) : kernel{kernel_} {
    [[nodiscard]] explicit KScopedDisableDispatch(KernelCore& kernel) : m_kernel{kernel} {
        // If we are shutting down the kernel, none of this is relevant anymore.
        if (kernel.IsShuttingDown()) {
        if (m_kernel.IsShuttingDown()) {
            return;
        }
        GetCurrentThread(kernel).DisableDispatch();
@@ -981,7 +981,7 @@ public:
    ~KScopedDisableDispatch();

private:
    KernelCore& kernel;
    KernelCore& m_kernel;
};

inline void KTimerTask::OnTimer() {
 
@@ -31,7 +31,7 @@ private:

class KThreadQueueWithoutEndWait : public KThreadQueue {
public:
    explicit KThreadQueueWithoutEndWait(KernelCore& kernel_) : KThreadQueue(kernel_) {}
    explicit KThreadQueueWithoutEndWait(KernelCore& kernel) : KThreadQueue(kernel) {}

    void EndWait(KThread* waiting_thread, Result wait_result) override final;
};
 
@@ -8,23 +8,23 @@

namespace Kernel {

KTransferMemory::KTransferMemory(KernelCore& kernel_)
    : KAutoObjectWithSlabHeapAndContainer{kernel_} {}
KTransferMemory::KTransferMemory(KernelCore& kernel)
    : KAutoObjectWithSlabHeapAndContainer{kernel} {}

KTransferMemory::~KTransferMemory() = default;

Result KTransferMemory::Initialize(VAddr address_, std::size_t size_,
                                   Svc::MemoryPermission owner_perm_) {
Result KTransferMemory::Initialize(VAddr address, std::size_t size,
                                   Svc::MemoryPermission owner_perm) {
    // Set members.
    m_owner = GetCurrentProcessPointer(kernel);
    m_owner = GetCurrentProcessPointer(m_kernel);

    // TODO(bunnei): Lock for transfer memory

    // Set remaining tracking members.
    m_owner->Open();
    m_owner_perm = owner_perm_;
    m_address = address_;
    m_size = size_;
    m_owner_perm = owner_perm;
    m_address = address;
    m_size = size;
    m_is_initialized = true;

    R_SUCCEED();
 
@@ -23,10 +23,10 @@ class KTransferMemory final
    KERNEL_AUTOOBJECT_TRAITS(KTransferMemory, KAutoObject);

public:
    explicit KTransferMemory(KernelCore& kernel_);
    explicit KTransferMemory(KernelCore& kernel);
    ~KTransferMemory() override;

    Result Initialize(VAddr address_, std::size_t size_, Svc::MemoryPermission owner_perm_);
    Result Initialize(VAddr address, std::size_t size, Svc::MemoryPermission owner_perm);

    void Finalize() override;

 
@@ -9,7 +9,7 @@ namespace Kernel {

class KWorkerTask : public KSynchronizationObject {
public:
    explicit KWorkerTask(KernelCore& kernel_);
    explicit KWorkerTask(KernelCore& kernel);

    void DoWorkerTask();
};
 
@@ -10,7 +10,7 @@

namespace Kernel {

KWorkerTask::KWorkerTask(KernelCore& kernel_) : KSynchronizationObject{kernel_} {}
KWorkerTask::KWorkerTask(KernelCore& kernel) : KSynchronizationObject{kernel} {}

void KWorkerTask::DoWorkerTask() {
    if (auto* const thread = this->DynamicCast<KThread*>(); thread != nullptr) {
 
@@ -20,7 +20,7 @@ public:

    KWorkerTaskManager();

    static void AddTask(KernelCore& kernel_, WorkerType type, KWorkerTask* task);
    static void AddTask(KernelCore& kernel, WorkerType type, KWorkerTask* task);

private:
    void AddTask(KernelCore& kernel, KWorkerTask* task);
 
@@ -66,7 +66,7 @@ private:
    }

public:
    explicit KAutoObjectWithSlabHeap(KernelCore& kernel_) : Base(kernel_), kernel(kernel_) {}
    explicit KAutoObjectWithSlabHeap(KernelCore& kernel) : Base(kernel) {}
    virtual ~KAutoObjectWithSlabHeap() = default;

    virtual void Destroy() override {
@@ -76,7 +76,7 @@ public:
            arg = this->GetPostDestroyArgument();
            this->Finalize();
        }
        Free(kernel, static_cast<Derived*>(this));
        Free(Base::m_kernel, static_cast<Derived*>(this));
        if (is_initialized) {
            Derived::PostDestroy(arg);
        }
@@ -90,7 +90,7 @@ public:
    }

    size_t GetSlabIndex() const {
        return SlabHeap<Derived>(kernel).GetObjectIndex(static_cast<const Derived*>(this));
        return SlabHeap<Derived>(Base::m_kernel).GetObjectIndex(static_cast<const Derived*>(this));
    }

public:
@@ -125,9 +125,6 @@ public:
    static size_t GetNumRemaining(KernelCore& kernel) {
        return kernel.SlabHeap<Derived>().GetNumRemaining();
    }

protected:
    KernelCore& kernel;
};

template <typename Derived, typename Base>
@@ -144,18 +141,18 @@ private:
    }

public:
    KAutoObjectWithSlabHeapAndContainer(KernelCore& kernel_) : Base(kernel_) {}
    KAutoObjectWithSlabHeapAndContainer(KernelCore& kernel) : Base(kernel) {}
    virtual ~KAutoObjectWithSlabHeapAndContainer() {}

    virtual void Destroy() override {
        const bool is_initialized = this->IsInitialized();
        uintptr_t arg = 0;
        if (is_initialized) {
            Base::kernel.ObjectListContainer().Unregister(this);
            Base::m_kernel.ObjectListContainer().Unregister(this);
            arg = this->GetPostDestroyArgument();
            this->Finalize();
        }
        Free(Base::kernel, static_cast<Derived*>(this));
        Free(Base::m_kernel, static_cast<Derived*>(this));
        if (is_initialized) {
            Derived::PostDestroy(arg);
        }
@@ -169,7 +166,7 @@ public:
    }

    size_t GetSlabIndex() const {
        return SlabHeap<Derived>(Base::kernel).GetObjectIndex(static_cast<const Derived*>(this));
        return SlabHeap<Derived>(Base::m_kernel).GetObjectIndex(static_cast<const Derived*>(this));
    }

public:
 