Kernel: Address Feedback.

commit 3073615dbc
parent 25f8606a6d
committed by FernandoS27
@@ -21,11 +21,11 @@ namespace Kernel {

 class AddressArbiter;
 class ClientPort;
+class GlobalScheduler;
 class HandleTable;
 class Process;
 class ResourceLimit;
 class Thread;
-class GlobalScheduler;

 /// Represents a single instance of the kernel.
 class KernelCore {

@@ -23,7 +23,7 @@
 namespace Kernel {

 GlobalScheduler::GlobalScheduler(Core::System& system) : system{system} {
-    reselection_pending = false;
+    is_reselection_pending = false;
 }

 void GlobalScheduler::AddThread(SharedPtr<Thread> thread) {

@@ -61,7 +61,7 @@ void GlobalScheduler::SelectThread(u32 core) {
             }
             sched.selected_thread = thread;
         }
-        sched.context_switch_pending = sched.selected_thread != sched.current_thread;
+        sched.is_context_switch_pending = sched.selected_thread != sched.current_thread;
         std::atomic_thread_fence(std::memory_order_seq_cst);
     };
     Scheduler& sched = system.Scheduler(core);

@@ -318,10 +318,18 @@ void GlobalScheduler::PreemptThreads() {
             }
         }

-        reselection_pending.store(true, std::memory_order_release);
+        is_reselection_pending.store(true, std::memory_order_release);
     }
 }

+void GlobalScheduler::Suggest(u32 priority, u32 core, Thread* thread) {
+    suggested_queue[core].add(thread, priority);
+}
+
+void GlobalScheduler::Unsuggest(u32 priority, u32 core, Thread* thread) {
+    suggested_queue[core].remove(thread, priority);
+}
+
 void GlobalScheduler::Schedule(u32 priority, u32 core, Thread* thread) {
     ASSERT_MSG(thread->GetProcessorID() == core, "Thread must be assigned to this core.");
     scheduled_queue[core].add(thread, priority);

@@ -332,12 +340,40 @@ void GlobalScheduler::SchedulePrepend(u32 priority, u32 core, Thread* thread) {
     scheduled_queue[core].add(thread, priority, false);
 }

+void GlobalScheduler::Reschedule(u32 priority, u32 core, Thread* thread) {
+    scheduled_queue[core].remove(thread, priority);
+    scheduled_queue[core].add(thread, priority);
+}
+
+void GlobalScheduler::Unschedule(u32 priority, u32 core, Thread* thread) {
+    scheduled_queue[core].remove(thread, priority);
+}
+
+void GlobalScheduler::TransferToCore(u32 priority, s32 destination_core, Thread* thread) {
+    const bool schedulable = thread->GetPriority() < THREADPRIO_COUNT;
+    const s32 source_core = thread->GetProcessorID();
+    if (source_core == destination_core || !schedulable) {
+        return;
+    }
+    thread->SetProcessorID(destination_core);
+    if (source_core >= 0) {
+        Unschedule(priority, source_core, thread);
+    }
+    if (destination_core >= 0) {
+        Unsuggest(priority, destination_core, thread);
+        Schedule(priority, destination_core, thread);
+    }
+    if (source_core >= 0) {
+        Suggest(priority, source_core, thread);
+    }
+}
+
 bool GlobalScheduler::AskForReselectionOrMarkRedundant(Thread* current_thread, Thread* winner) {
     if (current_thread == winner) {
         current_thread->IncrementYieldCount();
        return true;
     } else {
-        reselection_pending.store(true, std::memory_order_release);
+        is_reselection_pending.store(true, std::memory_order_release);
         return false;
     }
 }
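
The ordering inside TransferToCore is deliberate: the thread is unscheduled from its source core before being scheduled on the destination, and only afterwards re-added to the source core's suggested queue, so it never sits in two scheduled queues at once. A minimal stand-alone model of that sequence, with hypothetical simplified types (no priorities, plain vectors instead of Common::MultiLevelQueue; an illustration, not yuzu's actual code):

    // Minimal model of the TransferToCore queue dance. Each core has a
    // "scheduled" and a "suggested" list; priority levels are omitted.
    #include <array>
    #include <cstdint>
    #include <iostream>
    #include <vector>

    struct FakeThread {
        int id;
        std::int32_t core; // -1 means "not assigned to any core"
    };

    constexpr std::size_t NUM_CORES = 4;
    std::array<std::vector<FakeThread*>, NUM_CORES> scheduled;
    std::array<std::vector<FakeThread*>, NUM_CORES> suggested;

    void Remove(std::vector<FakeThread*>& q, FakeThread* t) {
        std::erase(q, t); // C++20
    }

    // Mirrors the shape of GlobalScheduler::TransferToCore: unschedule from
    // the source core, schedule on the destination, then leave a suggestion
    // behind on the source so it can still pick the thread up when idle.
    void TransferToCore(std::int32_t destination, FakeThread* t) {
        const std::int32_t source = t->core;
        if (source == destination) {
            return;
        }
        t->core = destination;
        if (source >= 0) {
            Remove(scheduled[source], t);
        }
        if (destination >= 0) {
            Remove(suggested[destination], t);
            scheduled[destination].push_back(t);
        }
        if (source >= 0) {
            suggested[source].push_back(t);
        }
    }

    int main() {
        FakeThread t{1, 0};
        scheduled[0].push_back(&t);
        TransferToCore(2, &t);
        std::cout << "core 2 scheduled: " << scheduled[2].size()
                  << ", core 0 suggested: " << suggested[0].size() << '\n'; // 1, 1
    }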

@@ -378,7 +414,7 @@ u64 Scheduler::GetLastContextSwitchTicks() const {
 }

 void Scheduler::TryDoContextSwitch() {
-    if (context_switch_pending) {
+    if (is_context_switch_pending) {
         SwitchContext();
     }
 }

@@ -409,7 +445,7 @@ void Scheduler::SwitchContext() {
     Thread* const previous_thread = GetCurrentThread();
     Thread* const new_thread = GetSelectedThread();

-    context_switch_pending = false;
+    is_context_switch_pending = false;
     if (new_thread == previous_thread) {
         return;
     }

@@ -477,4 +513,9 @@ void Scheduler::UpdateLastContextSwitchTime(Thread* thread, Process* process) {
     last_context_switch_time = most_recent_switch_ticks;
 }

+void Scheduler::Shutdown() {
+    current_thread = nullptr;
+    selected_thread = nullptr;
+}
+
 } // namespace Kernel

@@ -39,15 +39,11 @@ public:

     // Add a thread to the suggested queue of a cpu core. Suggested threads may be
     // picked if no thread is scheduled to run on the core.
-    void Suggest(u32 priority, u32 core, Thread* thread) {
-        suggested_queue[core].add(thread, priority);
-    }
+    void Suggest(u32 priority, u32 core, Thread* thread);

     // Remove a thread to the suggested queue of a cpu core. Suggested threads may be
     // picked if no thread is scheduled to run on the core.
-    void Unsuggest(u32 priority, u32 core, Thread* thread) {
-        suggested_queue[core].remove(thread, priority);
-    }
+    void Unsuggest(u32 priority, u32 core, Thread* thread);

     // Add a thread to the scheduling queue of a cpu core. The thread is added at the
     // back the queue in its priority level

@@ -58,37 +54,15 @@ public:
     void SchedulePrepend(u32 priority, u32 core, Thread* thread);

     // Reschedule an already scheduled thread based on a new priority
-    void Reschedule(u32 priority, u32 core, Thread* thread) {
-        scheduled_queue[core].remove(thread, priority);
-        scheduled_queue[core].add(thread, priority);
-    }
+    void Reschedule(u32 priority, u32 core, Thread* thread);

     // Unschedule a thread.
-    void Unschedule(u32 priority, u32 core, Thread* thread) {
-        scheduled_queue[core].remove(thread, priority);
-    }
+    void Unschedule(u32 priority, u32 core, Thread* thread);

     // Transfers a thread into an specific core. If the destination_core is -1
     // it will be unscheduled from its source code and added into its suggested
     // queue.
-    void TransferToCore(u32 priority, s32 destination_core, Thread* thread) {
-        const bool schedulable = thread->GetPriority() < THREADPRIO_COUNT;
-        const s32 source_core = thread->GetProcessorID();
-        if (source_core == destination_core || !schedulable) {
-            return;
-        }
-        thread->SetProcessorID(destination_core);
-        if (source_core >= 0) {
-            Unschedule(priority, source_core, thread);
-        }
-        if (destination_core >= 0) {
-            Unsuggest(priority, destination_core, thread);
-            Schedule(priority, destination_core, thread);
-        }
-        if (source_core >= 0) {
-            Suggest(priority, source_core, thread);
-        }
-    }
+    void TransferToCore(u32 priority, s32 destination_core, Thread* thread);

     /*
      * UnloadThread selects a core and forces it to unload its current thread's context

@@ -133,6 +107,12 @@ public:
      */
     bool YieldThreadAndWaitForLoadBalancing(Thread* thread);

+    /*
+     * PreemptThreads this operation rotates the scheduling queues of threads at
+     * a preemption priority and then does some core rebalancing. Preemption priorities
+     * can be found in the array 'preemption_priorities'. This operation happens
+     * every 10ms.
+     */
+    void PreemptThreads();
+
     u32 CpuCoresCount() const {
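
The new comment names behavior the declaration alone does not show: every 10 ms the global scheduler rotates the queue at each core's preemption priority (59 on cores 0-2, 62 on core 3, per the preemption_priorities array further down). A rough sketch of the rotation idea only, using a plain std::deque instead of Common::MultiLevelQueue and ignoring the cross-core rebalancing:

    // Illustrative rotation at one priority level; not yuzu's implementation.
    #include <deque>
    #include <iostream>

    void RotateAtPriority(std::deque<int>& queue_at_priority) {
        if (queue_at_priority.size() < 2) {
            return; // nothing to rotate
        }
        // Move the front thread to the back so the next thread at this
        // priority gets CPU time; this prevents starvation within a level.
        queue_at_priority.push_back(queue_at_priority.front());
        queue_at_priority.pop_front();
    }

    int main() {
        std::deque<int> prio59 = {1, 2, 3}; // thread ids at priority 59
        RotateAtPriority(prio59);           // -> {2, 3, 1}
        for (int id : prio59) {
            std::cout << id << ' ';
        }
        std::cout << '\n';
    }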

@@ -140,11 +120,11 @@ public:
     }

     void SetReselectionPending() {
-        reselection_pending.store(true, std::memory_order_release);
+        is_reselection_pending.store(true, std::memory_order_release);
     }

     bool IsReselectionPending() const {
-        return reselection_pending.load();
+        return is_reselection_pending.load(std::memory_order_acquire);
     }

     void Shutdown();
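
Besides the rename, this hunk tightens the memory ordering: the store was already memory_order_release, and the load drops the default seq_cst for an explicit memory_order_acquire, making the pairing visible at both ends. A self-contained illustration of what that pairing guarantees (hypothetical flag and data, not scheduler code):

    // Release/acquire pairing on an atomic flag, as in SetReselectionPending()
    // and IsReselectionPending(). The release store makes every write before
    // it visible to any thread whose acquire load observes 'true'.
    #include <atomic>
    #include <iostream>
    #include <thread>

    std::atomic<bool> is_reselection_pending{false};
    int shared_data = 0; // plain data published via the flag

    int main() {
        std::thread producer([] {
            shared_data = 42; // happens-before the release store below
            is_reselection_pending.store(true, std::memory_order_release);
        });
        std::thread consumer([] {
            while (!is_reselection_pending.load(std::memory_order_acquire)) {
                // spin; once the acquire load returns true, shared_data == 42
            }
            std::cout << shared_data << '\n'; // guaranteed to print 42
        });
        producer.join();
        consumer.join();
    }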

@@ -155,8 +135,10 @@ private:
     static constexpr u32 min_regular_priority = 2;
     std::array<Common::MultiLevelQueue<Thread*, THREADPRIO_COUNT>, NUM_CPU_CORES> scheduled_queue;
     std::array<Common::MultiLevelQueue<Thread*, THREADPRIO_COUNT>, NUM_CPU_CORES> suggested_queue;
-    std::atomic<bool> reselection_pending;
+    std::atomic<bool> is_reselection_pending;

+    // `preemption_priorities` are the priority levels at which the global scheduler
+    // preempts threads every 10 ms. They are ordered from Core 0 to Core 3
     std::array<u32, NUM_CPU_CORES> preemption_priorities = {59, 59, 59, 62};

     /// Lists all thread ids that aren't deleted/etc.

@@ -166,7 +148,7 @@ private:

 class Scheduler final {
 public:
-    explicit Scheduler(Core::System& system, Core::ARM_Interface& cpu_core, const u32 core_id);
+    explicit Scheduler(Core::System& system, Core::ARM_Interface& cpu_core, u32 core_id);
     ~Scheduler();

     /// Returns whether there are any threads that are ready to run.

@@ -175,26 +157,27 @@ public:
     /// Reschedules to the next available thread (call after current thread is suspended)
     void TryDoContextSwitch();

     /// Unloads currently running thread
     void UnloadThread();

     /// Select the threads in top of the scheduling multilist.
     void SelectThreads();

     /// Gets the current running thread
     Thread* GetCurrentThread() const;

     /// Gets the currently selected thread from the top of the multilevel queue
     Thread* GetSelectedThread() const;

     /// Gets the timestamp for the last context switch in ticks.
     u64 GetLastContextSwitchTicks() const;

     bool ContextSwitchPending() const {
-        return context_switch_pending;
+        return is_context_switch_pending;
     }

-    void Shutdown() {
-        current_thread = nullptr;
-        selected_thread = nullptr;
-    }
+    /// Shutdowns the scheduler.
+    void Shutdown();

 private:
     friend class GlobalScheduler;

@@ -226,7 +209,7 @@ private:
     u64 idle_selection_count = 0;
     const u32 core_id;

-    bool context_switch_pending = false;
+    bool is_context_switch_pending = false;
 };

 } // namespace Kernel

@@ -1556,18 +1556,18 @@ static void SleepThread(Core::System& system, s64 nanoseconds) {

     auto& scheduler = system.CurrentScheduler();
     auto* const current_thread = scheduler.GetCurrentThread();
-    bool redundant = false;
+    bool is_redundant = false;

     if (nanoseconds <= 0) {
         switch (static_cast<SleepType>(nanoseconds)) {
         case SleepType::YieldWithoutLoadBalancing:
-            redundant = current_thread->YieldSimple();
+            is_redundant = current_thread->YieldSimple();
             break;
         case SleepType::YieldWithLoadBalancing:
-            redundant = current_thread->YieldAndBalanceLoad();
+            is_redundant = current_thread->YieldAndBalanceLoad();
             break;
         case SleepType::YieldAndWaitForLoadBalancing:
-            redundant = current_thread->YieldAndWaitForLoadBalancing();
+            is_redundant = current_thread->YieldAndWaitForLoadBalancing();
             break;
         default:
             UNREACHABLE_MSG("Unimplemented sleep yield type '{:016X}'!", nanoseconds);

@@ -1576,9 +1576,9 @@ static void SleepThread(Core::System& system, s64 nanoseconds) {
         current_thread->Sleep(nanoseconds);
     }

-    if (redundant) {
+    if (is_redundant) {
         // If it's redundant, the core is pretty much idle. Some games keep idling
-        // a core while it's doing nothing, we advance timing to avoid costly continuos
+        // a core while it's doing nothing, we advance timing to avoid costly continuous
         // calls.
         system.CoreTiming().AddTicks(2000);
     }
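
For context on the switch above: non-positive nanosecond values to svcSleepThread act as sentinels selecting a yield flavor rather than an actual sleep, and the is_redundant path advances core timing because the yield changed nothing. The SleepType enum itself is not in this diff; the 0/-1/-2 mapping in the sketch below follows the Horizon svcSleepThread convention and is an assumption here:

    // Sketch of the sentinel dispatch in SleepThread (assumed enum values).
    #include <cstdint>
    #include <iostream>

    enum class SleepType : std::int64_t {
        YieldWithoutLoadBalancing = 0,
        YieldWithLoadBalancing = -1,
        YieldAndWaitForLoadBalancing = -2,
    };

    void Sleep(std::int64_t nanoseconds) {
        if (nanoseconds > 0) {
            std::cout << "sleep for " << nanoseconds << " ns\n";
            return;
        }
        switch (static_cast<SleepType>(nanoseconds)) {
        case SleepType::YieldWithoutLoadBalancing:
            std::cout << "yield to same-core threads only\n";
            break;
        case SleepType::YieldWithLoadBalancing:
            std::cout << "yield and allow migration to another core\n";
            break;
        case SleepType::YieldAndWaitForLoadBalancing:
            std::cout << "yield and wait until rebalancing picks this thread\n";
            break;
        }
    }

    int main() {
        Sleep(1'000'000); // a real 1 ms sleep
        Sleep(0);         // YieldWithoutLoadBalancing
        Sleep(-1);        // YieldWithLoadBalancing
    }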

@@ -389,13 +389,13 @@ bool Thread::YieldAndWaitForLoadBalancing() {

 void Thread::SetSchedulingStatus(ThreadSchedStatus new_status) {
     const u32 old_flags = scheduling_state;
-    scheduling_state =
-        (scheduling_state & ThreadSchedMasks::HighMask) | static_cast<u32>(new_status);
+    scheduling_state = (scheduling_state & static_cast<u32>(ThreadSchedMasks::HighMask)) |
+                       static_cast<u32>(new_status);
     AdjustSchedulingOnStatus(old_flags);
 }

 void Thread::SetCurrentPriority(u32 new_priority) {
-    u32 old_priority = std::exchange(current_priority, new_priority);
+    const u32 old_priority = std::exchange(current_priority, new_priority);
     AdjustSchedulingOnPriority(old_priority);
 }

@@ -410,10 +410,9 @@ ResultCode Thread::SetCoreAndAffinityMask(s32 new_core, u64 new_affinity_mask) {
     };

     const bool use_override = affinity_override_count != 0;
-    // The value -3 is "do not change the ideal core".
-    if (new_core == -3) {
+    if (new_core == static_cast<s32>(CoreFlags::DontChangeIdealCore)) {
         new_core = use_override ? ideal_core_override : ideal_core;
-        if ((new_affinity_mask & (1 << new_core)) == 0) {
+        if ((new_affinity_mask & (1ULL << new_core)) == 0) {
             return ERR_INVALID_COMBINATION;
         }
     }
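
The 1ULL change is type hygiene rather than a live bug: new_core is at most 3 here, but the mask is a u64, and the literal 1 is an int, so the shift happens in 32 bits before widening. A small demonstration of what goes wrong at larger indexes (hypothetical values):

    // Why 1 << new_core was changed to 1ULL << new_core.
    #include <cstdint>
    #include <iostream>

    int main() {
        const int core = 35; // hypothetical index, too large for an int shift
        const std::uint64_t wide_ok = std::uint64_t{1} << core; // 1ULL works
        std::cout << std::hex << wide_ok << '\n'; // prints 800000000
        // (1 << core) with core >= 32 is undefined behavior (shift count
        // exceeds the width of int), so the pre-fix expression could yield
        // anything, making an affinity-mask check reject valid combinations.
    }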

@@ -444,14 +443,14 @@ void Thread::AdjustSchedulingOnStatus(u32 old_flags) {
     }

     auto& scheduler = kernel.GlobalScheduler();
-    if (static_cast<ThreadSchedStatus>(old_flags & ThreadSchedMasks::LowMask) ==
+    if (static_cast<ThreadSchedStatus>(old_flags & static_cast<u32>(ThreadSchedMasks::LowMask)) ==
         ThreadSchedStatus::Runnable) {
         // In this case the thread was running, now it's pausing/exitting
         if (processor_id >= 0) {
             scheduler.Unschedule(current_priority, processor_id, this);
         }

-        for (u32 core = 0; core < GlobalScheduler::NUM_CPU_CORES; core++) {
+        for (s32 core = 0; core < GlobalScheduler::NUM_CPU_CORES; core++) {
             if (core != processor_id && ((affinity_mask >> core) & 1) != 0) {
                 scheduler.Unsuggest(current_priority, core, this);
             }

@@ -462,7 +461,7 @@ void Thread::AdjustSchedulingOnStatus(u32 old_flags) {
             scheduler.Schedule(current_priority, processor_id, this);
         }

-        for (u32 core = 0; core < GlobalScheduler::NUM_CPU_CORES; core++) {
+        for (s32 core = 0; core < GlobalScheduler::NUM_CPU_CORES; core++) {
             if (core != processor_id && ((affinity_mask >> core) & 1) != 0) {
                 scheduler.Suggest(current_priority, core, this);
             }

@@ -82,19 +82,25 @@ enum class ThreadSchedStatus : u32 {
     Exited = 3,
 };

-enum ThreadSchedFlags : u32 {
+enum class ThreadSchedFlags : u32 {
     ProcessPauseFlag = 1 << 4,
     ThreadPauseFlag = 1 << 5,
     ProcessDebugPauseFlag = 1 << 6,
     KernelInitPauseFlag = 1 << 8,
 };

-enum ThreadSchedMasks : u32 {
+enum class ThreadSchedMasks : u32 {
     LowMask = 0x000f,
     HighMask = 0xfff0,
     ForcePauseMask = 0x0070,
 };

+enum class CoreFlags : s32 {
+    IgnoreIdealCore = -1,
+    ProcessIdealCore = -2,
+    DontChangeIdealCore = -3,
+};
+
 class Thread final : public WaitObject {
 public:
     using MutexWaitingThreads = std::vector<SharedPtr<Thread>>;
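
The enum class conversions here are what force the explicit static_cast<u32> additions seen earlier in thread.cpp: scoped enums no longer convert implicitly to their underlying type, which is the point, since the compiler now rejects accidental mixing of masks, flags, and statuses. A small stand-alone example of the tradeoff:

    // Scoped enums need an explicit cast for bitmask arithmetic.
    #include <cstdint>
    #include <iostream>

    enum class Masks : std::uint32_t {
        LowMask = 0x000f,
        HighMask = 0xfff0,
    };

    int main() {
        std::uint32_t state = 0x1234;
        // std::uint32_t low = state & Masks::LowMask; // error: no implicit conversion
        std::uint32_t low = state & static_cast<std::uint32_t>(Masks::LowMask); // ok
        std::cout << std::hex << low << '\n'; // prints 4
    }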

@@ -428,7 +434,8 @@ public:
     }

     ThreadSchedStatus GetSchedulingStatus() const {
-        return static_cast<ThreadSchedStatus>(scheduling_state & ThreadSchedMasks::LowMask);
+        return static_cast<ThreadSchedStatus>(scheduling_state &
+                                              static_cast<u32>(ThreadSchedMasks::LowMask));
     }

     bool IsRunning() const {

@@ -471,7 +478,8 @@ private:

     u64 total_cpu_time_ticks = 0; ///< Total CPU running ticks.
     u64 last_running_ticks = 0;   ///< CPU tick when thread was last running
-    u64 yield_count = 0;          ///< Number of innecessaries yields occured.
+    u64 yield_count = 0;          ///< Number of redundant yields carried by this thread.
+                                  ///< a redundant yield is one where no scheduling is changed

     s32 processor_id = 0;