Scheduler: Implement Yield Count and Core migration on Thread Preemption.
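
PreemptThreads() now rotates each core's scheduled queue at that core's preemption priority, charging a yield to the rotated threads. It then scans the core's suggested threads for a candidate at the preemption priority whose last run is older than the currently scheduled thread's, and migrates it onto the core, unloading it from its source core if it is running. If the resulting thread still sits below the preemption priority, a second pass considers suggestions at or above that priority. AskForReselectionOrMarkRedundant() counts a yield that re-selects the same thread as a redundant yield on that thread, and Thread gains a yield_count member with an incrementer and a getter.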
committed by FernandoS27
parent 2d382de6fa
commit 0cf26cee59
src/core/hle/kernel/scheduler.cpp:

@@ -241,10 +241,83 @@ bool GlobalScheduler::YieldThreadAndWaitForLoadBalancing(Thread* yielding_thread
 void GlobalScheduler::PreemptThreads() {
     for (std::size_t core_id = 0; core_id < NUM_CPU_CORES; core_id++) {
         const u32 priority = preemption_priorities[core_id];
-        if (scheduled_queue[core_id].size(priority) > 1) {
-            scheduled_queue[core_id].yield(priority);
-            reselection_pending.store(true, std::memory_order_release);
-        }
+
+        if (scheduled_queue[core_id].size(priority) > 0) {
+            scheduled_queue[core_id].front(priority)->IncrementYieldCount();
+            scheduled_queue[core_id].yield(priority);
+            if (scheduled_queue[core_id].size(priority) > 1) {
+                scheduled_queue[core_id].front(priority)->IncrementYieldCount();
+            }
+        }
+
+        Thread* current_thread =
+            scheduled_queue[core_id].empty() ? nullptr : scheduled_queue[core_id].front();
+        Thread* winner = nullptr;
+        for (auto& thread : suggested_queue[core_id]) {
+            const s32 source_core = thread->GetProcessorID();
+            if (thread->GetPriority() != priority) {
+                continue;
+            }
+            if (source_core >= 0) {
+                Thread* next_thread = scheduled_queue[source_core].empty()
+                                          ? nullptr
+                                          : scheduled_queue[source_core].front();
+                if (next_thread != nullptr && next_thread->GetPriority() < 2) {
+                    break;
+                }
+                if (next_thread == thread) {
+                    continue;
+                }
+            }
+            if (current_thread != nullptr &&
+                current_thread->GetLastRunningTicks() >= thread->GetLastRunningTicks()) {
+                winner = thread;
+                break;
+            }
+        }
+
+        if (winner != nullptr) {
+            if (winner->IsRunning()) {
+                UnloadThread(winner->GetProcessorID());
+            }
+            TransferToCore(winner->GetPriority(), core_id, winner);
+            current_thread = winner->GetPriority() <= current_thread->GetPriority() ? winner : current_thread;
+        }
+
+        if (current_thread != nullptr && current_thread->GetPriority() > priority) {
+            for (auto& thread : suggested_queue[core_id]) {
+                const s32 source_core = thread->GetProcessorID();
+                if (thread->GetPriority() > priority) {
+                    continue;
+                }
+                if (source_core >= 0) {
+                    Thread* next_thread = scheduled_queue[source_core].empty()
+                                              ? nullptr
+                                              : scheduled_queue[source_core].front();
+                    if (next_thread != nullptr && next_thread->GetPriority() < 2) {
+                        break;
+                    }
+                    if (next_thread == thread) {
+                        continue;
+                    }
+                }
+                if (current_thread != nullptr &&
+                    current_thread->GetLastRunningTicks() >= thread->GetLastRunningTicks()) {
+                    winner = thread;
+                    break;
+                }
+            }
+
+            if (winner != nullptr) {
+                if (winner->IsRunning()) {
+                    UnloadThread(winner->GetProcessorID());
+                }
+                TransferToCore(winner->GetPriority(), core_id, winner);
+                current_thread = winner;
+            }
+        }
+
+        reselection_pending.store(true, std::memory_order_release);
     }
 }
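
For readers unfamiliar with the queue primitive, the sketch below models what scheduled_queue[core_id].yield(priority) plausibly does (the real container is yuzu's MultiLevelQueue; FakeThread and PriorityLevel here are hypothetical stand-ins, not its API): the front thread of a priority level rotates to the back, and the preemption path charges a yield to the outgoing front and, when more than one thread sits at that level, to the incoming front as well.

// Minimal sketch, assuming a deque-backed priority level; NOT yuzu's
// MultiLevelQueue. FakeThread and PriorityLevel are illustrative names.
#include <cstddef>
#include <cstdint>
#include <deque>
#include <iostream>
#include <string>

struct FakeThread {
    std::string name;
    std::uint64_t yield_count = 0;
    void IncrementYieldCount() { ++yield_count; }
};

struct PriorityLevel {
    std::deque<FakeThread*> threads;

    std::size_t size() const { return threads.size(); }
    FakeThread* front() const { return threads.front(); }

    // yield(): rotate the front thread to the back of its priority level,
    // handing the core to the next thread of equal priority.
    void yield() {
        threads.push_back(threads.front());
        threads.pop_front();
    }
};

int main() {
    FakeThread a{"A"}, b{"B"};
    PriorityLevel level{{&a, &b}};

    // Mirrors the preemption path above: the outgoing front is charged a
    // yield, the queue rotates, and the incoming front is charged one too
    // when more than one thread sits at this priority.
    if (level.size() > 0) {
        level.front()->IncrementYieldCount();
        level.yield();
        if (level.size() > 1) {
            level.front()->IncrementYieldCount();
        }
    }

    std::cout << "front is now " << level.front()->name   // "B"
              << ", A yields=" << a.yield_count           // 1
              << ", B yields=" << b.yield_count << '\n';  // 1
}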
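The winner test in the candidate loop compares last-run timestamps: a suggestion is migrated only when the thread currently scheduled on this core ran at least as recently, i.e. the suggestion has waited at least as long. Below is a self-contained sketch of just that comparison; Candidate, pick_winner, and the priority value 59 are hypothetical illustrations, not yuzu API.

// Sketch of the migration decision, assuming ticks increase monotonically.
#include <cstdint>
#include <iostream>
#include <vector>

struct Candidate {
    const char* name;
    std::uint32_t priority;
    std::uint64_t last_running_ticks; // tick when the thread last ran
};

// Returns the first suggestion at `priority` that has waited at least as long
// as `current`, mirroring the GetLastRunningTicks() comparison above.
const Candidate* pick_winner(const Candidate& current, std::uint32_t priority,
                             const std::vector<Candidate>& suggestions) {
    for (const Candidate& s : suggestions) {
        if (s.priority != priority) {
            continue;
        }
        if (current.last_running_ticks >= s.last_running_ticks) {
            return &s; // suggestion is staler than the current thread: migrate it
        }
    }
    return nullptr;
}

int main() {
    const Candidate current{"current", 59, 1000};
    const std::vector<Candidate> suggestions{
        {"fresh", 59, 1500}, // ran more recently than current: skipped
        {"stale", 59, 400},  // last ran long ago: chosen
    };
    const Candidate* winner = pick_winner(current, 59, suggestions);
    std::cout << (winner ? winner->name : "none") << '\n'; // prints "stale"
}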
@@ -260,9 +333,7 @@ void GlobalScheduler::SchedulePrepend(u32 priority, u32 core, Thread* thread) {
 bool GlobalScheduler::AskForReselectionOrMarkRedundant(Thread* current_thread, Thread* winner) {
     if (current_thread == winner) {
-        // TODO(blinkhawk): manage redundant operations, this is not implemented.
-        // as its mostly an optimization.
-        // current_thread->SetRedundantSchedulerOperation();
+        current_thread->IncrementYieldCount();
         return true;
     } else {
         reselection_pending.store(true, std::memory_order_release);
 
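The change above makes a no-op yield observable: when re-selection would pick the same thread again, the yield is tallied on the thread and no reselection is requested. A minimal, self-contained model of that contract follows, with FakeThread and the free function as stand-ins for Kernel::Thread and the GlobalScheduler member.

// Sketch of the redundant-yield contract; simplified stand-in types.
#include <atomic>
#include <cassert>
#include <cstdint>

struct FakeThread {
    std::uint64_t yield_count = 0;
    void IncrementYieldCount() { ++yield_count; }
};

std::atomic<bool> reselection_pending{false};

bool AskForReselectionOrMarkRedundant(FakeThread* current_thread, FakeThread* winner) {
    if (current_thread == winner) {
        // The yield changed nothing: count it instead of rescheduling.
        current_thread->IncrementYieldCount();
        return true;
    }
    reselection_pending.store(true, std::memory_order_release);
    return false;
}

int main() {
    FakeThread a, b;
    assert(AskForReselectionOrMarkRedundant(&a, &a));  // same winner: redundant
    assert(a.yield_count == 1);
    assert(!AskForReselectionOrMarkRedundant(&a, &b)); // new winner: reselect
    assert(reselection_pending.load(std::memory_order_acquire));
    return 0;
}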
src/core/hle/kernel/thread.h:

@@ -416,6 +416,14 @@ public:
     /// Yields this thread and if the core is left idle, loads are rebalanced
     bool YieldAndWaitForLoadBalancing();
 
+    void IncrementYieldCount() {
+        yield_count++;
+    }
+
+    u64 GetYieldCount() const {
+        return yield_count;
+    }
+
     ThreadSchedStatus GetSchedulingStatus() const {
         return static_cast<ThreadSchedStatus>(scheduling_state & ThreadSchedMasks::LowMask);
     }
@@ -460,6 +468,7 @@ private:
 
     u64 total_cpu_time_ticks = 0; ///< Total CPU running ticks.
     u64 last_running_ticks = 0;   ///< CPU tick when thread was last running
+    u64 yield_count = 0;          ///< Number of unnecessary yields that occurred.
 
     s32 processor_id = 0;
 