mirror of https://git.suyu.dev/suyu/suyu
Revert all the trash commits that were breaking build, back to e5c47e911b

This reverts commit 592f93b26c.

src/core/core_timing.cpp

@@ -26,6 +26,24 @@ std::shared_ptr<EventType> CreateEvent(std::string name, TimedCallback&& callback)
     return std::make_shared<EventType>(std::move(callback), std::move(name));
 }
 
+struct CoreTiming::Event {
+    s64 time;
+    u64 fifo_order;
+    std::weak_ptr<EventType> type;
+    s64 reschedule_time;
+    heap_t::handle_type handle{};
+
+    // Sort by time, unless the times are the same, in which case sort by
+    // the order added to the queue
+    friend bool operator>(const Event& left, const Event& right) {
+        return std::tie(left.time, left.fifo_order) > std::tie(right.time, right.fifo_order);
+    }
+
+    friend bool operator<(const Event& left, const Event& right) {
+        return std::tie(left.time, left.fifo_order) < std::tie(right.time, right.fifo_order);
+    }
+};
+
 CoreTiming::CoreTiming() : clock{Common::CreateOptimalClock()} {}
 
 CoreTiming::~CoreTiming() {
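
For reference, the restored comparator plus boost::heap::compare<std::greater<>> is what turns Boost's default max-heap into a min-heap keyed on (time, fifo_order), so events due at the same timestamp fire in insertion order. A minimal, standalone sketch of that behaviour (illustrative names, not suyu code):

```cpp
#include <cstdint>
#include <iostream>
#include <tuple>

#include <boost/heap/fibonacci_heap.hpp>

struct Item {
    std::int64_t time;
    std::uint64_t fifo_order;

    // With boost::heap::compare<std::greater<>>, this operator> makes the heap
    // behave as a min-heap: earliest time on top, fifo_order breaking ties.
    friend bool operator>(const Item& l, const Item& r) {
        return std::tie(l.time, l.fifo_order) > std::tie(r.time, r.fifo_order);
    }
};

using heap_t = boost::heap::fibonacci_heap<Item, boost::heap::compare<std::greater<>>>;

int main() {
    heap_t queue;
    std::uint64_t fifo_id = 0;

    // push() returns a handle that stays valid until the element is erased,
    // which is what CoreTiming stores back into Event::handle.
    auto h1 = queue.push(Item{100, fifo_id++});
    auto h2 = queue.push(Item{100, fifo_id++});
    auto h3 = queue.push(Item{50, fifo_id++});
    (void)h1;
    (void)h2;

    queue.erase(h3); // remove an arbitrary element via its handle

    // Both remaining items share time 100, so the lower fifo_order wins: prints 0.
    std::cout << queue.top().fifo_order << '\n';
}
```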
@@ -69,7 +87,7 @@ void CoreTiming::Pause(bool is_paused) {
 }
 
 void CoreTiming::SyncPause(bool is_paused) {
-    if (is_paused == paused && paused_set == is_paused) {
+    if (is_paused == paused && paused_set == paused) {
         return;
     }
 
@@ -94,7 +112,7 @@ bool CoreTiming::IsRunning() const {
 
 bool CoreTiming::HasPendingEvents() const {
     std::scoped_lock lock{basic_lock};
-    return !event_queue.empty();
+    return !(wait_set && event_queue.empty());
 }
 
 void CoreTiming::ScheduleEvent(std::chrono::nanoseconds ns_into_future,
@@ -103,8 +121,8 @@ void CoreTiming::ScheduleEvent(std::chrono::nanoseconds ns_into_future,
         std::scoped_lock scope{basic_lock};
         const auto next_time{absolute_time ? ns_into_future : GetGlobalTimeNs() + ns_into_future};
 
-        event_queue.emplace_back(Event{next_time.count(), event_fifo_id++, event_type});
-        std::push_heap(event_queue.begin(), event_queue.end(), std::greater<>());
+        auto h{event_queue.emplace(Event{next_time.count(), event_fifo_id++, event_type, 0})};
+        (*h).handle = h;
     }
 
     event.Set();
@@ -118,9 +136,9 @@ void CoreTiming::ScheduleLoopingEvent(std::chrono::nanoseconds start_time,
         std::scoped_lock scope{basic_lock};
         const auto next_time{absolute_time ? start_time : GetGlobalTimeNs() + start_time};
 
-        event_queue.emplace_back(
-            Event{next_time.count(), event_fifo_id++, event_type, resched_time.count()});
-        std::push_heap(event_queue.begin(), event_queue.end(), std::greater<>());
+        auto h{event_queue.emplace(
+            Event{next_time.count(), event_fifo_id++, event_type, resched_time.count()})};
+        (*h).handle = h;
     }
 
     event.Set();
@@ -131,11 +149,17 @@ void CoreTiming::UnscheduleEvent(const std::shared_ptr<EventType>& event_type,
     {
         std::scoped_lock lk{basic_lock};
 
-        event_queue.erase(
-            std::remove_if(event_queue.begin(), event_queue.end(),
-                           [&](const Event& e) { return e.type.lock().get() == event_type.get(); }),
-            event_queue.end());
-        std::make_heap(event_queue.begin(), event_queue.end(), std::greater<>());
+        std::vector<heap_t::handle_type> to_remove;
+        for (auto itr = event_queue.begin(); itr != event_queue.end(); itr++) {
+            const Event& e = *itr;
+            if (e.type.lock().get() == event_type.get()) {
+                to_remove.push_back(itr->handle);
+            }
+        }
+
+        for (auto& h : to_remove) {
+            event_queue.erase(h);
+        }
 
         event_type->sequence_number++;
     }
@@ -148,7 +172,7 @@ void CoreTiming::UnscheduleEvent(const std::shared_ptr<EventType>& event_type,
 
 void CoreTiming::AddTicks(u64 ticks_to_add) {
     cpu_ticks += ticks_to_add;
-    downcount -= static_cast<s64>(ticks_to_add);
+    downcount -= static_cast<s64>(cpu_ticks);
 }
 
 void CoreTiming::Idle() {
@@ -156,7 +180,7 @@ void CoreTiming::Idle() {
 }
 
 void CoreTiming::ResetTicks() {
-    downcount.store(MAX_SLICE_LENGTH, std::memory_order_release);
+    downcount = MAX_SLICE_LENGTH;
 }
 
 u64 CoreTiming::GetClockTicks() const {
@@ -177,38 +201,48 @@ std::optional<s64> CoreTiming::Advance() {
     std::scoped_lock lock{advance_lock, basic_lock};
     global_timer = GetGlobalTimeNs().count();
 
-    while (!event_queue.empty() && event_queue.front().time <= global_timer) {
-        Event evt = std::move(event_queue.front());
-        std::pop_heap(event_queue.begin(), event_queue.end(), std::greater<>());
-        event_queue.pop_back();
+    while (!event_queue.empty() && event_queue.top().time <= global_timer) {
+        const Event& evt = event_queue.top();
 
-        if (const auto event_type = evt.type.lock()) {
+        if (const auto event_type{evt.type.lock()}) {
             const auto evt_time = evt.time;
             const auto evt_sequence_num = event_type->sequence_number;
 
-            basic_lock.unlock();
+            if (evt.reschedule_time == 0) {
+                event_queue.pop();
 
-            const auto new_schedule_time = event_type->callback(
-                evt_time, std::chrono::nanoseconds{GetGlobalTimeNs().count() - evt_time});
+                basic_lock.unlock();
 
-            basic_lock.lock();
+                event_type->callback(
+                    evt_time, std::chrono::nanoseconds{GetGlobalTimeNs().count() - evt_time});
 
-            if (evt_sequence_num != event_type->sequence_number) {
-                continue;
-            }
+                basic_lock.lock();
+            } else {
+                basic_lock.unlock();
 
-            if (new_schedule_time.has_value() || evt.reschedule_time != 0) {
-                const auto next_schedule_time = new_schedule_time.value_or(
-                    std::chrono::nanoseconds{evt.reschedule_time});
+                const auto new_schedule_time{event_type->callback(
+                    evt_time, std::chrono::nanoseconds{GetGlobalTimeNs().count() - evt_time})};
 
-                auto next_time = evt.time + next_schedule_time.count();
-                if (evt.time < pause_end_time) {
-                    next_time = pause_end_time + next_schedule_time.count();
+                basic_lock.lock();
+
+                if (evt_sequence_num != event_type->sequence_number) {
+                    // Heap handle is invalidated after external modification.
+                    continue;
                 }
 
-                event_queue.emplace_back(Event{next_time, event_fifo_id++, evt.type,
-                                               next_schedule_time.count()});
-                std::push_heap(event_queue.begin(), event_queue.end(), std::greater<>());
+                const auto next_schedule_time{new_schedule_time.has_value()
+                                                  ? new_schedule_time.value().count()
+                                                  : evt.reschedule_time};
+
+                // If this event was scheduled into a pause, its time now is going to be way
+                // behind. Re-set this event to continue from the end of the pause.
+                auto next_time{evt.time + next_schedule_time};
+                if (evt.time < pause_end_time) {
+                    next_time = pause_end_time + next_schedule_time;
+                }
+
+                event_queue.update(evt.handle, Event{next_time, event_fifo_id++, evt.type,
+                                                     next_schedule_time, evt.handle});
             }
         }
 
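
The restored Advance() releases basic_lock around the callback, so UnscheduleEvent can run concurrently and bump event_type->sequence_number; re-checking that number after re-locking is what prevents a just-unscheduled event from being rescheduled through a now-stale heap handle. A condensed sketch of that guard, with illustrative names and types:

```cpp
#include <cstdint>
#include <mutex>
#include <optional>

// Illustrative stand-ins for EventType and its callback.
struct EventTypeLike {
    std::uint64_t sequence_number = 0;                      // bumped by unschedule
    std::optional<std::int64_t> (*callback)(std::int64_t);  // returns a new period, or nullopt
};

// Run one due event without holding the lock across the callback, and only
// reschedule if nothing unscheduled this event type in the meantime.
std::optional<std::int64_t> RunOne(std::mutex& basic_lock, EventTypeLike& type, std::int64_t now) {
    std::unique_lock lk{basic_lock};
    const auto sequence_before = type.sequence_number;

    lk.unlock();                           // the callback may itself (un)schedule events
    const auto next = type.callback(now);
    lk.lock();

    if (sequence_before != type.sequence_number) {
        // Unscheduled while the callback ran; the heap handle may be stale, so drop it.
        return std::nullopt;
    }
    return next;  // safe to reschedule via the still-valid handle
}
```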
@@ -216,7 +250,7 @@ std::optional<s64> CoreTiming::Advance() {
     }
 
     if (!event_queue.empty()) {
-        return event_queue.front().time;
+        return event_queue.top().time;
     } else {
         return std::nullopt;
     }
@@ -235,7 +269,7 @@ void CoreTiming::ThreadLoop() {
 #ifdef _WIN32
                 while (!paused && !event.IsSet() && wait_time > 0) {
                     wait_time = *next_time - GetGlobalTimeNs().count();
-                    if (wait_time >= 1'000'000) { // 1ms
+                    if (wait_time >= timer_resolution_ns) {
                         Common::Windows::SleepForOneTick();
                     } else {
 #ifdef ARCHITECTURE_x86_64
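
The revert also restores the timer_resolution_ns comparison in the Windows wait loop (instead of a hardcoded 1 ms): sleep in scheduler ticks while the remaining slack exceeds the measured resolution, and only spin for the final stretch. A rough, platform-neutral sketch of that hybrid wait, using std::this_thread purely for illustration (the real code uses Common::Windows::SleepForOneTick and an x86 pause loop):

```cpp
#include <chrono>
#include <thread>

// Wait until `deadline`, sleeping while the remaining time exceeds the OS
// timer resolution and yielding/spinning for the last, sub-resolution stretch.
void HybridWaitUntil(std::chrono::steady_clock::time_point deadline,
                     std::chrono::nanoseconds timer_resolution) {
    while (true) {
        const auto remaining = deadline - std::chrono::steady_clock::now();
        if (remaining <= std::chrono::nanoseconds{0}) {
            return;
        }
        if (remaining >= timer_resolution) {
            // Coarse wait: roughly one scheduler tick at a time.
            std::this_thread::sleep_for(timer_resolution);
        } else {
            // Fine wait: avoid oversleeping past the deadline.
            std::this_thread::yield();
        }
    }
}
```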
@@ -256,8 +290,10 @@ void CoreTiming::ThreadLoop() {
             } else {
                 // Queue is empty, wait until another event is scheduled and signals us to
                 // continue.
+                wait_set = true;
                 event.Wait();
             }
+            wait_set = false;
         }
 
         paused_set = true;
@@ -291,4 +327,10 @@ std::chrono::microseconds CoreTiming::GetGlobalTimeUs() const {
     return std::chrono::microseconds{Common::WallClock::CPUTickToUS(cpu_ticks)};
 }
 
+#ifdef _WIN32
+void CoreTiming::SetTimerResolutionNs(std::chrono::nanoseconds ns) {
+    timer_resolution_ns = ns.count();
+}
+#endif
+
 } // namespace Core::Timing

src/core/core_timing.h

@@ -11,7 +11,8 @@
 #include <optional>
 #include <string>
 #include <thread>
-#include <vector>
+
+#include <boost/heap/fibonacci_heap.hpp>
 
 #include "common/common_types.h"
 #include "common/thread.h"
@@ -42,6 +43,18 @@ enum class UnscheduleEventType {
     NoWait,
 };
 
+/**
+ * This is a system to schedule events into the emulated machine's future. Time is measured
+ * in main CPU clock cycles.
+ *
+ * To schedule an event, you first have to register its type. This is where you pass in the
+ * callback. You then schedule events using the type ID you get back.
+ *
+ * The s64 ns_late that the callbacks get is how many ns late it was.
+ * So to schedule a new event on a regular basis:
+ * inside callback:
+ *     ScheduleEvent(period_in_ns - ns_late, callback, "whatever")
+ */
 class CoreTiming {
 public:
     CoreTiming();
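
The restored block comment above describes the intended usage pattern. A hedged sketch of that flow, assuming a Core::Timing::CoreTiming& is in scope; the event name, period, and include path are illustrative, and the callback signature is inferred from the Advance() code earlier in this diff:

```cpp
#include <chrono>
#include <memory>
#include <optional>

#include "core/core_timing.h"

// The queue holds only a weak_ptr to the event type, so the caller must keep
// the returned shared_ptr alive for as long as the event should keep firing.
std::shared_ptr<Core::Timing::EventType> RegisterExampleTick(Core::Timing::CoreTiming& core_timing) {
    // Register the event type once; the callback receives the scheduled time and
    // how late it fired, and may return a new interval to reschedule with.
    auto tick_event = Core::Timing::CreateEvent(
        "ExampleTick",
        [](s64 time, std::chrono::nanoseconds ns_late) -> std::optional<std::chrono::nanoseconds> {
            // Periodic work goes here; returning std::nullopt keeps the interval
            // passed to ScheduleLoopingEvent below.
            return std::nullopt;
        });

    // Fire roughly every 60th of a second, starting one period from now, and
    // keep rescheduling until UnscheduleEvent(tick_event) is called.
    constexpr auto period = std::chrono::nanoseconds{16'666'667};
    core_timing.ScheduleLoopingEvent(period, period, tick_event);
    return tick_event;
}
```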
@@ -53,56 +66,99 @@ public:
     CoreTiming& operator=(const CoreTiming&) = delete;
     CoreTiming& operator=(CoreTiming&&) = delete;
 
     /// CoreTiming begins at the boundary of timing slice -1. An initial call to Advance() is
     /// required to end slice -1 and start slice 0 before the first cycle of code is executed.
     void Initialize(std::function<void()>&& on_thread_init_);
 
     /// Clear all pending events. This should ONLY be done on exit.
     void ClearPendingEvents();
 
     /// Sets if emulation is multicore or single core, must be set before Initialize
     void SetMulticore(bool is_multicore_) {
         is_multicore = is_multicore_;
     }
 
     /// Pauses/Unpauses the execution of the timer thread.
     void Pause(bool is_paused);
 
     /// Pauses/Unpauses the execution of the timer thread and waits until paused.
     void SyncPause(bool is_paused);
 
     /// Checks if core timing is running.
     bool IsRunning() const;
 
     /// Checks if the timer thread has started.
     bool HasStarted() const {
         return has_started;
     }
 
     /// Checks if there are any pending time events.
     bool HasPendingEvents() const;
 
     /// Schedules an event in core timing
     void ScheduleEvent(std::chrono::nanoseconds ns_into_future,
                        const std::shared_ptr<EventType>& event_type, bool absolute_time = false);
 
     /// Schedules an event which will automatically re-schedule itself with the given time, until
     /// unscheduled
     void ScheduleLoopingEvent(std::chrono::nanoseconds start_time,
                               std::chrono::nanoseconds resched_time,
                               const std::shared_ptr<EventType>& event_type,
                               bool absolute_time = false);
 
     void UnscheduleEvent(const std::shared_ptr<EventType>& event_type,
                          UnscheduleEventType type = UnscheduleEventType::Wait);
 
     void AddTicks(u64 ticks_to_add);
 
     void ResetTicks();
 
     void Idle();
 
     s64 GetDowncount() const {
-        return downcount.load(std::memory_order_relaxed);
+        return downcount;
     }
 
     /// Returns the current CNTPCT tick value.
     u64 GetClockTicks() const;
 
     /// Returns the current GPU tick value.
     u64 GetGPUTicks() const;
 
     /// Returns current time in microseconds.
     std::chrono::microseconds GetGlobalTimeUs() const;
 
     /// Returns current time in nanoseconds.
     std::chrono::nanoseconds GetGlobalTimeNs() const;
 
     /// Checks for events manually and returns time in nanoseconds for next event, threadsafe.
     std::optional<s64> Advance();
 
+#ifdef _WIN32
+    void SetTimerResolutionNs(std::chrono::nanoseconds ns);
+#endif
+
 private:
-    struct Event {
-        s64 time;
-        u64 fifo_order;
-        std::shared_ptr<EventType> type;
-        bool operator>(const Event& other) const {
-            return std::tie(time, fifo_order) > std::tie(other.time, other.fifo_order);
-        }
-    };
+    struct Event;
 
     static void ThreadEntry(CoreTiming& instance);
     void ThreadLoop();
 
     void Reset();
 
     std::unique_ptr<Common::WallClock> clock;
-    std::atomic<s64> global_timer{0};
-    std::vector<Event> event_queue;
-    std::atomic<u64> event_fifo_id{0};
+
+    s64 global_timer = 0;
+
+#ifdef _WIN32
+    s64 timer_resolution_ns;
+#endif
+
+    using heap_t =
+        boost::heap::fibonacci_heap<CoreTiming::Event, boost::heap::compare<std::greater<>>>;
+
+    heap_t event_queue;
+    u64 event_fifo_id = 0;
 
     Common::Event event{};
     Common::Event pause_event{};
@@ -117,12 +173,20 @@ private:
     std::function<void()> on_thread_init{};
 
     bool is_multicore{};
-    std::atomic<s64> pause_end_time{};
+    s64 pause_end_time{};
 
-    std::atomic<u64> cpu_ticks{};
-    std::atomic<s64> downcount{};
+    /// Cycle timing
+    u64 cpu_ticks{};
+    s64 downcount{};
 };
 
+/// Creates a core timing event with the given name and callback.
+///
+/// @param name     The name of the core timing event to create.
+/// @param callback The callback to execute for the event.
+///
+/// @returns An EventType instance representing the created event.
+///
 std::shared_ptr<EventType> CreateEvent(std::string name, TimedCallback&& callback);
 
 } // namespace Core::Timing

src/core/cpu_manager.cpp

@@ -1,12 +1,6 @@
 // SPDX-FileCopyrightText: Copyright 2018 yuzu Emulator Project
 // SPDX-License-Identifier: GPL-2.0-or-later
 
-#include <algorithm>
-#include <atomic>
-#include <memory>
-#include <thread>
-#include <vector>
-
 #include "common/fiber.h"
 #include "common/microprofile.h"
 #include "common/scope_exit.h"
@@ -30,7 +24,6 @@ void CpuManager::Initialize() {
     num_cores = is_multicore ? Core::Hardware::NUM_CPU_CORES : 1;
     gpu_barrier = std::make_unique<Common::Barrier>(num_cores + 1);
 
-    core_data.resize(num_cores);
     for (std::size_t core = 0; core < num_cores; core++) {
         core_data[core].host_thread =
             std::jthread([this, core](std::stop_token token) { RunThread(token, core); });
@@ -38,10 +31,10 @@ void CpuManager::Initialize() {
 }
 
 void CpuManager::Shutdown() {
-    for (auto& data : core_data) {
-        if (data.host_thread.joinable()) {
-            data.host_thread.request_stop();
-            data.host_thread.join();
+    for (std::size_t core = 0; core < num_cores; core++) {
+        if (core_data[core].host_thread.joinable()) {
+            core_data[core].host_thread.request_stop();
+            core_data[core].host_thread.join();
         }
     }
 }
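
The restored Shutdown() walks core_data by index and stops each std::jthread explicitly. The request_stop/join pattern itself is standard C++20; a minimal self-contained illustration:

```cpp
#include <thread>
#include <vector>

int main() {
    std::vector<std::jthread> workers;
    for (int i = 0; i < 4; ++i) {
        workers.emplace_back([](std::stop_token token) {
            while (!token.stop_requested()) {
                // do work until asked to stop
            }
        });
    }

    for (auto& worker : workers) {
        if (worker.joinable()) {
            worker.request_stop(); // cooperative cancellation
            worker.join();         // wait for the thread to exit
        }
    }
}
```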
@@ -73,7 +66,12 @@ void CpuManager::HandleInterrupt() {
     Kernel::KInterruptManager::HandleInterrupt(kernel, static_cast<s32>(core_index));
 }
 
+///////////////////////////////////////////////////////////////////////////////
+/// MultiCore                                                               ///
+///////////////////////////////////////////////////////////////////////////////
+
 void CpuManager::MultiCoreRunGuestThread() {
+    // Similar to UserModeThreadStarter in HOS
     auto& kernel = system.Kernel();
     auto* thread = Kernel::GetCurrentThreadPointer(kernel);
     kernel.CurrentScheduler()->OnThreadStart();
@@ -90,6 +88,10 @@ void CpuManager::MultiCoreRunGuestThread() {
 }
 
 void CpuManager::MultiCoreRunIdleThread() {
+    // Not accurate to HOS. Remove this entire method when singlecore is removed.
+    // See notes in KScheduler::ScheduleImpl for more information about why this
+    // is inaccurate.
+
     auto& kernel = system.Kernel();
     kernel.CurrentScheduler()->OnThreadStart();
 
@@ -103,6 +105,10 @@ void CpuManager::MultiCoreRunIdleThread() {
     }
 }
 
+///////////////////////////////////////////////////////////////////////////////
+/// SingleCore                                                              ///
+///////////////////////////////////////////////////////////////////////////////
+
 void CpuManager::SingleCoreRunGuestThread() {
     auto& kernel = system.Kernel();
     auto* thread = Kernel::GetCurrentThreadPointer(kernel);
@@ -148,16 +154,19 @@ void CpuManager::PreemptSingleCore(bool from_running_environment) {
         system.CoreTiming().Advance();
         kernel.SetIsPhantomModeForSingleCore(false);
     }
-    current_core.store((current_core + 1) % Core::Hardware::NUM_CPU_CORES, std::memory_order_release);
+    current_core.store((current_core + 1) % Core::Hardware::NUM_CPU_CORES);
     system.CoreTiming().ResetTicks();
     kernel.Scheduler(current_core).PreemptSingleCore();
 
+    // We've now been scheduled again, and we may have exchanged schedulers.
+    // Reload the scheduler in case it's different.
     if (!kernel.Scheduler(current_core).IsIdle()) {
         idle_count = 0;
     }
 }
 
 void CpuManager::GuestActivate() {
     // Similar to the HorizonKernelMain callback in HOS
     auto& kernel = system.Kernel();
     auto* scheduler = kernel.CurrentScheduler();
 
@@ -175,19 +184,27 @@ void CpuManager::ShutdownThread() {
 }
 
 void CpuManager::RunThread(std::stop_token token, std::size_t core) {
+    /// Initialization
     system.RegisterCoreThread(core);
-    std::string name = is_multicore ? "CPUCore_" + std::to_string(core) : "CPUThread";
+    std::string name;
+    if (is_multicore) {
+        name = "CPUCore_" + std::to_string(core);
+    } else {
+        name = "CPUThread";
+    }
     MicroProfileOnThreadCreate(name.c_str());
     Common::SetCurrentThreadName(name.c_str());
     Common::SetCurrentThreadPriority(Common::ThreadPriority::Critical);
     auto& data = core_data[core];
     data.host_context = Common::Fiber::ThreadToFiber();
 
+    // Cleanup
     SCOPE_EXIT {
         data.host_context->Exit();
         MicroProfileOnThreadExit();
    };
 
+    // Running
     if (!gpu_barrier->Sync(token)) {
         return;
     }
 
File diff suppressed because it is too large