diff --git a/src/core/hle/kernel/hle_ipc.h b/src/core/hle/kernel/hle_ipc.h
index 55e6fb9f7..754b41ff6 100644
--- a/src/core/hle/kernel/hle_ipc.h
+++ b/src/core/hle/kernel/hle_ipc.h
@@ -341,10 +341,6 @@ public:
         return *thread;
     }
 
-    bool IsThreadWaiting() const {
-        return is_thread_waiting;
-    }
-
 private:
     friend class IPC::ResponseBuilder;
 
@@ -379,7 +375,6 @@ private:
     u32 domain_offset{};
 
     std::shared_ptr<SessionRequestManager> manager;
-    bool is_thread_waiting{};
 
     KernelCore& kernel;
     Core::Memory::Memory& memory;
diff --git a/src/core/hle/kernel/k_priority_queue.h b/src/core/hle/kernel/k_priority_queue.h
index f4d71ad7e..0b894c8cf 100644
--- a/src/core/hle/kernel/k_priority_queue.h
+++ b/src/core/hle/kernel/k_priority_queue.h
@@ -45,6 +45,7 @@ concept KPriorityQueueMember = !std::is_reference_v<T> && requires(T & t) {
 
     { t.GetActiveCore() } -> Common::ConvertibleTo<s32>;
     { t.GetPriority() } -> Common::ConvertibleTo<s32>;
+    { t.IsDummyThread() } -> Common::ConvertibleTo<bool>;
 };
 
 template <typename Member, size_t NumCores_, int LowestPriority, int HighestPriority>
@@ -349,24 +350,49 @@ public:
 
     // Mutators.
     constexpr void PushBack(Member* member) {
+        // Do not let host (dummy) threads enter the priority queue.
+        if (member->IsDummyThread()) {
+            return;
+        }
+
         this->PushBack(member->GetPriority(), member);
     }
 
     constexpr void Remove(Member* member) {
+        // Do not let host (dummy) threads enter the priority queue.
+        if (member->IsDummyThread()) {
+            return;
+        }
+
         this->Remove(member->GetPriority(), member);
     }
 
     constexpr void MoveToScheduledFront(Member* member) {
+        // Do not let host (dummy) threads enter the priority queue.
+        if (member->IsDummyThread()) {
+            return;
+        }
+
         this->scheduled_queue.MoveToFront(member->GetPriority(), member->GetActiveCore(), member);
     }
 
     constexpr KThread* MoveToScheduledBack(Member* member) {
+        // Do not let host (dummy) threads enter the priority queue.
+        if (member->IsDummyThread()) {
+            return {};
+        }
+
         return this->scheduled_queue.MoveToBack(member->GetPriority(), member->GetActiveCore(),
                                                 member);
     }
 
     // First class fancy operations.
     constexpr void ChangePriority(s32 prev_priority, bool is_running, Member* member) {
+        // Do not let host (dummy) threads enter the priority queue.
+        if (member->IsDummyThread()) {
+            return;
+        }
+
         ASSERT(IsValidPriority(prev_priority));
 
         // Remove the member from the queues.
@@ -383,6 +409,11 @@ public:
 
     constexpr void ChangeAffinityMask(s32 prev_core, const AffinityMaskType& prev_affinity,
                                       Member* member) {
+        // Do not let host (dummy) threads enter the priority queue.
+        if (member->IsDummyThread()) {
+            return;
+        }
+
         // Get the new information.
         const s32 priority = member->GetPriority();
         const AffinityMaskType& new_affinity = member->GetAffinityMask();
@@ -412,6 +443,11 @@ public:
     }
 
     constexpr void ChangeCore(s32 prev_core, Member* member, bool to_front = false) {
+        // Do not let host (dummy) threads enter the priority queue.
+        if (member->IsDummyThread()) {
+            return;
+        }
+
         // Get the new information.
         const s32 new_core = member->GetActiveCore();
         const s32 priority = member->GetPriority();
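A side note on the concept change above: every Member type must now expose IsDummyThread(), and each mutator early-outs for dummy members. Below is a minimal compilable sketch of the same pattern, using standard-library concepts and hypothetical QueueMember/FakeThread names in place of KPriorityQueueMember/KThread:

```cpp
#include <concepts>
#include <cstdint>

template <typename T>
concept QueueMember = requires(T& t) {
    { t.GetPriority() } -> std::convertible_to<std::int32_t>;
    { t.IsDummyThread() } -> std::convertible_to<bool>;
};

// Hypothetical stand-in for KThread.
struct FakeThread {
    std::int32_t priority{44};
    bool is_dummy{false};

    std::int32_t GetPriority() const { return priority; }
    bool IsDummyThread() const { return is_dummy; }
};
static_assert(QueueMember<FakeThread>);

template <QueueMember Member>
void PushBack(Member* member) {
    // Mirror of the guards above: dummy (host) threads are silently
    // ignored, so the guest scheduler can never select them.
    if (member->IsDummyThread()) {
        return;
    }
    // ... insert into the per-priority queue here ...
}
```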
diff --git a/src/core/hle/kernel/k_scheduler.cpp b/src/core/hle/kernel/k_scheduler.cpp
index f900b2e7a..b32d4f285 100644
--- a/src/core/hle/kernel/k_scheduler.cpp
+++ b/src/core/hle/kernel/k_scheduler.cpp
@@ -406,6 +406,9 @@ void KScheduler::EnableScheduling(KernelCore& kernel, u64 cores_needing_scheduli
     } else {
         RescheduleCores(kernel, cores_needing_scheduling);
     }
+
+    // Special case to ensure that dummy threads which are waiting actually block.
+    current_thread->IfDummyThreadTryWait();
 }
 
 u64 KScheduler::UpdateHighestPriorityThreads(KernelCore& kernel) {
@@ -739,6 +742,12 @@ void KScheduler::ScheduleImpl() {
         next_thread = idle_thread;
     }
 
+    // We never want to schedule a dummy thread, as they are only used by host threads for locking.
+    if (next_thread->GetThreadType() == ThreadType::Dummy) {
+        ASSERT_MSG(false, "Dummy threads should never be scheduled!");
+        next_thread = idle_thread;
+    }
+
     // If we're not actually switching thread, there's nothing to do.
     if (next_thread == current_thread.load()) {
         previous_thread->EnableDispatch();
diff --git a/src/core/hle/kernel/k_server_session.cpp b/src/core/hle/kernel/k_server_session.cpp
index d4e4a6b06..4d94eb9cf 100644
--- a/src/core/hle/kernel/k_server_session.cpp
+++ b/src/core/hle/kernel/k_server_session.cpp
@@ -8,7 +8,6 @@
 #include "common/assert.h"
 #include "common/common_types.h"
 #include "common/logging/log.h"
-#include "common/scope_exit.h"
 #include "core/core_timing.h"
 #include "core/hle/ipc_helpers.h"
 #include "core/hle/kernel/hle_ipc.h"
@@ -123,20 +122,10 @@ ResultCode KServerSession::QueueSyncRequest(KThread* thread, Core::Memory::Memor
 
     context->PopulateFromIncomingCommandBuffer(kernel.CurrentProcess()->GetHandleTable(), cmd_buf);
 
-    // In the event that something fails here, stub a result to prevent the game from crashing.
-    // This is a work-around in the event that somehow we process a service request after the
-    // session has been closed by the game. This has been observed to happen rarely in Pokemon
-    // Sword/Shield and is likely a result of us using host threads/scheduling for services.
-    // TODO(bunnei): Find a better solution here.
-    auto error_guard = SCOPE_GUARD({ CompleteSyncRequest(*context); });
-
     // Ensure we have a session request handler
     if (manager->HasSessionRequestHandler(*context)) {
         if (auto strong_ptr = manager->GetServiceThread().lock()) {
             strong_ptr->QueueSyncRequest(*parent, std::move(context));
-
-            // We succeeded.
-            error_guard.Cancel();
         } else {
             ASSERT_MSG(false, "strong_ptr is nullptr!");
         }
@@ -171,13 +160,8 @@ ResultCode KServerSession::CompleteSyncRequest(HLERequestContext& context) {
         convert_to_domain = false;
     }
 
-    // Some service requests require the thread to block
-    {
-        KScopedSchedulerLock lock(kernel);
-        if (!context.IsThreadWaiting()) {
-            context.GetThread().EndWait(result);
-        }
-    }
+    // The calling thread is waiting for this request to complete, so wake it up.
+    context.GetThread().EndWait(result);
 
     return result;
 }
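Dropping the SCOPE_GUARD and the is_thread_waiting check is sound because the calling thread is now always waiting by the time CompleteSyncRequest runs. A minimal sketch of the simplified lifecycle, with a hypothetical FakeThread in place of KThread:

```cpp
// Hypothetical stand-in for the waiting calling thread.
struct FakeThread {
    bool waiting{false};
    void BeginWait() { waiting = true; }
    void EndWait(int /*result*/) { waiting = false; }
};

int QueueSyncRequest(FakeThread& caller) {
    caller.BeginWait(); // the caller blocks until the service replies
    // ... hand the request off to the service thread ...
    return 0;
}

int CompleteSyncRequest(FakeThread& caller, int result) {
    // The caller is always waiting at this point, so the wake-up is
    // unconditional; no is_thread_waiting flag or error guard is needed.
    caller.EndWait(result);
    return result;
}
```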
diff --git a/src/core/hle/kernel/k_thread.cpp b/src/core/hle/kernel/k_thread.cpp
index 7a5e6fc08..f42abb8a1 100644
--- a/src/core/hle/kernel/k_thread.cpp
+++ b/src/core/hle/kernel/k_thread.cpp
@@ -106,7 +106,7 @@ KThread::~KThread() = default;
 ResultCode KThread::Initialize(KThreadFunction func, uintptr_t arg, VAddr user_stack_top, s32 prio,
                                s32 virt_core, KProcess* owner, ThreadType type) {
     // Assert parameters are valid.
-    ASSERT((type == ThreadType::Main) ||
+    ASSERT((type == ThreadType::Main) || (type == ThreadType::Dummy) ||
            (Svc::HighestThreadPriority <= prio && prio <= Svc::LowestThreadPriority));
     ASSERT((owner != nullptr) || (type != ThreadType::User));
     ASSERT(0 <= virt_core && virt_core < static_cast<s32>(Common::BitSize<u64>()));
@@ -140,7 +140,7 @@ ResultCode KThread::Initialize(KThreadFunction func, uintptr_t arg, VAddr user_s
         UNREACHABLE_MSG("KThread::Initialize: Unknown ThreadType {}", static_cast<u32>(type));
         break;
     }
-    thread_type_for_debugging = type;
+    thread_type = type;
 
     // Set the ideal core ID and affinity mask.
     virtual_ideal_core_id = virt_core;
@@ -262,7 +262,7 @@ ResultCode KThread::InitializeThread(KThread* thread, KThreadFunction func, uint
 }
 
 ResultCode KThread::InitializeDummyThread(KThread* thread) {
-    return thread->Initialize({}, {}, {}, DefaultThreadPriority, 3, {}, ThreadType::Dummy);
+    return thread->Initialize({}, {}, {}, DummyThreadPriority, 3, {}, ThreadType::Dummy);
 }
 
 ResultCode KThread::InitializeIdleThread(Core::System& system, KThread* thread, s32 virt_core) {
@@ -1075,12 +1075,46 @@ ResultCode KThread::Sleep(s64 timeout) {
     return ResultSuccess;
 }
 
+void KThread::IfDummyThreadTryWait() {
+    if (!IsDummyThread()) {
+        return;
+    }
+
+    if (GetState() != ThreadState::Waiting) {
+        return;
+    }
+
+    // Block until we can grab the lock.
+    KScopedSpinLock lk{dummy_wait_lock};
+}
+
+void KThread::IfDummyThreadBeginWait() {
+    if (!IsDummyThread()) {
+        return;
+    }
+
+    // Ensure the thread will block when IfDummyThreadTryWait is called.
+    dummy_wait_lock.Lock();
+}
+
+void KThread::IfDummyThreadEndWait() {
+    if (!IsDummyThread()) {
+        return;
+    }
+
+    // Ensure the thread will no longer block.
+    dummy_wait_lock.Unlock();
+}
+
 void KThread::BeginWait(KThreadQueue* queue) {
     // Set our state as waiting.
     SetState(ThreadState::Waiting);
 
     // Set our wait queue.
     wait_queue = queue;
+
+    // Special case for dummy threads to ensure they block.
+    IfDummyThreadBeginWait();
 }
 
 void KThread::NotifyAvailable(KSynchronizationObject* signaled_object, ResultCode wait_result_) {
@@ -1099,7 +1133,16 @@ void KThread::EndWait(ResultCode wait_result_) {
 
     // If we're waiting, notify our queue that we're available.
     if (GetState() == ThreadState::Waiting) {
+        if (wait_queue == nullptr) {
+            // This should never happen, but avoid a hard crash below so that it gets logged.
+            ASSERT_MSG(false, "wait_queue is nullptr!");
+            return;
+        }
+
         wait_queue->EndWait(this, wait_result_);
+
+        // Special case for dummy threads to wake up if necessary.
+        IfDummyThreadEndWait();
     }
 }
 
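The three IfDummyThread* helpers above form a lock handshake: BeginWait arms dummy_wait_lock on the host thread, TryWait blocks re-acquiring it once scheduling is re-enabled, and EndWait releases it from whichever thread completes the wait. A minimal sketch of the same handshake using C++20 std::atomic_flag in place of KSpinLock (note that a plain std::mutex would not work here, since it would be unlocked by a different thread than the one that locked it):

```cpp
#include <atomic>

struct DummyWaiter {
    std::atomic_flag blocked = ATOMIC_FLAG_INIT;

    // Host (dummy) thread: arm the block before the wait becomes visible.
    void BeginWait() {
        blocked.test_and_set(std::memory_order_acquire);
    }

    // Host (dummy) thread: park until another thread clears the flag.
    void TryWait() {
        blocked.wait(true, std::memory_order_acquire);
    }

    // Completing thread: release the waiter and wake it.
    void EndWait() {
        blocked.clear(std::memory_order_release);
        blocked.notify_one();
    }
};
```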
diff --git a/src/core/hle/kernel/k_thread.h b/src/core/hle/kernel/k_thread.h
index cc427f6cf..d058db62c 100644
--- a/src/core/hle/kernel/k_thread.h
+++ b/src/core/hle/kernel/k_thread.h
@@ -112,6 +112,7 @@ private:
 public:
     static constexpr s32 DefaultThreadPriority = 44;
     static constexpr s32 IdleThreadPriority = Svc::LowestThreadPriority + 1;
+    static constexpr s32 DummyThreadPriority = Svc::LowestThreadPriority + 2;
 
     explicit KThread(KernelCore& kernel_);
     ~KThread() override;
@@ -553,8 +554,12 @@ public:
         return wait_reason_for_debugging;
     }
 
-    [[nodiscard]] ThreadType GetThreadTypeForDebugging() const {
-        return thread_type_for_debugging;
+    [[nodiscard]] ThreadType GetThreadType() const {
+        return thread_type;
+    }
+
+    [[nodiscard]] bool IsDummyThread() const {
+        return GetThreadType() == ThreadType::Dummy;
     }
 
     void SetWaitObjectsForDebugging(const std::span<KSynchronizationObject*>& objects) {
@@ -631,6 +636,14 @@ public:
         return condvar_key;
     }
 
+    // Dummy threads (used for HLE host threads) cannot wait via the guest scheduler, and
+    // therefore will not block on guest kernel synchronization primitives. These methods
+    // implement the blocking behavior for them instead.
+
+    void IfDummyThreadTryWait();
+    void IfDummyThreadBeginWait();
+    void IfDummyThreadEndWait();
+
 private:
     static constexpr size_t PriorityInheritanceCountMax = 10;
     union SyncObjectBuffer {
@@ -749,16 +762,17 @@ private:
     bool resource_limit_release_hint{};
     StackParameters stack_parameters{};
     KSpinLock context_guard{};
+    KSpinLock dummy_wait_lock{};
 
     // For emulation
     std::shared_ptr<Common::Fiber> host_context{};
     bool is_single_core{};
+    ThreadType thread_type{};
 
     // For debugging
     std::vector<KSynchronizationObject*> wait_objects_for_debugging;
     VAddr mutex_wait_address_for_debugging{};
     ThreadWaitReasonForDebugging wait_reason_for_debugging{};
-    ThreadType thread_type_for_debugging{};
 
 public:
     using ConditionVariableThreadTreeType = ConditionVariableThreadTree;
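For context on the new constant: assuming the usual HOS priority range (Svc::HighestThreadPriority == 0, Svc::LowestThreadPriority == 63, larger value == less urgent), the dummy priority sits one step below even the idle thread, so a dummy thread could never win selection even if one did slip into a queue:

```cpp
// Assumed HOS values; only the ordering matters for the argument.
constexpr int HighestThreadPriority = 0;
constexpr int LowestThreadPriority = 63;

constexpr int DefaultThreadPriority = 44;
constexpr int IdleThreadPriority = LowestThreadPriority + 1;  // 64
constexpr int DummyThreadPriority = LowestThreadPriority + 2; // 65

// Larger value == lower urgency, so dummy ranks below even idle.
static_assert(HighestThreadPriority < DefaultThreadPriority);
static_assert(DefaultThreadPriority < IdleThreadPriority);
static_assert(IdleThreadPriority < DummyThreadPriority);
```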
diff --git a/src/core/hle/kernel/kernel.cpp b/src/core/hle/kernel/kernel.cpp
index 887c1fd27..49c0714ed 100644
--- a/src/core/hle/kernel/kernel.cpp
+++ b/src/core/hle/kernel/kernel.cpp
@@ -301,12 +301,10 @@ struct KernelCore::Impl {
     // Gets the dummy KThread for the caller, allocating a new one if this is the first time
     KThread* GetHostDummyThread() {
         auto make_thread = [this]() {
-            std::lock_guard lk(dummy_thread_lock);
-            auto& thread = dummy_threads.emplace_back(std::make_unique<KThread>(system.Kernel()));
-            KAutoObject::Create(thread.get());
-            ASSERT(KThread::InitializeDummyThread(thread.get()).IsSuccess());
+            KThread* thread = KThread::Create(system.Kernel());
+            ASSERT(KThread::InitializeDummyThread(thread).IsSuccess());
             thread->SetName(fmt::format("DummyThread:{}", GetHostThreadId()));
-            return thread.get();
+            return thread;
         };
 
         thread_local KThread* saved_thread = make_thread();
@@ -731,7 +729,6 @@ struct KernelCore::Impl {
     std::mutex server_sessions_lock;
     std::mutex registered_objects_lock;
     std::mutex registered_in_use_objects_lock;
-    std::mutex dummy_thread_lock;
 
     std::atomic<u32> next_object_id{0};
     std::atomic<u64> next_kernel_process_id{KProcess::InitialKIPIDMin};
@@ -788,9 +785,6 @@ struct KernelCore::Impl {
     std::array<Core::CPUInterruptHandler, Core::Hardware::NUM_CPU_CORES> interrupts{};
     std::array<std::unique_ptr<Kernel::KScheduler>, Core::Hardware::NUM_CPU_CORES> schedulers{};
 
-    // Specifically tracked to be automatically destroyed with kernel
-    std::vector<std::unique_ptr<KThread>> dummy_threads;
-
     bool is_multicore{};
     std::atomic_bool is_shutting_down{};
     bool is_phantom_mode_for_singlecore{};
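The GetHostDummyThread change leans entirely on the thread_local lazy-initialization idiom: the factory lambda runs exactly once per host thread, which is why the old mutex and tracking vector can go away. A standalone sketch of the idiom with hypothetical names:

```cpp
#include <atomic>

// Hypothetical per-thread object standing in for the dummy KThread.
struct PerThreadObject {
    int id;
};

PerThreadObject* GetPerThreadObject() {
    auto make = [] {
        static std::atomic<int> next_id{0};
        // Runs once per calling thread; lifetime is now the caller's
        // responsibility (mirroring the switch to KThread::Create).
        return new PerThreadObject{next_id++};
    };
    thread_local PerThreadObject* object = make();
    return object;
}
```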
diff --git a/src/core/hle/kernel/service_thread.cpp b/src/core/hle/kernel/service_thread.cpp
index 03f3dec10..4eb3a5988 100644
--- a/src/core/hle/kernel/service_thread.cpp
+++ b/src/core/hle/kernel/service_thread.cpp
@@ -12,6 +12,7 @@
 #include "common/scope_exit.h"
 #include "common/thread.h"
 #include "core/hle/kernel/k_session.h"
+#include "core/hle/kernel/k_thread.h"
 #include "core/hle/kernel/kernel.h"
 #include "core/hle/kernel/service_thread.h"
 
@@ -50,6 +51,10 @@ ServiceThread::Impl::Impl(KernelCore& kernel, std::size_t num_threads, const std
 
             kernel.RegisterHostThread();
 
+            // Ensure the dummy thread allocated for this host thread is closed on exit.
+            auto* dummy_thread = kernel.GetCurrentEmuThread();
+            SCOPE_EXIT({ dummy_thread->Close(); });
+
             while (true) {
                 std::function<void()> task;
 
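The new SCOPE_EXIT guarantees the per-thread dummy KThread is released on every exit path of the worker loop. A minimal sketch of the RAII guard pattern behind such a macro (a hypothetical ScopeExit type, not yuzu's actual implementation):

```cpp
#include <cstdio>
#include <utility>

template <typename F>
class ScopeExit {
public:
    explicit ScopeExit(F&& func) : func_{std::forward<F>(func)} {}
    ~ScopeExit() { func_(); } // runs when the scope unwinds, even on exceptions
private:
    F func_;
};
template <typename F>
ScopeExit(F) -> ScopeExit<F>;

void Worker() {
    ScopeExit guard{[] { std::puts("cleanup: dummy thread closed"); }};
    // ... process queued tasks; the guard fires on every return path.
}
```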
diff --git a/src/yuzu/debugger/wait_tree.cpp b/src/yuzu/debugger/wait_tree.cpp
index 1f41c46c4..2d1a2d9cb 100644
--- a/src/yuzu/debugger/wait_tree.cpp
+++ b/src/yuzu/debugger/wait_tree.cpp
@@ -95,7 +95,7 @@ std::vector<std::unique_ptr<WaitTreeThread>> WaitTreeItem::MakeThreadItemList(
     std::size_t row = 0;
     auto add_threads = [&](const std::vector<Kernel::KThread*>& threads) {
         for (std::size_t i = 0; i < threads.size(); ++i) {
-            if (threads[i]->GetThreadTypeForDebugging() == Kernel::ThreadType::User) {
+            if (threads[i]->GetThreadType() == Kernel::ThreadType::User) {
                 item_list.push_back(std::make_unique<WaitTreeThread>(*threads[i], system));
                 item_list.back()->row = row;
             }
@@ -153,7 +153,7 @@ QString WaitTreeCallstack::GetText() const {
 std::vector<std::unique_ptr<WaitTreeItem>> WaitTreeCallstack::GetChildren() const {
     std::vector<std::unique_ptr<WaitTreeItem>> list;
 
-    if (thread.GetThreadTypeForDebugging() != Kernel::ThreadType::User) {
+    if (thread.GetThreadType() != Kernel::ThreadType::User) {
         return list;
     }