diff --git a/src/core/hle/kernel/global_scheduler_context.cpp b/src/core/hle/kernel/global_scheduler_context.cpp
index 65576b8c4b..fd911a3a5e 100644
--- a/src/core/hle/kernel/global_scheduler_context.cpp
+++ b/src/core/hle/kernel/global_scheduler_context.cpp
@@ -49,4 +49,26 @@ bool GlobalSchedulerContext::IsLocked() const {
     return scheduler_lock.IsLockedByCurrentThread();
 }
 
+void GlobalSchedulerContext::RegisterDummyThreadForWakeup(KThread* thread) {
+    ASSERT(IsLocked());
+
+    woken_dummy_threads.insert(thread);
+}
+
+void GlobalSchedulerContext::UnregisterDummyThreadForWakeup(KThread* thread) {
+    ASSERT(IsLocked());
+
+    woken_dummy_threads.erase(thread);
+}
+
+void GlobalSchedulerContext::WakeupWaitingDummyThreads() {
+    ASSERT(IsLocked());
+
+    for (auto* thread : woken_dummy_threads) {
+        thread->DummyThreadEndWait();
+    }
+
+    woken_dummy_threads.clear();
+}
+
 } // namespace Kernel
diff --git a/src/core/hle/kernel/global_scheduler_context.h b/src/core/hle/kernel/global_scheduler_context.h
index 67bb9852da..220ed61922 100644
--- a/src/core/hle/kernel/global_scheduler_context.h
+++ b/src/core/hle/kernel/global_scheduler_context.h
@@ -4,6 +4,7 @@
 #pragma once
 
 #include <atomic>
+#include <set>
 #include <vector>
 
 #include "common/common_types.h"
@@ -58,6 +59,10 @@ public:
     /// Returns true if the global scheduler lock is acquired
     bool IsLocked() const;
 
+    void UnregisterDummyThreadForWakeup(KThread* thread);
+    void RegisterDummyThreadForWakeup(KThread* thread);
+    void WakeupWaitingDummyThreads();
+
     [[nodiscard]] LockType& SchedulerLock() {
         return scheduler_lock;
     }
@@ -76,6 +81,9 @@ private:
     KSchedulerPriorityQueue priority_queue;
     LockType scheduler_lock;
 
+    /// Lists dummy threads pending wakeup on lock release
+    std::set<KThread*> woken_dummy_threads;
+
     /// Lists all thread ids that aren't deleted/etc.
     std::vector<KThread*> thread_list;
     std::mutex global_list_guard;
diff --git a/src/core/hle/kernel/k_scheduler.cpp b/src/core/hle/kernel/k_scheduler.cpp
index c34ce7a178..b1cabbca06 100644
--- a/src/core/hle/kernel/k_scheduler.cpp
+++ b/src/core/hle/kernel/k_scheduler.cpp
@@ -81,8 +81,8 @@ void KScheduler::RescheduleCurrentHLEThread(KernelCore& kernel) {
     // HACK: we cannot schedule from this thread, it is not a core thread
     ASSERT(GetCurrentThread(kernel).GetDisableDispatchCount() == 1);
 
-    // Special case to ensure dummy threads that are waiting block
-    GetCurrentThread(kernel).IfDummyThreadTryWait();
+    // Ensure that dummy threads which are waiting will block here.
+    GetCurrentThread(kernel).DummyThreadBeginWait();
 
     ASSERT(GetCurrentThread(kernel).GetState() != ThreadState::Waiting);
     GetCurrentThread(kernel).EnableDispatch();
@@ -314,6 +314,16 @@ u64 KScheduler::UpdateHighestPriorityThreadsImpl(KernelCore& kernel) {
         idle_cores &= ~(1ULL << core_id);
     }
 
+    // HACK: any waiting dummy threads can wake up now.
+    kernel.GlobalSchedulerContext().WakeupWaitingDummyThreads();
+
+    // HACK: if we are a dummy thread, and we need to go to sleep, indicate
+    // that for when the lock is released.
+    KThread* const cur_thread = GetCurrentThreadPointer(kernel);
+    if (cur_thread->IsDummyThread() && cur_thread->GetState() != ThreadState::Runnable) {
+        cur_thread->RequestDummyThreadWait();
+    }
+
     return cores_needing_scheduling;
 }
 
@@ -531,11 +541,23 @@ void KScheduler::OnThreadStateChanged(KernelCore& kernel, KThread* thread, Threa
         GetPriorityQueue(kernel).Remove(thread);
         IncrementScheduledCount(thread);
         SetSchedulerUpdateNeeded(kernel);
+
+        if (thread->IsDummyThread()) {
+            // HACK: if this is a dummy thread, it should no longer wake up when the
+            // scheduler lock is released.
+            kernel.GlobalSchedulerContext().UnregisterDummyThreadForWakeup(thread);
+        }
     } else if (cur_state == ThreadState::Runnable) {
         // If we're now runnable, then we weren't previously, and we should add.
         GetPriorityQueue(kernel).PushBack(thread);
         IncrementScheduledCount(thread);
         SetSchedulerUpdateNeeded(kernel);
+
+        if (thread->IsDummyThread()) {
+            // HACK: if this is a dummy thread, it should wake up when the scheduler
+            // lock is released.
+            kernel.GlobalSchedulerContext().RegisterDummyThreadForWakeup(thread);
+        }
     }
 }
 
diff --git a/src/core/hle/kernel/k_thread.cpp b/src/core/hle/kernel/k_thread.cpp
index b7bfcdce31..d57b42fdf7 100644
--- a/src/core/hle/kernel/k_thread.cpp
+++ b/src/core/hle/kernel/k_thread.cpp
@@ -148,7 +148,9 @@ Result KThread::Initialize(KThreadFunction func, uintptr_t arg, VAddr user_stack
     physical_affinity_mask.SetAffinity(phys_core, true);
 
     // Set the thread state.
-    thread_state = (type == ThreadType::Main) ? ThreadState::Runnable : ThreadState::Initialized;
+    thread_state = (type == ThreadType::Main || type == ThreadType::Dummy)
+                       ? ThreadState::Runnable
+                       : ThreadState::Initialized;
 
     // Set TLS address.
     tls_address = 0;
@@ -1174,30 +1176,29 @@ Result KThread::Sleep(s64 timeout) {
     R_SUCCEED();
 }
 
-void KThread::IfDummyThreadTryWait() {
-    if (!IsDummyThread()) {
-        return;
-    }
+void KThread::RequestDummyThreadWait() {
+    ASSERT(KScheduler::IsSchedulerLockedByCurrentThread(kernel));
+    ASSERT(this->IsDummyThread());
 
-    if (GetState() != ThreadState::Waiting) {
-        return;
-    }
-
-    ASSERT(!kernel.IsPhantomModeForSingleCore());
-
-    // Block until we are no longer waiting.
-    std::unique_lock lk(dummy_wait_lock);
-    dummy_wait_cv.wait(
-        lk, [&] { return GetState() != ThreadState::Waiting || kernel.IsShuttingDown(); });
+    // We will block when the scheduler lock is released.
+    dummy_thread_runnable.store(false);
 }
 
-void KThread::IfDummyThreadEndWait() {
-    if (!IsDummyThread()) {
-        return;
-    }
+void KThread::DummyThreadBeginWait() {
+    ASSERT(this->IsDummyThread());
+    ASSERT(!kernel.IsPhantomModeForSingleCore());
+
+    // Block until dummy_thread_runnable is no longer false.
+    dummy_thread_runnable.wait(false);
+}
+
+void KThread::DummyThreadEndWait() {
+    ASSERT(KScheduler::IsSchedulerLockedByCurrentThread(kernel));
+    ASSERT(this->IsDummyThread());
 
     // Wake up the waiting thread.
-    dummy_wait_cv.notify_one();
+    dummy_thread_runnable.store(true);
+    dummy_thread_runnable.notify_one();
 }
 
 void KThread::BeginWait(KThreadQueue* queue) {
@@ -1231,9 +1232,6 @@ void KThread::EndWait(Result wait_result_) {
         }
 
         wait_queue->EndWait(this, wait_result_);
-
-        // Special case for dummy threads to wakeup if necessary.
-        IfDummyThreadEndWait();
     }
 }
 
diff --git a/src/core/hle/kernel/k_thread.h b/src/core/hle/kernel/k_thread.h
index e2a27d6036..30aa10c9a8 100644
--- a/src/core/hle/kernel/k_thread.h
+++ b/src/core/hle/kernel/k_thread.h
@@ -643,8 +643,9 @@ public:
     // therefore will not block on guest kernel synchronization primitives. These methods handle
     // blocking as needed.
 
-    void IfDummyThreadTryWait();
-    void IfDummyThreadEndWait();
+    void RequestDummyThreadWait();
+    void DummyThreadBeginWait();
+    void DummyThreadEndWait();
 
     [[nodiscard]] uintptr_t GetArgument() const {
         return argument;
@@ -777,8 +778,7 @@ private:
     bool is_single_core{};
     ThreadType thread_type{};
     StepState step_state{};
-    std::mutex dummy_wait_lock;
-    std::condition_variable dummy_wait_cv;
+    std::atomic<bool> dummy_thread_runnable{true};
 
     // For debugging
     std::vector<KSynchronizationObject*> wait_objects_for_debugging;