From b164d8ee536dba526f9da2083433d529daf7b37b Mon Sep 17 00:00:00 2001
From: Fernando Sahmkow <fsahmkow27@gmail.com>
Date: Fri, 29 Mar 2019 17:01:17 -0400
Subject: [PATCH] Implement a new Core Scheduler

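This splits scheduling into a per-core Scheduler and a GlobalScheduler.
The GlobalScheduler owns the scheduled and suggested thread queues of
every core and implements thread selection, load balancing and the
yield variants, while each per-core Scheduler only performs the actual
context switches for its own core. The SelectThreads and Yield
functions are adapted from TuxSH's work.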
---
 src/core/hle/kernel/scheduler.cpp | 455 +++++++++++++++++++-----------
 src/core/hle/kernel/scheduler.h   | 234 +++++++--------
 2 files changed, 421 insertions(+), 268 deletions(-)

diff --git a/src/core/hle/kernel/scheduler.cpp b/src/core/hle/kernel/scheduler.cpp
index e8447b69a1..878aeed6d6 100644
--- a/src/core/hle/kernel/scheduler.cpp
+++ b/src/core/hle/kernel/scheduler.cpp
@@ -3,6 +3,8 @@
 // Refer to the license.txt file included.
 
 #include <algorithm>
+#include <set>
+#include <unordered_set>
 #include <utility>
 
 #include "common/assert.h"
@@ -17,57 +19,286 @@
 
 namespace Kernel {
 
-std::mutex Scheduler::scheduler_mutex;
+void GlobalScheduler::AddThread(SharedPtr<Thread> thread) {
+    thread_list.push_back(std::move(thread));
+}
 
-Scheduler::Scheduler(Core::System& system, Core::ARM_Interface& cpu_core)
-    : cpu_core{cpu_core}, system{system} {}
+void GlobalScheduler::RemoveThread(Thread* thread) {
+    thread_list.erase(std::remove(thread_list.begin(), thread_list.end(), thread),
+                      thread_list.end());
+}
 
-Scheduler::~Scheduler() {
-    for (auto& thread : thread_list) {
-        thread->Stop();
+/*
+ * The SelectThreads and Yield functions below are originally by TuxSH,
+ * licensed under GPLv2 or later under an exception provided by the author.
+ */
+
+void GlobalScheduler::UnloadThread(s32 core) {
+    Scheduler& sched = Core::System::GetInstance().Scheduler(core);
+    sched.UnloadThread();
+}
+
+void GlobalScheduler::SelectThread(u32 core) {
+    auto update_thread = [](Thread* thread, Scheduler& sched) {
+        if (thread != sched.selected_thread) {
+            if (thread == nullptr) {
+                ++sched.idle_selection_count;
+            }
+            sched.selected_thread = thread;
+        }
+        sched.context_switch_pending = sched.selected_thread != sched.current_thread;
+        std::atomic_thread_fence(std::memory_order_seq_cst);
+    };
+    Scheduler& sched = Core::System::GetInstance().Scheduler(core);
+    Thread* current_thread = nullptr;
+    current_thread = scheduled_queue[core].empty() ? nullptr : scheduled_queue[core].front();
+    if (!current_thread) {
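+        // Nothing is scheduled on this core: look for a suggested thread that is not
+        // currently at the front of its assigned core's queue.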
+        Thread* winner = nullptr;
+        std::set<s32> sug_cores;
+        for (auto thread : suggested_queue[core]) {
+            s32 this_core = thread->GetProcessorID();
+            Thread* thread_on_core = nullptr;
+            if (this_core >= 0) {
+                thread_on_core = scheduled_queue[this_core].front();
+            }
+            if (this_core < 0 || thread != thread_on_core) {
+                winner = thread;
+                break;
+            }
+            sug_cores.insert(this_core);
+        }
+        if (winner && winner->GetPriority() > 2) {
+            if (winner->IsRunning()) {
+                UnloadThread(winner->GetProcessorID());
+            }
+            TransferToCore(winner->GetPriority(), core, winner);
+            current_thread = winner;
+        } else {
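+            // No such thread was found: migrate the front thread of a donor core that
+            // still has another thread queued behind it.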
+            for (auto& src_core : sug_cores) {
+                auto it = scheduled_queue[src_core].begin();
+                it++;
+                if (it != scheduled_queue[src_core].end()) {
+                    Thread* thread_on_core = scheduled_queue[src_core].front();
+                    Thread* to_change = *it;
+                    if (thread_on_core->IsRunning() || to_change->IsRunning()) {
+                        UnloadThread(src_core);
+                    }
+                    TransferToCore(thread_on_core->GetPriority(), core, thread_on_core);
+                    current_thread = thread_on_core;
+                }
+            }
+        }
+    }
+    update_thread(current_thread, sched);
+}
+
+void GlobalScheduler::SelectThreads() {
+    auto update_thread = [](Thread* thread, Scheduler& sched) {
+        if (thread != sched.selected_thread) {
+            if (thread == nullptr) {
+                ++sched.idle_selection_count;
+            }
+            sched.selected_thread = thread;
+        }
+        sched.context_switch_pending = sched.selected_thread != sched.current_thread;
+        std::atomic_thread_fence(std::memory_order_seq_cst);
+    };
+
+    auto& system = Core::System::GetInstance();
+
+    std::unordered_set<Thread*> picked_threads;
+    // This maintains the "current thread is at the front of the queue" invariant
+    std::array<Thread*, NUM_CPU_CORES> current_threads;
+    for (u32 i = 0; i < NUM_CPU_CORES; i++) {
+        Scheduler& sched = system.Scheduler(i);
+        current_threads[i] = scheduled_queue[i].empty() ? nullptr : scheduled_queue[i].front();
+        if (current_threads[i])
+            picked_threads.insert(current_threads[i]);
+        update_thread(current_threads[i], sched);
+    }
+
+    // Do some load-balancing: a second pass that may assign threads to cores left idle above.
+    std::array<Thread*, NUM_CPU_CORES> current_threads_2 = current_threads;
+    for (u32 i = 0; i < NUM_CPU_CORES; i++) {
+        if (!scheduled_queue[i].empty()) {
+            continue;
+        }
+        Thread* winner = nullptr;
+        for (auto thread : suggested_queue[i]) {
+            if (thread->GetProcessorID() < 0 || thread != current_threads[i]) {
+                if (picked_threads.count(thread) == 0 && !thread->IsRunning()) {
+                    winner = thread;
+                    break;
+                }
+            }
+        }
+        if (winner) {
+            TransferToCore(winner->GetPriority(), i, winner);
+            current_threads_2[i] = winner;
+            picked_threads.insert(winner);
+        }
+    }
+
+    // See which to-be-current threads have changed & update accordingly
+    for (u32 i = 0; i < NUM_CPU_CORES; i++) {
+        Scheduler& sched = system.Scheduler(i);
+        if (current_threads_2[i] != current_threads[i]) {
+            update_thread(current_threads_2[i], sched);
+        }
+    }
+
+    reselection_pending.store(false, std::memory_order_release);
+}
+
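+// Moves the yielding thread to the back of its priority level on its current core and
+// requests reselection if a different thread is now at the front of that queue.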
+void GlobalScheduler::YieldThread(Thread* yielding_thread) {
+    // Note: the caller should hold a critical section, etc.
+    u32 core_id = static_cast<u32>(yielding_thread->GetProcessorID());
+    u32 priority = yielding_thread->GetPriority();
+
+    // Yield the thread
+    ASSERT_MSG(yielding_thread == scheduled_queue[core_id].front(priority),
+               "Thread yielding without being in front");
+    scheduled_queue[core_id].yield(priority);
+
+    Thread* winner = scheduled_queue[core_id].front(priority);
+    AskForReselectionOrMarkRedundant(yielding_thread, winner);
+}
+
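+// Moves the yielding thread to the back of its priority level, then looks through this
+// core's suggested threads for a candidate from another core to bring here instead.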
+void GlobalScheduler::YieldThreadAndBalanceLoad(Thread* yielding_thread) {
+    // Note: the caller should check !thread.IsSchedulerOperationRedundant() and hold a critical
+    // section, etc.
+    u32 core_id = static_cast<u32>(yielding_thread->GetProcessorID());
+    u32 priority = yielding_thread->GetPriority();
+
+    // Yield the thread
+    ASSERT_MSG(yielding_thread == scheduled_queue[core_id].front(priority),
+               "Thread yielding without being in front");
+    scheduled_queue[core_id].yield(priority);
+
+    std::array<Thread*, NUM_CPU_CORES> current_threads;
+    for (u32 i = 0; i < NUM_CPU_CORES; i++) {
+        current_threads[i] = scheduled_queue[i].empty() ? nullptr : scheduled_queue[i].front();
+    }
+
+    Thread* next_thread = scheduled_queue[core_id].front(priority);
+    Thread* winner = nullptr;
+    for (auto& thread : suggested_queue[core_id]) {
+        s32 source_core = thread->GetProcessorID();
+        if (source_core >= 0) {
+            if (current_threads[source_core] != nullptr) {
+                if (thread == current_threads[source_core] ||
+                    current_threads[source_core]->GetPriority() < min_regular_priority)
+                    continue;
+            }
+            if (next_thread->GetLastRunningTicks() >= thread->GetLastRunningTicks() ||
+                next_thread->GetPriority() < thread->GetPriority()) {
+                if (thread->GetPriority() <= priority) {
+                    winner = thread;
+                    break;
+                }
+            }
+        }
+    }
+
+    if (winner != nullptr) {
+        if (winner != yielding_thread) {
+            if (winner->IsRunning())
+                UnloadThread(winner->GetProcessorID());
+            TransferToCore(winner->GetPriority(), core_id, winner);
+        }
+    } else {
+        winner = next_thread;
+    }
+
+    AskForReselectionOrMarkRedundant(yielding_thread, winner);
+}
+
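+// Removes the yielding thread from its scheduled queue, leaving it only as a suggested
+// thread, and performs load balancing for the core if it has become idle.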
+void GlobalScheduler::YieldThreadAndWaitForLoadBalancing(Thread* yielding_thread) {
+    // Note: the caller should check !thread.IsSchedulerOperationRedundant() and hold a critical
+    // section, etc.
+    Thread* winner = nullptr;
+    u32 core_id = static_cast<u32>(yielding_thread->GetProcessorID());
+
+    // Remove the thread from its scheduled multi-level queue and put it on the
+    // corresponding "suggested" queue instead.
+    TransferToCore(yielding_thread->GetPriority(), -1, yielding_thread);
+
+    // If the core is idle, perform load balancing, excluding the threads that have just used this
+    // function...
+    if (scheduled_queue[core_id].empty()) {
+        // Here, "current_threads" is calculated after the "yield", unlike the yield -1 case.
+        std::array<Thread*, NUM_CPU_CORES> current_threads;
+        for (u32 i = 0; i < NUM_CPU_CORES; i++) {
+            current_threads[i] = scheduled_queue[i].empty() ? nullptr : scheduled_queue[i].front();
+        }
+        for (auto& thread : suggested_queue[core_id]) {
+            s32 source_core = thread->GetProcessorID();
+            if (source_core < 0 || thread == current_threads[source_core])
+                continue;
+            if (current_threads[source_core] == nullptr ||
+                current_threads[source_core]->GetPriority() >= min_regular_priority) {
+                winner = thread;
+            }
+            break;
+        }
+        if (winner != nullptr) {
+            if (winner != yielding_thread) {
+                if (winner->IsRunning())
+                    UnloadThread(winner->GetProcessorID());
+                TransferToCore(winner->GetPriority(), core_id, winner);
+            }
+        } else {
+            winner = yielding_thread;
+        }
+    }
+
+    AskForReselectionOrMarkRedundant(yielding_thread, winner);
+}
+
+void GlobalScheduler::AskForReselectionOrMarkRedundant(Thread* current_thread, Thread* winner) {
+    if (current_thread == winner) {
+        // Nintendo (not us) has a null-dereference bug on current_thread->owner here, which is
+        // never triggered in practice.
+        // current_thread->SetRedundantSchedulerOperation();
+    } else {
+        reselection_pending.store(true, std::memory_order_release);
     }
 }
 
+GlobalScheduler::~GlobalScheduler() = default;
+
+Scheduler::Scheduler(Core::System& system, Core::ARM_Interface& cpu_core, u32 id)
+    : system(system), cpu_core(cpu_core), id(id) {}
+
+Scheduler::~Scheduler() = default;
+
 bool Scheduler::HaveReadyThreads() const {
-    std::lock_guard lock{scheduler_mutex};
-    return !ready_queue.empty();
+    return system.GlobalScheduler().HaveReadyThreads(id);
 }
 
 Thread* Scheduler::GetCurrentThread() const {
     return current_thread.get();
 }
 
+Thread* Scheduler::GetSelectedThread() const {
+    return selected_thread.get();
+}
+
+void Scheduler::SelectThreads() {
+    system.GlobalScheduler().SelectThread(id);
+}
+
 u64 Scheduler::GetLastContextSwitchTicks() const {
     return last_context_switch_time;
 }
 
-Thread* Scheduler::PopNextReadyThread() {
-    Thread* next = nullptr;
-    Thread* thread = GetCurrentThread();
-
-    if (thread && thread->GetStatus() == ThreadStatus::Running) {
-        if (ready_queue.empty()) {
-            return thread;
-        }
-        // We have to do better than the current thread.
-        // This call returns null when that's not possible.
-        next = ready_queue.front();
-        if (next == nullptr || next->GetPriority() >= thread->GetPriority()) {
-            next = thread;
-        }
-    } else {
-        if (ready_queue.empty()) {
-            return nullptr;
-        }
-        next = ready_queue.front();
-    }
-
-    return next;
+void Scheduler::TryDoContextSwitch() {
+    if (context_switch_pending)
+        SwitchContext();
 }
 
-void Scheduler::SwitchContext(Thread* new_thread) {
-    Thread* previous_thread = GetCurrentThread();
-    Process* const previous_process = system.Kernel().CurrentProcess();
+void Scheduler::UnloadThread() {
+    Thread* const previous_thread = GetCurrentThread();
+    Process* const previous_process = Core::CurrentProcess();
 
     UpdateLastContextSwitchTime(previous_thread, previous_process);
 
@@ -80,23 +311,51 @@ void Scheduler::SwitchContext(Thread* new_thread) {
         if (previous_thread->GetStatus() == ThreadStatus::Running) {
             // This is only the case when a reschedule is triggered without the current thread
             // yielding execution (i.e. an event triggered, system core time-sliced, etc)
-            ready_queue.add(previous_thread, previous_thread->GetPriority(), false);
             previous_thread->SetStatus(ThreadStatus::Ready);
         }
+        previous_thread->SetIsRunning(false);
+    }
+    current_thread = nullptr;
+}
+
+void Scheduler::SwitchContext() {
+    Thread* const previous_thread = GetCurrentThread();
+    Thread* const new_thread = GetSelectedThread();
+
+    context_switch_pending = false;
+    if (new_thread == previous_thread)
+        return;
+
+    Process* const previous_process = Core::CurrentProcess();
+
+    UpdateLastContextSwitchTime(previous_thread, previous_process);
+
+    // Save context for previous thread
+    if (previous_thread) {
+        cpu_core.SaveContext(previous_thread->GetContext());
+        // Save the TPIDR_EL0 system register in case it was modified.
+        previous_thread->SetTPIDR_EL0(cpu_core.GetTPIDR_EL0());
+
+        if (previous_thread->GetStatus() == ThreadStatus::Running) {
+            // This is only the case when a reschedule is triggered without the current thread
+            // yielding execution (i.e. an event triggered, system core time-sliced, etc)
+            previous_thread->SetStatus(ThreadStatus::Ready);
+        }
+        previous_thread->SetIsRunning(false);
     }
 
     // Load context of new thread
     if (new_thread) {
+        ASSERT_MSG(new_thread->GetProcessorID() == this->id,
+                   "Thread must be assigned to this core.");
         ASSERT_MSG(new_thread->GetStatus() == ThreadStatus::Ready,
                    "Thread must be ready to become running.");
 
         // Cancel any outstanding wakeup events for this thread
         new_thread->CancelWakeupTimer();
-
         current_thread = new_thread;
-
-        ready_queue.remove(new_thread, new_thread->GetPriority());
         new_thread->SetStatus(ThreadStatus::Running);
+        new_thread->SetIsRunning(true);
 
         auto* const thread_owner_process = current_thread->GetOwnerProcess();
         if (previous_process != thread_owner_process) {
@@ -116,7 +375,7 @@ void Scheduler::SwitchContext(Thread* new_thread) {
 
 void Scheduler::UpdateLastContextSwitchTime(Thread* thread, Process* process) {
     const u64 prev_switch_ticks = last_context_switch_time;
-    const u64 most_recent_switch_ticks = system.CoreTiming().GetTicks();
+    const u64 most_recent_switch_ticks = Core::System::GetInstance().CoreTiming().GetTicks();
     const u64 update_ticks = most_recent_switch_ticks - prev_switch_ticks;
 
     if (thread != nullptr) {
@@ -130,124 +389,4 @@ void Scheduler::UpdateLastContextSwitchTime(Thread* thread, Process* process) {
     last_context_switch_time = most_recent_switch_ticks;
 }
 
-void Scheduler::Reschedule() {
-    std::lock_guard lock{scheduler_mutex};
-
-    Thread* cur = GetCurrentThread();
-    Thread* next = PopNextReadyThread();
-
-    if (cur && next) {
-        LOG_TRACE(Kernel, "context switch {} -> {}", cur->GetObjectId(), next->GetObjectId());
-    } else if (cur) {
-        LOG_TRACE(Kernel, "context switch {} -> idle", cur->GetObjectId());
-    } else if (next) {
-        LOG_TRACE(Kernel, "context switch idle -> {}", next->GetObjectId());
-    }
-
-    SwitchContext(next);
-}
-
-void Scheduler::AddThread(SharedPtr<Thread> thread) {
-    std::lock_guard lock{scheduler_mutex};
-
-    thread_list.push_back(std::move(thread));
-}
-
-void Scheduler::RemoveThread(Thread* thread) {
-    std::lock_guard lock{scheduler_mutex};
-
-    thread_list.erase(std::remove(thread_list.begin(), thread_list.end(), thread),
-                      thread_list.end());
-}
-
-void Scheduler::ScheduleThread(Thread* thread, u32 priority) {
-    std::lock_guard lock{scheduler_mutex};
-
-    ASSERT(thread->GetStatus() == ThreadStatus::Ready);
-    ready_queue.add(thread, priority);
-}
-
-void Scheduler::UnscheduleThread(Thread* thread, u32 priority) {
-    std::lock_guard lock{scheduler_mutex};
-
-    ASSERT(thread->GetStatus() == ThreadStatus::Ready);
-    ready_queue.remove(thread, priority);
-}
-
-void Scheduler::SetThreadPriority(Thread* thread, u32 priority) {
-    std::lock_guard lock{scheduler_mutex};
-    if (thread->GetPriority() == priority) {
-        return;
-    }
-
-    // If thread was ready, adjust queues
-    if (thread->GetStatus() == ThreadStatus::Ready)
-        ready_queue.adjust(thread, thread->GetPriority(), priority);
-}
-
-Thread* Scheduler::GetNextSuggestedThread(u32 core, u32 maximum_priority) const {
-    std::lock_guard lock{scheduler_mutex};
-
-    const u32 mask = 1U << core;
-    for (auto* thread : ready_queue) {
-        if ((thread->GetAffinityMask() & mask) != 0 && thread->GetPriority() < maximum_priority) {
-            return thread;
-        }
-    }
-    return nullptr;
-}
-
-void Scheduler::YieldWithoutLoadBalancing(Thread* thread) {
-    ASSERT(thread != nullptr);
-    // Avoid yielding if the thread isn't even running.
-    ASSERT(thread->GetStatus() == ThreadStatus::Running);
-
-    // Sanity check that the priority is valid
-    ASSERT(thread->GetPriority() < THREADPRIO_COUNT);
-
-    // Yield this thread -- sleep for zero time and force reschedule to different thread
-    GetCurrentThread()->Sleep(0);
-}
-
-void Scheduler::YieldWithLoadBalancing(Thread* thread) {
-    ASSERT(thread != nullptr);
-    const auto priority = thread->GetPriority();
-    const auto core = static_cast<u32>(thread->GetProcessorID());
-
-    // Avoid yielding if the thread isn't even running.
-    ASSERT(thread->GetStatus() == ThreadStatus::Running);
-
-    // Sanity check that the priority is valid
-    ASSERT(priority < THREADPRIO_COUNT);
-
-    // Sleep for zero time to be able to force reschedule to different thread
-    GetCurrentThread()->Sleep(0);
-
-    Thread* suggested_thread = nullptr;
-
-    // Search through all of the cpu cores (except this one) for a suggested thread.
-    // Take the first non-nullptr one
-    for (unsigned cur_core = 0; cur_core < Core::NUM_CPU_CORES; ++cur_core) {
-        const auto res =
-            system.CpuCore(cur_core).Scheduler().GetNextSuggestedThread(core, priority);
-
-        // If scheduler provides a suggested thread
-        if (res != nullptr) {
-            // And its better than the current suggested thread (or is the first valid one)
-            if (suggested_thread == nullptr ||
-                suggested_thread->GetPriority() > res->GetPriority()) {
-                suggested_thread = res;
-            }
-        }
-    }
-
-    // If a suggested thread was found, queue that for this core
-    if (suggested_thread != nullptr)
-        suggested_thread->ChangeCore(core, suggested_thread->GetAffinityMask());
-}
-
-void Scheduler::YieldAndWaitForLoadBalancing(Thread* thread) {
-    UNIMPLEMENTED_MSG("Wait for load balancing thread yield type is not implemented!");
-}
-
 } // namespace Kernel
diff --git a/src/core/hle/kernel/scheduler.h b/src/core/hle/kernel/scheduler.h
index b29bf7be85..50fa7376b4 100644
--- a/src/core/hle/kernel/scheduler.h
+++ b/src/core/hle/kernel/scheduler.h
@@ -20,124 +20,141 @@ namespace Kernel {
 
 class Process;
 
-class Scheduler final {
+class GlobalScheduler final {
 public:
-    explicit Scheduler(Core::System& system, Core::ARM_Interface& cpu_core);
-    ~Scheduler();
-
-    /// Returns whether there are any threads that are ready to run.
-    bool HaveReadyThreads() const;
-
-    /// Reschedules to the next available thread (call after current thread is suspended)
-    void Reschedule();
-
-    /// Gets the current running thread
-    Thread* GetCurrentThread() const;
-
-    /// Gets the timestamp for the last context switch in ticks.
-    u64 GetLastContextSwitchTicks() const;
+    static constexpr u32 NUM_CPU_CORES = 4;
 
+    GlobalScheduler() {
+        reselection_pending = false;
+    }
+    ~GlobalScheduler();
     /// Adds a new thread to the scheduler
     void AddThread(SharedPtr<Thread> thread);
 
     /// Removes a thread from the scheduler
     void RemoveThread(Thread* thread);
 
-    /// Schedules a thread that has become "ready"
-    void ScheduleThread(Thread* thread, u32 priority);
-
-    /// Unschedules a thread that was already scheduled
-    void UnscheduleThread(Thread* thread, u32 priority);
-
-    /// Sets the priority of a thread in the scheduler
-    void SetThreadPriority(Thread* thread, u32 priority);
-
-    /// Gets the next suggested thread for load balancing
-    Thread* GetNextSuggestedThread(u32 core, u32 minimum_priority) const;
-
-    /**
-     * YieldWithoutLoadBalancing -- analogous to normal yield on a system
-     * Moves the thread to the end of the ready queue for its priority, and then reschedules the
-     * system to the new head of the queue.
-     *
-     * Example (Single Core -- but can be extrapolated to multi):
-     * ready_queue[prio=0]: ThreadA, ThreadB, ThreadC (->exec order->)
-     * Currently Running: ThreadR
-     *
-     * ThreadR calls YieldWithoutLoadBalancing
-     *
-     * ThreadR is moved to the end of ready_queue[prio=0]:
-     * ready_queue[prio=0]: ThreadA, ThreadB, ThreadC, ThreadR (->exec order->)
-     * Currently Running: Nothing
-     *
-     * System is rescheduled (ThreadA is popped off of queue):
-     * ready_queue[prio=0]: ThreadB, ThreadC, ThreadR (->exec order->)
-     * Currently Running: ThreadA
-     *
-     * If the queue is empty at time of call, no yielding occurs. This does not cross between cores
-     * or priorities at all.
-     */
-    void YieldWithoutLoadBalancing(Thread* thread);
-
-    /**
-     * YieldWithLoadBalancing -- yield but with better selection of the new running thread
-     * Moves the current thread to the end of the ready queue for its priority, then selects a
-     * 'suggested thread' (a thread on a different core that could run on this core) from the
-     * scheduler, changes its core, and reschedules the current core to that thread.
-     *
-     * Example (Dual Core -- can be extrapolated to Quad Core, this is just normal yield if it were
-     * single core):
-     * ready_queue[core=0][prio=0]: ThreadA, ThreadB (affinities not pictured as irrelevant
-     * ready_queue[core=1][prio=0]: ThreadC[affinity=both], ThreadD[affinity=core1only]
-     * Currently Running: ThreadQ on Core 0 || ThreadP on Core 1
-     *
-     * ThreadQ calls YieldWithLoadBalancing
-     *
-     * ThreadQ is moved to the end of ready_queue[core=0][prio=0]:
-     * ready_queue[core=0][prio=0]: ThreadA, ThreadB
-     * ready_queue[core=1][prio=0]: ThreadC[affinity=both], ThreadD[affinity=core1only]
-     * Currently Running: ThreadQ on Core 0 || ThreadP on Core 1
-     *
-     * A list of suggested threads for each core is compiled
-     * Suggested Threads: {ThreadC on Core 1}
-     * If this were quad core (as the switch is), there could be between 0 and 3 threads in this
-     * list. If there are more than one, the thread is selected by highest prio.
-     *
-     * ThreadC is core changed to Core 0:
-     * ready_queue[core=0][prio=0]: ThreadC, ThreadA, ThreadB, ThreadQ
-     * ready_queue[core=1][prio=0]: ThreadD
-     * Currently Running: None on Core 0 || ThreadP on Core 1
-     *
-     * System is rescheduled (ThreadC is popped off of queue):
-     * ready_queue[core=0][prio=0]: ThreadA, ThreadB, ThreadQ
-     * ready_queue[core=1][prio=0]: ThreadD
-     * Currently Running: ThreadC on Core 0 || ThreadP on Core 1
-     *
-     * If no suggested threads can be found this will behave just as normal yield. If there are
-     * multiple candidates for the suggested thread on a core, the highest prio is taken.
-     */
-    void YieldWithLoadBalancing(Thread* thread);
-
-    /// Currently unknown -- asserts as unimplemented on call
-    void YieldAndWaitForLoadBalancing(Thread* thread);
-
     /// Returns a list of all threads managed by the scheduler
     const std::vector<SharedPtr<Thread>>& GetThreadList() const {
         return thread_list;
     }
 
-private:
-    /**
-     * Pops and returns the next thread from the thread queue
-     * @return A pointer to the next ready thread
-     */
-    Thread* PopNextReadyThread();
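+    /// Adds a thread to a core's suggested queue. Suggested threads may be picked
+    /// when the core has no scheduled thread to run.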
+    void Suggest(u32 priority, u32 core, Thread* thread) {
+        suggested_queue[core].add(thread, priority);
+    }
 
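+    /// Removes a thread from a core's suggested queue.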
+    void Unsuggest(u32 priority, u32 core, Thread* thread) {
+        suggested_queue[core].remove(thread, priority);
+    }
+
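+    /// Adds a thread to a core's scheduled queue at the back of its priority level.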
+    void Schedule(u32 priority, u32 core, Thread* thread) {
+        ASSERT_MSG(thread->GetProcessorID() == core,
+                   "Thread must be assigned to this core.");
+        scheduled_queue[core].add(thread, priority);
+    }
+
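+    /// Adds a thread to a core's scheduled queue at the front of its priority level.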
+    void SchedulePrepend(u32 priority, u32 core, Thread* thread) {
+        ASSERT_MSG(thread->GetProcessorID() == core,
+                   "Thread must be assigned to this core.");
+        scheduled_queue[core].add(thread, priority, false);
+    }
+
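+    /// Moves a thread to the back of its priority level within a core's scheduled queue.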
+    void Reschedule(u32 priority, u32 core, Thread* thread) {
+        scheduled_queue[core].remove(thread, priority);
+        scheduled_queue[core].add(thread, priority);
+    }
+
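+    /// Removes a thread from a core's scheduled queue.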
+    void Unschedule(u32 priority, u32 core, Thread* thread) {
+        scheduled_queue[core].remove(thread, priority);
+    }
+
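+    /// Moves a thread to another core: it is unscheduled from its source core, scheduled
+    /// on the destination core (when one is given) and kept as a suggested thread on the
+    /// source core.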
+    void TransferToCore(u32 priority, s32 destination_core, Thread* thread) {
+        bool schedulable = thread->GetPriority() < THREADPRIO_COUNT;
+        s32 source_core = thread->GetProcessorID();
+        if (source_core == destination_core || !schedulable)
+            return;
+        thread->SetProcessorID(destination_core);
+        if (source_core >= 0)
+            Unschedule(priority, source_core, thread);
+        if (destination_core >= 0) {
+            Unsuggest(priority, destination_core, thread);
+            Schedule(priority, destination_core, thread);
+        }
+        if (source_core >= 0)
+            Suggest(priority, source_core, thread);
+    }
+
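+    /// Instructs the given core's scheduler to unload its currently running thread.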
+    void UnloadThread(s32 core);
+
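+    /// Selects the thread(s) that should run next, for all cores or for a single core.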
+    void SelectThreads();
+    void SelectThread(u32 core);
+
+    bool HaveReadyThreads(u32 core_id) {
+        return !scheduled_queue[core_id].empty();
+    }
+
+    void YieldThread(Thread* thread);
+    void YieldThreadAndBalanceLoad(Thread* thread);
+    void YieldThreadAndWaitForLoadBalancing(Thread* thread);
+
+    u32 CpuCoresCount() const {
+        return NUM_CPU_CORES;
+    }
+
+    void SetReselectionPending() {
+        reselection_pending.store(true, std::memory_order_release);
+    }
+
+    bool IsReselectionPending() {
+        return reselection_pending.load(std::memory_order_acquire);
+    }
+
+private:
+    void AskForReselectionOrMarkRedundant(Thread* current_thread, Thread* winner);
+
+    static constexpr u32 min_regular_priority = 2;
+    std::array<Common::MultiLevelQueue<Thread*, THREADPRIO_COUNT>, NUM_CPU_CORES> scheduled_queue;
+    std::array<Common::MultiLevelQueue<Thread*, THREADPRIO_COUNT>, NUM_CPU_CORES> suggested_queue;
+    std::atomic<bool> reselection_pending;
+
+    /// Lists all threads that aren't deleted/etc.
+    std::vector<SharedPtr<Thread>> thread_list;
+};
+
+class Scheduler final {
+public:
+    explicit Scheduler(Core::System& system, Core::ARM_Interface& cpu_core, const u32 id);
+    ~Scheduler();
+
+    /// Returns whether there are any threads that are ready to run.
+    bool HaveReadyThreads() const;
+
+    /// Reschedules to the next available thread (call after current thread is suspended)
+    void TryDoContextSwitch();
+
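+    /// Saves the current thread's context and unloads it from this core.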
+    void UnloadThread();
+
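+    /// Asks the global scheduler to select the next thread for this core.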
+    void SelectThreads();
+
+    /// Gets the current running thread
+    Thread* GetCurrentThread() const;
+
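+    /// Gets the thread selected by the global scheduler to run next on this core.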
+    Thread* GetSelectedThread() const;
+
+    /// Gets the timestamp for the last context switch in ticks.
+    u64 GetLastContextSwitchTicks() const;
+
+    bool ContextSwitchPending() const {
+        return context_switch_pending;
+    }
+
+private:
+    friend class GlobalScheduler;
     /**
      * Switches the CPU's active thread context to that of the specified thread
      * @param new_thread The thread to switch to
      */
-    void SwitchContext(Thread* new_thread);
+    void SwitchContext();
 
     /**
      * Called on every context switch to update the internal timestamp
@@ -152,19 +169,16 @@ private:
      */
     void UpdateLastContextSwitchTime(Thread* thread, Process* process);
 
-    /// Lists all thread ids that aren't deleted/etc.
-    std::vector<SharedPtr<Thread>> thread_list;
-
-    /// Lists only ready thread ids.
-    Common::MultiLevelQueue<Thread*, THREADPRIO_LOWEST + 1> ready_queue;
-
     SharedPtr<Thread> current_thread = nullptr;
-
-    Core::ARM_Interface& cpu_core;
-    u64 last_context_switch_time = 0;
+    SharedPtr<Thread> selected_thread = nullptr;
 
     Core::System& system;
-    static std::mutex scheduler_mutex;
+    Core::ARM_Interface& cpu_core;
+    u64 last_context_switch_time = 0;
+    u64 idle_selection_count = 0;
+    const u32 id;
+
+    bool context_switch_pending = false;
 };
 
 } // namespace Kernel