Mirror of https://git.suyu.dev/suyu/suyu.git, synced 2024-11-15 22:54:00 +00:00
Merge pull request #10859 from liamwhite/no-more-atomic-wait

general: remove atomic signal and wait

Commit a674022434: 9 changed files with 26 additions and 40 deletions
@@ -55,7 +55,7 @@ public:
         is_set = false;
     }
 
-    [[nodiscard]] bool IsSet() {
+    [[nodiscard]] bool IsSet() const {
         return is_set;
     }
 
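The Common::Event type that the rest of this change migrates to is not itself shown in the diff; only its IsSet() accessor is touched above. As a reading aid, here is a minimal sketch, assuming a conventional mutex-plus-condition-variable auto-reset event, of the Set()/Wait()/Reset()/IsSet() semantics the call sites below rely on. The class name and every detail are illustrative, not the actual implementation.

#include <atomic>
#include <condition_variable>
#include <mutex>

// SketchEvent: illustrative stand-in for an auto-reset event (NOT the real
// Common::Event). Set() wakes one waiter; Wait() blocks until set and then
// consumes the signal; Reset() clears a pending signal; IsSet() peeks at it.
class SketchEvent {
public:
    void Set() {
        std::scoped_lock lk{mutex};
        if (!is_set) {
            is_set = true;
            condvar.notify_one();
        }
    }

    void Wait() {
        std::unique_lock lk{mutex};
        condvar.wait(lk, [this] { return is_set.load(); });
        is_set = false; // auto-reset: waking up consumes the signal
    }

    void Reset() {
        std::scoped_lock lk{mutex};
        is_set = false;
    }

    [[nodiscard]] bool IsSet() const {
        return is_set; // reading an atomic flag needs no lock, so IsSet() can be const
    }

private:
    std::condition_variable condvar;
    std::mutex mutex;
    std::atomic_bool is_set{false};
};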
@@ -43,14 +43,10 @@ void Nvnflinger::SplitVSync(std::stop_token stop_token) {
     Common::SetCurrentThreadPriority(Common::ThreadPriority::High);
 
     while (!stop_token.stop_requested()) {
-        vsync_signal.wait(false);
-        vsync_signal.store(false);
-
-        guard->lock();
-
+        vsync_signal.Wait();
+
+        const auto lock_guard = Lock();
         Compose();
-
-        guard->unlock();
     }
 }
 
@@ -69,9 +65,8 @@ Nvnflinger::Nvnflinger(Core::System& system_, HosBinderDriverServer& hos_binder_
         "ScreenComposition",
         [this](std::uintptr_t, s64 time,
                std::chrono::nanoseconds ns_late) -> std::optional<std::chrono::nanoseconds> {
-            vsync_signal.store(true);
             { const auto lock_guard = Lock(); }
-            vsync_signal.notify_one();
+            vsync_signal.Set();
             return std::chrono::nanoseconds(GetNextTicks());
         });
 
@@ -97,8 +92,7 @@ Nvnflinger::~Nvnflinger() {
     if (system.IsMulticore()) {
         system.CoreTiming().UnscheduleEvent(multi_composition_event, {});
         vsync_thread.request_stop();
-        vsync_signal.store(true);
-        vsync_signal.notify_all();
+        vsync_signal.Set();
     } else {
        system.CoreTiming().UnscheduleEvent(single_composition_event, {});
     }
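Taken together, the three Nvnflinger hunks above move the vsync handshake from std::atomic<bool> wait/notify onto the event: the "ScreenComposition" timer callback now calls vsync_signal.Set(), SplitVSync blocks in vsync_signal.Wait() (which consumes the signal, so the explicit store(false) disappears), and the destructor calls Set() after request_stop() so a blocked Wait() returns and the jthread can exit. A condensed, compilable sketch of that shape, reusing the SketchEvent class from the earlier sketch; the timer loop and the Compose() stand-in are simplified placeholders, not the real Nvnflinger code.

#include <chrono>
#include <cstdio>
#include <thread>

int main() {
    SketchEvent vsync_signal;

    // Stand-in for SplitVSync: wait for a signal, then compose one frame.
    std::jthread vsync_thread{[&](std::stop_token stop_token) {
        while (!stop_token.stop_requested()) {
            vsync_signal.Wait(); // consumes the signal; no separate store(false) needed
            std::puts("Compose()");
        }
    }};

    // Stand-in for the "ScreenComposition" timer callback firing a few times.
    for (int i = 0; i < 3; ++i) {
        std::this_thread::sleep_for(std::chrono::milliseconds(16));
        vsync_signal.Set();
    }

    // Shutdown mirrors ~Nvnflinger: request the stop first, then set the event
    // once more so a Wait() that is already blocking can return.
    vsync_thread.request_stop();
    vsync_signal.Set();
}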
@@ -12,6 +12,7 @@
 
 #include "common/common_types.h"
 #include "common/polyfill_thread.h"
+#include "common/thread.h"
 #include "core/hle/result.h"
 #include "core/hle/service/kernel_helpers.h"
 
@@ -143,7 +144,7 @@ private:
 
     Core::System& system;
 
-    std::atomic<bool> vsync_signal;
+    Common::Event vsync_signal;
 
     std::jthread vsync_thread;
 
@@ -44,7 +44,7 @@ ServerManager::~ServerManager() {
     m_event->Signal();
 
     // Wait for processing to stop.
-    m_stopped.wait(false);
+    m_stopped.Wait();
     m_threads.clear();
 
     // Clean up ports.
@@ -182,10 +182,7 @@ void ServerManager::StartAdditionalHostThreads(const char* name, size_t num_thre
 }
 
 Result ServerManager::LoopProcess() {
-    SCOPE_EXIT({
-        m_stopped.store(true);
-        m_stopped.notify_all();
-    });
+    SCOPE_EXIT({ m_stopped.Set(); });
 
     R_RETURN(this->LoopProcessImpl());
 }
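The two ServerManager hunks above implement the same shutdown handshake with the event: the destructor signals the wakeup event, then blocks in m_stopped.Wait() until the processing loop has actually returned, which the one-line SCOPE_EXIT({ m_stopped.Set(); }) guarantees on every exit path of LoopProcess(). Below is a compilable sketch of that handshake, again reusing SketchEvent; the server class, its members, and the stop flag are illustrative stand-ins, not the real ServerManager API.

#include <atomic>
#include <thread>

struct SketchServer {
    SketchEvent m_wake;                        // stand-in for the kernel event signalled in the destructor
    SketchEvent m_stopped;
    std::atomic<bool> m_stop_requested{false};
    std::jthread m_thread{[this] { LoopProcess(); }};

    void LoopProcess() {
        // Plays the role of SCOPE_EXIT({ m_stopped.Set(); }): fires on any return path.
        struct StopGuard {
            SketchEvent& stopped;
            ~StopGuard() { stopped.Set(); }
        } guard{m_stopped};

        while (!m_stop_requested.load()) {
            m_wake.Wait(); // process requests until told to stop
        }
    }

    ~SketchServer() {
        m_stop_requested = true;
        m_wake.Set();      // wake the loop (the destructor hunk's m_event->Signal())
        m_stopped.Wait();  // "Wait for processing to stop."
        // m_thread joins afterwards, much like m_threads.clear() in the hunk.
    }
};

int main() {
    SketchServer server; // constructing and destroying it exercises the handshake
}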
@@ -3,7 +3,6 @@
 
 #pragma once
 
-#include <atomic>
 #include <functional>
 #include <list>
 #include <map>
@@ -12,6 +11,7 @@
 #include <vector>
 
 #include "common/polyfill_thread.h"
+#include "common/thread.h"
 #include "core/hle/result.h"
 #include "core/hle/service/mutex.h"
 
@@ -82,7 +82,7 @@ private:
     std::list<RequestState> m_deferrals{};
 
     // Host state tracking
-    std::atomic<bool> m_stopped{};
+    Common::Event m_stopped{};
     std::vector<std::jthread> m_threads{};
     std::stop_source m_stop_source{};
 };
@@ -75,15 +75,9 @@ void MasterSemaphore::Refresh() {
 
 void MasterSemaphore::Wait(u64 tick) {
     if (!semaphore) {
-        // If we don't support timeline semaphores, use an atomic wait
-        while (true) {
-            u64 current_value = gpu_tick.load(std::memory_order_relaxed);
-            if (current_value >= tick) {
-                return;
-            }
-            gpu_tick.wait(current_value);
-        }
-
+        // If we don't support timeline semaphores, wait for the value normally
+        std::unique_lock lk{free_mutex};
+        free_cv.wait(lk, [&] { return gpu_tick.load(std::memory_order_relaxed) >= tick; });
         return;
     }
 
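The removed loop hand-rolled "block until gpu_tick reaches tick" with std::atomic::wait, re-reading the value and re-waiting on every wakeup. The replacement is the standard condition-variable form: wait on free_cv with a predicate that is re-evaluated under free_mutex, so spurious wakeups and stale notifications are absorbed by the predicate. A self-contained sketch of that substitution follows; the class and its Signal() helper are illustrative, not the renderer's real interface, and the sketch notifies all waiters as the conservative choice when several threads may wait on different ticks.

#include <atomic>
#include <condition_variable>
#include <cstdint>
#include <mutex>

class TickWaiter {
public:
    // Block until the published tick reaches at least `tick`.
    void Wait(std::uint64_t tick) {
        std::unique_lock lk{free_mutex};
        // The predicate runs with free_mutex held, both before sleeping and
        // after every wakeup, so a wakeup for an older tick simply goes back
        // to sleep; no hand-written re-check loop is needed.
        free_cv.wait(lk, [&] { return gpu_tick.load(std::memory_order_relaxed) >= tick; });
    }

    // Publish a newly completed tick and wake the waiters.
    void Signal(std::uint64_t host_tick) {
        {
            std::scoped_lock lk{free_mutex};
            gpu_tick.store(host_tick); // updated while holding the waiters' mutex
        }
        free_cv.notify_all();          // notify after unlocking
    }

private:
    std::atomic<std::uint64_t> gpu_tick{0};
    std::mutex free_mutex;
    std::condition_variable free_cv;
};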
@@ -198,11 +192,13 @@ void MasterSemaphore::WaitThread(std::stop_token token) {
 
         fence.Wait();
         fence.Reset();
-        gpu_tick.store(host_tick);
-        gpu_tick.notify_all();
 
-        std::scoped_lock lock{free_mutex};
-        free_queue.push_front(std::move(fence));
+        {
+            std::scoped_lock lock{free_mutex};
+            free_queue.push_front(std::move(fence));
+            gpu_tick.store(host_tick);
+        }
+        free_cv.notify_one();
     }
 }
 
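The WaitThread hunk is the producer side of the same change. Two details carry the correctness argument: gpu_tick is now stored inside the free_mutex critical section, so a waiter that has just evaluated its predicate under that mutex cannot have the update slip past it before it goes to sleep, and the notify happens after the lock is released so the woken thread can take the mutex immediately. A sketch of that ordering with illustrative stand-in types; the fence is reduced to an int and the names only mirror the hunk.

#include <atomic>
#include <condition_variable>
#include <cstdint>
#include <deque>
#include <mutex>

struct SketchSemaphoreState {
    std::atomic<std::uint64_t> gpu_tick{0};
    std::mutex free_mutex;
    std::condition_variable free_cv;
    std::deque<int> free_queue; // stand-in for the recycled vk::Fence objects
};

// One producer iteration, after a fence has been waited on and reset.
void PublishCompletedTick(SketchSemaphoreState& s, int recycled_fence, std::uint64_t host_tick) {
    {
        std::scoped_lock lock{s.free_mutex};
        s.free_queue.push_front(recycled_fence);
        // Store under free_mutex: any waiter checking its predicate holds the
        // same mutex, so the check and this update cannot interleave badly.
        s.gpu_tick.store(host_tick);
    }
    // Notify with the mutex released so the woken waiter does not immediately
    // block again trying to reacquire it.
    s.free_cv.notify_one();
}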
@@ -72,6 +72,7 @@ private:
     std::atomic<u64> current_tick{1}; ///< Current logical tick.
     std::mutex wait_mutex;
     std::mutex free_mutex;
+    std::condition_variable free_cv;
    std::condition_variable_any wait_cv;
     std::queue<Waitable> wait_queue; ///< Queue for the fences to be waited on by the wait thread.
     std::deque<vk::Fence> free_queue; ///< Holds available fences for submission.
@@ -105,14 +105,12 @@ void EmuThread::run() {
         std::unique_lock lk{m_should_run_mutex};
         if (m_should_run) {
             m_system.Run();
-            m_is_running.store(true);
-            m_is_running.notify_all();
+            m_stopped.Reset();
 
             Common::CondvarWait(m_should_run_cv, lk, stop_token, [&] { return !m_should_run; });
         } else {
             m_system.Pause();
-            m_is_running.store(false);
-            m_is_running.notify_all();
+            m_stopped.Set();
 
             EmulationPaused(lk);
             Common::CondvarWait(m_should_run_cv, lk, stop_token, [&] { return m_should_run; });
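In the frontend, the m_is_running atomic becomes an m_stopped event with the meaning inverted: the emulation thread Reset()s it when it starts running and Set()s it once it has actually paused, so a caller that clears m_should_run can Wait() until the pause has taken effect (the -88,7 hunk further down). A compilable sketch of that handshake, reusing SketchEvent; the class and its SetRunning() helper are illustrative, and where the hunk uses Common::CondvarWait the sketch calls the std::condition_variable_any overload that takes a stop_token directly.

#include <condition_variable>
#include <mutex>
#include <stop_token>
#include <thread>

struct SketchEmuThread {
    SketchEvent m_stopped;
    std::mutex m_should_run_mutex;
    std::condition_variable_any m_should_run_cv;
    bool m_should_run{true};

    // Body of the emulation thread (the run() hunk above, condensed).
    void Run(std::stop_token stop_token) {
        while (!stop_token.stop_requested()) {
            std::unique_lock lk{m_should_run_mutex};
            if (m_should_run) {
                m_stopped.Reset(); // running again: clear the "paused" signal
                m_should_run_cv.wait(lk, stop_token, [&] { return !m_should_run; });
            } else {
                m_stopped.Set();   // actually paused: release anyone waiting for it
                m_should_run_cv.wait(lk, stop_token, [&] { return m_should_run; });
            }
        }
    }

    // Caller-side pause/resume request (the -88,7 hunk, condensed).
    void SetRunning(bool running) {
        {
            std::scoped_lock lk{m_should_run_mutex};
            m_should_run = running;
        }
        m_should_run_cv.notify_all();
        if (!running) {
            m_stopped.Wait(); // wait until paused, if pausing
        }
    }
};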
@@ -3,7 +3,6 @@
 
 #pragma once
 
-#include <atomic>
 #include <condition_variable>
 #include <cstddef>
 #include <memory>
@@ -88,7 +87,7 @@ public:
 
         // Wait until paused, if pausing.
         if (!should_run) {
-            m_is_running.wait(true);
+            m_stopped.Wait();
         }
     }
 
@@ -97,7 +96,7 @@ public:
      * @return True if the emulation thread is running, otherwise false
      */
     bool IsRunning() const {
-        return m_is_running.load() || m_should_run;
+        return m_should_run;
     }
 
     /**
@@ -118,7 +117,7 @@ private:
     std::stop_source m_stop_source;
     std::mutex m_should_run_mutex;
     std::condition_variable_any m_should_run_cv;
-    std::atomic<bool> m_is_running{false};
+    Common::Event m_stopped;
     bool m_should_run{true};
 
 signals: