Mirror of https://git.suyu.dev/suyu/suyu.git (synced 2024-11-15 22:54:00 +00:00)

Merge pull request #3396 from FernandoS27/prometheus-1
Implement SpinLocks, Fibers and a Host Timer

This commit is contained in: commit 0ea4a8bcc4
22 changed files with 1646 additions and 3 deletions

src/common/CMakeLists.txt

@@ -110,6 +110,8 @@ add_library(common STATIC
     common_types.h
     dynamic_library.cpp
     dynamic_library.h
+    fiber.cpp
+    fiber.h
     file_util.cpp
     file_util.h
     hash.h
@@ -143,6 +145,8 @@ add_library(common STATIC
     scm_rev.cpp
     scm_rev.h
     scope_exit.h
+    spin_lock.cpp
+    spin_lock.h
     string_util.cpp
     string_util.h
     swap.h
@@ -163,6 +167,8 @@ add_library(common STATIC
     vector_math.h
     virtual_buffer.cpp
     virtual_buffer.h
+    wall_clock.cpp
+    wall_clock.h
     web_result.h
     zstd_compression.cpp
     zstd_compression.h
@@ -173,12 +179,15 @@ if(ARCHITECTURE_x86_64)
         PRIVATE
             x64/cpu_detect.cpp
             x64/cpu_detect.h
+            x64/native_clock.cpp
+            x64/native_clock.h
             x64/xbyak_abi.h
             x64/xbyak_util.h
     )
 endif()
 
 create_target_directory_groups(common)
 
-target_link_libraries(common PUBLIC Boost::boost fmt::fmt microprofile)
+find_package(Boost 1.71 COMPONENTS context headers REQUIRED)
+target_link_libraries(common PUBLIC ${Boost_LIBRARIES} fmt::fmt microprofile)
 target_link_libraries(common PRIVATE lz4::lz4 zstd::zstd xbyak)

src/common/fiber.cpp (new file, 226 lines)

// Copyright 2020 yuzu Emulator Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.

#include "common/assert.h"
#include "common/fiber.h"
#if defined(_WIN32) || defined(WIN32)
#include <windows.h>
#else
#include <boost/context/detail/fcontext.hpp>
#endif

namespace Common {

constexpr std::size_t default_stack_size = 256 * 1024; // 256kb

#if defined(_WIN32) || defined(WIN32)

struct Fiber::FiberImpl {
    LPVOID handle = nullptr;
    LPVOID rewind_handle = nullptr;
};

void Fiber::Start() {
    ASSERT(previous_fiber != nullptr);
    previous_fiber->guard.unlock();
    previous_fiber.reset();
    entry_point(start_parameter);
    UNREACHABLE();
}

void Fiber::OnRewind() {
    ASSERT(impl->handle != nullptr);
    DeleteFiber(impl->handle);
    impl->handle = impl->rewind_handle;
    impl->rewind_handle = nullptr;
    rewind_point(rewind_parameter);
    UNREACHABLE();
}

void Fiber::FiberStartFunc(void* fiber_parameter) {
    auto fiber = static_cast<Fiber*>(fiber_parameter);
    fiber->Start();
}

void Fiber::RewindStartFunc(void* fiber_parameter) {
    auto fiber = static_cast<Fiber*>(fiber_parameter);
    fiber->OnRewind();
}

Fiber::Fiber(std::function<void(void*)>&& entry_point_func, void* start_parameter)
    : entry_point{std::move(entry_point_func)}, start_parameter{start_parameter} {
    impl = std::make_unique<FiberImpl>();
    impl->handle = CreateFiber(default_stack_size, &FiberStartFunc, this);
}

Fiber::Fiber() {
    impl = std::make_unique<FiberImpl>();
}

Fiber::~Fiber() {
    if (released) {
        return;
    }
    // Make sure the Fiber is not being used
    const bool locked = guard.try_lock();
    ASSERT_MSG(locked, "Destroying a fiber that's still running");
    if (locked) {
        guard.unlock();
    }
    DeleteFiber(impl->handle);
}

void Fiber::Exit() {
    ASSERT_MSG(is_thread_fiber, "Exitting non main thread fiber");
    if (!is_thread_fiber) {
        return;
    }
    ConvertFiberToThread();
    guard.unlock();
    released = true;
}

void Fiber::SetRewindPoint(std::function<void(void*)>&& rewind_func, void* start_parameter) {
    rewind_point = std::move(rewind_func);
    rewind_parameter = start_parameter;
}

void Fiber::Rewind() {
    ASSERT(rewind_point);
    ASSERT(impl->rewind_handle == nullptr);
    impl->rewind_handle = CreateFiber(default_stack_size, &RewindStartFunc, this);
    SwitchToFiber(impl->rewind_handle);
}

void Fiber::YieldTo(std::shared_ptr<Fiber>& from, std::shared_ptr<Fiber>& to) {
    ASSERT_MSG(from != nullptr, "Yielding fiber is null!");
    ASSERT_MSG(to != nullptr, "Next fiber is null!");
    to->guard.lock();
    to->previous_fiber = from;
    SwitchToFiber(to->impl->handle);
    ASSERT(from->previous_fiber != nullptr);
    from->previous_fiber->guard.unlock();
    from->previous_fiber.reset();
}

std::shared_ptr<Fiber> Fiber::ThreadToFiber() {
    std::shared_ptr<Fiber> fiber = std::shared_ptr<Fiber>{new Fiber()};
    fiber->guard.lock();
    fiber->impl->handle = ConvertThreadToFiber(nullptr);
    fiber->is_thread_fiber = true;
    return fiber;
}

#else

struct Fiber::FiberImpl {
    alignas(64) std::array<u8, default_stack_size> stack;
    u8* stack_limit;
    alignas(64) std::array<u8, default_stack_size> rewind_stack;
    u8* rewind_stack_limit;
    boost::context::detail::fcontext_t context;
    boost::context::detail::fcontext_t rewind_context;
};

void Fiber::Start(boost::context::detail::transfer_t& transfer) {
    ASSERT(previous_fiber != nullptr);
    previous_fiber->impl->context = transfer.fctx;
    previous_fiber->guard.unlock();
    previous_fiber.reset();
    entry_point(start_parameter);
    UNREACHABLE();
}

void Fiber::OnRewind([[maybe_unused]] boost::context::detail::transfer_t& transfer) {
    ASSERT(impl->context != nullptr);
    impl->context = impl->rewind_context;
    impl->rewind_context = nullptr;
    u8* tmp = impl->stack_limit;
    impl->stack_limit = impl->rewind_stack_limit;
    impl->rewind_stack_limit = tmp;
    rewind_point(rewind_parameter);
    UNREACHABLE();
}

void Fiber::FiberStartFunc(boost::context::detail::transfer_t transfer) {
    auto fiber = static_cast<Fiber*>(transfer.data);
    fiber->Start(transfer);
}

void Fiber::RewindStartFunc(boost::context::detail::transfer_t transfer) {
    auto fiber = static_cast<Fiber*>(transfer.data);
    fiber->OnRewind(transfer);
}

Fiber::Fiber(std::function<void(void*)>&& entry_point_func, void* start_parameter)
    : entry_point{std::move(entry_point_func)}, start_parameter{start_parameter} {
    impl = std::make_unique<FiberImpl>();
    impl->stack_limit = impl->stack.data();
    impl->rewind_stack_limit = impl->rewind_stack.data();
    u8* stack_base = impl->stack_limit + default_stack_size;
    impl->context =
        boost::context::detail::make_fcontext(stack_base, impl->stack.size(), FiberStartFunc);
}

void Fiber::SetRewindPoint(std::function<void(void*)>&& rewind_func, void* start_parameter) {
    rewind_point = std::move(rewind_func);
    rewind_parameter = start_parameter;
}

Fiber::Fiber() {
    impl = std::make_unique<FiberImpl>();
}

Fiber::~Fiber() {
    if (released) {
        return;
    }
    // Make sure the Fiber is not being used
    const bool locked = guard.try_lock();
    ASSERT_MSG(locked, "Destroying a fiber that's still running");
    if (locked) {
        guard.unlock();
    }
}

void Fiber::Exit() {

    ASSERT_MSG(is_thread_fiber, "Exitting non main thread fiber");
    if (!is_thread_fiber) {
        return;
    }
    guard.unlock();
    released = true;
}

void Fiber::Rewind() {
    ASSERT(rewind_point);
    ASSERT(impl->rewind_context == nullptr);
    u8* stack_base = impl->rewind_stack_limit + default_stack_size;
    impl->rewind_context =
        boost::context::detail::make_fcontext(stack_base, impl->stack.size(), RewindStartFunc);
    boost::context::detail::jump_fcontext(impl->rewind_context, this);
}

void Fiber::YieldTo(std::shared_ptr<Fiber>& from, std::shared_ptr<Fiber>& to) {
    ASSERT_MSG(from != nullptr, "Yielding fiber is null!");
    ASSERT_MSG(to != nullptr, "Next fiber is null!");
    to->guard.lock();
    to->previous_fiber = from;
    auto transfer = boost::context::detail::jump_fcontext(to->impl->context, to.get());
    ASSERT(from->previous_fiber != nullptr);
    from->previous_fiber->impl->context = transfer.fctx;
    from->previous_fiber->guard.unlock();
    from->previous_fiber.reset();
}

std::shared_ptr<Fiber> Fiber::ThreadToFiber() {
    std::shared_ptr<Fiber> fiber = std::shared_ptr<Fiber>{new Fiber()};
    fiber->guard.lock();
    fiber->is_thread_fiber = true;
    return fiber;
}

#endif
} // namespace Common

src/common/fiber.h (new file, 92 lines)

// Copyright 2020 yuzu Emulator Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.

#pragma once

#include <functional>
#include <memory>

#include "common/common_types.h"
#include "common/spin_lock.h"

#if !defined(_WIN32) && !defined(WIN32)
namespace boost::context::detail {
struct transfer_t;
}
#endif

namespace Common {

/**
 * Fiber class
 * a fiber is a userspace thread with it's own context. They can be used to
 * implement coroutines, emulated threading systems and certain asynchronous
 * patterns.
 *
 * This class implements fibers at a low level, thus allowing greater freedom
 * to implement such patterns. This fiber class is 'threadsafe' only one fiber
 * can be running at a time and threads will be locked while trying to yield to
 * a running fiber until it yields. WARNING exchanging two running fibers between
 * threads will cause a deadlock. In order to prevent a deadlock, each thread should
 * have an intermediary fiber, you switch to the intermediary fiber of the current
 * thread and then from it switch to the expected fiber. This way you can exchange
 * 2 fibers within 2 different threads.
 */
class Fiber {
public:
    Fiber(std::function<void(void*)>&& entry_point_func, void* start_parameter);
    ~Fiber();

    Fiber(const Fiber&) = delete;
    Fiber& operator=(const Fiber&) = delete;

    Fiber(Fiber&&) = default;
    Fiber& operator=(Fiber&&) = default;

    /// Yields control from Fiber 'from' to Fiber 'to'
    /// Fiber 'from' must be the currently running fiber.
    static void YieldTo(std::shared_ptr<Fiber>& from, std::shared_ptr<Fiber>& to);
    static std::shared_ptr<Fiber> ThreadToFiber();

    void SetRewindPoint(std::function<void(void*)>&& rewind_func, void* start_parameter);

    void Rewind();

    /// Only call from main thread's fiber
    void Exit();

    /// Changes the start parameter of the fiber. Has no effect if the fiber already started
    void SetStartParameter(void* new_parameter) {
        start_parameter = new_parameter;
    }

private:
    Fiber();

#if defined(_WIN32) || defined(WIN32)
    void OnRewind();
    void Start();
    static void FiberStartFunc(void* fiber_parameter);
    static void RewindStartFunc(void* fiber_parameter);
#else
    void OnRewind(boost::context::detail::transfer_t& transfer);
    void Start(boost::context::detail::transfer_t& transfer);
    static void FiberStartFunc(boost::context::detail::transfer_t transfer);
    static void RewindStartFunc(boost::context::detail::transfer_t transfer);
#endif

    struct FiberImpl;

    SpinLock guard{};
    std::function<void(void*)> entry_point;
    std::function<void(void*)> rewind_point;
    void* rewind_parameter{};
    void* start_parameter{};
    std::shared_ptr<Fiber> previous_fiber;
    std::unique_ptr<FiberImpl> impl;
    bool is_thread_fiber{};
    bool released{};
};

} // namespace Common

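As a minimal usage sketch of the Fiber API above (not part of the diff; the WorkerContext type and the function names are illustrative), a host thread first adopts itself as a fiber, yields into a work fiber, and the work fiber hands control back instead of returning:

#include <functional>
#include <memory>
#include "common/fiber.h"

// Illustrative context shared between the thread fiber and the work fiber.
struct WorkerContext {
    std::shared_ptr<Common::Fiber> thread_fiber;
    std::shared_ptr<Common::Fiber> work_fiber;
};

static void WorkerEntry(void* param) {
    auto* ctx = static_cast<WorkerContext*>(param);
    // ... emulated work would run here ...
    // A fiber must never return from its entry point; it yields back instead.
    Common::Fiber::YieldTo(ctx->work_fiber, ctx->thread_fiber);
}

void RunWorkerOnThisThread(WorkerContext& ctx) {
    ctx.thread_fiber = Common::Fiber::ThreadToFiber(); // adopt the current thread
    ctx.work_fiber =
        std::make_shared<Common::Fiber>(std::function<void(void*)>{WorkerEntry}, &ctx);
    Common::Fiber::YieldTo(ctx.thread_fiber, ctx.work_fiber); // runs WorkerEntry
    ctx.thread_fiber->Exit(); // release the thread fiber once done
}

This mirrors the pattern used by the fiber unit tests added later in this PR.
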
src/common/spin_lock.cpp (new file, 54 lines)

// Copyright 2020 yuzu Emulator Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.

#include "common/spin_lock.h"

#if _MSC_VER
#include <intrin.h>
#if _M_AMD64
#define __x86_64__ 1
#endif
#if _M_ARM64
#define __aarch64__ 1
#endif
#else
#if __x86_64__
#include <xmmintrin.h>
#endif
#endif

namespace {

void thread_pause() {
#if __x86_64__
    _mm_pause();
#elif __aarch64__ && _MSC_VER
    __yield();
#elif __aarch64__
    asm("yield");
#endif
}

} // namespace

namespace Common {

void SpinLock::lock() {
    while (lck.test_and_set(std::memory_order_acquire)) {
        thread_pause();
    }
}

void SpinLock::unlock() {
    lck.clear(std::memory_order_release);
}

bool SpinLock::try_lock() {
    if (lck.test_and_set(std::memory_order_acquire)) {
        return false;
    }
    return true;
}

} // namespace Common

src/common/spin_lock.h (new file, 21 lines)

// Copyright 2020 yuzu Emulator Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.

#pragma once

#include <atomic>

namespace Common {

class SpinLock {
public:
    void lock();
    void unlock();
    bool try_lock();

private:
    std::atomic_flag lck = ATOMIC_FLAG_INIT;
};

} // namespace Common

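Because SpinLock exposes lock(), unlock() and try_lock(), it satisfies the standard Lockable requirements and can be used with the usual RAII guards. A small sketch (the counter and its names are illustrative, not from the diff):

#include <mutex>
#include "common/spin_lock.h"

namespace {
Common::SpinLock g_counter_lock;
int g_counter = 0;
} // namespace

void IncrementCounter() {
    // std::scoped_lock busy-waits (via thread_pause) until the flag is acquired.
    std::scoped_lock lock{g_counter_lock};
    ++g_counter;
}
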
src/common/thread.h

@@ -9,6 +9,7 @@
 #include <cstddef>
 #include <mutex>
 #include <thread>
+#include "common/common_types.h"
 
 namespace Common {
 
@@ -28,8 +29,7 @@ public:
         is_set = false;
     }
 
-    template <class Duration>
-    bool WaitFor(const std::chrono::duration<Duration>& time) {
+    bool WaitFor(const std::chrono::nanoseconds& time) {
         std::unique_lock lk{mutex};
         if (!condvar.wait_for(lk, time, [this] { return is_set; }))
             return false;

src/common/uint128.cpp

@@ -6,12 +6,38 @@
 #include <intrin.h>
 
 #pragma intrinsic(_umul128)
+#pragma intrinsic(_udiv128)
 #endif
 #include <cstring>
 #include "common/uint128.h"
 
 namespace Common {
 
+#ifdef _MSC_VER
+
+u64 MultiplyAndDivide64(u64 a, u64 b, u64 d) {
+    u128 r{};
+    r[0] = _umul128(a, b, &r[1]);
+    u64 remainder;
+#if _MSC_VER < 1923
+    return udiv128(r[1], r[0], d, &remainder);
+#else
+    return _udiv128(r[1], r[0], d, &remainder);
+#endif
+}
+
+#else
+
+u64 MultiplyAndDivide64(u64 a, u64 b, u64 d) {
+    const u64 diva = a / d;
+    const u64 moda = a % d;
+    const u64 divb = b / d;
+    const u64 modb = b % d;
+    return diva * b + moda * divb + moda * modb / d;
+}
+
+#endif
+
 u128 Multiply64Into128(u64 a, u64 b) {
     u128 result;
 #ifdef _MSC_VER

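A short derivation of the non-MSVC fallback above (my own working, not stated in the diff): writing a = d*(a/d) + (a%d) and b = d*(b/d) + (b%d) gives

    (a*b)/d = (a/d)*b + (a%d)*(b/d) + ((a%d)*(b%d))/d

Only the last term still requires a division, and it is the only place integer truncation happens, so the fallback matches the 128-bit MSVC path whenever (a%d)*(b%d) fits in 64 bits; for larger remainders the expression can lose precision.
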
src/common/uint128.h

@@ -9,6 +9,9 @@
 
 namespace Common {
 
+// This function multiplies 2 u64 values and divides it by a u64 value.
+u64 MultiplyAndDivide64(u64 a, u64 b, u64 d);
+
 // This function multiplies 2 u64 values and produces a u128 value;
 u128 Multiply64Into128(u64 a, u64 b);
 

src/common/wall_clock.cpp (new file, 92 lines)

// Copyright 2020 yuzu Emulator Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.

#include "common/uint128.h"
#include "common/wall_clock.h"

#ifdef ARCHITECTURE_x86_64
#include "common/x64/cpu_detect.h"
#include "common/x64/native_clock.h"
#endif

namespace Common {

using base_timer = std::chrono::steady_clock;
using base_time_point = std::chrono::time_point<base_timer>;

class StandardWallClock : public WallClock {
public:
    StandardWallClock(u64 emulated_cpu_frequency, u64 emulated_clock_frequency)
        : WallClock(emulated_cpu_frequency, emulated_clock_frequency, false) {
        start_time = base_timer::now();
    }

    std::chrono::nanoseconds GetTimeNS() override {
        base_time_point current = base_timer::now();
        auto elapsed = current - start_time;
        return std::chrono::duration_cast<std::chrono::nanoseconds>(elapsed);
    }

    std::chrono::microseconds GetTimeUS() override {
        base_time_point current = base_timer::now();
        auto elapsed = current - start_time;
        return std::chrono::duration_cast<std::chrono::microseconds>(elapsed);
    }

    std::chrono::milliseconds GetTimeMS() override {
        base_time_point current = base_timer::now();
        auto elapsed = current - start_time;
        return std::chrono::duration_cast<std::chrono::milliseconds>(elapsed);
    }

    u64 GetClockCycles() override {
        std::chrono::nanoseconds time_now = GetTimeNS();
        const u128 temporary =
            Common::Multiply64Into128(time_now.count(), emulated_clock_frequency);
        return Common::Divide128On32(temporary, 1000000000).first;
    }

    u64 GetCPUCycles() override {
        std::chrono::nanoseconds time_now = GetTimeNS();
        const u128 temporary = Common::Multiply64Into128(time_now.count(), emulated_cpu_frequency);
        return Common::Divide128On32(temporary, 1000000000).first;
    }

private:
    base_time_point start_time;
};

#ifdef ARCHITECTURE_x86_64

std::unique_ptr<WallClock> CreateBestMatchingClock(u32 emulated_cpu_frequency,
                                                   u32 emulated_clock_frequency) {
    const auto& caps = GetCPUCaps();
    u64 rtsc_frequency = 0;
    if (caps.invariant_tsc) {
        if (caps.base_frequency != 0) {
            rtsc_frequency = static_cast<u64>(caps.base_frequency) * 1000000U;
        }
        if (rtsc_frequency == 0) {
            rtsc_frequency = EstimateRDTSCFrequency();
        }
    }
    if (rtsc_frequency == 0) {
        return std::make_unique<StandardWallClock>(emulated_cpu_frequency,
                                                   emulated_clock_frequency);
    } else {
        return std::make_unique<X64::NativeClock>(emulated_cpu_frequency, emulated_clock_frequency,
                                                  rtsc_frequency);
    }
}

#else

std::unique_ptr<WallClock> CreateBestMatchingClock(u32 emulated_cpu_frequency,
                                                   u32 emulated_clock_frequency) {
    return std::make_unique<StandardWallClock>(emulated_cpu_frequency, emulated_clock_frequency);
}

#endif

} // namespace Common

src/common/wall_clock.h (new file, 51 lines)

// Copyright 2020 yuzu Emulator Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.

#pragma once

#include <chrono>
#include <memory>

#include "common/common_types.h"

namespace Common {

class WallClock {
public:
    /// Returns current wall time in nanoseconds
    virtual std::chrono::nanoseconds GetTimeNS() = 0;

    /// Returns current wall time in microseconds
    virtual std::chrono::microseconds GetTimeUS() = 0;

    /// Returns current wall time in milliseconds
    virtual std::chrono::milliseconds GetTimeMS() = 0;

    /// Returns current wall time in emulated clock cycles
    virtual u64 GetClockCycles() = 0;

    /// Returns current wall time in emulated cpu cycles
    virtual u64 GetCPUCycles() = 0;

    /// Tells if the wall clock, uses the host CPU's hardware clock
    bool IsNative() const {
        return is_native;
    }

protected:
    WallClock(u64 emulated_cpu_frequency, u64 emulated_clock_frequency, bool is_native)
        : emulated_cpu_frequency{emulated_cpu_frequency},
          emulated_clock_frequency{emulated_clock_frequency}, is_native{is_native} {}

    u64 emulated_cpu_frequency;
    u64 emulated_clock_frequency;

private:
    bool is_native;
};

std::unique_ptr<WallClock> CreateBestMatchingClock(u32 emulated_cpu_frequency,
                                                   u32 emulated_clock_frequency);

} // namespace Common

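A hedged usage sketch of the factory declared above (the frequency values are assumed to be the Switch constants this PR reads from Core::Hardware elsewhere; treat them as illustrative):

#include "common/wall_clock.h"

void WallClockExample() {
    // ~1.02 GHz emulated CPU clock, 19.2 MHz counter frequency (assumed constants).
    auto clock = Common::CreateBestMatchingClock(1019215872, 19200000);
    const std::chrono::nanoseconds ns = clock->GetTimeNS(); // host time since creation
    const u64 clock_cycles = clock->GetClockCycles();       // same instant in emulated cycles
    const bool uses_rdtsc = clock->IsNative();              // true only on invariant-TSC x86_64
    (void)ns;
    (void)clock_cycles;
    (void)uses_rdtsc;
}
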
src/common/x64/cpu_detect.cpp

@@ -62,6 +62,17 @@ static CPUCaps Detect() {
     std::memcpy(&caps.brand_string[0], &cpu_id[1], sizeof(int));
     std::memcpy(&caps.brand_string[4], &cpu_id[3], sizeof(int));
     std::memcpy(&caps.brand_string[8], &cpu_id[2], sizeof(int));
+    if (cpu_id[1] == 0x756e6547 && cpu_id[2] == 0x6c65746e && cpu_id[3] == 0x49656e69)
+        caps.manufacturer = Manufacturer::Intel;
+    else if (cpu_id[1] == 0x68747541 && cpu_id[2] == 0x444d4163 && cpu_id[3] == 0x69746e65)
+        caps.manufacturer = Manufacturer::AMD;
+    else if (cpu_id[1] == 0x6f677948 && cpu_id[2] == 0x656e6975 && cpu_id[3] == 0x6e65476e)
+        caps.manufacturer = Manufacturer::Hygon;
+    else
+        caps.manufacturer = Manufacturer::Unknown;
+
+    u32 family = {};
+    u32 model = {};
 
     __cpuid(cpu_id, 0x80000000);
 
@@ -73,6 +84,14 @@ static CPUCaps Detect() {
     // Detect family and other miscellaneous features
     if (max_std_fn >= 1) {
         __cpuid(cpu_id, 0x00000001);
+        family = (cpu_id[0] >> 8) & 0xf;
+        model = (cpu_id[0] >> 4) & 0xf;
+        if (family == 0xf) {
+            family += (cpu_id[0] >> 20) & 0xff;
+        }
+        if (family >= 6) {
+            model += ((cpu_id[0] >> 16) & 0xf) << 4;
+        }
 
         if ((cpu_id[3] >> 25) & 1)
             caps.sse = true;
@@ -135,6 +154,20 @@ static CPUCaps Detect() {
             caps.fma4 = true;
         }
 
+    if (max_ex_fn >= 0x80000007) {
+        __cpuid(cpu_id, 0x80000007);
+        if (cpu_id[3] & (1 << 8)) {
+            caps.invariant_tsc = true;
+        }
+    }
+
+    if (max_std_fn >= 0x16) {
+        __cpuid(cpu_id, 0x16);
+        caps.base_frequency = cpu_id[0];
+        caps.max_frequency = cpu_id[1];
+        caps.bus_frequency = cpu_id[2];
+    }
+
     return caps;
 }
 

src/common/x64/cpu_detect.h

@@ -6,8 +6,16 @@
 
 namespace Common {
 
+enum class Manufacturer : u32 {
+    Intel = 0,
+    AMD = 1,
+    Hygon = 2,
+    Unknown = 3,
+};
+
 /// x86/x64 CPU capabilities that may be detected by this module
 struct CPUCaps {
+    Manufacturer manufacturer;
     char cpu_string[0x21];
     char brand_string[0x41];
     bool sse;
@@ -25,6 +33,10 @@ struct CPUCaps {
     bool fma;
     bool fma4;
     bool aes;
+    bool invariant_tsc;
+    u32 base_frequency;
+    u32 max_frequency;
+    u32 bus_frequency;
 };
 
 /**

src/common/x64/native_clock.cpp (new file, 95 lines)

// Copyright 2020 yuzu Emulator Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.

#include <chrono>
#include <thread>

#ifdef _MSC_VER
#include <intrin.h>
#else
#include <x86intrin.h>
#endif

#include "common/uint128.h"
#include "common/x64/native_clock.h"

namespace Common {

u64 EstimateRDTSCFrequency() {
    const auto milli_10 = std::chrono::milliseconds{10};
    // get current time
    _mm_mfence();
    const u64 tscStart = __rdtsc();
    const auto startTime = std::chrono::high_resolution_clock::now();
    // wait roughly 3 seconds
    while (true) {
        auto milli = std::chrono::duration_cast<std::chrono::milliseconds>(
            std::chrono::high_resolution_clock::now() - startTime);
        if (milli.count() >= 3000)
            break;
        std::this_thread::sleep_for(milli_10);
    }
    const auto endTime = std::chrono::high_resolution_clock::now();
    _mm_mfence();
    const u64 tscEnd = __rdtsc();
    // calculate difference
    const u64 timer_diff =
        std::chrono::duration_cast<std::chrono::nanoseconds>(endTime - startTime).count();
    const u64 tsc_diff = tscEnd - tscStart;
    const u64 tsc_freq = MultiplyAndDivide64(tsc_diff, 1000000000ULL, timer_diff);
    return tsc_freq;
}

namespace X64 {
NativeClock::NativeClock(u64 emulated_cpu_frequency, u64 emulated_clock_frequency,
                         u64 rtsc_frequency)
    : WallClock(emulated_cpu_frequency, emulated_clock_frequency, true), rtsc_frequency{
                                                                             rtsc_frequency} {
    _mm_mfence();
    last_measure = __rdtsc();
    accumulated_ticks = 0U;
}

u64 NativeClock::GetRTSC() {
    rtsc_serialize.lock();
    _mm_mfence();
    const u64 current_measure = __rdtsc();
    u64 diff = current_measure - last_measure;
    diff = diff & ~static_cast<u64>(static_cast<s64>(diff) >> 63); // max(diff, 0)
    if (current_measure > last_measure) {
        last_measure = current_measure;
    }
    accumulated_ticks += diff;
    rtsc_serialize.unlock();
    return accumulated_ticks;
}

std::chrono::nanoseconds NativeClock::GetTimeNS() {
    const u64 rtsc_value = GetRTSC();
    return std::chrono::nanoseconds{MultiplyAndDivide64(rtsc_value, 1000000000, rtsc_frequency)};
}

std::chrono::microseconds NativeClock::GetTimeUS() {
    const u64 rtsc_value = GetRTSC();
    return std::chrono::microseconds{MultiplyAndDivide64(rtsc_value, 1000000, rtsc_frequency)};
}

std::chrono::milliseconds NativeClock::GetTimeMS() {
    const u64 rtsc_value = GetRTSC();
    return std::chrono::milliseconds{MultiplyAndDivide64(rtsc_value, 1000, rtsc_frequency)};
}

u64 NativeClock::GetClockCycles() {
    const u64 rtsc_value = GetRTSC();
    return MultiplyAndDivide64(rtsc_value, emulated_clock_frequency, rtsc_frequency);
}

u64 NativeClock::GetCPUCycles() {
    const u64 rtsc_value = GetRTSC();
    return MultiplyAndDivide64(rtsc_value, emulated_cpu_frequency, rtsc_frequency);
}

} // namespace X64

} // namespace Common

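One line in GetRTSC() is worth unpacking as a worked example: "diff = diff & ~static_cast<u64>(static_cast<s64>(diff) >> 63);" is a branchless max(diff, 0). If the TSC read appears to go backwards (current_measure < last_measure), the unsigned subtraction wraps and the sign bit of the s64 view is set; the arithmetic shift by 63 then produces an all-ones mask, and AND-ing with its complement zeroes diff. For instance, last_measure = 100 and current_measure = 90 gives diff = 2^64 - 10, the shifted value is 0xFFFFFFFFFFFFFFFF, and diff & ~mask == 0, so accumulated_ticks only ever moves forward.
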
src/common/x64/native_clock.h (new file, 41 lines)

// Copyright 2020 yuzu Emulator Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.

#pragma once

#include <optional>

#include "common/spin_lock.h"
#include "common/wall_clock.h"

namespace Common {

namespace X64 {
class NativeClock : public WallClock {
public:
    NativeClock(u64 emulated_cpu_frequency, u64 emulated_clock_frequency, u64 rtsc_frequency);

    std::chrono::nanoseconds GetTimeNS() override;

    std::chrono::microseconds GetTimeUS() override;

    std::chrono::milliseconds GetTimeMS() override;

    u64 GetClockCycles() override;

    u64 GetCPUCycles() override;

private:
    u64 GetRTSC();

    SpinLock rtsc_serialize{};
    u64 last_measure{};
    u64 accumulated_ticks{};
    u64 rtsc_frequency;
};
} // namespace X64

u64 EstimateRDTSCFrequency();

} // namespace Common

src/core/CMakeLists.txt

@@ -547,6 +547,8 @@ add_library(core STATIC
     hle/service/vi/vi_u.h
     hle/service/wlan/wlan.cpp
     hle/service/wlan/wlan.h
+    host_timing.cpp
+    host_timing.h
     loader/deconstructed_rom_directory.cpp
     loader/deconstructed_rom_directory.h
     loader/elf.cpp

src/core/core_timing_util.cpp

@@ -49,6 +49,21 @@ s64 nsToCycles(std::chrono::nanoseconds ns) {
     return (Hardware::BASE_CLOCK_RATE * ns.count()) / 1000000000;
 }
 
+u64 msToClockCycles(std::chrono::milliseconds ns) {
+    const u128 temp = Common::Multiply64Into128(ns.count(), Hardware::CNTFREQ);
+    return Common::Divide128On32(temp, 1000).first;
+}
+
+u64 usToClockCycles(std::chrono::microseconds ns) {
+    const u128 temp = Common::Multiply64Into128(ns.count(), Hardware::CNTFREQ);
+    return Common::Divide128On32(temp, 1000000).first;
+}
+
+u64 nsToClockCycles(std::chrono::nanoseconds ns) {
+    const u128 temp = Common::Multiply64Into128(ns.count(), Hardware::CNTFREQ);
+    return Common::Divide128On32(temp, 1000000000).first;
+}
+
 u64 CpuCyclesToClockCycles(u64 ticks) {
     const u128 temporal = Common::Multiply64Into128(ticks, Hardware::CNTFREQ);
     return Common::Divide128On32(temporal, static_cast<u32>(Hardware::BASE_CLOCK_RATE)).first;

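As a quick sanity check of the new conversions (using Hardware::CNTFREQ = 19200000, the counter frequency this PR relies on elsewhere; treat the number as an assumption here): msToClockCycles(std::chrono::milliseconds{1}) computes (1 * 19200000) / 1000 = 19200 counter ticks, and nsToClockCycles(std::chrono::nanoseconds{1000000000}) yields the full 19200000. The 128-bit intermediate exists only so the multiplication cannot overflow before the divide.
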
src/core/core_timing_util.h

@@ -13,6 +13,9 @@ namespace Core::Timing {
 s64 msToCycles(std::chrono::milliseconds ms);
 s64 usToCycles(std::chrono::microseconds us);
 s64 nsToCycles(std::chrono::nanoseconds ns);
+u64 msToClockCycles(std::chrono::milliseconds ns);
+u64 usToClockCycles(std::chrono::microseconds ns);
+u64 nsToClockCycles(std::chrono::nanoseconds ns);
 
 inline std::chrono::milliseconds CyclesToMs(s64 cycles) {
     return std::chrono::milliseconds(cycles * 1000 / Hardware::BASE_CLOCK_RATE);

src/core/host_timing.cpp (new file, 206 lines)

// Copyright 2020 yuzu Emulator Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.

#include "core/host_timing.h"

#include <algorithm>
#include <mutex>
#include <string>
#include <tuple>

#include "common/assert.h"
#include "core/core_timing_util.h"

namespace Core::HostTiming {

std::shared_ptr<EventType> CreateEvent(std::string name, TimedCallback&& callback) {
    return std::make_shared<EventType>(std::move(callback), std::move(name));
}

struct CoreTiming::Event {
    u64 time;
    u64 fifo_order;
    u64 userdata;
    std::weak_ptr<EventType> type;

    // Sort by time, unless the times are the same, in which case sort by
    // the order added to the queue
    friend bool operator>(const Event& left, const Event& right) {
        return std::tie(left.time, left.fifo_order) > std::tie(right.time, right.fifo_order);
    }

    friend bool operator<(const Event& left, const Event& right) {
        return std::tie(left.time, left.fifo_order) < std::tie(right.time, right.fifo_order);
    }
};

CoreTiming::CoreTiming() {
    clock =
        Common::CreateBestMatchingClock(Core::Hardware::BASE_CLOCK_RATE, Core::Hardware::CNTFREQ);
}

CoreTiming::~CoreTiming() = default;

void CoreTiming::ThreadEntry(CoreTiming& instance) {
    instance.ThreadLoop();
}

void CoreTiming::Initialize() {
    event_fifo_id = 0;
    const auto empty_timed_callback = [](u64, s64) {};
    ev_lost = CreateEvent("_lost_event", empty_timed_callback);
    timer_thread = std::make_unique<std::thread>(ThreadEntry, std::ref(*this));
}

void CoreTiming::Shutdown() {
    paused = true;
    shutting_down = true;
    event.Set();
    timer_thread->join();
    ClearPendingEvents();
    timer_thread.reset();
    has_started = false;
}

void CoreTiming::Pause(bool is_paused) {
    paused = is_paused;
}

void CoreTiming::SyncPause(bool is_paused) {
    if (is_paused == paused && paused_set == paused) {
        return;
    }
    Pause(is_paused);
    event.Set();
    while (paused_set != is_paused)
        ;
}

bool CoreTiming::IsRunning() const {
    return !paused_set;
}

bool CoreTiming::HasPendingEvents() const {
    return !(wait_set && event_queue.empty());
}

void CoreTiming::ScheduleEvent(s64 ns_into_future, const std::shared_ptr<EventType>& event_type,
                               u64 userdata) {
    basic_lock.lock();
    const u64 timeout = static_cast<u64>(GetGlobalTimeNs().count() + ns_into_future);

    event_queue.emplace_back(Event{timeout, event_fifo_id++, userdata, event_type});

    std::push_heap(event_queue.begin(), event_queue.end(), std::greater<>());
    basic_lock.unlock();
    event.Set();
}

void CoreTiming::UnscheduleEvent(const std::shared_ptr<EventType>& event_type, u64 userdata) {
    basic_lock.lock();
    const auto itr = std::remove_if(event_queue.begin(), event_queue.end(), [&](const Event& e) {
        return e.type.lock().get() == event_type.get() && e.userdata == userdata;
    });

    // Removing random items breaks the invariant so we have to re-establish it.
    if (itr != event_queue.end()) {
        event_queue.erase(itr, event_queue.end());
        std::make_heap(event_queue.begin(), event_queue.end(), std::greater<>());
    }
    basic_lock.unlock();
}

void CoreTiming::AddTicks(std::size_t core_index, u64 ticks) {
    ticks_count[core_index] += ticks;
}

void CoreTiming::ResetTicks(std::size_t core_index) {
    ticks_count[core_index] = 0;
}

u64 CoreTiming::GetCPUTicks() const {
    return clock->GetCPUCycles();
}

u64 CoreTiming::GetClockTicks() const {
    return clock->GetClockCycles();
}

void CoreTiming::ClearPendingEvents() {
    event_queue.clear();
}

void CoreTiming::RemoveEvent(const std::shared_ptr<EventType>& event_type) {
    basic_lock.lock();

    const auto itr = std::remove_if(event_queue.begin(), event_queue.end(), [&](const Event& e) {
        return e.type.lock().get() == event_type.get();
    });

    // Removing random items breaks the invariant so we have to re-establish it.
    if (itr != event_queue.end()) {
        event_queue.erase(itr, event_queue.end());
        std::make_heap(event_queue.begin(), event_queue.end(), std::greater<>());
    }
    basic_lock.unlock();
}

std::optional<u64> CoreTiming::Advance() {
    advance_lock.lock();
    basic_lock.lock();
    global_timer = GetGlobalTimeNs().count();

    while (!event_queue.empty() && event_queue.front().time <= global_timer) {
        Event evt = std::move(event_queue.front());
        std::pop_heap(event_queue.begin(), event_queue.end(), std::greater<>());
        event_queue.pop_back();
        basic_lock.unlock();

        if (auto event_type{evt.type.lock()}) {
            event_type->callback(evt.userdata, global_timer - evt.time);
        }

        basic_lock.lock();
    }

    if (!event_queue.empty()) {
        const u64 next_time = event_queue.front().time - global_timer;
        basic_lock.unlock();
        advance_lock.unlock();
        return next_time;
    } else {
        basic_lock.unlock();
        advance_lock.unlock();
        return std::nullopt;
    }
}

void CoreTiming::ThreadLoop() {
    has_started = true;
    while (!shutting_down) {
        while (!paused) {
            paused_set = false;
            const auto next_time = Advance();
            if (next_time) {
                std::chrono::nanoseconds next_time_ns = std::chrono::nanoseconds(*next_time);
                event.WaitFor(next_time_ns);
            } else {
                wait_set = true;
                event.Wait();
            }
            wait_set = false;
        }
        paused_set = true;
    }
}

std::chrono::nanoseconds CoreTiming::GetGlobalTimeNs() const {
    return clock->GetTimeNS();
}

std::chrono::microseconds CoreTiming::GetGlobalTimeUs() const {
    return clock->GetTimeUS();
}

} // namespace Core::HostTiming

src/core/host_timing.h (new file, 160 lines)

// Copyright 2020 yuzu Emulator Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.

#pragma once

#include <atomic>
#include <chrono>
#include <functional>
#include <memory>
#include <mutex>
#include <optional>
#include <string>
#include <thread>
#include <vector>

#include "common/common_types.h"
#include "common/spin_lock.h"
#include "common/thread.h"
#include "common/threadsafe_queue.h"
#include "common/wall_clock.h"
#include "core/hardware_properties.h"

namespace Core::HostTiming {

/// A callback that may be scheduled for a particular core timing event.
using TimedCallback = std::function<void(u64 userdata, s64 cycles_late)>;

/// Contains the characteristics of a particular event.
struct EventType {
    EventType(TimedCallback&& callback, std::string&& name)
        : callback{std::move(callback)}, name{std::move(name)} {}

    /// The event's callback function.
    TimedCallback callback;
    /// A pointer to the name of the event.
    const std::string name;
};

/**
 * This is a system to schedule events into the emulated machine's future. Time is measured
 * in main CPU clock cycles.
 *
 * To schedule an event, you first have to register its type. This is where you pass in the
 * callback. You then schedule events using the type id you get back.
 *
 * The int cyclesLate that the callbacks get is how many cycles late it was.
 * So to schedule a new event on a regular basis:
 * inside callback:
 *   ScheduleEvent(periodInCycles - cyclesLate, callback, "whatever")
 */
class CoreTiming {
public:
    CoreTiming();
    ~CoreTiming();

    CoreTiming(const CoreTiming&) = delete;
    CoreTiming(CoreTiming&&) = delete;

    CoreTiming& operator=(const CoreTiming&) = delete;
    CoreTiming& operator=(CoreTiming&&) = delete;

    /// CoreTiming begins at the boundary of timing slice -1. An initial call to Advance() is
    /// required to end slice - 1 and start slice 0 before the first cycle of code is executed.
    void Initialize();

    /// Tears down all timing related functionality.
    void Shutdown();

    /// Pauses/Unpauses the execution of the timer thread.
    void Pause(bool is_paused);

    /// Pauses/Unpauses the execution of the timer thread and waits until paused.
    void SyncPause(bool is_paused);

    /// Checks if core timing is running.
    bool IsRunning() const;

    /// Checks if the timer thread has started.
    bool HasStarted() const {
        return has_started;
    }

    /// Checks if there are any pending time events.
    bool HasPendingEvents() const;

    /// Schedules an event in core timing
    void ScheduleEvent(s64 ns_into_future, const std::shared_ptr<EventType>& event_type,
                       u64 userdata = 0);

    void UnscheduleEvent(const std::shared_ptr<EventType>& event_type, u64 userdata);

    /// We only permit one event of each type in the queue at a time.
    void RemoveEvent(const std::shared_ptr<EventType>& event_type);

    void AddTicks(std::size_t core_index, u64 ticks);

    void ResetTicks(std::size_t core_index);

    /// Returns current time in emulated CPU cycles
    u64 GetCPUTicks() const;

    /// Returns current time in emulated in Clock cycles
    u64 GetClockTicks() const;

    /// Returns current time in microseconds.
    std::chrono::microseconds GetGlobalTimeUs() const;

    /// Returns current time in nanoseconds.
    std::chrono::nanoseconds GetGlobalTimeNs() const;

    /// Checks for events manually and returns time in nanoseconds for next event, threadsafe.
    std::optional<u64> Advance();

private:
    struct Event;

    /// Clear all pending events. This should ONLY be done on exit.
    void ClearPendingEvents();

    static void ThreadEntry(CoreTiming& instance);
    void ThreadLoop();

    std::unique_ptr<Common::WallClock> clock;

    u64 global_timer = 0;

    std::chrono::nanoseconds start_point;

    // The queue is a min-heap using std::make_heap/push_heap/pop_heap.
    // We don't use std::priority_queue because we need to be able to serialize, unserialize and
    // erase arbitrary events (RemoveEvent()) regardless of the queue order. These aren't
    // accomodated by the standard adaptor class.
    std::vector<Event> event_queue;
    u64 event_fifo_id = 0;

    std::shared_ptr<EventType> ev_lost;
    Common::Event event{};
    Common::SpinLock basic_lock{};
    Common::SpinLock advance_lock{};
    std::unique_ptr<std::thread> timer_thread;
    std::atomic<bool> paused{};
    std::atomic<bool> paused_set{};
    std::atomic<bool> wait_set{};
    std::atomic<bool> shutting_down{};
    std::atomic<bool> has_started{};

    std::array<std::atomic<u64>, Core::Hardware::NUM_CPU_CORES> ticks_count{};
};

/// Creates a core timing event with the given name and callback.
///
/// @param name     The name of the core timing event to create.
/// @param callback The callback to execute for the event.
///
/// @returns An EventType instance representing the created event.
///
std::shared_ptr<EventType> CreateEvent(std::string name, TimedCallback&& callback);

} // namespace Core::HostTiming

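A rough sketch of how the new host-based CoreTiming is driven (the event name, the delay, and the empty callback body are illustrative, not taken from the diff):

#include "core/host_timing.h"

void HostTimingExample() {
    Core::HostTiming::CoreTiming core_timing;
    core_timing.Initialize(); // spawns the timer thread

    // The callback receives the scheduled userdata and how late it fired, in ns.
    auto event = Core::HostTiming::CreateEvent(
        "ExampleEvent", [](u64 userdata, s64 ns_late) { /* react to the timeout */ });

    constexpr s64 one_ms_in_ns = 1'000'000;
    core_timing.ScheduleEvent(one_ms_in_ns, event, /*userdata=*/0);

    // ... emulation runs; the timer thread wakes up and dispatches the callback ...

    core_timing.Shutdown(); // joins the timer thread and clears pending events
}
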
src/tests/CMakeLists.txt

@@ -1,12 +1,14 @@
 add_executable(tests
     common/bit_field.cpp
     common/bit_utils.cpp
+    common/fibers.cpp
     common/multi_level_queue.cpp
     common/param_package.cpp
     common/ring_buffer.cpp
     core/arm/arm_test_common.cpp
     core/arm/arm_test_common.h
     core/core_timing.cpp
+    core/host_timing.cpp
     tests.cpp
 )
 

358
src/tests/common/fibers.cpp
Normal file
358
src/tests/common/fibers.cpp
Normal file
|
@ -0,0 +1,358 @@
|
||||||
|
// Copyright 2020 yuzu Emulator Project
|
||||||
|
// Licensed under GPLv2 or any later version
|
||||||
|
// Refer to the license.txt file included.
|
||||||
|
|
||||||
|
#include <atomic>
|
||||||
|
#include <cstdlib>
|
||||||
|
#include <functional>
|
||||||
|
#include <memory>
|
||||||
|
#include <thread>
|
||||||
|
#include <unordered_map>
|
||||||
|
#include <vector>
|
||||||
|
|
||||||
|
#include <catch2/catch.hpp>
|
||||||
|
#include <math.h>
|
||||||
|
#include "common/common_types.h"
|
||||||
|
#include "common/fiber.h"
|
||||||
|
#include "common/spin_lock.h"
|
||||||
|
|
||||||
|
namespace Common {
|
||||||
|
|
||||||
|
class TestControl1 {
|
||||||
|
public:
|
||||||
|
TestControl1() = default;
|
||||||
|
|
||||||
|
void DoWork();
|
||||||
|
|
||||||
|
void ExecuteThread(u32 id);
|
||||||
|
|
||||||
|
std::unordered_map<std::thread::id, u32> ids;
|
||||||
|
std::vector<std::shared_ptr<Common::Fiber>> thread_fibers;
|
||||||
|
std::vector<std::shared_ptr<Common::Fiber>> work_fibers;
|
||||||
|
std::vector<u32> items;
|
||||||
|
std::vector<u32> results;
|
||||||
|
};
|
||||||
|
|
||||||
|
static void WorkControl1(void* control) {
|
||||||
|
auto* test_control = static_cast<TestControl1*>(control);
|
||||||
|
test_control->DoWork();
|
||||||
|
}
|
||||||
|
|
||||||
|
void TestControl1::DoWork() {
|
||||||
|
std::thread::id this_id = std::this_thread::get_id();
|
||||||
|
u32 id = ids[this_id];
|
||||||
|
u32 value = items[id];
|
||||||
|
for (u32 i = 0; i < id; i++) {
|
||||||
|
value++;
|
||||||
|
}
|
||||||
|
results[id] = value;
|
||||||
|
Fiber::YieldTo(work_fibers[id], thread_fibers[id]);
|
||||||
|
}
|
||||||
|
|
||||||
|
void TestControl1::ExecuteThread(u32 id) {
|
||||||
|
std::thread::id this_id = std::this_thread::get_id();
|
||||||
|
ids[this_id] = id;
|
||||||
|
auto thread_fiber = Fiber::ThreadToFiber();
|
||||||
|
thread_fibers[id] = thread_fiber;
|
||||||
|
work_fibers[id] = std::make_shared<Fiber>(std::function<void(void*)>{WorkControl1}, this);
|
||||||
|
items[id] = rand() % 256;
|
||||||
|
Fiber::YieldTo(thread_fibers[id], work_fibers[id]);
|
||||||
|
thread_fibers[id]->Exit();
|
||||||
|
}
|
||||||
|
|
||||||
|
static void ThreadStart1(u32 id, TestControl1& test_control) {
|
||||||
|
test_control.ExecuteThread(id);
|
||||||
|
}
|
||||||
|
|
||||||
|
/** This test checks for fiber setup configuration and validates that fibers are
|
||||||
|
* doing all the work required.
|
||||||
|
*/
|
||||||
|
TEST_CASE("Fibers::Setup", "[common]") {
|
||||||
|
constexpr u32 num_threads = 7;
|
||||||
|
TestControl1 test_control{};
|
||||||
|
test_control.thread_fibers.resize(num_threads);
|
||||||
|
test_control.work_fibers.resize(num_threads);
|
||||||
|
test_control.items.resize(num_threads, 0);
|
||||||
|
test_control.results.resize(num_threads, 0);
|
||||||
|
std::vector<std::thread> threads;
|
||||||
|
for (u32 i = 0; i < num_threads; i++) {
|
||||||
|
threads.emplace_back(ThreadStart1, i, std::ref(test_control));
|
||||||
|
}
|
||||||
|
for (u32 i = 0; i < num_threads; i++) {
|
||||||
|
threads[i].join();
|
||||||
|
}
|
||||||
|
for (u32 i = 0; i < num_threads; i++) {
|
||||||
|
REQUIRE(test_control.items[i] + i == test_control.results[i]);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
class TestControl2 {
|
||||||
|
public:
|
||||||
|
TestControl2() = default;
|
||||||
|
|
||||||
|
void DoWork1() {
|
||||||
|
trap2 = false;
|
||||||
|
while (trap.load())
|
||||||
|
;
|
||||||
|
for (u32 i = 0; i < 12000; i++) {
|
||||||
|
value1 += i;
|
||||||
|
}
|
||||||
|
Fiber::YieldTo(fiber1, fiber3);
|
||||||
|
std::thread::id this_id = std::this_thread::get_id();
|
||||||
|
u32 id = ids[this_id];
|
||||||
|
assert1 = id == 1;
|
||||||
|
value2 += 5000;
|
||||||
|
Fiber::YieldTo(fiber1, thread_fibers[id]);
|
||||||
|
}
|
||||||
|
|
||||||
|
void DoWork2() {
|
||||||
|
while (trap2.load())
|
||||||
|
;
|
||||||
|
value2 = 2000;
|
||||||
|
trap = false;
|
||||||
|
Fiber::YieldTo(fiber2, fiber1);
|
||||||
|
assert3 = false;
|
||||||
|
}
|
||||||
|
|
||||||
|
void DoWork3() {
|
||||||
|
std::thread::id this_id = std::this_thread::get_id();
|
||||||
|
u32 id = ids[this_id];
|
||||||
|
assert2 = id == 0;
|
||||||
|
value1 += 1000;
|
||||||
|
Fiber::YieldTo(fiber3, thread_fibers[id]);
|
||||||
|
}
|
||||||
|
|
||||||
|
void ExecuteThread(u32 id);
|
||||||
|
|
||||||
|
void CallFiber1() {
|
||||||
|
std::thread::id this_id = std::this_thread::get_id();
|
||||||
|
u32 id = ids[this_id];
|
||||||
|
Fiber::YieldTo(thread_fibers[id], fiber1);
|
||||||
|
}
|
||||||
|
|
||||||
|
void CallFiber2() {
|
||||||
|
std::thread::id this_id = std::this_thread::get_id();
|
||||||
|
u32 id = ids[this_id];
|
||||||
|
Fiber::YieldTo(thread_fibers[id], fiber2);
|
||||||
|
}
|
||||||
|
|
||||||
|
void Exit();
|
||||||
|
|
||||||
|
bool assert1{};
|
||||||
|
bool assert2{};
|
||||||
|
bool assert3{true};
|
||||||
|
u32 value1{};
|
||||||
|
u32 value2{};
|
||||||
|
std::atomic<bool> trap{true};
|
||||||
|
std::atomic<bool> trap2{true};
|
||||||
|
std::unordered_map<std::thread::id, u32> ids;
|
||||||
|
std::vector<std::shared_ptr<Common::Fiber>> thread_fibers;
|
||||||
|
std::shared_ptr<Common::Fiber> fiber1;
|
||||||
|
std::shared_ptr<Common::Fiber> fiber2;
|
||||||
|
std::shared_ptr<Common::Fiber> fiber3;
|
||||||
|
};

static void WorkControl2_1(void* control) {
    auto* test_control = static_cast<TestControl2*>(control);
    test_control->DoWork1();
}

static void WorkControl2_2(void* control) {
    auto* test_control = static_cast<TestControl2*>(control);
    test_control->DoWork2();
}

static void WorkControl2_3(void* control) {
    auto* test_control = static_cast<TestControl2*>(control);
    test_control->DoWork3();
}

void TestControl2::ExecuteThread(u32 id) {
    std::thread::id this_id = std::this_thread::get_id();
    ids[this_id] = id;
    auto thread_fiber = Fiber::ThreadToFiber();
    thread_fibers[id] = thread_fiber;
}

void TestControl2::Exit() {
    std::thread::id this_id = std::this_thread::get_id();
    u32 id = ids[this_id];
    thread_fibers[id]->Exit();
}

static void ThreadStart2_1(u32 id, TestControl2& test_control) {
    test_control.ExecuteThread(id);
    test_control.CallFiber1();
    test_control.Exit();
}

static void ThreadStart2_2(u32 id, TestControl2& test_control) {
    test_control.ExecuteThread(id);
    test_control.CallFiber2();
    test_control.Exit();
}

/** This test checks the fiber inter-thread exchange configuration and validates
 * that a fiber is successfully transferred from one thread to another and that
 * the TLS region of the thread is kept while changing fibers.
 */
TEST_CASE("Fibers::InterExchange", "[common]") {
    TestControl2 test_control{};
    test_control.thread_fibers.resize(2);
    test_control.fiber1 =
        std::make_shared<Fiber>(std::function<void(void*)>{WorkControl2_1}, &test_control);
    test_control.fiber2 =
        std::make_shared<Fiber>(std::function<void(void*)>{WorkControl2_2}, &test_control);
    test_control.fiber3 =
        std::make_shared<Fiber>(std::function<void(void*)>{WorkControl2_3}, &test_control);
    std::thread thread1(ThreadStart2_1, 0, std::ref(test_control));
    std::thread thread2(ThreadStart2_2, 1, std::ref(test_control));
    thread1.join();
    thread2.join();
    REQUIRE(test_control.assert1);
    REQUIRE(test_control.assert2);
    REQUIRE(test_control.assert3);
    REQUIRE(test_control.value2 == 7000);
    u32 cal_value = 0;
    for (u32 i = 0; i < 12000; i++) {
        cal_value += i;
    }
    cal_value += 1000;
    REQUIRE(test_control.value1 == cal_value);
}
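
// The expected totals checked above can be restated as compile-time arithmetic: value1
// accumulates 0 + 1 + ... + 11999 in DoWork1 plus the 1000 added in DoWork3, and value2 is the
// 2000 stored in DoWork2 plus the 5000 added when fiber1 resumes. The helper name below is
// illustrative only.
static constexpr u32 ExpectedInterExchangeValue1() {
    u32 total = 0;
    for (u32 i = 0; i < 12000; i++) {
        total += i;
    }
    return total + 1000; // 11999 * 12000 / 2 + 1000 = 71'995'000
}
static_assert(ExpectedInterExchangeValue1() == 71995000, "sum of 0..11999 plus 1000");
static_assert(2000 + 5000 == 7000, "value2 written by DoWork2 plus DoWork1");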

class TestControl3 {
public:
    TestControl3() = default;

    void DoWork1() {
        value1 += 1;
        Fiber::YieldTo(fiber1, fiber2);
        std::thread::id this_id = std::this_thread::get_id();
        u32 id = ids[this_id];
        value3 += 1;
        Fiber::YieldTo(fiber1, thread_fibers[id]);
    }

    void DoWork2() {
        value2 += 1;
        std::thread::id this_id = std::this_thread::get_id();
        u32 id = ids[this_id];
        Fiber::YieldTo(fiber2, thread_fibers[id]);
    }

    void ExecuteThread(u32 id);

    void CallFiber1() {
        std::thread::id this_id = std::this_thread::get_id();
        u32 id = ids[this_id];
        Fiber::YieldTo(thread_fibers[id], fiber1);
    }

    void Exit();

    u32 value1{};
    u32 value2{};
    u32 value3{};
    std::unordered_map<std::thread::id, u32> ids;
    std::vector<std::shared_ptr<Common::Fiber>> thread_fibers;
    std::shared_ptr<Common::Fiber> fiber1;
    std::shared_ptr<Common::Fiber> fiber2;
};

static void WorkControl3_1(void* control) {
    auto* test_control = static_cast<TestControl3*>(control);
    test_control->DoWork1();
}

static void WorkControl3_2(void* control) {
    auto* test_control = static_cast<TestControl3*>(control);
    test_control->DoWork2();
}

void TestControl3::ExecuteThread(u32 id) {
    std::thread::id this_id = std::this_thread::get_id();
    ids[this_id] = id;
    auto thread_fiber = Fiber::ThreadToFiber();
    thread_fibers[id] = thread_fiber;
}

void TestControl3::Exit() {
    std::thread::id this_id = std::this_thread::get_id();
    u32 id = ids[this_id];
    thread_fibers[id]->Exit();
}

static void ThreadStart3(u32 id, TestControl3& test_control) {
    test_control.ExecuteThread(id);
    test_control.CallFiber1();
    test_control.Exit();
}

/** This test checks two threads racing to start the same fiber. It verifies that
 * execution occurred in an ordered manner and that at no point were two contexts
 * running at the same time.
 */
TEST_CASE("Fibers::StartRace", "[common]") {
    TestControl3 test_control{};
    test_control.thread_fibers.resize(2);
    test_control.fiber1 =
        std::make_shared<Fiber>(std::function<void(void*)>{WorkControl3_1}, &test_control);
    test_control.fiber2 =
        std::make_shared<Fiber>(std::function<void(void*)>{WorkControl3_2}, &test_control);
    std::thread thread1(ThreadStart3, 0, std::ref(test_control));
    std::thread thread2(ThreadStart3, 1, std::ref(test_control));
    thread1.join();
    thread2.join();
    REQUIRE(test_control.value1 == 1);
    REQUIRE(test_control.value2 == 1);
    REQUIRE(test_control.value3 == 1);
}
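
// Both ThreadStart3 instances call CallFiber1, so two threads race to enter fiber1. The test
// relies on YieldTo only admitting one thread into a fiber at a time: the winner runs DoWork1
// once (value1 == 1) and hands its thread to fiber2 (value2 == 1), while the loser is admitted
// only after fiber1 has been vacated and runs just the tail once (value3 == 1). A double entry
// would show up as one of the counters exceeding 1.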

class TestControl4;

static void WorkControl4(void* control);

class TestControl4 {
public:
    TestControl4() {
        fiber1 = std::make_shared<Fiber>(std::function<void(void*)>{WorkControl4}, this);
        goal_reached = false;
        rewinded = false;
    }

    void Execute() {
        thread_fiber = Fiber::ThreadToFiber();
        Fiber::YieldTo(thread_fiber, fiber1);
        thread_fiber->Exit();
    }

    void DoWork() {
        fiber1->SetRewindPoint(std::function<void(void*)>{WorkControl4}, this);
        if (rewinded) {
            goal_reached = true;
            Fiber::YieldTo(fiber1, thread_fiber);
        }
        rewinded = true;
        fiber1->Rewind();
    }

    std::shared_ptr<Common::Fiber> fiber1;
    std::shared_ptr<Common::Fiber> thread_fiber;
    bool goal_reached;
    bool rewinded;
};

static void WorkControl4(void* control) {
    auto* test_control = static_cast<TestControl4*>(control);
    test_control->DoWork();
}

TEST_CASE("Fibers::Rewind", "[common]") {
    TestControl4 test_control{};
    test_control.Execute();
    REQUIRE(test_control.goal_reached);
    REQUIRE(test_control.rewinded);
}
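
// Rewind flow exercised above: on the first pass through DoWork, `rewinded` is still false, so
// the fiber marks it true and calls Rewind(), which restarts execution at the registered rewind
// point (WorkControl4). On the second pass `rewinded` is already true, `goal_reached` is set,
// and control yields back to the thread fiber. Both REQUIREs passing means both passes ran.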

} // namespace Common
142
src/tests/core/host_timing.cpp
Normal file

@ -0,0 +1,142 @@
// Copyright 2016 Dolphin Emulator Project / 2017 Dolphin Emulator Project
// Licensed under GPLv2+
// Refer to the license.txt file included.

#include <catch2/catch.hpp>

#include <array>
#include <bitset>
#include <cstdlib>
#include <memory>
#include <string>

#include "common/file_util.h"
#include "core/core.h"
#include "core/host_timing.h"

// Numbers are chosen randomly to make sure the correct one is given.
static constexpr std::array<u64, 5> CB_IDS{{42, 144, 93, 1026, UINT64_C(0xFFFF7FFFF7FFFF)}};
static constexpr int MAX_SLICE_LENGTH = 10000; // Copied from CoreTiming internals
static constexpr std::array<u64, 5> calls_order{{2, 0, 1, 4, 3}};
static std::array<s64, 5> delays{};

static std::bitset<CB_IDS.size()> callbacks_ran_flags;
static u64 expected_callback = 0;

template <unsigned int IDX>
void HostCallbackTemplate(u64 userdata, s64 nanoseconds_late) {
    static_assert(IDX < CB_IDS.size(), "IDX out of range");
    callbacks_ran_flags.set(IDX);
    REQUIRE(CB_IDS[IDX] == userdata);
    REQUIRE(CB_IDS[IDX] == CB_IDS[calls_order[expected_callback]]);
    delays[IDX] = nanoseconds_late;
    ++expected_callback;
}
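
// Instantiating HostCallbackTemplate<0..4> produces five distinct callbacks, one per CB_IDS
// entry. Each invocation records how late it fired, checks that the userdata matches its own
// ID, and checks that it ran in the position dictated by calls_order, advancing
// expected_callback as a shared cursor.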

struct ScopeInit final {
    ScopeInit() {
        core_timing.Initialize();
    }
    ~ScopeInit() {
        core_timing.Shutdown();
    }

    Core::HostTiming::CoreTiming core_timing;
};
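
// ScopeInit is a small RAII fixture: it brings up a fresh CoreTiming instance for each TEST_CASE
// and shuts it down when the case ends, so every test starts from a clean timer state.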

#pragma optimize("", off)

static u64 TestTimerSpeed(Core::HostTiming::CoreTiming& core_timing) {
    u64 start = core_timing.GetGlobalTimeNs().count();
    u64 placebo = 0;
    for (std::size_t i = 0; i < 1000; i++) {
        placebo += core_timing.GetGlobalTimeNs().count();
    }
    u64 end = core_timing.GetGlobalTimeNs().count();
    return (end - start);
}

#pragma optimize("", on)
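
// Note: #pragma optimize("", off/on) is an MSVC-specific directive, used here so the timing loop
// in TestTimerSpeed is not folded away; other compilers are expected to treat it as an unknown
// pragma and ignore it (possibly with a warning).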

TEST_CASE("HostTiming[BasicOrder]", "[core]") {
    ScopeInit guard;
    auto& core_timing = guard.core_timing;
    std::vector<std::shared_ptr<Core::HostTiming::EventType>> events{
        Core::HostTiming::CreateEvent("callbackA", HostCallbackTemplate<0>),
        Core::HostTiming::CreateEvent("callbackB", HostCallbackTemplate<1>),
        Core::HostTiming::CreateEvent("callbackC", HostCallbackTemplate<2>),
        Core::HostTiming::CreateEvent("callbackD", HostCallbackTemplate<3>),
        Core::HostTiming::CreateEvent("callbackE", HostCallbackTemplate<4>),
    };

    expected_callback = 0;

    core_timing.SyncPause(true);

    u64 one_micro = 1000U;
    for (std::size_t i = 0; i < events.size(); i++) {
        u64 order = calls_order[i];
        core_timing.ScheduleEvent(i * one_micro + 100U, events[order], CB_IDS[order]);
    }
    /// test pause
    REQUIRE(callbacks_ran_flags.none());

    core_timing.Pause(false); // No need to sync

    while (core_timing.HasPendingEvents())
        ;

    REQUIRE(callbacks_ran_flags.all());

    for (std::size_t i = 0; i < delays.size(); i++) {
        const double delay = static_cast<double>(delays[i]);
        const double micro = delay / 1000.0f;
        const double mili = micro / 1000.0f;
        printf("HostTimer Pausing Delay[%zu]: %.3f %.6f\n", i, micro, mili);
    }
}
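
// delays[] holds the lateness reported to each callback in nanoseconds; the printing loop above
// converts it to microseconds (first column) and milliseconds (second column). The empty
// while (HasPendingEvents()) loop before the final REQUIRE is a deliberate busy wait for the
// host timer to drain all pending events before the callback flags are checked.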

TEST_CASE("HostTiming[BasicOrderNoPausing]", "[core]") {
    ScopeInit guard;
    auto& core_timing = guard.core_timing;
    std::vector<std::shared_ptr<Core::HostTiming::EventType>> events{
        Core::HostTiming::CreateEvent("callbackA", HostCallbackTemplate<0>),
        Core::HostTiming::CreateEvent("callbackB", HostCallbackTemplate<1>),
        Core::HostTiming::CreateEvent("callbackC", HostCallbackTemplate<2>),
        Core::HostTiming::CreateEvent("callbackD", HostCallbackTemplate<3>),
        Core::HostTiming::CreateEvent("callbackE", HostCallbackTemplate<4>),
    };

    core_timing.SyncPause(true);
    core_timing.SyncPause(false);

    expected_callback = 0;

    u64 start = core_timing.GetGlobalTimeNs().count();
    u64 one_micro = 1000U;
    for (std::size_t i = 0; i < events.size(); i++) {
        u64 order = calls_order[i];
        core_timing.ScheduleEvent(i * one_micro + 100U, events[order], CB_IDS[order]);
    }
    u64 end = core_timing.GetGlobalTimeNs().count();
    const double scheduling_time = static_cast<double>(end - start);
    const double timer_time = static_cast<double>(TestTimerSpeed(core_timing));

    while (core_timing.HasPendingEvents())
        ;

    REQUIRE(callbacks_ran_flags.all());

    for (std::size_t i = 0; i < delays.size(); i++) {
        const double delay = static_cast<double>(delays[i]);
        const double micro = delay / 1000.0f;
        const double mili = micro / 1000.0f;
        printf("HostTimer No Pausing Delay[%zu]: %.3f %.6f\n", i, micro, mili);
    }

    const double micro = scheduling_time / 1000.0f;
    const double mili = micro / 1000.0f;
    printf("HostTimer No Pausing Scheduling Time: %.3f %.6f\n", micro, mili);
    printf("HostTimer No Pausing Timer Time: %.3f %.6f\n", timer_time / 1000.f,
           timer_time / 1000000.f);
}
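
// In the no-pausing variant, scheduling_time measures how long it took to enqueue the five
// events while the timer was already running, and timer_time is the cost of roughly 1000
// back-to-back GetGlobalTimeNs() reads from TestTimerSpeed; both are printed in microseconds and
// milliseconds alongside the per-callback delays.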