Mirror of https://git.suyu.dev/suyu/suyu.git (synced 2024-11-15 22:54:00 +00:00)
Merge pull request #2905 from danzel/fix-2902
Use recursive_mutex instead of mutex to fix #2902
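The change swaps the type of the global HLE lock so that a thread which already holds it can lock it again. With std::mutex, a second lock() from the owning thread is undefined behavior and in practice hangs; std::recursive_mutex keeps an ownership count and only releases once every matching unlock has happened. A minimal, self-contained sketch of that difference (illustration only, not code from this repository):

    #include <mutex>

    // Sketch assuming the problem behind #2902 is a same-thread re-acquisition
    // of the HLE lock. If this were std::mutex, inner() below would deadlock.
    std::recursive_mutex hle_lock;

    void inner() {
        // Second lock on the same thread: recursive_mutex just increments an
        // ownership count; a plain std::mutex makes this undefined behavior
        // (a hang in practice).
        std::lock_guard<std::recursive_mutex> guard(hle_lock);
    }

    void outer() {
        std::lock_guard<std::recursive_mutex> guard(hle_lock);
        inner(); // re-entrant acquisition; fully released once both guards unwind
    }

    int main() {
        outer();
    }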
This commit is contained in: commit acbd46366c
4 changed files with 5 additions and 5 deletions
@@ -7,5 +7,5 @@
 #include <core/hle/lock.h>

 namespace HLE {
-std::mutex g_hle_lock;
+std::recursive_mutex g_hle_lock;
 }

@@ -14,5 +14,5 @@ namespace HLE {
  * to the emulated memory is not protected by this mutex, and should be avoided in any threads other
  * than the CPU thread.
  */
-extern std::mutex g_hle_lock;
+extern std::recursive_mutex g_hle_lock;
 } // namespace HLE

@@ -1334,7 +1334,7 @@ void CallSVC(u32 immediate) {
     MICROPROFILE_SCOPE(Kernel_SVC);

     // Lock the global kernel mutex when we enter the kernel HLE.
-    std::lock_guard<std::mutex> lock(HLE::g_hle_lock);
+    std::lock_guard<std::recursive_mutex> lock(HLE::g_hle_lock);

     const FunctionDef* info = GetSVCInfo(immediate);
     if (info) {

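At the call sites only the template argument of std::lock_guard changes, since lock_guard works with any type meeting the BasicLockable requirements. As a side observation (not part of this change), C++17 class template argument deduction can leave the mutex type out entirely, which keeps lock sites agnostic to this kind of type swap, assuming the project's language standard allows it:

    #include <mutex>

    std::recursive_mutex g_hle_lock; // stand-in for HLE::g_hle_lock

    void EnterHLE() {
        // C++17 deduces the template argument from the mutex, so switching the
        // mutex type would not require touching this line.
        std::lock_guard lock(g_hle_lock);
        // ... kernel HLE work ...
    }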
@@ -183,7 +183,7 @@ T Read(const VAddr vaddr) {
     }

     // The memory access might do an MMIO or cached access, so we have to lock the HLE kernel state
-    std::lock_guard<std::mutex> lock(HLE::g_hle_lock);
+    std::lock_guard<std::recursive_mutex> lock(HLE::g_hle_lock);

     PageType type = current_page_table->attributes[vaddr >> PAGE_BITS];
     switch (type) {

@@ -224,7 +224,7 @@ void Write(const VAddr vaddr, const T data) {
     }

     // The memory access might do an MMIO or cached access, so we have to lock the HLE kernel state
-    std::lock_guard<std::mutex> lock(HLE::g_hle_lock);
+    std::lock_guard<std::recursive_mutex> lock(HLE::g_hle_lock);

     PageType type = current_page_table->attributes[vaddr >> PAGE_BITS];
     switch (type) {

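Read together, the hunks show the re-entrant path the recursive mutex is meant to tolerate: CallSVC takes HLE::g_hle_lock on entry to kernel HLE, and a handler that touches emulated memory reaches Memory::Read or Memory::Write, which takes the same lock on the same thread. A simplified sketch of that path, with signatures abbreviated from the diff context rather than copied from the project:

    #include <cstdint>
    #include <mutex>

    std::recursive_mutex g_hle_lock;

    std::uint32_t Read32(std::uint32_t vaddr) {
        // Second acquisition, still on the CPU thread: fine for recursive_mutex,
        // a deadlock if g_hle_lock were a plain std::mutex.
        std::lock_guard<std::recursive_mutex> lock(g_hle_lock);
        (void)vaddr; // placeholder for the real MMIO / cached / unmapped handling
        return 0;
    }

    void CallSVC(std::uint32_t immediate) {
        // First acquisition when entering kernel HLE.
        std::lock_guard<std::recursive_mutex> lock(g_hle_lock);
        // A handler that reads guest memory re-enters the same lock:
        Read32(immediate);
    }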