service/nvflinger: Store BufferQueue instances as regular data members
The NVFlinger service is already passed into services that need to guarantee its lifetime, so the BufferQueue instances will already live as long as they're needed. Making them std::shared_ptr instances in this case is unnecessary.
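To illustrate the ownership pattern this commit moves to, here is a minimal, self-contained C++ sketch. The names (Queue, Registry) are hypothetical stand-ins and are not part of the NVFlinger API; the sketch only shows an owner that stores instances by value in a std::vector and hands out references, which is sound precisely because the owner is guaranteed to outlive every consumer of those references.

// Hypothetical illustration only: Queue/Registry stand in for BufferQueue/NVFlinger.
#include <algorithm>
#include <cassert>
#include <cstdint>
#include <iostream>
#include <vector>

class Queue {
public:
    explicit Queue(std::uint32_t id) : id_{id} {}
    std::uint32_t GetId() const {
        return id_;
    }

private:
    std::uint32_t id_;
};

class Registry {
public:
    // The Registry owns the Queue outright; no shared_ptr bookkeeping is needed.
    std::uint32_t CreateQueue() {
        const std::uint32_t id = next_id_++;
        queues_.emplace_back(id);
        return id;
    }

    // Hands out a plain reference. Safe only because the Registry is guaranteed
    // to outlive every caller that uses the reference.
    Queue& FindQueue(std::uint32_t id) {
        const auto itr = std::find_if(queues_.begin(), queues_.end(),
                                      [id](const Queue& queue) { return queue.GetId() == id; });
        assert(itr != queues_.end());
        return *itr;
    }

private:
    std::vector<Queue> queues_;
    std::uint32_t next_id_ = 1;
};

int main() {
    Registry registry;
    const std::uint32_t id = registry.CreateQueue();
    std::cout << "found queue " << registry.FindQueue(id).GetId() << '\n';
}

One general caveat with this pattern: references into a std::vector remain valid only while the vector is not reallocated, so callers are safest re-looking a queue up by ID rather than caching a reference across later insertions.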
This commit is contained in:
parent fd15730767
commit 90528f1326

7 changed files with 39 additions and 36 deletions
@@ -78,9 +78,8 @@ std::optional<u64> NVFlinger::CreateLayer(u64 display_id) {
     const u64 layer_id = next_layer_id++;
     const u32 buffer_queue_id = next_buffer_queue_id++;
 
-    auto buffer_queue = std::make_shared<BufferQueue>(buffer_queue_id, layer_id);
-    display->CreateLayer(layer_id, buffer_queue);
-    buffer_queues.emplace_back(std::move(buffer_queue));
+    buffer_queues.emplace_back(buffer_queue_id, layer_id);
+    display->CreateLayer(layer_id, buffer_queues.back());
     return layer_id;
 }
 
@@ -104,9 +103,17 @@ Kernel::SharedPtr<Kernel::ReadableEvent> NVFlinger::FindVsyncEvent(u64 display_i
     return display->GetVSyncEvent();
 }
 
-std::shared_ptr<BufferQueue> NVFlinger::FindBufferQueue(u32 id) const {
+BufferQueue& NVFlinger::FindBufferQueue(u32 id) {
     const auto itr = std::find_if(buffer_queues.begin(), buffer_queues.end(),
-                                  [&](const auto& queue) { return queue->GetId() == id; });
+                                  [id](const auto& queue) { return queue.GetId() == id; });
+
+    ASSERT(itr != buffer_queues.end());
+    return *itr;
+}
+
+const BufferQueue& NVFlinger::FindBufferQueue(u32 id) const {
+    const auto itr = std::find_if(buffer_queues.begin(), buffer_queues.end(),
+                                  [id](const auto& queue) { return queue.GetId() == id; });
 
     ASSERT(itr != buffer_queues.end());
     return *itr;
@@ -65,7 +65,10 @@ public:
     Kernel::SharedPtr<Kernel::ReadableEvent> FindVsyncEvent(u64 display_id) const;
 
     /// Obtains a buffer queue identified by the ID.
-    std::shared_ptr<BufferQueue> FindBufferQueue(u32 id) const;
+    BufferQueue& FindBufferQueue(u32 id);
+
+    /// Obtains a buffer queue identified by the ID.
+    const BufferQueue& FindBufferQueue(u32 id) const;
 
     /// Performs a composition request to the emulated nvidia GPU and triggers the vsync events when
     /// finished.
@@ -87,7 +90,7 @@ private:
     std::shared_ptr<Nvidia::Module> nvdrv;
 
     std::vector<VI::Display> displays;
-    std::vector<std::shared_ptr<BufferQueue>> buffer_queues;
+    std::vector<BufferQueue> buffer_queues;
 
     /// Id to use for the next layer that is created, this counter is shared among all displays.
     u64 next_layer_id = 1;
@@ -39,11 +39,11 @@ void Display::SignalVSyncEvent() {
     vsync_event.writable->Signal();
 }
 
-void Display::CreateLayer(u64 id, std::shared_ptr<NVFlinger::BufferQueue> buffer_queue) {
+void Display::CreateLayer(u64 id, NVFlinger::BufferQueue& buffer_queue) {
     // TODO(Subv): Support more than 1 layer.
     ASSERT_MSG(layers.empty(), "Only one layer is supported per display at the moment");
 
-    layers.emplace_back(id, std::move(buffer_queue));
+    layers.emplace_back(id, buffer_queue);
 }
 
 Layer* Display::FindLayer(u64 id) {
@@ -67,7 +67,7 @@ public:
     /// @param id The ID to assign to the created layer.
     /// @param buffer_queue The buffer queue for the layer instance to use.
     ///
-    void CreateLayer(u64 id, std::shared_ptr<NVFlinger::BufferQueue> buffer_queue);
+    void CreateLayer(u64 id, NVFlinger::BufferQueue& buffer_queue);
 
     /// Attempts to find a layer with the given ID.
     ///
@@ -2,16 +2,11 @@
 // Licensed under GPLv2 or any later version
 // Refer to the license.txt file included.
 
-#include "common/assert.h"
 #include "core/hle/service/vi/layer/vi_layer.h"
 
 namespace Service::VI {
 
-Layer::Layer(u64 id, std::shared_ptr<NVFlinger::BufferQueue> queue)
-    : id{id}, buffer_queue{std::move(queue)}
-{
-    ASSERT_MSG(buffer_queue != nullptr, "buffer_queue may not be null.");
-}
+Layer::Layer(u64 id, NVFlinger::BufferQueue& queue) : id{id}, buffer_queue{queue} {}
 
 Layer::~Layer() = default;
 
@@ -4,8 +4,6 @@
 #pragma once
 
-#include <memory>
-
 #include "common/common_types.h"
 
 namespace Service::NVFlinger {
 
@@ -22,14 +20,14 @@ public:
     /// @param id The ID to assign to this layer.
     /// @param queue The buffer queue for this layer to use.
     ///
-    Layer(u64 id, std::shared_ptr<NVFlinger::BufferQueue> queue);
+    Layer(u64 id, NVFlinger::BufferQueue& queue);
     ~Layer();
 
     Layer(const Layer&) = delete;
     Layer& operator=(const Layer&) = delete;
 
     Layer(Layer&&) = default;
-    Layer& operator=(Layer&&) = default;
+    Layer& operator=(Layer&&) = delete;
 
     /// Gets the ID for this layer.
     u64 GetID() const {
@@ -38,17 +36,17 @@ public:
 
     /// Gets a reference to the buffer queue this layer is using.
     NVFlinger::BufferQueue& GetBufferQueue() {
-        return *buffer_queue;
+        return buffer_queue;
     }
 
     /// Gets a const reference to the buffer queue this layer is using.
     const NVFlinger::BufferQueue& GetBufferQueue() const {
-        return *buffer_queue;
+        return buffer_queue;
     }
 
 private:
     u64 id;
-    std::shared_ptr<NVFlinger::BufferQueue> buffer_queue;
+    NVFlinger::BufferQueue& buffer_queue;
 };
 
 } // namespace Service::VI
@@ -525,7 +525,7 @@ private:
         LOG_DEBUG(Service_VI, "called. id=0x{:08X} transaction={:X}, flags=0x{:08X}", id,
                   static_cast<u32>(transaction), flags);
 
-        auto buffer_queue = nv_flinger->FindBufferQueue(id);
+        auto& buffer_queue = nv_flinger->FindBufferQueue(id);
 
         if (transaction == TransactionId::Connect) {
             IGBPConnectRequestParcel request{ctx.ReadBuffer()};
@@ -538,7 +538,7 @@ private:
         } else if (transaction == TransactionId::SetPreallocatedBuffer) {
             IGBPSetPreallocatedBufferRequestParcel request{ctx.ReadBuffer()};
 
-            buffer_queue->SetPreallocatedBuffer(request.data.slot, request.buffer);
+            buffer_queue.SetPreallocatedBuffer(request.data.slot, request.buffer);
 
             IGBPSetPreallocatedBufferResponseParcel response{};
             ctx.WriteBuffer(response.Serialize());
@@ -546,7 +546,7 @@ private:
             IGBPDequeueBufferRequestParcel request{ctx.ReadBuffer()};
             const u32 width{request.data.width};
             const u32 height{request.data.height};
-            std::optional<u32> slot = buffer_queue->DequeueBuffer(width, height);
+            std::optional<u32> slot = buffer_queue.DequeueBuffer(width, height);
 
             if (slot) {
                 // Buffer is available
@@ -559,8 +559,8 @@ private:
                     [=](Kernel::SharedPtr<Kernel::Thread> thread, Kernel::HLERequestContext& ctx,
                         Kernel::ThreadWakeupReason reason) {
                         // Repeat TransactParcel DequeueBuffer when a buffer is available
-                        auto buffer_queue = nv_flinger->FindBufferQueue(id);
-                        std::optional<u32> slot = buffer_queue->DequeueBuffer(width, height);
+                        auto& buffer_queue = nv_flinger->FindBufferQueue(id);
+                        std::optional<u32> slot = buffer_queue.DequeueBuffer(width, height);
                         ASSERT_MSG(slot != std::nullopt, "Could not dequeue buffer.");
 
                         IGBPDequeueBufferResponseParcel response{*slot};
@@ -568,28 +568,28 @@ private:
                         IPC::ResponseBuilder rb{ctx, 2};
                         rb.Push(RESULT_SUCCESS);
                     },
-                    buffer_queue->GetWritableBufferWaitEvent());
+                    buffer_queue.GetWritableBufferWaitEvent());
             }
         } else if (transaction == TransactionId::RequestBuffer) {
             IGBPRequestBufferRequestParcel request{ctx.ReadBuffer()};
 
-            auto& buffer = buffer_queue->RequestBuffer(request.slot);
+            auto& buffer = buffer_queue.RequestBuffer(request.slot);
 
             IGBPRequestBufferResponseParcel response{buffer};
             ctx.WriteBuffer(response.Serialize());
         } else if (transaction == TransactionId::QueueBuffer) {
             IGBPQueueBufferRequestParcel request{ctx.ReadBuffer()};
 
-            buffer_queue->QueueBuffer(request.data.slot, request.data.transform,
-                                      request.data.GetCropRect());
+            buffer_queue.QueueBuffer(request.data.slot, request.data.transform,
+                                     request.data.GetCropRect());
 
             IGBPQueueBufferResponseParcel response{1280, 720};
             ctx.WriteBuffer(response.Serialize());
         } else if (transaction == TransactionId::Query) {
             IGBPQueryRequestParcel request{ctx.ReadBuffer()};
 
-            u32 value =
-                buffer_queue->Query(static_cast<NVFlinger::BufferQueue::QueryType>(request.type));
+            const u32 value =
+                buffer_queue.Query(static_cast<NVFlinger::BufferQueue::QueryType>(request.type));
 
             IGBPQueryResponseParcel response{value};
             ctx.WriteBuffer(response.Serialize());
@@ -629,12 +629,12 @@ private:
 
         LOG_WARNING(Service_VI, "(STUBBED) called id={}, unknown={:08X}", id, unknown);
 
-        const auto buffer_queue = nv_flinger->FindBufferQueue(id);
+        const auto& buffer_queue = nv_flinger->FindBufferQueue(id);
 
         // TODO(Subv): Find out what this actually is.
         IPC::ResponseBuilder rb{ctx, 2, 1};
         rb.Push(RESULT_SUCCESS);
-        rb.PushCopyObjects(buffer_queue->GetBufferWaitEvent());
+        rb.PushCopyObjects(buffer_queue.GetBufferWaitEvent());
     }
 
     std::shared_ptr<NVFlinger::NVFlinger> nv_flinger;