scope_exit: Make constexpr
Allows the use of the macro in constexpr contexts. Also avoids some potential problems when nesting braces inside it.
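For context, here is a minimal sketch of how a brace-accepting, constexpr-capable scope guard can be implemented. This is an illustration only, assuming C++20 (needed for the constexpr destructor); the helper names (detail::ScopeExitTag, detail::ScopeExitHelper) are invented for the example and may not match suyu's actual common/scope_exit.h. The key idea is that the macro now ends in a lambda introducer, so the braces written at the call site form an ordinary lambda body rather than a macro argument:

// Hedged sketch of a constexpr-capable scope guard (illustrative names,
// not necessarily suyu's real common/scope_exit.h). Requires C++20.
#include <utility>

namespace detail {

template <typename F>
class ScopeExitHelper {
public:
    constexpr explicit ScopeExitHelper(F&& func) : m_func(std::forward<F>(func)) {}

    // Runs the stored callable when the guard goes out of scope, unless cancelled.
    constexpr ~ScopeExitHelper() {
        if (m_active) {
            m_func();
        }
    }

    // Disarms the guard; used by SCOPE_GUARD call sites such as pg_guard.Cancel().
    constexpr void Cancel() {
        m_active = false;
    }

private:
    F m_func;
    bool m_active{true};
};

struct ScopeExitTag {};

// Ending the macro in a lambda introducer means the braces the caller writes
// form the lambda body, so nested braces and commas no longer have to survive
// a trip through the preprocessor as a macro argument.
template <typename F>
constexpr auto operator+(ScopeExitTag, F&& func) {
    return ScopeExitHelper<F>(std::forward<F>(func));
}

} // namespace detail

#define SCOPE_EXIT_CAT_IMPL(a, b) a##b
#define SCOPE_EXIT_CAT(a, b) SCOPE_EXIT_CAT_IMPL(a, b)

// SCOPE_EXIT { ... }; declares an anonymous guard object for the current scope.
#define SCOPE_EXIT \
    auto SCOPE_EXIT_CAT(scope_exit_, __LINE__) = detail::ScopeExitTag{} + [&]()

// SCOPE_GUARD { ... } yields the guard itself, so callers can name and Cancel() it.
#define SCOPE_GUARD detail::ScopeExitTag{} + [&]()

With such a definition, SCOPE_EXIT { request->Close(); }; expands to a guard declaration whose destructor invokes the lambda on scope exit, while SCOPE_GUARD { ... } yields a nameable guard that can be disarmed via Cancel(). Because the body no longer passes through the preprocessor as a braced macro argument, commas and nested braces inside it are unproblematic, and the guard can appear in constexpr functions since all of its special members are constexpr.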
@@ -24,7 +24,9 @@ Result KClientSession::SendSyncRequest(uintptr_t address, size_t size) {
     // Create a session request.
     KSessionRequest* request = KSessionRequest::Create(m_kernel);
     R_UNLESS(request != nullptr, ResultOutOfResource);
-    SCOPE_EXIT({ request->Close(); });
+    SCOPE_EXIT {
+        request->Close();
+    };
 
     // Initialize the request.
     request->Initialize(nullptr, address, size);

@@ -37,7 +39,9 @@ Result KClientSession::SendAsyncRequest(KEvent* event, uintptr_t address, size_t
     // Create a session request.
     KSessionRequest* request = KSessionRequest::Create(m_kernel);
     R_UNLESS(request != nullptr, ResultOutOfResource);
-    SCOPE_EXIT({ request->Close(); });
+    SCOPE_EXIT {
+        request->Close();
+    };
 
     // Initialize the request.
     request->Initialize(event, address, size);

@@ -1305,11 +1305,11 @@ Result KPageTableBase::UnmapCodeMemory(KProcessAddress dst_address, KProcessAddr
 
     // Ensure that we maintain the instruction cache.
     bool reprotected_pages = false;
-    SCOPE_EXIT({
+    SCOPE_EXIT {
         if (reprotected_pages && any_code_pages) {
             InvalidateInstructionCache(m_kernel, this, dst_address, size);
         }
-    });
+    };
 
     // Unmap.
     {

@@ -1397,7 +1397,9 @@ Result KPageTableBase::MapInsecureMemory(KProcessAddress address, size_t size) {
     // Close the opened pages when we're done with them.
     // If the mapping succeeds, each page will gain an extra reference, otherwise they will be freed
     // automatically.
-    SCOPE_EXIT({ pg.Close(); });
+    SCOPE_EXIT {
+        pg.Close();
+    };
 
     // Clear all the newly allocated pages.
     for (const auto& it : pg) {

@@ -1603,7 +1605,9 @@ Result KPageTableBase::AllocateAndMapPagesImpl(PageLinkedList* page_list, KProce
         m_kernel.MemoryManager().AllocateAndOpen(std::addressof(pg), num_pages, m_allocate_option));
 
     // Ensure that the page group is closed when we're done working with it.
-    SCOPE_EXIT({ pg.Close(); });
+    SCOPE_EXIT {
+        pg.Close();
+    };
 
     // Clear all pages.
     for (const auto& it : pg) {

@@ -2191,7 +2195,9 @@ Result KPageTableBase::SetHeapSize(KProcessAddress* out, size_t size) {
     // Close the opened pages when we're done with them.
     // If the mapping succeeds, each page will gain an extra reference, otherwise they will be freed
     // automatically.
-    SCOPE_EXIT({ pg.Close(); });
+    SCOPE_EXIT {
+        pg.Close();
+    };
 
     // Clear all the newly allocated pages.
     for (const auto& it : pg) {

@@ -2592,7 +2598,9 @@ Result KPageTableBase::UnmapIoRegion(KProcessAddress dst_address, KPhysicalAddre
     // Temporarily unlock ourselves, so that other operations can occur while we flush the
     // region.
     m_general_lock.Unlock();
-    SCOPE_EXIT({ m_general_lock.Lock(); });
+    SCOPE_EXIT {
+        m_general_lock.Lock();
+    };
 
     // Flush the region.
     R_ASSERT(FlushDataCache(dst_address, size));

@@ -3311,10 +3319,10 @@ Result KPageTableBase::ReadIoMemoryImpl(KProcessAddress dst_addr, KPhysicalAddre
     // Ensure we unmap the io memory when we're done with it.
     const KPageProperties unmap_properties =
         KPageProperties{KMemoryPermission::None, false, false, DisableMergeAttribute::None};
-    SCOPE_EXIT({
+    SCOPE_EXIT {
         R_ASSERT(this->Operate(updater.GetPageList(), io_addr, map_size / PageSize, 0, false,
                                unmap_properties, OperationType::Unmap, true));
-    });
+    };
 
     // Read the memory.
     const KProcessAddress read_addr = io_addr + (GetInteger(phys_addr) & (PageSize - 1));

@@ -3347,10 +3355,10 @@ Result KPageTableBase::WriteIoMemoryImpl(KPhysicalAddress phys_addr, KProcessAdd
     // Ensure we unmap the io memory when we're done with it.
     const KPageProperties unmap_properties =
         KPageProperties{KMemoryPermission::None, false, false, DisableMergeAttribute::None};
-    SCOPE_EXIT({
+    SCOPE_EXIT {
         R_ASSERT(this->Operate(updater.GetPageList(), io_addr, map_size / PageSize, 0, false,
                                unmap_properties, OperationType::Unmap, true));
-    });
+    };
 
     // Write the memory.
     const KProcessAddress write_addr = io_addr + (GetInteger(phys_addr) & (PageSize - 1));

@@ -4491,14 +4499,14 @@ Result KPageTableBase::SetupForIpcServer(KProcessAddress* out_addr, size_t size,
 
     // If the partial pages are mapped, an extra reference will have been opened. Otherwise, they'll
     // free on scope exit.
-    SCOPE_EXIT({
+    SCOPE_EXIT {
         if (start_partial_page != 0) {
             m_kernel.MemoryManager().Close(start_partial_page, 1);
         }
         if (end_partial_page != 0) {
            m_kernel.MemoryManager().Close(end_partial_page, 1);
         }
-    });
+    };
 
     ON_RESULT_FAILURE {
         if (cur_mapped_addr != dst_addr) {

@@ -5166,10 +5174,10 @@ Result KPageTableBase::MapPhysicalMemory(KProcessAddress address, size_t size) {
         GetCurrentProcess(m_kernel).GetId(), m_heap_fill_value));
 
     // If we fail in the next bit (or retry), we need to cleanup the pages.
-    auto pg_guard = SCOPE_GUARD({
+    auto pg_guard = SCOPE_GUARD {
         pg.OpenFirst();
         pg.Close();
-    });
+    };
 
     // Map the memory.
     {

@@ -5694,7 +5702,9 @@ Result KPageTableBase::Operate(PageLinkedList* page_list, KProcessAddress virt_a
 
     // Ensure that any pages we track are closed on exit.
     KPageGroup pages_to_close(m_kernel, this->GetBlockInfoManager());
-    SCOPE_EXIT({ pages_to_close.CloseAndReset(); });
+    SCOPE_EXIT {
+        pages_to_close.CloseAndReset();
+    };
 
     // Make a page group representing the region to unmap.
     this->MakePageGroup(pages_to_close, virt_addr, num_pages);

@@ -77,7 +77,9 @@ Result TerminateChildren(KernelCore& kernel, KProcess* process,
     }
 
     // Terminate and close the thread.
-    SCOPE_EXIT({ cur_child->Close(); });
+    SCOPE_EXIT {
+        cur_child->Close();
+    };
 
     if (const Result terminate_result = cur_child->Terminate();
         ResultTerminationRequested == terminate_result) {

@@ -466,11 +468,11 @@ void KProcess::DoWorkerTaskImpl() {
 
 Result KProcess::StartTermination() {
     // Finalize the handle table when we're done, if the process isn't immortal.
-    SCOPE_EXIT({
+    SCOPE_EXIT {
         if (!m_is_immortal) {
             this->FinalizeHandleTable();
         }
-    });
+    };
 
     // Terminate child threads other than the current one.
     R_RETURN(TerminateChildren(m_kernel, this, GetCurrentThreadPointer(m_kernel)));

@@ -964,7 +966,9 @@ Result KProcess::Run(s32 priority, size_t stack_size) {
     // Create a new thread for the process.
     KThread* main_thread = KThread::Create(m_kernel);
     R_UNLESS(main_thread != nullptr, ResultOutOfResource);
-    SCOPE_EXIT({ main_thread->Close(); });
+    SCOPE_EXIT {
+        main_thread->Close();
+    };
 
     // Initialize the thread.
     R_TRY(KThread::InitializeUserThread(m_kernel.System(), main_thread, this->GetEntryPoint(), 0,

@@ -1155,7 +1159,9 @@ Result KProcess::LoadFromMetadata(const FileSys::ProgramMetadata& metadata, std:
         Kernel::CreateResourceLimitForProcess(m_kernel.System(), physical_memory_size);
 
     // Ensure we maintain a clean state on exit.
-    SCOPE_EXIT({ res_limit->Close(); });
+    SCOPE_EXIT {
+        res_limit->Close();
+    };
 
     // Declare flags and code address.
     Svc::CreateProcessFlag flag{};

@@ -651,11 +651,11 @@ Result ReceiveMessage(KernelCore& kernel, bool& recv_list_broken, uint64_t dst_m
     // Process any special data.
     if (src_header.GetHasSpecialHeader()) {
         // After we process, make sure we track whether the receive list is broken.
-        SCOPE_EXIT({
+        SCOPE_EXIT {
             if (offset > dst_recv_list_idx) {
                 recv_list_broken = true;
             }
-        });
+        };
 
         // Process special data.
         R_TRY(ProcessMessageSpecialData<false>(offset, dst_process, src_process, src_thread,

@@ -665,11 +665,11 @@ Result ReceiveMessage(KernelCore& kernel, bool& recv_list_broken, uint64_t dst_m
     // Process any pointer buffers.
     for (auto i = 0; i < src_header.GetPointerCount(); ++i) {
         // After we process, make sure we track whether the receive list is broken.
-        SCOPE_EXIT({
+        SCOPE_EXIT {
             if (offset > dst_recv_list_idx) {
                 recv_list_broken = true;
             }
-        });
+        };
 
         R_TRY(ProcessReceiveMessagePointerDescriptors(
             offset, pointer_key, dst_page_table, src_page_table, dst_msg, src_msg, dst_recv_list,

@@ -680,11 +680,11 @@ Result ReceiveMessage(KernelCore& kernel, bool& recv_list_broken, uint64_t dst_m
     // Process any map alias buffers.
     for (auto i = 0; i < src_header.GetMapAliasCount(); ++i) {
         // After we process, make sure we track whether the receive list is broken.
-        SCOPE_EXIT({
+        SCOPE_EXIT {
             if (offset > dst_recv_list_idx) {
                 recv_list_broken = true;
             }
-        });
+        };
 
         // We process in order send, recv, exch. Buffers after send (recv/exch) are ReadWrite.
         const KMemoryPermission perm = (i >= src_header.GetSendCount())

@@ -702,11 +702,11 @@ Result ReceiveMessage(KernelCore& kernel, bool& recv_list_broken, uint64_t dst_m
     // Process any raw data.
    if (const auto raw_count = src_header.GetRawCount(); raw_count != 0) {
         // After we process, make sure we track whether the receive list is broken.
-        SCOPE_EXIT({
+        SCOPE_EXIT {
             if (offset + raw_count > dst_recv_list_idx) {
                 recv_list_broken = true;
             }
-        });
+        };
 
         // Get the offset and size.
         const size_t offset_words = offset * sizeof(u32);

@@ -1124,7 +1124,9 @@ Result KServerSession::ReceiveRequest(uintptr_t server_message, uintptr_t server
         client_thread->Open();
     }
 
-    SCOPE_EXIT({ client_thread->Close(); });
+    SCOPE_EXIT {
+        client_thread->Close();
+    };
 
     // Set the request as our current.
     m_current_request = request;

@@ -1174,7 +1176,9 @@ Result KServerSession::ReceiveRequest(uintptr_t server_message, uintptr_t server
     // Reply to the client.
     {
         // After we reply, close our reference to the request.
-        SCOPE_EXIT({ request->Close(); });
+        SCOPE_EXIT {
+            request->Close();
+        };
 
         // Get the event to check whether the request is async.
         if (KEvent* event = request->GetEvent(); event != nullptr) {

@@ -1236,7 +1240,9 @@ Result KServerSession::SendReply(uintptr_t server_message, uintptr_t server_buff
     }
 
     // Close reference to the request once we're done processing it.
-    SCOPE_EXIT({ request->Close(); });
+    SCOPE_EXIT {
+        request->Close();
+    };
 
     // Extract relevant information from the request.
     const uint64_t client_message = request->GetAddress();

@@ -1394,7 +1400,9 @@ void KServerSession::CleanupRequests() {
         }
 
         // Close a reference to the request once it's cleaned up.
-        SCOPE_EXIT({ request->Close(); });
+        SCOPE_EXIT {
+            request->Close();
+        };
 
         // Extract relevant information from the request.
         const uint64_t client_message = request->GetAddress();

@@ -1491,7 +1499,9 @@ void KServerSession::OnClientClosed() {
         ASSERT(thread != nullptr);
 
         // Ensure that we close the request when done.
-        SCOPE_EXIT({ request->Close(); });
+        SCOPE_EXIT {
+            request->Close();
+        };
 
         // If we're terminating, close a reference to the thread and event.
         if (terminate) {

@@ -21,7 +21,9 @@ Result KThreadLocalPage::Initialize(KernelCore& kernel, KProcess* process) {
     // Allocate a new page.
     KPageBuffer* page_buf = KPageBuffer::Allocate(kernel);
     R_UNLESS(page_buf != nullptr, ResultOutOfMemory);
-    auto page_buf_guard = SCOPE_GUARD({ KPageBuffer::Free(kernel, page_buf); });
+    auto page_buf_guard = SCOPE_GUARD {
+        KPageBuffer::Free(kernel, page_buf);
+    };
 
     // Map the address in.
     const auto phys_addr = kernel.System().DeviceMemory().GetPhysicalAddr(page_buf);

@@ -24,7 +24,9 @@ Result KTransferMemory::Initialize(KProcessAddress addr, std::size_t size,
 
     // Construct the page group, guarding to make sure our state is valid on exit.
     m_page_group.emplace(m_kernel, page_table.GetBlockInfoManager());
-    auto pg_guard = SCOPE_GUARD({ m_page_group.reset(); });
+    auto pg_guard = SCOPE_GUARD {
+        m_page_group.reset();
+    };
 
     // Lock the memory.
     R_TRY(page_table.LockForTransferMemory(std::addressof(*m_page_group), addr, size,

@@ -109,7 +109,9 @@ struct KernelCore::Impl {
 
     void Shutdown() {
         is_shutting_down.store(true, std::memory_order_relaxed);
-        SCOPE_EXIT({ is_shutting_down.store(false, std::memory_order_relaxed); });
+        SCOPE_EXIT {
+            is_shutting_down.store(false, std::memory_order_relaxed);
+        };
 
         CloseServices();
 

@@ -1080,7 +1082,9 @@ std::jthread KernelCore::RunOnHostCoreProcess(std::string&& process_name,
         process->Initialize(Svc::CreateProcessParameter{}, GetSystemResourceLimit(), false)));
 
     // Ensure that we don't hold onto any extra references.
-    SCOPE_EXIT({ process->Close(); });
+    SCOPE_EXIT {
+        process->Close();
+    };
 
     // Register the new process.
     KProcess::Register(*this, process);

@@ -1108,7 +1112,9 @@ void KernelCore::RunOnGuestCoreProcess(std::string&& process_name, std::function
         process->Initialize(Svc::CreateProcessParameter{}, GetSystemResourceLimit(), false)));
 
     // Ensure that we don't hold onto any extra references.
-    SCOPE_EXIT({ process->Close(); });
+    SCOPE_EXIT {
+        process->Close();
+    };
 
     // Register the new process.
     KProcess::Register(*this, process);

@@ -45,7 +45,9 @@ Result CreateCodeMemory(Core::System& system, Handle* out, u64 address, uint64_t
 
     KCodeMemory* code_mem = KCodeMemory::Create(kernel);
     R_UNLESS(code_mem != nullptr, ResultOutOfResource);
-    SCOPE_EXIT({ code_mem->Close(); });
+    SCOPE_EXIT {
+        code_mem->Close();
+    };
 
     // Verify that the region is in range.
     R_UNLESS(GetCurrentProcess(system.Kernel()).GetPageTable().Contains(address, size),

@@ -28,7 +28,9 @@ Result CreateDeviceAddressSpace(Core::System& system, Handle* out, uint64_t das_
     // Create the device address space.
     KDeviceAddressSpace* das = KDeviceAddressSpace::Create(system.Kernel());
     R_UNLESS(das != nullptr, ResultOutOfResource);
-    SCOPE_EXIT({ das->Close(); });
+    SCOPE_EXIT {
+        das->Close();
+    };
 
     // Initialize the device address space.
     R_TRY(das->Initialize(das_address, das_size));

@@ -72,10 +72,10 @@ Result CreateEvent(Core::System& system, Handle* out_write, Handle* out_read) {
     event_reservation.Commit();
 
     // Ensure that we clean up the event (and its only references are handle table) on function end.
-    SCOPE_EXIT({
+    SCOPE_EXIT {
         event->GetReadableEvent().Close();
         event->Close();
-    });
+    };
 
     // Register the event.
     KEvent::Register(kernel, event);

@@ -129,11 +129,11 @@ Result ReplyAndReceiveImpl(KernelCore& kernel, int32_t* out_index, uintptr_t mes
     }
 
     // Ensure handles are closed when we're done.
-    SCOPE_EXIT({
+    SCOPE_EXIT {
         for (auto i = 0; i < num_handles; ++i) {
             objs[i]->Close();
         }
-    });
+    };
 
     R_RETURN(ReplyAndReceiveImpl(kernel, out_index, message, buffer_size, message_paddr, objs,
                                  num_handles, reply_target, timeout_ns));

@@ -208,10 +208,10 @@ Result SendAsyncRequestWithUserBuffer(Core::System& system, Handle* out_event_ha
     event_reservation.Commit();
 
     // At end of scope, kill the standing references to the sub events.
-    SCOPE_EXIT({
+    SCOPE_EXIT {
         event->GetReadableEvent().Close();
         event->Close();
-    });
+    };
 
     // Register the event.
     KEvent::Register(system.Kernel(), event);

@@ -68,10 +68,10 @@ Result CreatePort(Core::System& system, Handle* out_server, Handle* out_client,
     port->Initialize(max_sessions, is_light, name);
 
     // Ensure that we clean up the port (and its only references are handle table) on function end.
-    SCOPE_EXIT({
+    SCOPE_EXIT {
         port->GetServerPort().Close();
         port->GetClientPort().Close();
-    });
+    };
 
     // Register the port.
     KPort::Register(kernel, port);

@@ -150,10 +150,10 @@ Result ManageNamedPort(Core::System& system, Handle* out_server_handle, uint64_t
     KPort::Register(system.Kernel(), port);
 
     // Ensure that our only reference to the port is in the handle table when we're done.
-    SCOPE_EXIT({
+    SCOPE_EXIT {
         port->GetClientPort().Close();
         port->GetServerPort().Close();
-    });
+    };
 
     // Register the handle in the table.
     R_TRY(handle_table.Add(out_server_handle, std::addressof(port->GetServerPort())));

@@ -18,7 +18,9 @@ Result CreateResourceLimit(Core::System& system, Handle* out_handle) {
     R_UNLESS(resource_limit != nullptr, ResultOutOfResource);
 
     // Ensure we don't leak a reference to the limit.
-    SCOPE_EXIT({ resource_limit->Close(); });
+    SCOPE_EXIT {
+        resource_limit->Close();
+    };
 
     // Initialize the resource limit.
     resource_limit->Initialize();

@@ -69,10 +69,10 @@ Result CreateSession(Core::System& system, Handle* out_server, Handle* out_clien
 
     // Ensure that we clean up the session (and its only references are handle table) on function
     // end.
-    SCOPE_EXIT({
+    SCOPE_EXIT {
         session->GetClientSession().Close();
         session->GetServerSession().Close();
-    });
+    };
 
     // Register the session.
     T::Register(system.Kernel(), session);

@@ -78,11 +78,11 @@ Result WaitSynchronization(Core::System& system, int32_t* out_index, u64 user_ha
     }
 
     // Ensure handles are closed when we're done.
-    SCOPE_EXIT({
+    SCOPE_EXIT {
         for (auto i = 0; i < num_handles; ++i) {
             objs[i]->Close();
         }
-    });
+    };
 
     // Convert the timeout from nanoseconds to ticks.
     s64 timeout;

@@ -51,7 +51,9 @@ Result CreateThread(Core::System& system, Handle* out_handle, u64 entry_point, u
     // Create the thread.
     KThread* thread = KThread::Create(kernel);
     R_UNLESS(thread != nullptr, ResultOutOfResource);
-    SCOPE_EXIT({ thread->Close(); });
+    SCOPE_EXIT {
+        thread->Close();
+    };
 
     // Initialize the thread.
     {

@@ -52,7 +52,9 @@ Result CreateTransferMemory(Core::System& system, Handle* out, u64 address, u64
     R_UNLESS(trmem != nullptr, ResultOutOfResource);
 
     // Ensure the only reference is in the handle table when we're done.
-    SCOPE_EXIT({ trmem->Close(); });
+    SCOPE_EXIT {
+        trmem->Close();
+    };
 
     // Ensure that the region is in range.
     R_UNLESS(process.GetPageTable().Contains(address, size), ResultInvalidCurrentMemory);