mirror of
				https://git.suyu.dev/suyu/suyu
				synced 2025-10-30 23:49:01 -05:00 
			
		
		
		
	Port #4182 from Citra: "Prefix all size_t with std::"
This commit is contained in:
		| @@ -12,7 +12,7 @@ | ||||
| namespace IPC { | ||||
|  | ||||
| /// Size of the command buffer area, in 32-bit words. | ||||
| constexpr size_t COMMAND_BUFFER_LENGTH = 0x100 / sizeof(u32); | ||||
| constexpr std::size_t COMMAND_BUFFER_LENGTH = 0x100 / sizeof(u32); | ||||
|  | ||||
| // These errors are commonly returned by invalid IPC translations, so alias them here for | ||||
| // convenience. | ||||
|   | ||||
| @@ -152,8 +152,8 @@ public: | ||||
|     } | ||||
|  | ||||
|     void ValidateHeader() { | ||||
|         const size_t num_domain_objects = context->NumDomainObjects(); | ||||
|         const size_t num_move_objects = context->NumMoveObjects(); | ||||
|         const std::size_t num_domain_objects = context->NumDomainObjects(); | ||||
|         const std::size_t num_move_objects = context->NumMoveObjects(); | ||||
|         ASSERT_MSG(!num_domain_objects || !num_move_objects, | ||||
|                    "cannot move normal handles and domain objects"); | ||||
|         ASSERT_MSG((index - datapayload_index) == normal_params_size, | ||||
| @@ -329,10 +329,10 @@ public: | ||||
|     T PopRaw(); | ||||
|  | ||||
|     template <typename T> | ||||
|     Kernel::SharedPtr<T> GetMoveObject(size_t index); | ||||
|     Kernel::SharedPtr<T> GetMoveObject(std::size_t index); | ||||
|  | ||||
|     template <typename T> | ||||
|     Kernel::SharedPtr<T> GetCopyObject(size_t index); | ||||
|     Kernel::SharedPtr<T> GetCopyObject(std::size_t index); | ||||
|  | ||||
|     template <class T> | ||||
|     std::shared_ptr<T> PopIpcInterface() { | ||||
| @@ -406,12 +406,12 @@ void RequestParser::Pop(First& first_value, Other&... other_values) { | ||||
| } | ||||
|  | ||||
| template <typename T> | ||||
| Kernel::SharedPtr<T> RequestParser::GetMoveObject(size_t index) { | ||||
| Kernel::SharedPtr<T> RequestParser::GetMoveObject(std::size_t index) { | ||||
|     return context->GetMoveObject<T>(index); | ||||
| } | ||||
|  | ||||
| template <typename T> | ||||
| Kernel::SharedPtr<T> RequestParser::GetCopyObject(size_t index) { | ||||
| Kernel::SharedPtr<T> RequestParser::GetCopyObject(std::size_t index) { | ||||
|     return context->GetCopyObject<T>(index); | ||||
| } | ||||
|  | ||||
|   | ||||
| @@ -35,16 +35,17 @@ static ResultCode WaitForAddress(VAddr address, s64 timeout) { | ||||
|  | ||||
| // Gets the threads waiting on an address. | ||||
| static std::vector<SharedPtr<Thread>> GetThreadsWaitingOnAddress(VAddr address) { | ||||
|     const auto RetrieveWaitingThreads = | ||||
|         [](size_t core_index, std::vector<SharedPtr<Thread>>& waiting_threads, VAddr arb_addr) { | ||||
|             const auto& scheduler = Core::System::GetInstance().Scheduler(core_index); | ||||
|             auto& thread_list = scheduler->GetThreadList(); | ||||
|     const auto RetrieveWaitingThreads = [](std::size_t core_index, | ||||
|                                            std::vector<SharedPtr<Thread>>& waiting_threads, | ||||
|                                            VAddr arb_addr) { | ||||
|         const auto& scheduler = Core::System::GetInstance().Scheduler(core_index); | ||||
|         auto& thread_list = scheduler->GetThreadList(); | ||||
|  | ||||
|             for (auto& thread : thread_list) { | ||||
|                 if (thread->arb_wait_address == arb_addr) | ||||
|                     waiting_threads.push_back(thread); | ||||
|             } | ||||
|         }; | ||||
|         for (auto& thread : thread_list) { | ||||
|             if (thread->arb_wait_address == arb_addr) | ||||
|                 waiting_threads.push_back(thread); | ||||
|         } | ||||
|     }; | ||||
|  | ||||
|     // Retrieve all threads that are waiting for this address. | ||||
|     std::vector<SharedPtr<Thread>> threads; | ||||
| @@ -66,12 +67,12 @@ static std::vector<SharedPtr<Thread>> GetThreadsWaitingOnAddress(VAddr address) | ||||
| static void WakeThreads(std::vector<SharedPtr<Thread>>& waiting_threads, s32 num_to_wake) { | ||||
|     // Only process up to 'target' threads, unless 'target' is <= 0, in which case process | ||||
|     // them all. | ||||
|     size_t last = waiting_threads.size(); | ||||
|     std::size_t last = waiting_threads.size(); | ||||
|     if (num_to_wake > 0) | ||||
|         last = num_to_wake; | ||||
|  | ||||
|     // Signal the waiting threads. | ||||
|     for (size_t i = 0; i < last; i++) { | ||||
|     for (std::size_t i = 0; i < last; i++) { | ||||
|         ASSERT(waiting_threads[i]->status == ThreadStatus::WaitArb); | ||||
|         waiting_threads[i]->SetWaitSynchronizationResult(RESULT_SUCCESS); | ||||
|         waiting_threads[i]->arb_wait_address = 0; | ||||
|   | ||||
| @@ -65,7 +65,7 @@ ResultCode HandleTable::Close(Handle handle) { | ||||
| } | ||||
|  | ||||
| bool HandleTable::IsValid(Handle handle) const { | ||||
|     size_t slot = GetSlot(handle); | ||||
|     std::size_t slot = GetSlot(handle); | ||||
|     u16 generation = GetGeneration(handle); | ||||
|  | ||||
|     return slot < MAX_COUNT && objects[slot] != nullptr && generations[slot] == generation; | ||||
|   | ||||
| @@ -93,7 +93,7 @@ private: | ||||
|      * This is the maximum limit of handles allowed per process in CTR-OS. It can be further | ||||
|      * reduced by ExHeader values, but this is not emulated here. | ||||
|      */ | ||||
|     static const size_t MAX_COUNT = 4096; | ||||
|     static const std::size_t MAX_COUNT = 4096; | ||||
|  | ||||
|     static u16 GetSlot(Handle handle) { | ||||
|         return handle >> 15; | ||||
|   | ||||
| @@ -42,9 +42,9 @@ SharedPtr<Event> HLERequestContext::SleepClientThread(SharedPtr<Thread> thread, | ||||
|                                                       Kernel::SharedPtr<Kernel::Event> event) { | ||||
|  | ||||
|     // Put the client thread to sleep until the wait event is signaled or the timeout expires. | ||||
|     thread->wakeup_callback = | ||||
|         [context = *this, callback](ThreadWakeupReason reason, SharedPtr<Thread> thread, | ||||
|                                     SharedPtr<WaitObject> object, size_t index) mutable -> bool { | ||||
|     thread->wakeup_callback = [context = *this, callback]( | ||||
|                                   ThreadWakeupReason reason, SharedPtr<Thread> thread, | ||||
|                                   SharedPtr<WaitObject> object, std::size_t index) mutable -> bool { | ||||
|         ASSERT(thread->status == ThreadStatus::WaitHLEEvent); | ||||
|         callback(thread, context, reason); | ||||
|         context.WriteToOutgoingCommandBuffer(*thread); | ||||
| @@ -199,8 +199,8 @@ ResultCode HLERequestContext::PopulateFromIncomingCommandBuffer(u32_le* src_cmdb | ||||
|     } | ||||
|  | ||||
|     // The data_size already includes the payload header, the padding and the domain header. | ||||
|     size_t size = data_payload_offset + command_header->data_size - | ||||
|                   sizeof(IPC::DataPayloadHeader) / sizeof(u32) - 4; | ||||
|     std::size_t size = data_payload_offset + command_header->data_size - | ||||
|                        sizeof(IPC::DataPayloadHeader) / sizeof(u32) - 4; | ||||
|     if (domain_message_header) | ||||
|         size -= sizeof(IPC::DomainMessageHeader) / sizeof(u32); | ||||
|     std::copy_n(src_cmdbuf, size, cmd_buf.begin()); | ||||
| @@ -217,8 +217,8 @@ ResultCode HLERequestContext::WriteToOutgoingCommandBuffer(const Thread& thread) | ||||
|     ParseCommandBuffer(cmd_buf.data(), false); | ||||
|  | ||||
|     // The data_size already includes the payload header, the padding and the domain header. | ||||
|     size_t size = data_payload_offset + command_header->data_size - | ||||
|                   sizeof(IPC::DataPayloadHeader) / sizeof(u32) - 4; | ||||
|     std::size_t size = data_payload_offset + command_header->data_size - | ||||
|                        sizeof(IPC::DataPayloadHeader) / sizeof(u32) - 4; | ||||
|     if (domain_message_header) | ||||
|         size -= sizeof(IPC::DomainMessageHeader) / sizeof(u32); | ||||
|  | ||||
| @@ -229,7 +229,7 @@ ResultCode HLERequestContext::WriteToOutgoingCommandBuffer(const Thread& thread) | ||||
|                    "Handle descriptor bit set but no handles to translate"); | ||||
|         // We write the translated handles at a specific offset in the command buffer, this space | ||||
|         // was already reserved when writing the header. | ||||
|         size_t current_offset = | ||||
|         std::size_t current_offset = | ||||
|             (sizeof(IPC::CommandHeader) + sizeof(IPC::HandleDescriptorHeader)) / sizeof(u32); | ||||
|         ASSERT_MSG(!handle_descriptor_header->send_current_pid, "Sending PID is not implemented"); | ||||
|  | ||||
| @@ -258,7 +258,7 @@ ResultCode HLERequestContext::WriteToOutgoingCommandBuffer(const Thread& thread) | ||||
|         ASSERT(domain_message_header->num_objects == domain_objects.size()); | ||||
|         // Write the domain objects to the command buffer, these go after the raw untranslated data. | ||||
|         // TODO(Subv): This completely ignores C buffers. | ||||
|         size_t domain_offset = size - domain_message_header->num_objects; | ||||
|         std::size_t domain_offset = size - domain_message_header->num_objects; | ||||
|         auto& request_handlers = server_session->domain_request_handlers; | ||||
|  | ||||
|         for (auto& object : domain_objects) { | ||||
| @@ -291,14 +291,15 @@ std::vector<u8> HLERequestContext::ReadBuffer(int buffer_index) const { | ||||
|     return buffer; | ||||
| } | ||||
|  | ||||
| size_t HLERequestContext::WriteBuffer(const void* buffer, size_t size, int buffer_index) const { | ||||
| std::size_t HLERequestContext::WriteBuffer(const void* buffer, std::size_t size, | ||||
|                                            int buffer_index) const { | ||||
|     if (size == 0) { | ||||
|         LOG_WARNING(Core, "skip empty buffer write"); | ||||
|         return 0; | ||||
|     } | ||||
|  | ||||
|     const bool is_buffer_b{BufferDescriptorB().size() && BufferDescriptorB()[buffer_index].Size()}; | ||||
|     const size_t buffer_size{GetWriteBufferSize(buffer_index)}; | ||||
|     const std::size_t buffer_size{GetWriteBufferSize(buffer_index)}; | ||||
|     if (size > buffer_size) { | ||||
|         LOG_CRITICAL(Core, "size ({:016X}) is greater than buffer_size ({:016X})", size, | ||||
|                      buffer_size); | ||||
| @@ -314,13 +315,13 @@ size_t HLERequestContext::WriteBuffer(const void* buffer, size_t size, int buffe | ||||
|     return size; | ||||
| } | ||||
|  | ||||
| size_t HLERequestContext::GetReadBufferSize(int buffer_index) const { | ||||
| std::size_t HLERequestContext::GetReadBufferSize(int buffer_index) const { | ||||
|     const bool is_buffer_a{BufferDescriptorA().size() && BufferDescriptorA()[buffer_index].Size()}; | ||||
|     return is_buffer_a ? BufferDescriptorA()[buffer_index].Size() | ||||
|                        : BufferDescriptorX()[buffer_index].Size(); | ||||
| } | ||||
|  | ||||
| size_t HLERequestContext::GetWriteBufferSize(int buffer_index) const { | ||||
| std::size_t HLERequestContext::GetWriteBufferSize(int buffer_index) const { | ||||
|     const bool is_buffer_b{BufferDescriptorB().size() && BufferDescriptorB()[buffer_index].Size()}; | ||||
|     return is_buffer_b ? BufferDescriptorB()[buffer_index].Size() | ||||
|                        : BufferDescriptorC()[buffer_index].Size(); | ||||
|   | ||||
| @@ -170,7 +170,7 @@ public: | ||||
|     std::vector<u8> ReadBuffer(int buffer_index = 0) const; | ||||
|  | ||||
|     /// Helper function to write a buffer using the appropriate buffer descriptor | ||||
|     size_t WriteBuffer(const void* buffer, size_t size, int buffer_index = 0) const; | ||||
|     std::size_t WriteBuffer(const void* buffer, std::size_t size, int buffer_index = 0) const; | ||||
|  | ||||
|     /* Helper function to write a buffer using the appropriate buffer descriptor | ||||
|      * | ||||
| @@ -182,7 +182,7 @@ public: | ||||
|      */ | ||||
|     template <typename ContiguousContainer, | ||||
|               typename = std::enable_if_t<!std::is_pointer_v<ContiguousContainer>>> | ||||
|     size_t WriteBuffer(const ContiguousContainer& container, int buffer_index = 0) const { | ||||
|     std::size_t WriteBuffer(const ContiguousContainer& container, int buffer_index = 0) const { | ||||
|         using ContiguousType = typename ContiguousContainer::value_type; | ||||
|  | ||||
|         static_assert(std::is_trivially_copyable_v<ContiguousType>, | ||||
| @@ -193,19 +193,19 @@ public: | ||||
|     } | ||||
|  | ||||
|     /// Helper function to get the size of the input buffer | ||||
|     size_t GetReadBufferSize(int buffer_index = 0) const; | ||||
|     std::size_t GetReadBufferSize(int buffer_index = 0) const; | ||||
|  | ||||
|     /// Helper function to get the size of the output buffer | ||||
|     size_t GetWriteBufferSize(int buffer_index = 0) const; | ||||
|     std::size_t GetWriteBufferSize(int buffer_index = 0) const; | ||||
|  | ||||
|     template <typename T> | ||||
|     SharedPtr<T> GetCopyObject(size_t index) { | ||||
|     SharedPtr<T> GetCopyObject(std::size_t index) { | ||||
|         ASSERT(index < copy_objects.size()); | ||||
|         return DynamicObjectCast<T>(copy_objects[index]); | ||||
|     } | ||||
|  | ||||
|     template <typename T> | ||||
|     SharedPtr<T> GetMoveObject(size_t index) { | ||||
|     SharedPtr<T> GetMoveObject(std::size_t index) { | ||||
|         ASSERT(index < move_objects.size()); | ||||
|         return DynamicObjectCast<T>(move_objects[index]); | ||||
|     } | ||||
| @@ -223,7 +223,7 @@ public: | ||||
|     } | ||||
|  | ||||
|     template <typename T> | ||||
|     std::shared_ptr<T> GetDomainRequestHandler(size_t index) const { | ||||
|     std::shared_ptr<T> GetDomainRequestHandler(std::size_t index) const { | ||||
|         return std::static_pointer_cast<T>(domain_request_handlers[index]); | ||||
|     } | ||||
|  | ||||
| @@ -240,15 +240,15 @@ public: | ||||
|         domain_objects.clear(); | ||||
|     } | ||||
|  | ||||
|     size_t NumMoveObjects() const { | ||||
|     std::size_t NumMoveObjects() const { | ||||
|         return move_objects.size(); | ||||
|     } | ||||
|  | ||||
|     size_t NumCopyObjects() const { | ||||
|     std::size_t NumCopyObjects() const { | ||||
|         return copy_objects.size(); | ||||
|     } | ||||
|  | ||||
|     size_t NumDomainObjects() const { | ||||
|     std::size_t NumDomainObjects() const { | ||||
|         return domain_objects.size(); | ||||
|     } | ||||
|  | ||||
|   | ||||
| @@ -40,8 +40,8 @@ SharedPtr<Process> Process::Create(KernelCore& kernel, std::string&& name) { | ||||
|     return process; | ||||
| } | ||||
|  | ||||
| void Process::ParseKernelCaps(const u32* kernel_caps, size_t len) { | ||||
|     for (size_t i = 0; i < len; ++i) { | ||||
| void Process::ParseKernelCaps(const u32* kernel_caps, std::size_t len) { | ||||
|     for (std::size_t i = 0; i < len; ++i) { | ||||
|         u32 descriptor = kernel_caps[i]; | ||||
|         u32 type = descriptor >> 20; | ||||
|  | ||||
| @@ -211,7 +211,7 @@ ResultCode Process::MirrorMemory(VAddr dst_addr, VAddr src_addr, u64 size) { | ||||
|                "Shared memory exceeds bounds of mapped block"); | ||||
|  | ||||
|     const std::shared_ptr<std::vector<u8>>& backing_block = vma->second.backing_block; | ||||
|     size_t backing_block_offset = vma->second.offset + vma_offset; | ||||
|     std::size_t backing_block_offset = vma->second.offset + vma_offset; | ||||
|  | ||||
|     CASCADE_RESULT(auto new_vma, | ||||
|                    vm_manager.MapMemoryBlock(dst_addr, backing_block, backing_block_offset, size, | ||||
|   | ||||
| @@ -59,7 +59,7 @@ class ResourceLimit; | ||||
|  | ||||
| struct CodeSet final : public Object { | ||||
|     struct Segment { | ||||
|         size_t offset = 0; | ||||
|         std::size_t offset = 0; | ||||
|         VAddr addr = 0; | ||||
|         u32 size = 0; | ||||
|     }; | ||||
| @@ -164,7 +164,7 @@ public: | ||||
|      * Parses a list of kernel capability descriptors (as found in the ExHeader) and applies them | ||||
|      * to this process. | ||||
|      */ | ||||
|     void ParseKernelCaps(const u32* kernel_caps, size_t len); | ||||
|     void ParseKernelCaps(const u32* kernel_caps, std::size_t len); | ||||
|  | ||||
|     /** | ||||
|      * Applies address space changes and launches the process main thread. | ||||
|   | ||||
| @@ -119,7 +119,7 @@ public: | ||||
|     /// Backing memory for this shared memory block. | ||||
|     std::shared_ptr<std::vector<u8>> backing_block; | ||||
|     /// Offset into the backing block for this shared memory. | ||||
|     size_t backing_block_offset; | ||||
|     std::size_t backing_block_offset; | ||||
|     /// Size of the memory block. Page-aligned. | ||||
|     u64 size; | ||||
|     /// Permission restrictions applied to the process which created the block. | ||||
|   | ||||
| @@ -146,7 +146,7 @@ static ResultCode GetProcessId(u32* process_id, Handle process_handle) { | ||||
|  | ||||
| /// Default thread wakeup callback for WaitSynchronization | ||||
| static bool DefaultThreadWakeupCallback(ThreadWakeupReason reason, SharedPtr<Thread> thread, | ||||
|                                         SharedPtr<WaitObject> object, size_t index) { | ||||
|                                         SharedPtr<WaitObject> object, std::size_t index) { | ||||
|     ASSERT(thread->status == ThreadStatus::WaitSynchAny); | ||||
|  | ||||
|     if (reason == ThreadWakeupReason::Timeout) { | ||||
| @@ -647,16 +647,17 @@ static ResultCode SignalProcessWideKey(VAddr condition_variable_addr, s32 target | ||||
|     LOG_TRACE(Kernel_SVC, "called, condition_variable_addr=0x{:X}, target=0x{:08X}", | ||||
|               condition_variable_addr, target); | ||||
|  | ||||
|     auto RetrieveWaitingThreads = | ||||
|         [](size_t core_index, std::vector<SharedPtr<Thread>>& waiting_threads, VAddr condvar_addr) { | ||||
|             const auto& scheduler = Core::System::GetInstance().Scheduler(core_index); | ||||
|             auto& thread_list = scheduler->GetThreadList(); | ||||
|     auto RetrieveWaitingThreads = [](std::size_t core_index, | ||||
|                                      std::vector<SharedPtr<Thread>>& waiting_threads, | ||||
|                                      VAddr condvar_addr) { | ||||
|         const auto& scheduler = Core::System::GetInstance().Scheduler(core_index); | ||||
|         auto& thread_list = scheduler->GetThreadList(); | ||||
|  | ||||
|             for (auto& thread : thread_list) { | ||||
|                 if (thread->condvar_wait_address == condvar_addr) | ||||
|                     waiting_threads.push_back(thread); | ||||
|             } | ||||
|         }; | ||||
|         for (auto& thread : thread_list) { | ||||
|             if (thread->condvar_wait_address == condvar_addr) | ||||
|                 waiting_threads.push_back(thread); | ||||
|         } | ||||
|     }; | ||||
|  | ||||
|     // Retrieve a list of all threads that are waiting for this condition variable. | ||||
|     std::vector<SharedPtr<Thread>> waiting_threads; | ||||
| @@ -672,7 +673,7 @@ static ResultCode SignalProcessWideKey(VAddr condition_variable_addr, s32 target | ||||
|  | ||||
|     // Only process up to 'target' threads, unless 'target' is -1, in which case process | ||||
|     // them all. | ||||
|     size_t last = waiting_threads.size(); | ||||
|     std::size_t last = waiting_threads.size(); | ||||
|     if (target != -1) | ||||
|         last = target; | ||||
|  | ||||
| @@ -680,12 +681,12 @@ static ResultCode SignalProcessWideKey(VAddr condition_variable_addr, s32 target | ||||
|     if (last > waiting_threads.size()) | ||||
|         return RESULT_SUCCESS; | ||||
|  | ||||
|     for (size_t index = 0; index < last; ++index) { | ||||
|     for (std::size_t index = 0; index < last; ++index) { | ||||
|         auto& thread = waiting_threads[index]; | ||||
|  | ||||
|         ASSERT(thread->condvar_wait_address == condition_variable_addr); | ||||
|  | ||||
|         size_t current_core = Core::System::GetInstance().CurrentCoreIndex(); | ||||
|         std::size_t current_core = Core::System::GetInstance().CurrentCoreIndex(); | ||||
|  | ||||
|         auto& monitor = Core::System::GetInstance().Monitor(); | ||||
|  | ||||
|   | ||||
| @@ -275,7 +275,7 @@ ResultVal<SharedPtr<Thread>> Thread::Create(KernelCore& kernel, std::string name | ||||
|         available_slot = 0; // Use the first slot in the new page | ||||
|  | ||||
|         // Allocate some memory from the end of the linear heap for this region. | ||||
|         const size_t offset = thread->tls_memory->size(); | ||||
|         const std::size_t offset = thread->tls_memory->size(); | ||||
|         thread->tls_memory->insert(thread->tls_memory->end(), Memory::PAGE_SIZE, 0); | ||||
|  | ||||
|         auto& vm_manager = owner_process->vm_manager; | ||||
|   | ||||
| @@ -254,7 +254,7 @@ public: | ||||
|     Handle callback_handle; | ||||
|  | ||||
|     using WakeupCallback = bool(ThreadWakeupReason reason, SharedPtr<Thread> thread, | ||||
|                                 SharedPtr<WaitObject> object, size_t index); | ||||
|                                 SharedPtr<WaitObject> object, std::size_t index); | ||||
|     // Callback that will be invoked when the thread is resumed from a waiting state. If the thread | ||||
|     // was waiting via WaitSynchronizationN then the object will be the last object that became | ||||
|     // available. In case of a timeout, the object will be nullptr. | ||||
|   | ||||
| @@ -86,7 +86,7 @@ VMManager::VMAHandle VMManager::FindVMA(VAddr target) const { | ||||
|  | ||||
| ResultVal<VMManager::VMAHandle> VMManager::MapMemoryBlock(VAddr target, | ||||
|                                                           std::shared_ptr<std::vector<u8>> block, | ||||
|                                                           size_t offset, u64 size, | ||||
|                                                           std::size_t offset, u64 size, | ||||
|                                                           MemoryState state) { | ||||
|     ASSERT(block != nullptr); | ||||
|     ASSERT(offset + size <= block->size()); | ||||
|   | ||||
| @@ -81,7 +81,7 @@ struct VirtualMemoryArea { | ||||
|     /// Memory block backing this VMA. | ||||
|     std::shared_ptr<std::vector<u8>> backing_block = nullptr; | ||||
|     /// Offset into the backing_memory the mapping starts from. | ||||
|     size_t offset = 0; | ||||
|     std::size_t offset = 0; | ||||
|  | ||||
|     // Settings for type = BackingMemory | ||||
|     /// Pointer backing this VMA. It will not be destroyed or freed when the VMA is removed. | ||||
| @@ -147,7 +147,7 @@ public: | ||||
|      * @param state MemoryState tag to attach to the VMA. | ||||
|      */ | ||||
|     ResultVal<VMAHandle> MapMemoryBlock(VAddr target, std::shared_ptr<std::vector<u8>> block, | ||||
|                                         size_t offset, u64 size, MemoryState state); | ||||
|                                         std::size_t offset, u64 size, MemoryState state); | ||||
|  | ||||
|     /** | ||||
|      * Maps an unmanaged host memory pointer at a given address. | ||||
|   | ||||
| @@ -81,7 +81,7 @@ void WaitObject::WakeupWaitingThread(SharedPtr<Thread> thread) { | ||||
|         } | ||||
|     } | ||||
|  | ||||
|     size_t index = thread->GetWaitObjectIndex(this); | ||||
|     std::size_t index = thread->GetWaitObjectIndex(this); | ||||
|  | ||||
|     for (auto& object : thread->wait_objects) | ||||
|         object->RemoveWaitingThread(thread.get()); | ||||
|   | ||||
| @@ -33,7 +33,7 @@ ProfileManager::~ProfileManager() = default; | ||||
|  | ||||
| /// After a user's creation it needs to be "registered" to the system. AddToProfiles handles the | ||||
| /// internal management of the users profiles | ||||
| boost::optional<size_t> ProfileManager::AddToProfiles(const ProfileInfo& user) { | ||||
| boost::optional<std::size_t> ProfileManager::AddToProfiles(const ProfileInfo& user) { | ||||
|     if (user_count >= MAX_USERS) { | ||||
|         return boost::none; | ||||
|     } | ||||
| @@ -42,7 +42,7 @@ boost::optional<size_t> ProfileManager::AddToProfiles(const ProfileInfo& user) { | ||||
| } | ||||
|  | ||||
| /// Deletes a specific profile based on its profile index | ||||
| bool ProfileManager::RemoveProfileAtIndex(size_t index) { | ||||
| bool ProfileManager::RemoveProfileAtIndex(std::size_t index) { | ||||
|     if (index >= MAX_USERS || index >= user_count) { | ||||
|         return false; | ||||
|     } | ||||
| @@ -101,7 +101,7 @@ ResultCode ProfileManager::CreateNewUser(UUID uuid, const std::string& username) | ||||
| } | ||||
|  | ||||
| /// Returns a users profile index based on their user id. | ||||
| boost::optional<size_t> ProfileManager::GetUserIndex(const UUID& uuid) const { | ||||
| boost::optional<std::size_t> ProfileManager::GetUserIndex(const UUID& uuid) const { | ||||
|     if (!uuid) { | ||||
|         return boost::none; | ||||
|     } | ||||
| @@ -110,16 +110,17 @@ boost::optional<size_t> ProfileManager::GetUserIndex(const UUID& uuid) const { | ||||
|     if (iter == profiles.end()) { | ||||
|         return boost::none; | ||||
|     } | ||||
|     return static_cast<size_t>(std::distance(profiles.begin(), iter)); | ||||
|     return static_cast<std::size_t>(std::distance(profiles.begin(), iter)); | ||||
| } | ||||
|  | ||||
| /// Returns a users profile index based on their profile | ||||
| boost::optional<size_t> ProfileManager::GetUserIndex(const ProfileInfo& user) const { | ||||
| boost::optional<std::size_t> ProfileManager::GetUserIndex(const ProfileInfo& user) const { | ||||
|     return GetUserIndex(user.user_uuid); | ||||
| } | ||||
|  | ||||
| /// Returns the data structure used by the switch when GetProfileBase is called on acc:* | ||||
| bool ProfileManager::GetProfileBase(boost::optional<size_t> index, ProfileBase& profile) const { | ||||
| bool ProfileManager::GetProfileBase(boost::optional<std::size_t> index, | ||||
|                                     ProfileBase& profile) const { | ||||
|     if (index == boost::none || index >= MAX_USERS) { | ||||
|         return false; | ||||
|     } | ||||
| @@ -143,14 +144,16 @@ bool ProfileManager::GetProfileBase(const ProfileInfo& user, ProfileBase& profil | ||||
|  | ||||
| /// Returns the current user count on the system. We keep a variable which tracks the count so we | ||||
| /// don't have to loop the internal profile array every call. | ||||
| size_t ProfileManager::GetUserCount() const { | ||||
|  | ||||
| std::size_t ProfileManager::GetUserCount() const { | ||||
|     return user_count; | ||||
| } | ||||
|  | ||||
| /// Lists the current "opened" users on the system. Users are typically not open until they sign | ||||
| /// into something or pick a profile. As of right now users should all be open until qlaunch is | ||||
| /// booting | ||||
| size_t ProfileManager::GetOpenUserCount() const { | ||||
|  | ||||
| std::size_t ProfileManager::GetOpenUserCount() const { | ||||
|     return std::count_if(profiles.begin(), profiles.end(), | ||||
|                          [](const ProfileInfo& p) { return p.is_open; }); | ||||
| } | ||||
| @@ -206,7 +209,7 @@ UUID ProfileManager::GetLastOpenedUser() const { | ||||
| } | ||||
|  | ||||
| /// Returns the user's profile base and the unknown arbitrary data. | ||||
| bool ProfileManager::GetProfileBaseAndData(boost::optional<size_t> index, ProfileBase& profile, | ||||
| bool ProfileManager::GetProfileBaseAndData(boost::optional<std::size_t> index, ProfileBase& profile, | ||||
|                                            ProfileData& data) const { | ||||
|     if (GetProfileBase(index, profile)) { | ||||
|         data = profiles[index.get()].data; | ||||
|   | ||||
| @@ -12,8 +12,8 @@ | ||||
| #include "core/hle/result.h" | ||||
|  | ||||
| namespace Service::Account { | ||||
| constexpr size_t MAX_USERS = 8; | ||||
| constexpr size_t MAX_DATA = 128; | ||||
| constexpr std::size_t MAX_USERS = 8; | ||||
| constexpr std::size_t MAX_DATA = 128; | ||||
| constexpr u128 INVALID_UUID{{0, 0}}; | ||||
|  | ||||
| struct UUID { | ||||
| @@ -87,18 +87,18 @@ public: | ||||
|     ResultCode AddUser(const ProfileInfo& user); | ||||
|     ResultCode CreateNewUser(UUID uuid, const ProfileUsername& username); | ||||
|     ResultCode CreateNewUser(UUID uuid, const std::string& username); | ||||
|     boost::optional<size_t> GetUserIndex(const UUID& uuid) const; | ||||
|     boost::optional<size_t> GetUserIndex(const ProfileInfo& user) const; | ||||
|     bool GetProfileBase(boost::optional<size_t> index, ProfileBase& profile) const; | ||||
|     boost::optional<std::size_t> GetUserIndex(const UUID& uuid) const; | ||||
|     boost::optional<std::size_t> GetUserIndex(const ProfileInfo& user) const; | ||||
|     bool GetProfileBase(boost::optional<std::size_t> index, ProfileBase& profile) const; | ||||
|     bool GetProfileBase(UUID uuid, ProfileBase& profile) const; | ||||
|     bool GetProfileBase(const ProfileInfo& user, ProfileBase& profile) const; | ||||
|     bool GetProfileBaseAndData(boost::optional<size_t> index, ProfileBase& profile, | ||||
|     bool GetProfileBaseAndData(boost::optional<std::size_t> index, ProfileBase& profile, | ||||
|                                ProfileData& data) const; | ||||
|     bool GetProfileBaseAndData(UUID uuid, ProfileBase& profile, ProfileData& data) const; | ||||
|     bool GetProfileBaseAndData(const ProfileInfo& user, ProfileBase& profile, | ||||
|                                ProfileData& data) const; | ||||
|     size_t GetUserCount() const; | ||||
|     size_t GetOpenUserCount() const; | ||||
|     std::size_t GetUserCount() const; | ||||
|     std::size_t GetOpenUserCount() const; | ||||
|     bool UserExists(UUID uuid) const; | ||||
|     void OpenUser(UUID uuid); | ||||
|     void CloseUser(UUID uuid); | ||||
| @@ -110,9 +110,9 @@ public: | ||||
|  | ||||
| private: | ||||
|     std::array<ProfileInfo, MAX_USERS> profiles{}; | ||||
|     size_t user_count = 0; | ||||
|     boost::optional<size_t> AddToProfiles(const ProfileInfo& profile); | ||||
|     bool RemoveProfileAtIndex(size_t index); | ||||
|     std::size_t user_count = 0; | ||||
|     boost::optional<std::size_t> AddToProfiles(const ProfileInfo& profile); | ||||
|     bool RemoveProfileAtIndex(std::size_t index); | ||||
|     UUID last_opened_user{INVALID_UUID}; | ||||
| }; | ||||
|  | ||||
|   | ||||
| @@ -456,7 +456,7 @@ private: | ||||
|         IPC::RequestParser rp{ctx}; | ||||
|  | ||||
|         const u64 offset{rp.Pop<u64>()}; | ||||
|         const size_t size{ctx.GetWriteBufferSize()}; | ||||
|         const std::size_t size{ctx.GetWriteBufferSize()}; | ||||
|  | ||||
|         ASSERT(offset + size <= buffer.size()); | ||||
|  | ||||
|   | ||||
| @@ -61,7 +61,7 @@ private: | ||||
|  | ||||
|     bool Decoder_DecodeInterleaved(u32& consumed, u32& sample_count, const std::vector<u8>& input, | ||||
|                                    std::vector<opus_int16>& output) { | ||||
|         size_t raw_output_sz = output.size() * sizeof(opus_int16); | ||||
|         std::size_t raw_output_sz = output.size() * sizeof(opus_int16); | ||||
|         if (sizeof(OpusHeader) > input.size()) | ||||
|             return false; | ||||
|         OpusHeader hdr{}; | ||||
| @@ -96,7 +96,7 @@ private: | ||||
|     u32 channel_count; | ||||
| }; | ||||
|  | ||||
| static size_t WorkerBufferSize(u32 channel_count) { | ||||
| static std::size_t WorkerBufferSize(u32 channel_count) { | ||||
|     ASSERT_MSG(channel_count == 1 || channel_count == 2, "Invalid channel count"); | ||||
|     return opus_decoder_get_size(static_cast<int>(channel_count)); | ||||
| } | ||||
| @@ -129,7 +129,7 @@ void HwOpus::OpenOpusDecoder(Kernel::HLERequestContext& ctx) { | ||||
|                "Invalid sample rate"); | ||||
|     ASSERT_MSG(channel_count == 1 || channel_count == 2, "Invalid channel count"); | ||||
|  | ||||
|     size_t worker_sz = WorkerBufferSize(channel_count); | ||||
|     std::size_t worker_sz = WorkerBufferSize(channel_count); | ||||
|     ASSERT_MSG(buffer_sz < worker_sz, "Worker buffer too large"); | ||||
|     std::unique_ptr<OpusDecoder, OpusDeleter> decoder{ | ||||
|         static_cast<OpusDecoder*>(operator new(worker_sz))}; | ||||
|   | ||||
| @@ -89,7 +89,7 @@ private: | ||||
|         controller_header.left_color_body = JOYCON_BODY_NEON_BLUE; | ||||
|         controller_header.left_color_buttons = JOYCON_BUTTONS_NEON_BLUE; | ||||
|  | ||||
|         for (size_t controller = 0; controller < mem.controllers.size(); controller++) { | ||||
|         for (std::size_t controller = 0; controller < mem.controllers.size(); controller++) { | ||||
|             for (auto& layout : mem.controllers[controller].layouts) { | ||||
|                 layout.header.num_entries = HID_NUM_ENTRIES; | ||||
|                 layout.header.max_entry_index = HID_NUM_ENTRIES - 1; | ||||
|   | ||||
| @@ -99,7 +99,7 @@ private: | ||||
|         std::string thread; | ||||
|         while (addr < end_addr) { | ||||
|             const Field field{static_cast<Field>(Memory::Read8(addr++))}; | ||||
|             const size_t length{Memory::Read8(addr++)}; | ||||
|             const std::size_t length{Memory::Read8(addr++)}; | ||||
|  | ||||
|             if (static_cast<Field>(Memory::Read8(addr)) == Field::Skip) { | ||||
|                 ++addr; | ||||
|   | ||||
| @@ -78,7 +78,7 @@ enum class LoadState : u32 { | ||||
| }; | ||||
|  | ||||
| static void DecryptSharedFont(const std::vector<u32>& input, std::vector<u8>& output, | ||||
|                               size_t& offset) { | ||||
|                               std::size_t& offset) { | ||||
|     ASSERT_MSG(offset + (input.size() * sizeof(u32)) < SHARED_FONT_MEM_SIZE, | ||||
|                "Shared fonts exceeds 17mb!"); | ||||
|     ASSERT_MSG(input[0] == EXPECTED_MAGIC, "Failed to derive key, unexpected magic number"); | ||||
| @@ -95,7 +95,7 @@ static void DecryptSharedFont(const std::vector<u32>& input, std::vector<u8>& ou | ||||
| } | ||||
|  | ||||
| static void EncryptSharedFont(const std::vector<u8>& input, std::vector<u8>& output, | ||||
|                               size_t& offset) { | ||||
|                               std::size_t& offset) { | ||||
|     ASSERT_MSG(offset + input.size() + 8 < SHARED_FONT_MEM_SIZE, "Shared fonts exceeds 17mb!"); | ||||
|     const u32 KEY = EXPECTED_MAGIC ^ EXPECTED_RESULT; | ||||
|     std::memcpy(output.data() + offset, &EXPECTED_RESULT, sizeof(u32)); // Magic header | ||||
| @@ -113,7 +113,7 @@ static u32 GetU32Swapped(const u8* data) { | ||||
| } | ||||
|  | ||||
| struct PL_U::Impl { | ||||
|     const FontRegion& GetSharedFontRegion(size_t index) const { | ||||
|     const FontRegion& GetSharedFontRegion(std::size_t index) const { | ||||
|         if (index >= shared_font_regions.size() || shared_font_regions.empty()) { | ||||
|             // No font fallback | ||||
|             return EMPTY_REGION; | ||||
| @@ -126,7 +126,7 @@ struct PL_U::Impl { | ||||
|         // based on the shared memory dump | ||||
|         unsigned cur_offset = 0; | ||||
|  | ||||
|         for (size_t i = 0; i < SHARED_FONTS.size(); i++) { | ||||
|         for (std::size_t i = 0; i < SHARED_FONTS.size(); i++) { | ||||
|             // Out of shared fonts/invalid font | ||||
|             if (GetU32Swapped(input.data() + cur_offset) != EXPECTED_RESULT) { | ||||
|                 break; | ||||
| @@ -162,7 +162,7 @@ PL_U::PL_U() : ServiceFramework("pl:u"), impl{std::make_unique<Impl>()} { | ||||
|     RegisterHandlers(functions); | ||||
|     // Attempt to load shared font data from disk | ||||
|     const auto nand = FileSystem::GetSystemNANDContents(); | ||||
|     size_t offset = 0; | ||||
|     std::size_t offset = 0; | ||||
|     // Rebuild shared fonts from data ncas | ||||
|     if (nand->HasEntry(static_cast<u64>(FontArchives::Standard), | ||||
|                        FileSys::ContentRecordType::Data)) { | ||||
| @@ -344,7 +344,7 @@ void PL_U::GetSharedFontInOrderOfPriority(Kernel::HLERequestContext& ctx) { | ||||
|     std::vector<u32> font_sizes; | ||||
|  | ||||
|     // TODO(ogniK): Have actual priority order | ||||
|     for (size_t i = 0; i < impl->shared_font_regions.size(); i++) { | ||||
|     for (std::size_t i = 0; i < impl->shared_font_regions.size(); i++) { | ||||
|         font_codes.push_back(static_cast<u32>(i)); | ||||
|         auto region = impl->GetSharedFontRegion(i); | ||||
|         font_offsets.push_back(region.offset); | ||||
|   | ||||
| @@ -71,7 +71,7 @@ u32 nvhost_as_gpu::AllocateSpace(const std::vector<u8>& input, std::vector<u8>& | ||||
| } | ||||
|  | ||||
| u32 nvhost_as_gpu::Remap(const std::vector<u8>& input, std::vector<u8>& output) { | ||||
|     size_t num_entries = input.size() / sizeof(IoctlRemapEntry); | ||||
|     std::size_t num_entries = input.size() / sizeof(IoctlRemapEntry); | ||||
|  | ||||
|     LOG_WARNING(Service_NVDRV, "(STUBBED) called, num_entries=0x{:X}", num_entries); | ||||
|  | ||||
|   | ||||
| @@ -23,7 +23,7 @@ | ||||
|  | ||||
| namespace Service::NVFlinger { | ||||
|  | ||||
| constexpr size_t SCREEN_REFRESH_RATE = 60; | ||||
| constexpr std::size_t SCREEN_REFRESH_RATE = 60; | ||||
| constexpr u64 frame_ticks = static_cast<u64>(CoreTiming::BASE_CLOCK_RATE / SCREEN_REFRESH_RATE); | ||||
|  | ||||
| NVFlinger::NVFlinger() { | ||||
|   | ||||
| @@ -129,9 +129,9 @@ Kernel::SharedPtr<Kernel::ClientPort> ServiceFrameworkBase::CreatePort() { | ||||
|     return client_port; | ||||
| } | ||||
|  | ||||
| void ServiceFrameworkBase::RegisterHandlersBase(const FunctionInfoBase* functions, size_t n) { | ||||
| void ServiceFrameworkBase::RegisterHandlersBase(const FunctionInfoBase* functions, std::size_t n) { | ||||
|     handlers.reserve(handlers.size() + n); | ||||
|     for (size_t i = 0; i < n; ++i) { | ||||
|     for (std::size_t i = 0; i < n; ++i) { | ||||
|         // Usually this array is sorted by id already, so hint to insert at the end | ||||
|         handlers.emplace_hint(handlers.cend(), functions[i].expected_header, functions[i]); | ||||
|     } | ||||
|   | ||||
| @@ -88,7 +88,7 @@ private: | ||||
|     ServiceFrameworkBase(const char* service_name, u32 max_sessions, InvokerFn* handler_invoker); | ||||
|     ~ServiceFrameworkBase(); | ||||
|  | ||||
|     void RegisterHandlersBase(const FunctionInfoBase* functions, size_t n); | ||||
|     void RegisterHandlersBase(const FunctionInfoBase* functions, std::size_t n); | ||||
|     void ReportUnimplementedFunction(Kernel::HLERequestContext& ctx, const FunctionInfoBase* info); | ||||
|  | ||||
|     /// Identifier string used to connect to the service. | ||||
| @@ -152,7 +152,7 @@ protected: | ||||
|         : ServiceFrameworkBase(service_name, max_sessions, Invoker) {} | ||||
|  | ||||
|     /// Registers handlers in the service. | ||||
|     template <size_t N> | ||||
|     template <std::size_t N> | ||||
|     void RegisterHandlers(const FunctionInfo (&functions)[N]) { | ||||
|         RegisterHandlers(functions, N); | ||||
|     } | ||||
| @@ -161,7 +161,7 @@ protected: | ||||
|      * Registers handlers in the service. Usually prefer using the other RegisterHandlers | ||||
|      * overload in order to avoid needing to specify the array size. | ||||
|      */ | ||||
|     void RegisterHandlers(const FunctionInfo* functions, size_t n) { | ||||
|     void RegisterHandlers(const FunctionInfo* functions, std::size_t n) { | ||||
|         RegisterHandlersBase(functions, n); | ||||
|     } | ||||
|  | ||||
|   | ||||
| @@ -32,21 +32,21 @@ constexpr std::array<LanguageCode, 17> available_language_codes = {{ | ||||
|     LanguageCode::ZH_HANT, | ||||
| }}; | ||||
|  | ||||
| constexpr size_t pre4_0_0_max_entries = 0xF; | ||||
| constexpr size_t post4_0_0_max_entries = 0x40; | ||||
| constexpr std::size_t pre4_0_0_max_entries = 0xF; | ||||
| constexpr std::size_t post4_0_0_max_entries = 0x40; | ||||
|  | ||||
| LanguageCode GetLanguageCodeFromIndex(size_t index) { | ||||
| LanguageCode GetLanguageCodeFromIndex(std::size_t index) { | ||||
|     return available_language_codes.at(index); | ||||
| } | ||||
|  | ||||
| template <size_t size> | ||||
| template <std::size_t size> | ||||
| static std::array<LanguageCode, size> MakeLanguageCodeSubset() { | ||||
|     std::array<LanguageCode, size> arr; | ||||
|     std::copy_n(available_language_codes.begin(), size, arr.begin()); | ||||
|     return arr; | ||||
| } | ||||
|  | ||||
| static void PushResponseLanguageCode(Kernel::HLERequestContext& ctx, size_t max_size) { | ||||
| static void PushResponseLanguageCode(Kernel::HLERequestContext& ctx, std::size_t max_size) { | ||||
|     IPC::ResponseBuilder rb{ctx, 3}; | ||||
|     rb.Push(RESULT_SUCCESS); | ||||
|     if (available_language_codes.size() > max_size) | ||||
|   | ||||
| @@ -28,7 +28,7 @@ enum class LanguageCode : u64 { | ||||
|     ZH_HANS = 0x00736E61482D687A, | ||||
|     ZH_HANT = 0x00746E61482D687A, | ||||
| }; | ||||
| LanguageCode GetLanguageCodeFromIndex(size_t idx); | ||||
| LanguageCode GetLanguageCodeFromIndex(std::size_t idx); | ||||
|  | ||||
| class SET final : public ServiceFramework<SET> { | ||||
| public: | ||||
|   | ||||
| @@ -21,7 +21,7 @@ Module::Interface::~Interface() = default; | ||||
| void Module::Interface::GetRandomBytes(Kernel::HLERequestContext& ctx) { | ||||
|     IPC::RequestParser rp{ctx}; | ||||
|  | ||||
|     size_t size = ctx.GetWriteBufferSize(); | ||||
|     std::size_t size = ctx.GetWriteBufferSize(); | ||||
|  | ||||
|     std::vector<u8> data(size); | ||||
|     std::generate(data.begin(), data.end(), std::rand); | ||||
|   | ||||
| @@ -38,7 +38,7 @@ static_assert(sizeof(DisplayInfo) == 0x60, "DisplayInfo has wrong size"); | ||||
| class Parcel { | ||||
| public: | ||||
|     // This default size was chosen arbitrarily. | ||||
|     static constexpr size_t DefaultBufferSize = 0x40; | ||||
|     static constexpr std::size_t DefaultBufferSize = 0x40; | ||||
|     Parcel() : buffer(DefaultBufferSize) {} | ||||
|     explicit Parcel(std::vector<u8> data) : buffer(std::move(data)) {} | ||||
|     virtual ~Parcel() = default; | ||||
| @@ -66,7 +66,7 @@ public: | ||||
|         return val; | ||||
|     } | ||||
|  | ||||
|     std::vector<u8> ReadBlock(size_t length) { | ||||
|     std::vector<u8> ReadBlock(std::size_t length) { | ||||
|         ASSERT(read_index + length <= buffer.size()); | ||||
|         const u8* const begin = buffer.data() + read_index; | ||||
|         const u8* const end = begin + length; | ||||
| @@ -156,8 +156,8 @@ private: | ||||
|     static_assert(sizeof(Header) == 16, "ParcelHeader has wrong size"); | ||||
|  | ||||
|     std::vector<u8> buffer; | ||||
|     size_t read_index = 0; | ||||
|     size_t write_index = 0; | ||||
|     std::size_t read_index = 0; | ||||
|     std::size_t write_index = 0; | ||||
| }; | ||||
|  | ||||
| class NativeWindow : public Parcel { | ||||
|   | ||||
		Reference in New Issue
	
	Block a user
	 fearlessTobi
					fearlessTobi