hle: kernel: TimeManager: Simplify to not rely on previous EmuThreadHandle implementation.
parent bb966d3e33
commit c0f5830323
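What the change amounts to: the old TimeManager handed each caller a kernel Handle for the armed timeout, resolved threads back through the global handle table in its callback, and kept a cancelled_events map so stale handles could not wake the wrong thread. After this commit the KThread pointer itself is the timer token: it is stored as the CoreTiming event's user data, converted back in the callback, and used directly as the key when cancelling. The toy program below uses invented names (ToyThread, ToyTimeManager), not yuzu APIs; it is only a sketch of that pointer-as-token scheme, and the real TimeManager leaves the pending-event bookkeeping to Core::Timing instead of keeping a map of its own.

// A minimal, self-contained sketch (invented names, not yuzu code) of the scheme this
// commit adopts: the thread pointer itself is the timer token, so there is no handle
// table lookup and no cancellation map.
#include <cstdint>
#include <iostream>
#include <unordered_map>

struct ToyThread {
    const char* name;
    void Wakeup() { std::cout << name << " woken\n"; }
};

class ToyTimeManager {
public:
    // The pointer, reinterpreted as an integer, is the event's key / user data.
    void ScheduleTimeEvent(ToyThread* thread, std::int64_t nanoseconds) {
        if (nanoseconds > 0) {
            pending[reinterpret_cast<std::uintptr_t>(thread)] = nanoseconds;
        }
    }

    // Cancelling is keyed the same way and is harmless when nothing was armed.
    void UnscheduleTimeEvent(ToyThread* thread) {
        pending.erase(reinterpret_cast<std::uintptr_t>(thread));
    }

    // When an event fires, the stored key converts back to the original pointer.
    void FireAll() {
        for (const auto& entry : pending) {
            reinterpret_cast<ToyThread*>(entry.first)->Wakeup();
        }
        pending.clear();
    }

private:
    std::unordered_map<std::uintptr_t, std::int64_t> pending;
};

int main() {
    ToyThread thread{"thread A"};
    ToyTimeManager time_manager;

    time_manager.UnscheduleTimeEvent(&thread);     // no-op: nothing armed yet
    time_manager.ScheduleTimeEvent(&thread, 1000); // armed
    time_manager.UnscheduleTimeEvent(&thread);     // cancelled before firing
    time_manager.ScheduleTimeEvent(&thread, 2000); // armed again
    time_manager.FireAll();                        // prints "thread A woken"
}

Because cancellation is keyed by the same pointer and, in this sketch, is a no-op when nothing is armed, the callers in the hunks below can drop their InvalidHandle guards; the real code assumes the equivalent behaviour from Core::Timing::UnscheduleEvent.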
@@ -232,10 +232,9 @@ ResultCode KAddressArbiter::SignalAndModifyByWaitingCountIfEqual(VAddr addr, s32
 ResultCode KAddressArbiter::WaitIfLessThan(VAddr addr, s32 value, bool decrement, s64 timeout) {
     // Prepare to wait.
     KThread* cur_thread = kernel.CurrentScheduler()->GetCurrentThread();
-    Handle timer = InvalidHandle;

     {
-        KScopedSchedulerLockAndSleep slp(kernel, timer, cur_thread, timeout);
+        KScopedSchedulerLockAndSleep slp{kernel, cur_thread, timeout};

         // Check that the thread isn't terminating.
         if (cur_thread->IsTerminationRequested()) {
@@ -280,10 +279,7 @@ ResultCode KAddressArbiter::WaitIfLessThan(VAddr addr, s32 value, bool decrement
     }

     // Cancel the timer wait.
-    if (timer != InvalidHandle) {
-        auto& time_manager = kernel.TimeManager();
-        time_manager.UnscheduleTimeEvent(timer);
-    }
+    kernel.TimeManager().UnscheduleTimeEvent(cur_thread);

     // Remove from the address arbiter.
     {
@@ -303,10 +299,9 @@ ResultCode KAddressArbiter::WaitIfLessThan(VAddr addr, s32 value, bool decrement
 ResultCode KAddressArbiter::WaitIfEqual(VAddr addr, s32 value, s64 timeout) {
     // Prepare to wait.
     KThread* cur_thread = kernel.CurrentScheduler()->GetCurrentThread();
-    Handle timer = InvalidHandle;

     {
-        KScopedSchedulerLockAndSleep slp(kernel, timer, cur_thread, timeout);
+        KScopedSchedulerLockAndSleep slp{kernel, cur_thread, timeout};

         // Check that the thread isn't terminating.
         if (cur_thread->IsTerminationRequested()) {
@@ -344,10 +339,7 @@ ResultCode KAddressArbiter::WaitIfEqual(VAddr addr, s32 value, s64 timeout) {
     }

     // Cancel the timer wait.
-    if (timer != InvalidHandle) {
-        auto& time_manager = kernel.TimeManager();
-        time_manager.UnscheduleTimeEvent(timer);
-    }
+    kernel.TimeManager().UnscheduleTimeEvent(cur_thread);

     // Remove from the address arbiter.
     {
@@ -258,10 +258,9 @@ void KConditionVariable::Signal(u64 cv_key, s32 count) {
 ResultCode KConditionVariable::Wait(VAddr addr, u64 key, u32 value, s64 timeout) {
     // Prepare to wait.
     KThread* cur_thread = kernel.CurrentScheduler()->GetCurrentThread();
-    Handle timer = InvalidHandle;

     {
-        KScopedSchedulerLockAndSleep slp(kernel, timer, cur_thread, timeout);
+        KScopedSchedulerLockAndSleep slp{kernel, cur_thread, timeout};

         // Set the synced object.
         cur_thread->SetSyncedObject(nullptr, Svc::ResultTimedOut);
@@ -322,10 +321,7 @@ ResultCode KConditionVariable::Wait(VAddr addr, u64 key, u32 value, s64 timeout)
     }

     // Cancel the timer wait.
-    if (timer != InvalidHandle) {
-        auto& time_manager = kernel.TimeManager();
-        time_manager.UnscheduleTimeEvent(timer);
-    }
+    kernel.TimeManager().UnscheduleTimeEvent(cur_thread);

     // Remove from the condition variable.
     {
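The two address-arbiter waits and the condition-variable wait above all receive the same pair of edits: the local Handle timer and its InvalidHandle bookkeeping disappear, KScopedSchedulerLockAndSleep is constructed from just the thread and the timeout, and the cleanup shrinks to a single unconditional UnscheduleTimeEvent(cur_thread). A minimal before/after of the cleanup step alone, with stub types standing in for the kernel ones:

// Before/after of the cleanup step only; StubThread and StubTimeManager are invented.
#include <cstdint>

using Handle = std::uint32_t;
constexpr Handle InvalidHandle = 0;

struct StubThread {};

struct StubTimeManager {
    void UnscheduleTimeEvent(Handle) {}      // old interface: keyed by handle
    void UnscheduleTimeEvent(StubThread*) {} // new interface: keyed by thread pointer
};

// Old shape: the caller tracked a Handle and guarded against "no timer was armed".
void CleanupOld(StubTimeManager& time_manager, Handle timer) {
    if (timer != InvalidHandle) {
        time_manager.UnscheduleTimeEvent(timer);
    }
}

// New shape: one unconditional call keyed on the current thread.
void CleanupNew(StubTimeManager& time_manager, StubThread* cur_thread) {
    time_manager.UnscheduleTimeEvent(cur_thread);
}

int main() {
    StubTimeManager time_manager;
    StubThread thread;
    CleanupOld(time_manager, InvalidHandle); // skipped: nothing was armed
    CleanupNew(time_manager, &thread);       // always called; assumed harmless when idle
}

The unconditional form is only correct if unscheduling a timer that was never armed is harmless; the new TimeManager forwards straight to Core::Timing::UnscheduleEvent, which this sketch assumes tolerates exactly that.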
@@ -17,19 +17,16 @@ namespace Kernel {

 class KScopedSchedulerLockAndSleep {
 public:
-    explicit KScopedSchedulerLockAndSleep(KernelCore& kernel, Handle& event_handle, KThread* t,
-                                          s64 timeout)
-        : kernel(kernel), event_handle(event_handle), thread(t), timeout_tick(timeout) {
-        event_handle = InvalidHandle;
-
+    explicit KScopedSchedulerLockAndSleep(KernelCore& kernel, KThread* t, s64 timeout)
+        : kernel(kernel), thread(t), timeout_tick(timeout) {
         // Lock the scheduler.
         kernel.GlobalSchedulerContext().scheduler_lock.Lock();
     }

     ~KScopedSchedulerLockAndSleep() {
         // Register the sleep.
-        if (this->timeout_tick > 0) {
-            kernel.TimeManager().ScheduleTimeEvent(event_handle, this->thread, this->timeout_tick);
+        if (timeout_tick > 0) {
+            kernel.TimeManager().ScheduleTimeEvent(thread, timeout_tick);
         }

         // Unlock the scheduler.
@@ -37,12 +34,11 @@ public:
     }

     void CancelSleep() {
-        this->timeout_tick = 0;
+        timeout_tick = 0;
     }

 private:
     KernelCore& kernel;
-    Handle& event_handle;
     KThread* thread{};
     s64 timeout_tick{};
 };
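KScopedSchedulerLockAndSleep keeps its RAII shape: the constructor locks the scheduler, the destructor registers the timeout (unless CancelSleep() zeroed it) and only then unlocks, so the timer is always armed while the scheduler lock is still held. What changes is that the Handle& out-parameter is gone and the destructor hands the thread pointer straight to TimeManager. A runnable sketch of the same idea, using a std::mutex as a stand-in for the scheduler lock and a toy manager in place of Kernel::TimeManager:

// Invented stand-ins only; this mirrors the shape of the class above, not its real API.
#include <cstdint>
#include <iostream>
#include <mutex>

struct ToyThread {};

struct ToyTimeManager {
    bool armed = false;
    void ScheduleTimeEvent(ToyThread*, std::int64_t ns) { armed = (ns > 0); }
    void UnscheduleTimeEvent(ToyThread*) { armed = false; }
};

class ScopedLockAndSleep {
public:
    ScopedLockAndSleep(std::mutex& scheduler_lock, ToyTimeManager& tm, ToyThread* t,
                       std::int64_t timeout)
        : lock{scheduler_lock}, time_manager{tm}, thread{t}, timeout_tick{timeout} {
        lock.lock(); // "lock the scheduler"
    }

    ~ScopedLockAndSleep() {
        // Register the sleep while the lock is still held, then release it.
        if (timeout_tick > 0) {
            time_manager.ScheduleTimeEvent(thread, timeout_tick);
        }
        lock.unlock();
    }

    void CancelSleep() { timeout_tick = 0; }

private:
    std::mutex& lock;
    ToyTimeManager& time_manager;
    ToyThread* thread;
    std::int64_t timeout_tick;
};

int main() {
    std::mutex scheduler_lock;
    ToyTimeManager tm;
    ToyThread thread;

    {
        ScopedLockAndSleep slp{scheduler_lock, tm, &thread, 1000};
        // ... queue the thread and prepare to sleep ...
    } // destructor arms the timer, then unlocks
    std::cout << tm.armed << '\n';   // 1
    tm.UnscheduleTimeEvent(&thread); // callers now cancel unconditionally afterwards

    {
        ScopedLockAndSleep slp{scheduler_lock, tm, &thread, 1000};
        slp.CancelSleep(); // early bail-out: never arm the timer
    }
    std::cout << tm.armed << '\n';   // 0
}

The CancelSleep() call in the second scope mirrors what the wait paths presumably do when they bail out before actually sleeping (for instance when termination is requested), so no timer is ever armed for a wait that never happens.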
@@ -21,11 +21,10 @@ ResultCode KSynchronizationObject::Wait(KernelCore& kernel, s32* out_index,

     // Prepare for wait.
     KThread* thread = kernel.CurrentScheduler()->GetCurrentThread();
-    Handle timer = InvalidHandle;

     {
         // Setup the scheduling lock and sleep.
-        KScopedSchedulerLockAndSleep slp(kernel, timer, thread, timeout);
+        KScopedSchedulerLockAndSleep slp{kernel, thread, timeout};

         // Check if any of the objects are already signaled.
         for (auto i = 0; i < num_objects; ++i) {
@@ -90,10 +89,7 @@ ResultCode KSynchronizationObject::Wait(KernelCore& kernel, s32* out_index,
     thread->SetWaitObjectsForDebugging({});

     // Cancel the timer as needed.
-    if (timer != InvalidHandle) {
-        auto& time_manager = kernel.TimeManager();
-        time_manager.UnscheduleTimeEvent(timer);
-    }
+    kernel.TimeManager().UnscheduleTimeEvent(thread);

     // Get the wait result.
     ResultCode wait_result{RESULT_SUCCESS};
@@ -21,47 +21,27 @@ TimeManager::TimeManager(Core::System& system_) : system{system_} {
             std::shared_ptr<KThread> thread;
             {
                 std::lock_guard lock{mutex};
-                const auto proper_handle = static_cast<Handle>(thread_handle);
-                if (cancelled_events[proper_handle]) {
-                    return;
-                }
-                thread = system.Kernel().RetrieveThreadFromGlobalHandleTable(proper_handle);
+                thread = SharedFrom<KThread>(reinterpret_cast<KThread*>(thread_handle));
             }
-
-            if (thread) {
-                // Thread can be null if process has exited
-                thread->Wakeup();
-            }
+            thread->Wakeup();
         });
 }

-void TimeManager::ScheduleTimeEvent(Handle& event_handle, KThread* timetask, s64 nanoseconds) {
+void TimeManager::ScheduleTimeEvent(KThread* thread, s64 nanoseconds) {
     std::lock_guard lock{mutex};
-    event_handle = timetask->GetGlobalHandle();
     if (nanoseconds > 0) {
-        ASSERT(timetask);
-        ASSERT(timetask->GetState() != ThreadState::Runnable);
+        ASSERT(thread);
+        ASSERT(thread->GetState() != ThreadState::Runnable);
         system.CoreTiming().ScheduleEvent(std::chrono::nanoseconds{nanoseconds},
-                                          time_manager_event_type, event_handle);
-    } else {
-        event_handle = InvalidHandle;
+                                          time_manager_event_type,
+                                          reinterpret_cast<uintptr_t>(thread));
     }
-    cancelled_events[event_handle] = false;
 }

-void TimeManager::UnscheduleTimeEvent(Handle event_handle) {
+void TimeManager::UnscheduleTimeEvent(KThread* thread) {
     std::lock_guard lock{mutex};
-    if (event_handle == InvalidHandle) {
-        return;
-    }
-    system.CoreTiming().UnscheduleEvent(time_manager_event_type, event_handle);
-    cancelled_events[event_handle] = true;
-}
-
-void TimeManager::CancelTimeEvent(KThread* time_task) {
-    std::lock_guard lock{mutex};
-    const Handle event_handle = time_task->GetGlobalHandle();
-    UnscheduleTimeEvent(event_handle);
+    system.CoreTiming().UnscheduleEvent(time_manager_event_type,
+                                        reinterpret_cast<uintptr_t>(thread));
 }

 } // namespace Kernel
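The timer callback is where the simplification pays off: instead of casting the user data to a Handle, consulting cancelled_events, and fetching the thread from the global handle table, it reinterprets the stored uintptr_t back into a KThread* and re-acquires shared ownership via SharedFrom before calling Wakeup(). The standalone snippet below demonstrates the two conversions this relies on; KThreadLike is an invented stand-in, and its shared_from_this() call reflects an assumption about how yuzu's SharedFrom helper works rather than its actual definition.

// Standalone demonstration (not yuzu code) of the pointer <-> uintptr_t round trip
// plus recovering shared ownership from the raw pointer in the callback.
#include <cassert>
#include <cstdint>
#include <memory>

struct KThreadLike : std::enable_shared_from_this<KThreadLike> {
    bool woken = false;
    void Wakeup() { woken = true; }
};

int main() {
    auto thread = std::make_shared<KThreadLike>();

    // Scheduling side: the pointer is stored as the event's user data.
    const auto user_data = reinterpret_cast<std::uintptr_t>(thread.get());

    // Callback side: the round trip yields the original pointer...
    auto* raw = reinterpret_cast<KThreadLike*>(user_data);
    assert(raw == thread.get());

    // ...and shared ownership can be recovered before waking the thread.
    std::shared_ptr<KThreadLike> recovered = raw->shared_from_this();
    recovered->Wakeup();
    assert(thread->woken);
}

Scheduling and unscheduling pass the same reinterpret_cast<uintptr_t>(thread) value, so the (event type, user data) pair is enough to identify a pending wakeup without any extra map.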
@@ -31,18 +31,14 @@ public:
     explicit TimeManager(Core::System& system);

     /// Schedule a time event on `timetask` thread that will expire in 'nanoseconds'
-    /// returns a non-invalid handle in `event_handle` if correctly scheduled
-    void ScheduleTimeEvent(Handle& event_handle, KThread* timetask, s64 nanoseconds);
+    void ScheduleTimeEvent(KThread* time_task, s64 nanoseconds);

     /// Unschedule an existing time event
-    void UnscheduleTimeEvent(Handle event_handle);
-
-    void CancelTimeEvent(KThread* time_task);
+    void UnscheduleTimeEvent(KThread* thread);

 private:
     Core::System& system;
     std::shared_ptr<Core::Timing::EventType> time_manager_event_type;
-    std::unordered_map<Handle, bool> cancelled_events;
     std::mutex mutex;
 };