Commit d152537

knight4u32 authored and Ivan committed
sys_lwcond/cond/ppu_thread: Respect scheduler in various syscalls
1 parent 5d4e873 commit d152537
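
The change wraps each signalling syscall in a retry loop: the PPU thread first checks whether it is being stopped, marks itself as waiting, performs the signal under the condition variable's mutex, and repeats the whole attempt if the scheduler suspended it mid-operation. Below is a minimal, self-contained sketch of that retry pattern, not RPCS3 code; the names thread_stopping, thread_suspended, queue_mutex and deliver_signal are illustrative stand-ins for ppu.test_stopped(), ppu.state & cpu_flag::suspend, cond.mutex->mutex and the actual wake-up work.

#include <atomic>
#include <mutex>

// Hypothetical stand-ins, not RPCS3 symbols.
std::atomic<bool> thread_stopping{false};  // stand-in for ppu.test_stopped()
std::atomic<bool> thread_suspended{false}; // stand-in for ppu.state & cpu_flag::suspend
std::mutex        queue_mutex;             // stand-in for cond.mutex->mutex

void deliver_signal() { /* pick a waiter and hand it the mutex */ }

// Returns false if the emulated thread must bail out and restart the syscall later.
bool signal_respecting_scheduler()
{
	while (true)
	{
		if (thread_stopping.load())
		{
			// Equivalent of setting cpu_flag::again and returning.
			return false;
		}

		bool finished = true;

		{
			std::lock_guard lock(queue_mutex);

			if (thread_suspended.load())
			{
				// Another signal suspended this thread mid-syscall: do not touch
				// the queue now, retry once the scheduler lets the thread run again.
				finished = false;
			}
			else
			{
				deliver_signal();
			}
		}

		if (!finished)
		{
			continue;
		}

		return true;
	}
}

The real syscalls additionally re-check ppu.test_stopped() and re-mark cpu_flag::wait on every iteration, as the diff below shows.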

File tree

3 files changed: +376 -176 lines changed

rpcs3/Emu/Cell/lv2/sys_cond.cpp

Lines changed: 168 additions & 65 deletions
@@ -143,36 +143,70 @@ error_code sys_cond_signal(ppu_thread& ppu, u32 cond_id)
 
 	sys_cond.trace("sys_cond_signal(cond_id=0x%x)", cond_id);
 
-	const auto cond = idm::check<lv2_obj, lv2_cond>(cond_id, [&, notify = lv2_obj::notify_all_t()](lv2_cond& cond)
+	while (true)
 	{
-		if (atomic_storage<ppu_thread*>::load(cond.sq))
+		if (ppu.test_stopped())
 		{
-			std::lock_guard lock(cond.mutex->mutex);
+			ppu.state += cpu_flag::again;
+			return {};
+		}
 
-			if (const auto cpu = cond.schedule<ppu_thread>(cond.sq, cond.mutex->protocol))
+		bool finished = true;
+
+		ppu.state += cpu_flag::wait;
+
+		const auto cond = idm::check<lv2_obj, lv2_cond>(cond_id, [&, notify = lv2_obj::notify_all_t()](lv2_cond& cond)
+		{
+			if (atomic_storage<ppu_thread*>::load(cond.sq))
 			{
-				if (static_cast<ppu_thread*>(cpu)->state & cpu_flag::again)
+				std::lock_guard lock(cond.mutex->mutex);
+
+				if (ppu.state & cpu_flag::suspend)
 				{
-					ppu.state += cpu_flag::again;
+					// Test if another signal caused the current thread to be suspended, in which case it needs to wait until the thread wakes up (otherwise the signal may cause unexpected results)
+					finished = false;
 					return;
 				}
 
-				// TODO: Is EBUSY returned after reqeueing, on sys_cond_destroy?
+				if (const auto cpu = cond.schedule<ppu_thread>(cond.sq, cond.mutex->protocol))
+				{
+					if (static_cast<ppu_thread*>(cpu)->state & cpu_flag::again)
+					{
+						ppu.state += cpu_flag::again;
+						return;
+					}
+
+					// TODO: Is EBUSY returned after reqeueing, on sys_cond_destroy?
 
-				if (cond.mutex->try_own(*cpu))
+					if (cond.mutex->try_own(*cpu))
+					{
+						cond.awake(cpu);
+					}
+				}
+			}
+			else
+			{
+				cond.mutex->mutex.lock_unlock();
+
+				if (ppu.state & cpu_flag::suspend)
 				{
-					cond.awake(cpu);
+					finished = false;
 				}
 			}
+		});
+
+		if (!finished)
+		{
+			continue;
 		}
-	});
 
-	if (!cond)
-	{
-		return CELL_ESRCH;
-	}
+		if (!cond)
+		{
+			return CELL_ESRCH;
+		}
 
-	return CELL_OK;
+		return CELL_OK;
+	}
 }
 
 error_code sys_cond_signal_all(ppu_thread& ppu, u32 cond_id)
@@ -181,46 +215,80 @@ error_code sys_cond_signal_all(ppu_thread& ppu, u32 cond_id)
 
 	sys_cond.trace("sys_cond_signal_all(cond_id=0x%x)", cond_id);
 
-	const auto cond = idm::check<lv2_obj, lv2_cond>(cond_id, [&, notify = lv2_obj::notify_all_t()](lv2_cond& cond)
+	while (true)
 	{
-		if (atomic_storage<ppu_thread*>::load(cond.sq))
+		if (ppu.test_stopped())
 		{
-			std::lock_guard lock(cond.mutex->mutex);
+			ppu.state += cpu_flag::again;
+			return {};
+		}
 
-			for (auto cpu = +cond.sq; cpu; cpu = cpu->next_cpu)
+		bool finished = true;
+
+		ppu.state += cpu_flag::wait;
+
+		const auto cond = idm::check<lv2_obj, lv2_cond>(cond_id, [&, notify = lv2_obj::notify_all_t()](lv2_cond& cond)
+		{
+			if (atomic_storage<ppu_thread*>::load(cond.sq))
 			{
-				if (cpu->state & cpu_flag::again)
+				std::lock_guard lock(cond.mutex->mutex);
+
+				if (ppu.state & cpu_flag::suspend)
 				{
-					ppu.state += cpu_flag::again;
+					// Test if another signal caused the current thread to be suspended, in which case it needs to wait until the thread wakes up (otherwise the signal may cause unexpected results)
+					finished = false;
 					return;
 				}
-			}
 
-			cpu_thread* result = nullptr;
-			auto sq = cond.sq;
-			atomic_storage<ppu_thread*>::release(cond.sq, nullptr);
+				for (auto cpu = +cond.sq; cpu; cpu = cpu->next_cpu)
+				{
+					if (cpu->state & cpu_flag::again)
+					{
+						ppu.state += cpu_flag::again;
+						return;
+					}
+				}
+
+				cpu_thread* result = nullptr;
+				auto sq = cond.sq;
+				atomic_storage<ppu_thread*>::release(cond.sq, nullptr);
 
-			while (const auto cpu = cond.schedule<ppu_thread>(sq, SYS_SYNC_PRIORITY))
-			{
-				if (cond.mutex->try_own(*cpu))
+				while (const auto cpu = cond.schedule<ppu_thread>(sq, SYS_SYNC_PRIORITY))
 				{
-					ensure(!std::exchange(result, cpu));
+					if (cond.mutex->try_own(*cpu))
+					{
+						ensure(!std::exchange(result, cpu));
+					}
 				}
-			}
 
-			if (result)
+				if (result)
+				{
+					cond.awake(result);
+				}
+			}
+			else
 			{
-				cond.awake(result);
+				cond.mutex->mutex.lock_unlock();
+
+				if (ppu.state & cpu_flag::suspend)
+				{
+					finished = false;
+				}
 			}
+		});
+
+		if (!finished)
+		{
+			continue;
 		}
-	});
 
-	if (!cond)
-	{
-		return CELL_ESRCH;
-	}
+		if (!cond)
+		{
+			return CELL_ESRCH;
+		}
 
-	return CELL_OK;
+		return CELL_OK;
+	}
 }
 
 error_code sys_cond_signal_to(ppu_thread& ppu, u32 cond_id, u32 thread_id)
@@ -229,53 +297,88 @@ error_code sys_cond_signal_to(ppu_thread& ppu, u32 cond_id, u32 thread_id)
 
 	sys_cond.trace("sys_cond_signal_to(cond_id=0x%x, thread_id=0x%x)", cond_id, thread_id);
 
-	const auto cond = idm::check<lv2_obj, lv2_cond>(cond_id, [&, notify = lv2_obj::notify_all_t()](lv2_cond& cond) -> int
+	while (true)
 	{
-		if (!idm::check_unlocked<named_thread<ppu_thread>>(thread_id))
+		if (ppu.test_stopped())
 		{
-			return -1;
+			ppu.state += cpu_flag::again;
+			return {};
 		}
 
-		if (atomic_storage<ppu_thread*>::load(cond.sq))
+		bool finished = true;
+
+		ppu.state += cpu_flag::wait;
+
+		const auto cond = idm::check<lv2_obj, lv2_cond>(cond_id, [&, notify = lv2_obj::notify_all_t()](lv2_cond& cond)
 		{
-			std::lock_guard lock(cond.mutex->mutex);
+			if (!idm::check_unlocked<named_thread<ppu_thread>>(thread_id))
+			{
+				return -1;
+			}
 
-			for (auto cpu = +cond.sq; cpu; cpu = cpu->next_cpu)
+			if (atomic_storage<ppu_thread*>::load(cond.sq))
 			{
-				if (cpu->id == thread_id)
+				std::lock_guard lock(cond.mutex->mutex);
+
+				if (ppu.state & cpu_flag::suspend)
 				{
-					if (static_cast<ppu_thread*>(cpu)->state & cpu_flag::again)
+					// Test if another signal caused the current thread to be suspended, in which case it needs to wait until the thread wakes up (otherwise the signal may cause unexpected results)
+					finished = false;
+					return 0;
+				}
+
+				for (auto cpu = +cond.sq; cpu; cpu = cpu->next_cpu)
+				{
+					if (cpu->id == thread_id)
 					{
-						ppu.state += cpu_flag::again;
-						return 0;
-					}
+						if (static_cast<ppu_thread*>(cpu)->state & cpu_flag::again)
+						{
+							ppu.state += cpu_flag::again;
+							return 0;
+						}
 
-					ensure(cond.unqueue(cond.sq, cpu));
+						ensure(cond.unqueue(cond.sq, cpu));
 
-					if (cond.mutex->try_own(*cpu))
-					{
-						cond.awake(cpu);
+						if (cond.mutex->try_own(*cpu))
+						{
+							cond.awake(cpu);
+						}
+
+						return 1;
 					}
+				}
+			}
+			else
+			{
+				cond.mutex->mutex.lock_unlock();
 
-					return 1;
+				if (ppu.state & cpu_flag::suspend)
+				{
+					finished = false;
+					return 0;
 				}
 			}
+
+			return 0;
+		});
+
+		if (!finished)
+		{
+			continue;
 		}
 
-		return 0;
-	});
+		if (!cond || cond.ret == -1)
+		{
+			return CELL_ESRCH;
+		}
 
-	if (!cond || cond.ret == -1)
-	{
-		return CELL_ESRCH;
-	}
+		if (!cond.ret)
+		{
+			return not_an_error(CELL_EPERM);
+		}
 
-	if (!cond.ret)
-	{
-		return not_an_error(CELL_EPERM);
+		return CELL_OK;
 	}
-
-	return CELL_OK;
 }
 
 error_code sys_cond_wait(ppu_thread& ppu, u32 cond_id, u64 timeout)