@@ -100,25 +100,66 @@ rust_scheduler::number_of_live_tasks() {
 void
 rust_scheduler::reap_dead_tasks(int id) {
     I(this, lock.lock_held_by_current_thread());
-    for (size_t i = 0; i < dead_tasks.length(); ) {
+    if (dead_tasks.length() == 0) {
+        return;
+    }
+
+    // First make a copy of the dead_tasks list with the lock held
+    size_t dead_tasks_len = dead_tasks.length();
+    rust_task **dead_tasks_copy = (rust_task**)
+        srv->malloc(sizeof(rust_task*) * dead_tasks_len);
+    for (size_t i = 0; i < dead_tasks_len; ++i) {
         rust_task *task = dead_tasks[i];
+        dead_tasks_copy[i] = task;
+    }
+
+    // Now drop the lock and futz with the tasks. This avoids establishing
+    // a sched->lock then task->lock locking order, which would be devastating
+    // to performance.
+    lock.unlock();
+
+    for (size_t i = 0; i < dead_tasks_len; ++i) {
+        rust_task *task = dead_tasks_copy[i];
         task->lock.lock();
         // Make sure this task isn't still running somewhere else...
         if (task->can_schedule(id)) {
             I(this, task->tasks_waiting_to_join.is_empty());
-            dead_tasks.remove(task);
             DLOG(this, task,
                  "deleting unreferenced dead task %s @0x%" PRIxPTR,
                  task->name, task);
             task->lock.unlock();
+        } else {
+            task->lock.unlock();
+            dead_tasks_copy[i] = NULL;
+        }
+    }
+
+    // Now grab the lock again and remove the tasks that were truly dead
+    lock.lock();
+
+    for (size_t i = 0; i < dead_tasks_len; ++i) {
+        rust_task *task = dead_tasks_copy[i];
+        if (task) {
+            dead_tasks.remove(task);
+        }
+    }
+
+    // Now unlock again because we have to actually free the dead tasks,
+    // and that may end up wanting to lock the task and sched locks
+    // again (via target->send)
+    lock.unlock();
+
+    for (size_t i = 0; i < dead_tasks_len; ++i) {
+        rust_task *task = dead_tasks_copy[i];
+        if (task) {
             task->deref();
             sync::decrement(kernel->live_tasks);
             kernel->wakeup_schedulers();
-            continue;
         }
-        task->lock.unlock();
-        ++i;
     }
+    srv->free(dead_tasks_copy);
+
+    lock.lock();
 }
 
 /**
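
For readers outside the runtime, here is a minimal, self-contained C++ sketch of the locking pattern this change introduces: snapshot the shared list while the container lock is held, drop that lock before touching any per-item locks, then re-take it to reconcile. The Scheduler, Task, and reap names below are hypothetical illustrations using std::mutex, not the runtime's actual API.

// Sketch only: snapshot under lock, inspect unlocked, reconcile under lock.
#include <algorithm>
#include <cstdio>
#include <memory>
#include <mutex>
#include <vector>

struct Task {
    std::mutex lock;       // per-task lock, analogous to task->lock above
    bool running = false;  // stands in for !task->can_schedule(id)
    int id;
    explicit Task(int i) : id(i) {}
};

struct Scheduler {
    std::mutex lock;                                // analogous to sched->lock
    std::vector<std::shared_ptr<Task>> dead_tasks;

    void reap() {
        // 1. Copy the dead list with the scheduler lock held.
        std::vector<std::shared_ptr<Task>> copy;
        {
            std::lock_guard<std::mutex> g(lock);
            copy = dead_tasks;
        }

        // 2. Inspect each task with only its own lock held. Taking task
        //    locks while still holding the scheduler lock would establish
        //    a sched-then-task lock ordering on this hot path.
        for (auto &t : copy) {
            std::lock_guard<std::mutex> g(t->lock);
            if (t->running)
                t = nullptr;  // not reapable yet; drop it from this batch
        }

        // 3. Re-take the scheduler lock and remove the truly dead tasks.
        {
            std::lock_guard<std::mutex> g(lock);
            for (const auto &t : copy) {
                if (t)
                    dead_tasks.erase(
                        std::remove(dead_tasks.begin(), dead_tasks.end(), t),
                        dead_tasks.end());
            }
        }
        // 4. The snapshot's shared_ptrs release here, outside the scheduler
        //    lock, mirroring the deref-after-unlock step in the commit.
    }
};

int main() {
    Scheduler s;
    s.dead_tasks.push_back(std::make_shared<Task>(1));
    s.dead_tasks.push_back(std::make_shared<Task>(2));
    s.dead_tasks[1]->running = true;  // still running somewhere else
    s.reap();
    std::printf("remaining dead tasks: %zu\n", s.dead_tasks.size());
    return 0;
}

The design trade-off is the same as in the commit: the extra copy costs an allocation per reap, but it means neither lock is ever acquired while the other is held, so the two locks can never participate in a lock-order inversion.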