Commit 922f723f authored by Sebastian Andrzej Siewior

drm/i915/gt: Use spin_lock_irq() instead of local_irq_disable() + spin_lock()



execlists_dequeue() is invoked from a function that uses
local_irq_disable() to disable interrupts, so the spin_lock() behaves
like spin_lock_irq().
This breaks PREEMPT_RT because local_irq_disable() + spin_lock() is not
the same as spin_lock_irq(): on PREEMPT_RT, spinlock_t is a sleeping
lock which must not be acquired with hard interrupts disabled.
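
As an illustration, a minimal sketch of the two patterns (not code from
the driver; "lock" stands in for any spinlock_t such as
intel_engine_cs::active::lock):

  /* Old pattern: broken on PREEMPT_RT, where spin_lock() may sleep and
   * therefore must not be called with hard interrupts disabled.
   */
  local_irq_disable();
  spin_lock(&lock);
  /* ... critical section ... */
  spin_unlock(&lock);
  local_irq_enable();

  /* Replacement: on !PREEMPT_RT this disables interrupts and takes the
   * lock; on PREEMPT_RT it only takes the (sleeping) lock and leaves
   * hard interrupts untouched.
   */
  spin_lock_irq(&lock);
  /* ... critical section ... */
  spin_unlock_irq(&lock);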

execlists_dequeue_irq() and execlists_dequeue() each have only one
caller. If intel_engine_cs::active::lock is acquired and released with
the _irq suffix, execlists_dequeue() behaves almost as if it were
invoked with interrupts disabled. The difference is the last part of
the function, which then runs with interrupts enabled (see the sketch
of the resulting flow below).
I can't tell whether this makes a difference. From looking at it, it
might work to move the final unlock to the end of the function, as I
did not find anything that would acquire the lock again.
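
For orientation, a simplified before/after sketch of the resulting flow
(bodies abbreviated; the "before" wrapper is the function removed
below):

  /* Before: the tasklet went through this wrapper, so all of
   * execlists_dequeue(), including the code after the unlock, ran with
   * interrupts disabled.
   */
  static void execlists_dequeue_irq(struct intel_engine_cs *engine)
  {
          local_irq_disable();
          execlists_dequeue(engine); /* spin_lock()/spin_unlock() inside */
          local_irq_enable();
  }

  /* After: the tasklet calls execlists_dequeue() directly; interrupts
   * are disabled only between spin_lock_irq() and spin_unlock_irq(),
   * so the tail of the function runs with interrupts enabled.
   */
  static void execlists_dequeue(struct intel_engine_cs *engine)
  {
          spin_lock_irq(&engine->active.lock);
          /* ... build and submit the ELSP ports ... */
          spin_unlock_irq(&engine->active.lock);
          /* ... tail, e.g. skipping/poking the HW, now runs with
           * interrupts enabled ...
           */
  }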
Reported-by: Clark Williams <williams@redhat.com>
Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
parent e3b017d5
@@ -1265,7 +1265,7 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
  * and context switches) submission.
  */
-spin_lock(&engine->active.lock);
+spin_lock_irq(&engine->active.lock);
 /*
  * If the queue is higher priority than the last
@@ -1365,7 +1365,7 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
  * Even if ELSP[1] is occupied and not worthy
  * of timeslices, our queue might be.
  */
-spin_unlock(&engine->active.lock);
+spin_unlock_irq(&engine->active.lock);
 return;
 }
 }
@@ -1391,7 +1391,7 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
 if (last && !can_merge_rq(last, rq)) {
 spin_unlock(&ve->base.active.lock);
-spin_unlock(&engine->active.lock);
+spin_unlock_irq(&engine->active.lock);
 return; /* leave this for another sibling */
 }
@@ -1552,7 +1552,7 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
  * interrupt for secondary ports).
  */
 execlists->queue_priority_hint = queue_prio(execlists);
-spin_unlock(&engine->active.lock);
+spin_unlock_irq(&engine->active.lock);
 /*
  * We can skip poking the HW if we ended up with exactly the same set
@@ -1578,13 +1578,6 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
 }
 }
-static void execlists_dequeue_irq(struct intel_engine_cs *engine)
-{
-local_irq_disable(); /* Suspend interrupts across request submission */
-execlists_dequeue(engine);
-local_irq_enable(); /* flush irq_work (e.g. breadcrumb enabling) */
-}
 static void clear_ports(struct i915_request **ports, int count)
 {
 memset_p((void **)ports, NULL, count);
@@ -2377,7 +2370,7 @@ static void execlists_submission_tasklet(struct tasklet_struct *t)
 }
 if (!engine->execlists.pending[0]) {
-execlists_dequeue_irq(engine);
+execlists_dequeue(engine);
 start_timeslice(engine);
 }