dm: mark request_queue dead before destroying the DM device
This prevents new requests from being queued while __dm_destroy() is in progress.

Signed-off-by: Bart Van Assche <bart.vanassche@sandisk.com>
Signed-off-by: Mike Snitzer <snitzer@redhat.com>
Cc: stable@vger.kernel.org
commit 3b785fbcf8
parent 8dc23658b7
committed by Mike Snitzer
@@ -1873,6 +1873,7 @@ EXPORT_SYMBOL_GPL(dm_device_name);
 
 static void __dm_destroy(struct mapped_device *md, bool wait)
 {
+	struct request_queue *q = dm_get_md_queue(md);
 	struct dm_table *map;
 	int srcu_idx;
 
@@ -1883,6 +1884,10 @@ static void __dm_destroy(struct mapped_device *md, bool wait)
 	set_bit(DMF_FREEING, &md->flags);
 	spin_unlock(&_minor_lock);
 
+	spin_lock_irq(q->queue_lock);
+	queue_flag_set(QUEUE_FLAG_DYING, q);
+	spin_unlock_irq(q->queue_lock);
+
 	if (dm_request_based(md) && md->kworker_task)
 		flush_kthread_worker(&md->kworker);
 
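For context on why setting QUEUE_FLAG_DYING closes the race: the block layer tests this flag (via the real blk_queue_dying() helper) in its request allocation and submission paths and fails with -ENODEV once it is set. The snippet below is a minimal illustrative sketch, not the actual block-layer source; example_submit() is a hypothetical caller used only to show the pattern.

/*
 * Sketch only (not the block-layer source): once the hunk above sets
 * QUEUE_FLAG_DYING, paths that would queue new work see the flag via
 * blk_queue_dying() and bail out, so nothing new can be queued while
 * __dm_destroy() tears the device down.
 */
static int example_submit(struct request_queue *q)	/* hypothetical */
{
	if (blk_queue_dying(q))	/* flag was set under q->queue_lock above */
		return -ENODEV;	/* reject instead of queueing */

	/* ... normal request allocation and queueing would happen here ... */
	return 0;
}

Taking q->queue_lock around queue_flag_set() follows the locking convention the flag helpers of that era expected, and it orders the flag update against submitters that check the flag under the same lock.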