diff --git a/fs/fs-writeback.c b/fs/fs-writeback.c
index bdfe97f24a889..309173927fef9 100644
--- a/fs/fs-writeback.c
+++ b/fs/fs-writeback.c
@@ -151,10 +151,10 @@ static void inode_io_list_del_locked(struct inode *inode,
 
 static void wb_wakeup(struct bdi_writeback *wb)
 {
-	spin_lock_bh(&wb->work_lock);
+	spin_lock_irq(&wb->work_lock);
 	if (test_bit(WB_registered, &wb->state))
 		mod_delayed_work(bdi_wq, &wb->dwork, 0);
-	spin_unlock_bh(&wb->work_lock);
+	spin_unlock_irq(&wb->work_lock);
 }
 
 static void finish_writeback_work(struct bdi_writeback *wb,
@@ -181,7 +181,7 @@ static void wb_queue_work(struct bdi_writeback *wb,
 	if (work->done)
 		atomic_inc(&work->done->cnt);
 
-	spin_lock_bh(&wb->work_lock);
+	spin_lock_irq(&wb->work_lock);
 
 	if (test_bit(WB_registered, &wb->state)) {
 		list_add_tail(&work->list, &wb->work_list);
@@ -189,7 +189,7 @@ static void wb_queue_work(struct bdi_writeback *wb,
 	} else
 		finish_writeback_work(wb, work);
 
-	spin_unlock_bh(&wb->work_lock);
+	spin_unlock_irq(&wb->work_lock);
 }
 
 /**
@@ -1938,13 +1938,13 @@ static struct wb_writeback_work *get_next_work_item(struct bdi_writeback *wb)
 {
 	struct wb_writeback_work *work = NULL;
 
-	spin_lock_bh(&wb->work_lock);
+	spin_lock_irq(&wb->work_lock);
 	if (!list_empty(&wb->work_list)) {
 		work = list_entry(wb->work_list.next,
 				  struct wb_writeback_work, list);
 		list_del_init(&work->list);
 	}
-	spin_unlock_bh(&wb->work_lock);
+	spin_unlock_irq(&wb->work_lock);
 	return work;
 }
 
diff --git a/mm/backing-dev.c b/mm/backing-dev.c
index 975a4a637ab30..86831e0f0a687 100644
--- a/mm/backing-dev.c
+++ b/mm/backing-dev.c
@@ -280,10 +280,10 @@ void wb_wakeup_delayed(struct bdi_writeback *wb)
 	unsigned long timeout;
 
 	timeout = msecs_to_jiffies(dirty_writeback_interval * 10);
-	spin_lock_bh(&wb->work_lock);
+	spin_lock_irq(&wb->work_lock);
 	if (test_bit(WB_registered, &wb->state))
 		queue_delayed_work(bdi_wq, &wb->dwork, timeout);
-	spin_unlock_bh(&wb->work_lock);
+	spin_unlock_irq(&wb->work_lock);
 }
 
 static void wb_update_bandwidth_workfn(struct work_struct *work)
@@ -376,12 +376,12 @@ static void cgwb_remove_from_bdi_list(struct bdi_writeback *wb);
 static void wb_shutdown(struct bdi_writeback *wb)
 {
 	/* Make sure nobody queues further work */
-	spin_lock_bh(&wb->work_lock);
+	spin_lock_irq(&wb->work_lock);
 	if (!test_and_clear_bit(WB_registered, &wb->state)) {
-		spin_unlock_bh(&wb->work_lock);
+		spin_unlock_irq(&wb->work_lock);
 		return;
 	}
-	spin_unlock_bh(&wb->work_lock);
+	spin_unlock_irq(&wb->work_lock);
 
 	cgwb_remove_from_bdi_list(wb);
 	/*
diff --git a/mm/page-writeback.c b/mm/page-writeback.c
index a2ff3d0492466..5619991ab77b2 100644
--- a/mm/page-writeback.c
+++ b/mm/page-writeback.c
@@ -2754,6 +2754,7 @@ static void wb_inode_writeback_start(struct bdi_writeback *wb)
 
 static void wb_inode_writeback_end(struct bdi_writeback *wb)
 {
+	unsigned long flags;
 	atomic_dec(&wb->writeback_inodes);
 	/*
 	 * Make sure estimate of writeback throughput gets updated after
@@ -2762,7 +2763,10 @@ static void wb_inode_writeback_end(struct bdi_writeback *wb)
 	 * that if multiple inodes end writeback at a similar time, they get
 	 * batched into one bandwidth update.
 	 */
-	queue_delayed_work(bdi_wq, &wb->bw_dwork, BANDWIDTH_INTERVAL);
+	spin_lock_irqsave(&wb->work_lock, flags);
+	if (test_bit(WB_registered, &wb->state))
+		queue_delayed_work(bdi_wq, &wb->bw_dwork, BANDWIDTH_INTERVAL);
+	spin_unlock_irqrestore(&wb->work_lock, flags);
 }
 
 int test_clear_page_writeback(struct page *page)
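For context, not part of the patch: a minimal sketch of the locking rule the diff adopts. The conversion from the _bh to the _irq spinlock variants suggests wb->work_lock must now be held safely against code reached with interrupts disabled, and wb_inode_writeback_end() uses the irqsave form presumably because its callers' interrupt state is not known. The WB_registered test mirrors the existing guard in wb_wakeup_delayed() and wb_queue_work(): wb_shutdown() clears that bit under the same lock, so nothing can arm bw_dwork against a bdi_writeback that is being torn down. The helper below is hypothetical; the identifiers mirror the patched code.

/*
 * Hypothetical helper illustrating the pattern; not in the patch itself.
 */
static void wb_queue_bandwidth_estimate(struct bdi_writeback *wb)
{
	unsigned long flags;

	/*
	 * irqsave/irqrestore: correct whether or not the caller already
	 * runs with interrupts disabled (e.g. an end-of-writeback path).
	 */
	spin_lock_irqsave(&wb->work_lock, flags);
	/*
	 * WB_registered is cleared by wb_shutdown() under work_lock, so
	 * this test-and-queue cannot race with teardown and leave a
	 * delayed work pending on a soon-to-be-freed bdi_writeback.
	 */
	if (test_bit(WB_registered, &wb->state))
		queue_delayed_work(bdi_wq, &wb->bw_dwork, BANDWIDTH_INTERVAL);
	spin_unlock_irqrestore(&wb->work_lock, flags);
}

The process-context sites (wb_wakeup(), wb_queue_work(), get_next_work_item(), wb_wakeup_delayed(), wb_shutdown()) can keep the cheaper spin_lock_irq()/spin_unlock_irq() pair, since they always run with interrupts enabled.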