@@ -4322,6 +4322,98 @@ static noinline_for_stack int write_one_eb(struct extent_buffer *eb,
 	return ret;
 }
 
+/*
+ * Submit one subpage btree page.
+ *
+ * The main difference to submit_eb_page() is:
+ * - Page locking
+ *   For subpage, we don't rely on page locking at all.
+ *
+ * - Flush write bio
+ *   We only flush the bio if we may be unable to fit current extent buffers
+ *   into the current bio.
+ *
+ * Return >=0 for the number of submitted extent buffers.
+ * Return <0 for fatal error.
+ */
+static int submit_eb_subpage(struct page *page,
+			     struct writeback_control *wbc,
+			     struct extent_page_data *epd)
+{
+	struct btrfs_fs_info *fs_info = btrfs_sb(page->mapping->host->i_sb);
+	int submitted = 0;
+	u64 page_start = page_offset(page);
+	int bit_start = 0;
+	const int nbits = BTRFS_SUBPAGE_BITMAP_SIZE;
+	int sectors_per_node = fs_info->nodesize >> fs_info->sectorsize_bits;
+	int ret;
+
+	/* Lock and write each dirty extent buffer in the range */
+	while (bit_start < nbits) {
+		struct btrfs_subpage *subpage = (struct btrfs_subpage *)page->private;
+		struct extent_buffer *eb;
+		unsigned long flags;
+		u64 start;
+
+		/*
+		 * Take the private lock to ensure the subpage won't be detached
+		 * in the meantime.
+		 */
+		spin_lock(&page->mapping->private_lock);
+		if (!PagePrivate(page)) {
+			spin_unlock(&page->mapping->private_lock);
+			break;
+		}
+		spin_lock_irqsave(&subpage->lock, flags);
+		if (!((1 << bit_start) & subpage->dirty_bitmap)) {
+			spin_unlock_irqrestore(&subpage->lock, flags);
+			spin_unlock(&page->mapping->private_lock);
+			bit_start++;
+			continue;
+		}
+
+		start = page_start + bit_start * fs_info->sectorsize;
+		bit_start += sectors_per_node;
+
+		/*
+		 * Here we just want to grab the eb without touching extra
+		 * spin locks, so call find_extent_buffer_nolock().
+		 */
+		eb = find_extent_buffer_nolock(fs_info, start);
+		spin_unlock_irqrestore(&subpage->lock, flags);
+		spin_unlock(&page->mapping->private_lock);
+
+		/*
+		 * The eb has already reached 0 refs thus find_extent_buffer()
+		 * doesn't return it. We don't need to write back such an eb
+		 * anyway.
+		 */
+		if (!eb)
+			continue;
+
+		ret = lock_extent_buffer_for_io(eb, epd);
+		if (ret == 0) {
+			free_extent_buffer(eb);
+			continue;
+		}
+		if (ret < 0) {
+			free_extent_buffer(eb);
+			goto cleanup;
+		}
+		ret = write_one_eb(eb, wbc, epd);
+		free_extent_buffer(eb);
+		if (ret < 0)
+			goto cleanup;
+		submitted++;
+	}
+	return submitted;
+
+cleanup:
+	/* We hit an error, end the bio for the submitted extent buffers */
+	end_write_bio(epd, ret);
+	return ret;
+}
+
 /*
  * Submit all page(s) of one extent buffer.
  *
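As a rough, standalone illustration of the bitmap walk in submit_eb_subpage() above: assuming the typical subpage setup of 64K pages with 4K sectorsize and 16K nodesize, one page covers 16 sectors and each tree block spans 4 consecutive bits of subpage->dirty_bitmap. The sketch below (illustrative values only, not part of the patch) shows how bit_start maps to a tree block bytenr and why it advances by sectors_per_node:

#include <stdio.h>

int main(void)
{
	const unsigned int sectorsize_bits = 12;	/* 4K sectors */
	const unsigned int nodesize = 16384;		/* 16K tree blocks */
	const int nbits = 16;				/* 64K page / 4K sectors */
	const unsigned long long page_start = 0x100000;	/* example page bytenr */
	const unsigned int dirty_bitmap = 0x00f0;	/* bits 4-7 set: one dirty eb */
	int bit_start = 0;

	while (bit_start < nbits) {
		if (!((1 << bit_start) & dirty_bitmap)) {
			bit_start++;
			continue;
		}
		/* A dirty tree block starts at this sector */
		printf("submit eb at bytenr 0x%llx\n",
		       page_start + ((unsigned long long)bit_start << sectorsize_bits));
		/* Skip the rest of this tree block: sectors_per_node == 4 here */
		bit_start += nodesize >> sectorsize_bits;
	}
	return 0;
}

Running this prints "submit eb at bytenr 0x104000", i.e. the single dirty tree block starting at the fifth sector of the page, and the remaining bits of that block are skipped rather than re-checked.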
@@ -4354,6 +4446,9 @@ static int submit_eb_page(struct page *page, struct writeback_control *wbc,
 	if (!PagePrivate(page))
 		return 0;
 
+	if (btrfs_sb(page->mapping->host->i_sb)->sectorsize < PAGE_SIZE)
+		return submit_eb_subpage(page, wbc, epd);
+
 	spin_lock(&mapping->private_lock);
 	if (!PagePrivate(page)) {
 		spin_unlock(&mapping->private_lock);
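For context on the new check: metadata writeback is routed to the subpage path only when the filesystem sector size is smaller than the CPU page size. A trivial standalone sketch (made-up values, not actual superblock fields):

#include <stdbool.h>

/* Illustrative only: mirrors the sectorsize < PAGE_SIZE test above */
static bool is_subpage(unsigned int sectorsize, unsigned int page_size)
{
	return sectorsize < page_size;
}

/*
 * is_subpage(4096, 65536) -> true:  4K sectors, 64K pages (e.g. arm64)
 * is_subpage(4096, 4096)  -> false: 4K sectors, 4K pages (e.g. x86_64)
 */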