@@ -7,13 +7,12 @@
 #include "blk-mq.h"
 #include "blk-mq-tag.h"
 
-void blk_mq_wait_for_tags(struct blk_mq_tags *tags, struct blk_mq_hw_ctx *hctx,
-			  bool reserved)
+void blk_mq_wait_for_tags(struct blk_mq_hw_ctx *hctx, bool reserved)
 {
 	int tag, zero = 0;
 
-	tag = blk_mq_get_tag(tags, hctx, &zero, __GFP_WAIT, reserved);
-	blk_mq_put_tag(tags, tag, &zero);
+	tag = blk_mq_get_tag(hctx, &zero, __GFP_WAIT, reserved);
+	blk_mq_put_tag(hctx, tag, &zero);
 }
 
 static bool bt_has_free_tags(struct blk_mq_bitmap_tags *bt)
@@ -40,6 +39,84 @@ bool blk_mq_has_free_tags(struct blk_mq_tags *tags)
 	return bt_has_free_tags(&tags->bitmap_tags);
 }
 
+static inline void bt_index_inc(unsigned int *index)
+{
+	*index = (*index + 1) & (BT_WAIT_QUEUES - 1);
+}
+
+/*
+ * If a previously inactive queue goes active, bump the active user count.
+ */
+bool __blk_mq_tag_busy(struct blk_mq_hw_ctx *hctx)
+{
+	if (!test_bit(BLK_MQ_S_TAG_ACTIVE, &hctx->state) &&
+	    !test_and_set_bit(BLK_MQ_S_TAG_ACTIVE, &hctx->state))
+		atomic_inc(&hctx->tags->active_queues);
+
+	return true;
+}
+
+/*
+ * If a previously busy queue goes inactive, potential waiters could now
+ * be allowed to queue. Wake them up and check.
+ */
+void __blk_mq_tag_idle(struct blk_mq_hw_ctx *hctx)
+{
+	struct blk_mq_tags *tags = hctx->tags;
+	struct blk_mq_bitmap_tags *bt;
+	int i, wake_index;
+
+	if (!test_and_clear_bit(BLK_MQ_S_TAG_ACTIVE, &hctx->state))
+		return;
+
+	atomic_dec(&tags->active_queues);
+
+	/*
+	 * Will only throttle depth on non-reserved tags
+	 */
+	bt = &tags->bitmap_tags;
+	wake_index = bt->wake_index;
+	for (i = 0; i < BT_WAIT_QUEUES; i++) {
+		struct bt_wait_state *bs = &bt->bs[wake_index];
+
+		if (waitqueue_active(&bs->wait))
+			wake_up(&bs->wait);
+
+		bt_index_inc(&wake_index);
+	}
+}
+
+/*
+ * For shared tag users, we track the number of currently active users
+ * and attempt to provide a fair share of the tag depth for each of them.
+ */
+static inline bool hctx_may_queue(struct blk_mq_hw_ctx *hctx,
+				  struct blk_mq_bitmap_tags *bt)
+{
+	unsigned int depth, users;
+
+	if (!hctx || !(hctx->flags & BLK_MQ_F_TAG_SHARED))
+		return true;
+	if (!test_bit(BLK_MQ_S_TAG_ACTIVE, &hctx->state))
+		return true;
+
+	/*
+	 * Don't try dividing an ant
+	 */
+	if (bt->depth == 1)
+		return true;
+
+	users = atomic_read(&hctx->tags->active_queues);
+	if (!users)
+		return true;
+
+	/*
+	 * Allow at least some tags
+	 */
+	depth = max((bt->depth + users - 1) / users, 4U);
+	return atomic_read(&hctx->nr_active) < depth;
+}
+
 static int __bt_get_word(struct blk_mq_bitmap *bm, unsigned int last_tag)
 {
 	int tag, org_last_tag, end;
@@ -78,11 +155,15 @@ static int __bt_get_word(struct blk_mq_bitmap *bm, unsigned int last_tag)
  * multiple users will tend to stick to different cachelines, at least
  * until the map is exhausted.
  */
-static int __bt_get(struct blk_mq_bitmap_tags *bt, unsigned int *tag_cache)
+static int __bt_get(struct blk_mq_hw_ctx *hctx, struct blk_mq_bitmap_tags *bt,
+		    unsigned int *tag_cache)
 {
 	unsigned int last_tag, org_last_tag;
 	int index, i, tag;
 
+	if (!hctx_may_queue(hctx, bt))
+		return -1;
+
 	last_tag = org_last_tag = *tag_cache;
 	index = TAG_TO_INDEX(bt, last_tag);
@@ -117,11 +198,6 @@ static int __bt_get(struct blk_mq_bitmap_tags *bt, unsigned int *tag_cache)
 	return tag;
 }
 
-static inline void bt_index_inc(unsigned int *index)
-{
-	*index = (*index + 1) & (BT_WAIT_QUEUES - 1);
-}
-
 static struct bt_wait_state *bt_wait_ptr(struct blk_mq_bitmap_tags *bt,
 					 struct blk_mq_hw_ctx *hctx)
 {
@@ -142,7 +218,7 @@ static int bt_get(struct blk_mq_bitmap_tags *bt, struct blk_mq_hw_ctx *hctx,
 	DEFINE_WAIT(wait);
 	int tag;
 
-	tag = __bt_get(bt, last_tag);
+	tag = __bt_get(hctx, bt, last_tag);
 	if (tag != -1)
 		return tag;
 
@@ -156,7 +232,7 @@ static int bt_get(struct blk_mq_bitmap_tags *bt, struct blk_mq_hw_ctx *hctx,
 		was_empty = list_empty(&wait.task_list);
 		prepare_to_wait(&bs->wait, &wait, TASK_UNINTERRUPTIBLE);
 
-		tag = __bt_get(bt, last_tag);
+		tag = __bt_get(hctx, bt, last_tag);
 		if (tag != -1)
 			break;
 
@@ -200,14 +276,13 @@ static unsigned int __blk_mq_get_reserved_tag(struct blk_mq_tags *tags,
 	return tag;
 }
 
-unsigned int blk_mq_get_tag(struct blk_mq_tags *tags,
-			    struct blk_mq_hw_ctx *hctx, unsigned int *last_tag,
+unsigned int blk_mq_get_tag(struct blk_mq_hw_ctx *hctx, unsigned int *last_tag,
 			    gfp_t gfp, bool reserved)
 {
 	if (!reserved)
-		return __blk_mq_get_tag(tags, hctx, last_tag, gfp);
+		return __blk_mq_get_tag(hctx->tags, hctx, last_tag, gfp);
 
-	return __blk_mq_get_reserved_tag(tags, gfp);
+	return __blk_mq_get_reserved_tag(hctx->tags, gfp);
 }
 
 static struct bt_wait_state *bt_wake_ptr(struct blk_mq_bitmap_tags *bt)
@@ -265,9 +340,11 @@ static void __blk_mq_put_reserved_tag(struct blk_mq_tags *tags,
 	bt_clear_tag(&tags->breserved_tags, tag);
 }
 
-void blk_mq_put_tag(struct blk_mq_tags *tags, unsigned int tag,
+void blk_mq_put_tag(struct blk_mq_hw_ctx *hctx, unsigned int tag,
 		    unsigned int *last_tag)
 {
+	struct blk_mq_tags *tags = hctx->tags;
+
 	if (tag >= tags->nr_reserved_tags) {
 		const int real_tag = tag - tags->nr_reserved_tags;
 
@@ -465,6 +542,7 @@ ssize_t blk_mq_tag_sysfs_show(struct blk_mq_tags *tags, char *page)
 		res = bt_unused_tags(&tags->breserved_tags);
 
 	page += sprintf(page, "nr_free=%u, nr_reserved=%u\n", free, res);
+	page += sprintf(page, "active_queues=%u\n", atomic_read(&tags->active_queues));
 
 	return page - orig_page;
 }
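
For reference, the fair-share throttling that the new hctx_may_queue() applies comes down to one calculation: each active queue on a shared tag map is allowed roughly bt->depth divided by the number of active queues, rounded up and floored at 4 tags. Below is a minimal standalone C sketch of that arithmetic; fair_share_depth() and may_queue() are hypothetical helpers for illustration only and are not part of the patch.

#include <stdio.h>

/*
 * Standalone illustration of the fair-share throttle used by
 * hctx_may_queue() in the patch. fair_share_depth() rounds the
 * per-queue share up and enforces a floor of 4 tags.
 */
static unsigned int fair_share_depth(unsigned int total_depth,
				     unsigned int active_users)
{
	unsigned int depth;

	/* nothing to divide fairly */
	if (total_depth == 1 || active_users <= 1)
		return total_depth;

	/* round up, but always allow at least some tags */
	depth = (total_depth + active_users - 1) / active_users;
	return depth < 4 ? 4 : depth;
}

/* A queue may allocate another tag while it holds fewer than its share. */
static int may_queue(unsigned int nr_active, unsigned int total_depth,
		     unsigned int active_users)
{
	return nr_active < fair_share_depth(total_depth, active_users);
}

int main(void)
{
	/* e.g. 128 tags shared by 5 active queues -> 26 tags per queue */
	printf("share = %u\n", fair_share_depth(128, 5));
	/* a queue already holding 26 tags has to wait */
	printf("may_queue = %d\n", may_queue(26, 128, 5));
	return 0;
}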