TOMOYO Linux Cross Reference
Linux/block/blk-sysfs.c

// SPDX-License-Identifier: GPL-2.0
/*
 * Functions related to sysfs handling
 */
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/backing-dev.h>
#include <linux/blktrace_api.h>
#include <linux/blk-mq.h>
#include <linux/blk-cgroup.h>

#include "blk.h"
#include "blk-mq.h"
#include "blk-mq-debugfs.h"
#include "blk-wbt.h"

struct queue_sysfs_entry {
        struct attribute attr;
        ssize_t (*show)(struct request_queue *, char *);
        ssize_t (*store)(struct request_queue *, const char *, size_t);
};

static ssize_t
queue_var_show(unsigned long var, char *page)
{
        return sprintf(page, "%lu\n", var);
}

static ssize_t
queue_var_store(unsigned long *var, const char *page, size_t count)
{
        int err;
        unsigned long v;

        err = kstrtoul(page, 10, &v);
        if (err || v > UINT_MAX)
                return -EINVAL;

        *var = v;

        return count;
}

static ssize_t queue_var_store64(s64 *var, const char *page)
{
        int err;
        s64 v;

        err = kstrtos64(page, 10, &v);
        if (err < 0)
                return err;

        *var = v;
        return 0;
}
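
/*
 * queue_var_show() and queue_var_store() are the common format/parse
 * helpers used by most of the attributes below; queue_var_store()
 * returns the consumed byte count on success, which is what sysfs
 * expects from a ->store() method. A minimal sketch of a hypothetical
 * attribute pair built on them (the "example_limit" name and field are
 * illustrative only, not part of this file):
 *
 *      static ssize_t queue_example_show(struct request_queue *q, char *page)
 *      {
 *              return queue_var_show(q->example_limit, page);
 *      }
 *
 *      static ssize_t queue_example_store(struct request_queue *q,
 *                                         const char *page, size_t count)
 *      {
 *              unsigned long val;
 *              ssize_t ret = queue_var_store(&val, page, count);
 *
 *              if (ret < 0)
 *                      return ret;
 *              q->example_limit = val;
 *              return ret;
 *      }
 */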

static ssize_t queue_requests_show(struct request_queue *q, char *page)
{
        return queue_var_show(q->nr_requests, page);
}

static ssize_t
queue_requests_store(struct request_queue *q, const char *page, size_t count)
{
        unsigned long nr;
        int ret, err;

        if (!queue_is_mq(q))
                return -EINVAL;

        ret = queue_var_store(&nr, page, count);
        if (ret < 0)
                return ret;

        if (nr < BLKDEV_MIN_RQ)
                nr = BLKDEV_MIN_RQ;

        err = blk_mq_update_nr_requests(q, nr);
        if (err)
                return err;

        return ret;
}
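
/*
 * nr_requests can only be changed on blk-mq queues here, and values
 * below BLKDEV_MIN_RQ are rounded up rather than rejected. A typical
 * (illustrative) interaction from userspace:
 *
 *      # cat /sys/block/sda/queue/nr_requests
 *      # echo 256 > /sys/block/sda/queue/nr_requests
 *
 * blk_mq_update_nr_requests() may still fail, e.g. when the requested
 * depth cannot be accommodated, in which case its error code is
 * returned instead of the consumed byte count.
 */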

static ssize_t queue_ra_show(struct request_queue *q, char *page)
{
        unsigned long ra_kb = q->backing_dev_info->ra_pages <<
                                        (PAGE_SHIFT - 10);

        return queue_var_show(ra_kb, page);
}

static ssize_t
queue_ra_store(struct request_queue *q, const char *page, size_t count)
{
        unsigned long ra_kb;
        ssize_t ret = queue_var_store(&ra_kb, page, count);

        if (ret < 0)
                return ret;

        q->backing_dev_info->ra_pages = ra_kb >> (PAGE_SHIFT - 10);

        return ret;
}
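
/*
 * read_ahead_kb is kept internally in pages, so show and store shift by
 * (PAGE_SHIFT - 10) to convert between KiB and pages. With 4 KiB pages
 * PAGE_SHIFT is 12 and the shift is 2: writing 128 stores 128 >> 2 = 32
 * in ra_pages, and reading back yields 32 << 2 = 128.
 */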

static ssize_t queue_max_sectors_show(struct request_queue *q, char *page)
{
        int max_sectors_kb = queue_max_sectors(q) >> 1;

        return queue_var_show(max_sectors_kb, page);
}

static ssize_t queue_max_segments_show(struct request_queue *q, char *page)
{
        return queue_var_show(queue_max_segments(q), page);
}

static ssize_t queue_max_discard_segments_show(struct request_queue *q,
                char *page)
{
        return queue_var_show(queue_max_discard_segments(q), page);
}

static ssize_t queue_max_integrity_segments_show(struct request_queue *q, char *page)
{
        return queue_var_show(q->limits.max_integrity_segments, page);
}

static ssize_t queue_max_segment_size_show(struct request_queue *q, char *page)
{
        return queue_var_show(queue_max_segment_size(q), page);
}

static ssize_t queue_logical_block_size_show(struct request_queue *q, char *page)
{
        return queue_var_show(queue_logical_block_size(q), page);
}

static ssize_t queue_physical_block_size_show(struct request_queue *q, char *page)
{
        return queue_var_show(queue_physical_block_size(q), page);
}

static ssize_t queue_chunk_sectors_show(struct request_queue *q, char *page)
{
        return queue_var_show(q->limits.chunk_sectors, page);
}

static ssize_t queue_io_min_show(struct request_queue *q, char *page)
{
        return queue_var_show(queue_io_min(q), page);
}

static ssize_t queue_io_opt_show(struct request_queue *q, char *page)
{
        return queue_var_show(queue_io_opt(q), page);
}

static ssize_t queue_discard_granularity_show(struct request_queue *q, char *page)
{
        return queue_var_show(q->limits.discard_granularity, page);
}

static ssize_t queue_discard_max_hw_show(struct request_queue *q, char *page)
{
        return sprintf(page, "%llu\n",
                (unsigned long long)q->limits.max_hw_discard_sectors << 9);
}

static ssize_t queue_discard_max_show(struct request_queue *q, char *page)
{
        return sprintf(page, "%llu\n",
                       (unsigned long long)q->limits.max_discard_sectors << 9);
}

static ssize_t queue_discard_max_store(struct request_queue *q,
                                       const char *page, size_t count)
{
        unsigned long max_discard;
        ssize_t ret = queue_var_store(&max_discard, page, count);

        if (ret < 0)
                return ret;

        if (max_discard & (q->limits.discard_granularity - 1))
                return -EINVAL;

        max_discard >>= 9;
        if (max_discard > UINT_MAX)
                return -EINVAL;

        if (max_discard > q->limits.max_hw_discard_sectors)
                max_discard = q->limits.max_hw_discard_sectors;

        q->limits.max_discard_sectors = max_discard;
        return ret;
}
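
/*
 * discard_max_bytes is written in bytes but stored in 512-byte sectors,
 * hence the >> 9. The mask test (max_discard & (granularity - 1))
 * assumes discard_granularity is a power of two. Worked (illustrative)
 * example: with a 4096-byte granularity, writing 1048576 (1 MiB) passes
 * the alignment test and stores 1048576 >> 9 = 2048 sectors, further
 * capped at max_hw_discard_sectors.
 */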

static ssize_t queue_discard_zeroes_data_show(struct request_queue *q, char *page)
{
        return queue_var_show(0, page);
}

static ssize_t queue_write_same_max_show(struct request_queue *q, char *page)
{
        return sprintf(page, "%llu\n",
                (unsigned long long)q->limits.max_write_same_sectors << 9);
}

static ssize_t queue_write_zeroes_max_show(struct request_queue *q, char *page)
{
        return sprintf(page, "%llu\n",
                (unsigned long long)q->limits.max_write_zeroes_sectors << 9);
}

static ssize_t
queue_max_sectors_store(struct request_queue *q, const char *page, size_t count)
{
        unsigned long max_sectors_kb,
                max_hw_sectors_kb = queue_max_hw_sectors(q) >> 1,
                        page_kb = 1 << (PAGE_SHIFT - 10);
        ssize_t ret = queue_var_store(&max_sectors_kb, page, count);

        if (ret < 0)
                return ret;

        max_hw_sectors_kb = min_not_zero(max_hw_sectors_kb, (unsigned long)
                                         q->limits.max_dev_sectors >> 1);

        if (max_sectors_kb > max_hw_sectors_kb || max_sectors_kb < page_kb)
                return -EINVAL;

        spin_lock_irq(&q->queue_lock);
        q->limits.max_sectors = max_sectors_kb << 1;
        q->backing_dev_info->io_pages = max_sectors_kb >> (PAGE_SHIFT - 10);
        spin_unlock_irq(&q->queue_lock);

        return ret;
}
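
/*
 * max_sectors_kb is exposed in KiB while the limits are in 512-byte
 * sectors, so one KiB equals two sectors: >> 1 converts sectors to KiB
 * for show and << 1 converts back for store. Worked example: a limit of
 * 2560 sectors reads back as 1280, and writing 512 sets
 * q->limits.max_sectors to 1024. Values outside the range from page_kb
 * up to the (possibly device-capped) hardware limit are rejected with
 * -EINVAL.
 */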

static ssize_t queue_max_hw_sectors_show(struct request_queue *q, char *page)
{
        int max_hw_sectors_kb = queue_max_hw_sectors(q) >> 1;

        return queue_var_show(max_hw_sectors_kb, page);
}

#define QUEUE_SYSFS_BIT_FNS(name, flag, neg)                            \
static ssize_t                                                          \
queue_show_##name(struct request_queue *q, char *page)                  \
{                                                                       \
        int bit;                                                        \
        bit = test_bit(QUEUE_FLAG_##flag, &q->queue_flags);             \
        return queue_var_show(neg ? !bit : bit, page);                  \
}                                                                       \
static ssize_t                                                          \
queue_store_##name(struct request_queue *q, const char *page, size_t count) \
{                                                                       \
        unsigned long val;                                              \
        ssize_t ret;                                                    \
        ret = queue_var_store(&val, page, count);                       \
        if (ret < 0)                                                    \
                return ret;                                             \
        if (neg)                                                        \
                val = !val;                                             \
                                                                        \
        if (val)                                                        \
                blk_queue_flag_set(QUEUE_FLAG_##flag, q);               \
        else                                                            \
                blk_queue_flag_clear(QUEUE_FLAG_##flag, q);             \
        return ret;                                                     \
}

QUEUE_SYSFS_BIT_FNS(nonrot, NONROT, 1);
QUEUE_SYSFS_BIT_FNS(random, ADD_RANDOM, 0);
QUEUE_SYSFS_BIT_FNS(iostats, IO_STAT, 0);
#undef QUEUE_SYSFS_BIT_FNS
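
/*
 * Each QUEUE_SYSFS_BIT_FNS() invocation above stamps out a show/store
 * pair for one queue flag, optionally negated. For instance,
 * QUEUE_SYSFS_BIT_FNS(nonrot, NONROT, 1) expands (in sketch form) to:
 *
 *      static ssize_t queue_show_nonrot(struct request_queue *q, char *page)
 *      {
 *              int bit = test_bit(QUEUE_FLAG_NONROT, &q->queue_flags);
 *              return queue_var_show(!bit, page);
 *      }
 *
 * plus the matching queue_store_nonrot(). The negation is why the file
 * is named "rotational" yet backed by QUEUE_FLAG_NONROT: it shows 1 for
 * rotational media.
 */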

static ssize_t queue_zoned_show(struct request_queue *q, char *page)
{
        switch (blk_queue_zoned_model(q)) {
        case BLK_ZONED_HA:
                return sprintf(page, "host-aware\n");
        case BLK_ZONED_HM:
                return sprintf(page, "host-managed\n");
        default:
                return sprintf(page, "none\n");
        }
}

static ssize_t queue_nr_zones_show(struct request_queue *q, char *page)
{
        return queue_var_show(blk_queue_nr_zones(q), page);
}

static ssize_t queue_nomerges_show(struct request_queue *q, char *page)
{
        return queue_var_show((blk_queue_nomerges(q) << 1) |
                               blk_queue_noxmerges(q), page);
}

static ssize_t queue_nomerges_store(struct request_queue *q, const char *page,
                                    size_t count)
{
        unsigned long nm;
        ssize_t ret = queue_var_store(&nm, page, count);

        if (ret < 0)
                return ret;

        blk_queue_flag_clear(QUEUE_FLAG_NOMERGES, q);
        blk_queue_flag_clear(QUEUE_FLAG_NOXMERGES, q);
        if (nm == 2)
                blk_queue_flag_set(QUEUE_FLAG_NOMERGES, q);
        else if (nm)
                blk_queue_flag_set(QUEUE_FLAG_NOXMERGES, q);

        return ret;
}
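
/*
 * nomerges is a two-level setting rather than a boolean: 0 enables all
 * merging, 1 sets only QUEUE_FLAG_NOXMERGES (skip the more expensive
 * extended merge lookups), and 2 sets QUEUE_FLAG_NOMERGES (no merging
 * at all). The show path reconstructs the same value as
 * (nomerges << 1) | noxmerges.
 */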

static ssize_t queue_rq_affinity_show(struct request_queue *q, char *page)
{
        bool set = test_bit(QUEUE_FLAG_SAME_COMP, &q->queue_flags);
        bool force = test_bit(QUEUE_FLAG_SAME_FORCE, &q->queue_flags);

        return queue_var_show(set << force, page);
}

static ssize_t
queue_rq_affinity_store(struct request_queue *q, const char *page, size_t count)
{
        ssize_t ret = -EINVAL;
#ifdef CONFIG_SMP
        unsigned long val;

        ret = queue_var_store(&val, page, count);
        if (ret < 0)
                return ret;

        if (val == 2) {
                blk_queue_flag_set(QUEUE_FLAG_SAME_COMP, q);
                blk_queue_flag_set(QUEUE_FLAG_SAME_FORCE, q);
        } else if (val == 1) {
                blk_queue_flag_set(QUEUE_FLAG_SAME_COMP, q);
                blk_queue_flag_clear(QUEUE_FLAG_SAME_FORCE, q);
        } else if (val == 0) {
                blk_queue_flag_clear(QUEUE_FLAG_SAME_COMP, q);
                blk_queue_flag_clear(QUEUE_FLAG_SAME_FORCE, q);
        }
#endif
        return ret;
}
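
/*
 * rq_affinity values: 0 clears both flags, 1 sets QUEUE_FLAG_SAME_COMP
 * so a request completes in the CPU group that submitted it, and 2 also
 * sets QUEUE_FLAG_SAME_FORCE to force completion on the exact
 * submitting CPU. On !CONFIG_SMP builds the store rejects everything
 * with -EINVAL. The show path encodes the flag pair as set << force,
 * yielding 0, 1 or 2.
 */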

static ssize_t queue_poll_delay_show(struct request_queue *q, char *page)
{
        int val;

        if (q->poll_nsec == BLK_MQ_POLL_CLASSIC)
                val = BLK_MQ_POLL_CLASSIC;
        else
                val = q->poll_nsec / 1000;

        return sprintf(page, "%d\n", val);
}

static ssize_t queue_poll_delay_store(struct request_queue *q, const char *page,
                                size_t count)
{
        int err, val;

        if (!q->mq_ops || !q->mq_ops->poll)
                return -EINVAL;

        err = kstrtoint(page, 10, &val);
        if (err < 0)
                return err;

        if (val == BLK_MQ_POLL_CLASSIC)
                q->poll_nsec = BLK_MQ_POLL_CLASSIC;
        else if (val >= 0)
                q->poll_nsec = val * 1000;
        else
                return -EINVAL;

        return count;
}
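
/*
 * io_poll_delay accepts -1 (BLK_MQ_POLL_CLASSIC) to select classic
 * polling, or a non-negative delay in microseconds that is converted to
 * nanoseconds for q->poll_nsec (hence the matching * 1000 in store and
 * / 1000 in show). Writes are refused when the driver supplies no
 * ->poll() hook.
 */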

static ssize_t queue_poll_show(struct request_queue *q, char *page)
{
        return queue_var_show(test_bit(QUEUE_FLAG_POLL, &q->queue_flags), page);
}

static ssize_t queue_poll_store(struct request_queue *q, const char *page,
                                size_t count)
{
        unsigned long poll_on;
        ssize_t ret;

        if (!q->tag_set || q->tag_set->nr_maps <= HCTX_TYPE_POLL ||
            !q->tag_set->map[HCTX_TYPE_POLL].nr_queues)
                return -EINVAL;

        ret = queue_var_store(&poll_on, page, count);
        if (ret < 0)
                return ret;

        if (poll_on)
                blk_queue_flag_set(QUEUE_FLAG_POLL, q);
        else
                blk_queue_flag_clear(QUEUE_FLAG_POLL, q);

        return ret;
}

static ssize_t queue_io_timeout_show(struct request_queue *q, char *page)
{
        return sprintf(page, "%u\n", jiffies_to_msecs(q->rq_timeout));
}

static ssize_t queue_io_timeout_store(struct request_queue *q, const char *page,
                                  size_t count)
{
        unsigned int val;
        int err;

        err = kstrtou32(page, 10, &val);
        if (err || val == 0)
                return -EINVAL;

        blk_queue_rq_timeout(q, msecs_to_jiffies(val));

        return count;
}

static ssize_t queue_wb_lat_show(struct request_queue *q, char *page)
{
        if (!wbt_rq_qos(q))
                return -EINVAL;

        return sprintf(page, "%llu\n", div_u64(wbt_get_min_lat(q), 1000));
}

static ssize_t queue_wb_lat_store(struct request_queue *q, const char *page,
                                  size_t count)
{
        struct rq_qos *rqos;
        ssize_t ret;
        s64 val;

        ret = queue_var_store64(&val, page);
        if (ret < 0)
                return ret;
        if (val < -1)
                return -EINVAL;

        rqos = wbt_rq_qos(q);
        if (!rqos) {
                ret = wbt_init(q);
                if (ret)
                        return ret;
        }

        if (val == -1)
                val = wbt_default_latency_nsec(q);
        else if (val >= 0)
                val *= 1000ULL;

        if (wbt_get_min_lat(q) == val)
                return count;

        /*
         * Ensure that the queue is idled, in case the latency update
         * ends up either enabling or disabling wbt completely. We can't
         * have IO inflight if that happens.
         */
        blk_mq_freeze_queue(q);
        blk_mq_quiesce_queue(q);

        wbt_set_min_lat(q, val);
        wbt_update_limits(q);

        blk_mq_unquiesce_queue(q);
        blk_mq_unfreeze_queue(q);

        return count;
}
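
/*
 * wbt_lat_usec follows the same unit convention: the file is in
 * microseconds, wbt tracks nanoseconds, and -1 requests the device-type
 * default from wbt_default_latency_nsec(). Writing 0 effectively
 * disables writeback throttling while keeping wbt allocated. A
 * hypothetical tuning session:
 *
 *      # echo 75000 > /sys/block/sda/queue/wbt_lat_usec
 *      # echo -1 > /sys/block/sda/queue/wbt_lat_usec
 */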

static ssize_t queue_wc_show(struct request_queue *q, char *page)
{
        if (test_bit(QUEUE_FLAG_WC, &q->queue_flags))
                return sprintf(page, "write back\n");

        return sprintf(page, "write through\n");
}

static ssize_t queue_wc_store(struct request_queue *q, const char *page,
                              size_t count)
{
        int set = -1;

        if (!strncmp(page, "write back", 10))
                set = 1;
        else if (!strncmp(page, "write through", 13) ||
                 !strncmp(page, "none", 4))
                set = 0;

        if (set == -1)
                return -EINVAL;

        if (set)
                blk_queue_flag_set(QUEUE_FLAG_WC, q);
        else
                blk_queue_flag_clear(QUEUE_FLAG_WC, q);

        return count;
}
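
/*
 * write_cache is keyword-based rather than numeric: "write back" sets
 * QUEUE_FLAG_WC and "write through" (or the legacy "none") clears it.
 * This only changes how the block layer issues cache flushes; it does
 * not reprogram the device's own volatile cache setting. Illustrative
 * usage:
 *
 *      # cat /sys/block/sda/queue/write_cache
 *      # echo "write through" > /sys/block/sda/queue/write_cache
 */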

static ssize_t queue_fua_show(struct request_queue *q, char *page)
{
        return sprintf(page, "%u\n", test_bit(QUEUE_FLAG_FUA, &q->queue_flags));
}

static ssize_t queue_dax_show(struct request_queue *q, char *page)
{
        return queue_var_show(blk_queue_dax(q), page);
}

static struct queue_sysfs_entry queue_requests_entry = {
        .attr = {.name = "nr_requests", .mode = 0644 },
        .show = queue_requests_show,
        .store = queue_requests_store,
};

static struct queue_sysfs_entry queue_ra_entry = {
        .attr = {.name = "read_ahead_kb", .mode = 0644 },
        .show = queue_ra_show,
        .store = queue_ra_store,
};

static struct queue_sysfs_entry queue_max_sectors_entry = {
        .attr = {.name = "max_sectors_kb", .mode = 0644 },
        .show = queue_max_sectors_show,
        .store = queue_max_sectors_store,
};

static struct queue_sysfs_entry queue_max_hw_sectors_entry = {
        .attr = {.name = "max_hw_sectors_kb", .mode = 0444 },
        .show = queue_max_hw_sectors_show,
};

static struct queue_sysfs_entry queue_max_segments_entry = {
        .attr = {.name = "max_segments", .mode = 0444 },
        .show = queue_max_segments_show,
};

static struct queue_sysfs_entry queue_max_discard_segments_entry = {
        .attr = {.name = "max_discard_segments", .mode = 0444 },
        .show = queue_max_discard_segments_show,
};

static struct queue_sysfs_entry queue_max_integrity_segments_entry = {
        .attr = {.name = "max_integrity_segments", .mode = 0444 },
        .show = queue_max_integrity_segments_show,
};

static struct queue_sysfs_entry queue_max_segment_size_entry = {
        .attr = {.name = "max_segment_size", .mode = 0444 },
        .show = queue_max_segment_size_show,
};

static struct queue_sysfs_entry queue_iosched_entry = {
        .attr = {.name = "scheduler", .mode = 0644 },
        .show = elv_iosched_show,
        .store = elv_iosched_store,
};

static struct queue_sysfs_entry queue_hw_sector_size_entry = {
        .attr = {.name = "hw_sector_size", .mode = 0444 },
        .show = queue_logical_block_size_show,
};

static struct queue_sysfs_entry queue_logical_block_size_entry = {
        .attr = {.name = "logical_block_size", .mode = 0444 },
        .show = queue_logical_block_size_show,
};

static struct queue_sysfs_entry queue_physical_block_size_entry = {
        .attr = {.name = "physical_block_size", .mode = 0444 },
        .show = queue_physical_block_size_show,
};

static struct queue_sysfs_entry queue_chunk_sectors_entry = {
        .attr = {.name = "chunk_sectors", .mode = 0444 },
        .show = queue_chunk_sectors_show,
};

static struct queue_sysfs_entry queue_io_min_entry = {
        .attr = {.name = "minimum_io_size", .mode = 0444 },
        .show = queue_io_min_show,
};

static struct queue_sysfs_entry queue_io_opt_entry = {
        .attr = {.name = "optimal_io_size", .mode = 0444 },
        .show = queue_io_opt_show,
};

static struct queue_sysfs_entry queue_discard_granularity_entry = {
        .attr = {.name = "discard_granularity", .mode = 0444 },
        .show = queue_discard_granularity_show,
};

static struct queue_sysfs_entry queue_discard_max_hw_entry = {
        .attr = {.name = "discard_max_hw_bytes", .mode = 0444 },
        .show = queue_discard_max_hw_show,
};

static struct queue_sysfs_entry queue_discard_max_entry = {
        .attr = {.name = "discard_max_bytes", .mode = 0644 },
        .show = queue_discard_max_show,
        .store = queue_discard_max_store,
};

static struct queue_sysfs_entry queue_discard_zeroes_data_entry = {
        .attr = {.name = "discard_zeroes_data", .mode = 0444 },
        .show = queue_discard_zeroes_data_show,
};

static struct queue_sysfs_entry queue_write_same_max_entry = {
        .attr = {.name = "write_same_max_bytes", .mode = 0444 },
        .show = queue_write_same_max_show,
};

static struct queue_sysfs_entry queue_write_zeroes_max_entry = {
        .attr = {.name = "write_zeroes_max_bytes", .mode = 0444 },
        .show = queue_write_zeroes_max_show,
};

static struct queue_sysfs_entry queue_nonrot_entry = {
        .attr = {.name = "rotational", .mode = 0644 },
        .show = queue_show_nonrot,
        .store = queue_store_nonrot,
};

static struct queue_sysfs_entry queue_zoned_entry = {
        .attr = {.name = "zoned", .mode = 0444 },
        .show = queue_zoned_show,
};

static struct queue_sysfs_entry queue_nr_zones_entry = {
        .attr = {.name = "nr_zones", .mode = 0444 },
        .show = queue_nr_zones_show,
};

static struct queue_sysfs_entry queue_nomerges_entry = {
        .attr = {.name = "nomerges", .mode = 0644 },
        .show = queue_nomerges_show,
        .store = queue_nomerges_store,
};

static struct queue_sysfs_entry queue_rq_affinity_entry = {
        .attr = {.name = "rq_affinity", .mode = 0644 },
        .show = queue_rq_affinity_show,
        .store = queue_rq_affinity_store,
};

static struct queue_sysfs_entry queue_iostats_entry = {
        .attr = {.name = "iostats", .mode = 0644 },
        .show = queue_show_iostats,
        .store = queue_store_iostats,
};

static struct queue_sysfs_entry queue_random_entry = {
        .attr = {.name = "add_random", .mode = 0644 },
        .show = queue_show_random,
        .store = queue_store_random,
};

static struct queue_sysfs_entry queue_poll_entry = {
        .attr = {.name = "io_poll", .mode = 0644 },
        .show = queue_poll_show,
        .store = queue_poll_store,
};

static struct queue_sysfs_entry queue_poll_delay_entry = {
        .attr = {.name = "io_poll_delay", .mode = 0644 },
        .show = queue_poll_delay_show,
        .store = queue_poll_delay_store,
};

static struct queue_sysfs_entry queue_wc_entry = {
        .attr = {.name = "write_cache", .mode = 0644 },
        .show = queue_wc_show,
        .store = queue_wc_store,
};

static struct queue_sysfs_entry queue_fua_entry = {
        .attr = {.name = "fua", .mode = 0444 },
        .show = queue_fua_show,
};

static struct queue_sysfs_entry queue_dax_entry = {
        .attr = {.name = "dax", .mode = 0444 },
        .show = queue_dax_show,
};

static struct queue_sysfs_entry queue_io_timeout_entry = {
        .attr = {.name = "io_timeout", .mode = 0644 },
        .show = queue_io_timeout_show,
        .store = queue_io_timeout_store,
};

static struct queue_sysfs_entry queue_wb_lat_entry = {
        .attr = {.name = "wbt_lat_usec", .mode = 0644 },
        .show = queue_wb_lat_show,
        .store = queue_wb_lat_store,
};

#ifdef CONFIG_BLK_DEV_THROTTLING_LOW
static struct queue_sysfs_entry throtl_sample_time_entry = {
        .attr = {.name = "throttle_sample_time", .mode = 0644 },
        .show = blk_throtl_sample_time_show,
        .store = blk_throtl_sample_time_store,
};
#endif

static struct attribute *queue_attrs[] = {
        &queue_requests_entry.attr,
        &queue_ra_entry.attr,
        &queue_max_hw_sectors_entry.attr,
        &queue_max_sectors_entry.attr,
        &queue_max_segments_entry.attr,
        &queue_max_discard_segments_entry.attr,
        &queue_max_integrity_segments_entry.attr,
        &queue_max_segment_size_entry.attr,
        &queue_iosched_entry.attr,
        &queue_hw_sector_size_entry.attr,
        &queue_logical_block_size_entry.attr,
        &queue_physical_block_size_entry.attr,
        &queue_chunk_sectors_entry.attr,
        &queue_io_min_entry.attr,
        &queue_io_opt_entry.attr,
        &queue_discard_granularity_entry.attr,
        &queue_discard_max_entry.attr,
        &queue_discard_max_hw_entry.attr,
        &queue_discard_zeroes_data_entry.attr,
        &queue_write_same_max_entry.attr,
        &queue_write_zeroes_max_entry.attr,
        &queue_nonrot_entry.attr,
        &queue_zoned_entry.attr,
        &queue_nr_zones_entry.attr,
        &queue_nomerges_entry.attr,
        &queue_rq_affinity_entry.attr,
        &queue_iostats_entry.attr,
        &queue_random_entry.attr,
        &queue_poll_entry.attr,
        &queue_wc_entry.attr,
        &queue_fua_entry.attr,
        &queue_dax_entry.attr,
        &queue_wb_lat_entry.attr,
        &queue_poll_delay_entry.attr,
        &queue_io_timeout_entry.attr,
#ifdef CONFIG_BLK_DEV_THROTTLING_LOW
        &throtl_sample_time_entry.attr,
#endif
        NULL,
};

static umode_t queue_attr_visible(struct kobject *kobj, struct attribute *attr,
                                int n)
{
        struct request_queue *q =
                container_of(kobj, struct request_queue, kobj);

        if (attr == &queue_io_timeout_entry.attr &&
                (!q->mq_ops || !q->mq_ops->timeout))
                        return 0;

        return attr->mode;
}

static struct attribute_group queue_attr_group = {
        .attrs = queue_attrs,
        .is_visible = queue_attr_visible,
};
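
/*
 * The whole list above is registered in one sysfs_create_group() call
 * from blk_register_queue(). queue_attr_visible() is the group's
 * ->is_visible() hook: returning 0 hides an attribute for this
 * particular queue, which is how io_timeout disappears on queues whose
 * driver provides no ->timeout() handler, without needing separate
 * attribute lists.
 */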

#define to_queue(atr) container_of((atr), struct queue_sysfs_entry, attr)

static ssize_t
queue_attr_show(struct kobject *kobj, struct attribute *attr, char *page)
{
        struct queue_sysfs_entry *entry = to_queue(attr);
        struct request_queue *q =
                container_of(kobj, struct request_queue, kobj);
        ssize_t res;

        if (!entry->show)
                return -EIO;
        mutex_lock(&q->sysfs_lock);
        if (blk_queue_dying(q)) {
                mutex_unlock(&q->sysfs_lock);
                return -ENOENT;
        }
        res = entry->show(q, page);
        mutex_unlock(&q->sysfs_lock);
        return res;
}

static ssize_t
queue_attr_store(struct kobject *kobj, struct attribute *attr,
                    const char *page, size_t length)
{
        struct queue_sysfs_entry *entry = to_queue(attr);
        struct request_queue *q;
        ssize_t res;

        if (!entry->store)
                return -EIO;

        q = container_of(kobj, struct request_queue, kobj);
        mutex_lock(&q->sysfs_lock);
        if (blk_queue_dying(q)) {
                mutex_unlock(&q->sysfs_lock);
                return -ENOENT;
        }
        res = entry->store(q, page, length);
        mutex_unlock(&q->sysfs_lock);
        return res;
}
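
/*
 * These two functions are the sysfs_ops glue: sysfs hands them a
 * kobject and an attribute, and they recover the request_queue and the
 * queue_sysfs_entry with container_of() before dispatching to the
 * per-attribute ->show()/->store() methods above. Both serialize on
 * q->sysfs_lock and bail out with -ENOENT on a dying queue, so the
 * handlers never race with queue teardown.
 */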

static void blk_free_queue_rcu(struct rcu_head *rcu_head)
{
        struct request_queue *q = container_of(rcu_head, struct request_queue,
                                               rcu_head);
        kmem_cache_free(blk_requestq_cachep, q);
}

/* Unconfigure the I/O scheduler and dissociate from the cgroup controller. */
static void blk_exit_queue(struct request_queue *q)
{
        /*
         * Since the I/O scheduler exit code may access cgroup information,
         * perform I/O scheduler exit before disassociating from the block
         * cgroup controller.
         */
        if (q->elevator) {
                ioc_clear_queue(q);
                __elevator_exit(q, q->elevator);
                q->elevator = NULL;
        }

        /*
         * Remove all references to @q from the block cgroup controller before
         * restoring @q->queue_lock, so that restoring this pointer cannot
         * cause e.g. blkcg_print_blkgs() to crash.
         */
        blkcg_exit_queue(q);

        /*
         * Since the cgroup code may dereference the @q->backing_dev_info
         * pointer, only decrease its reference count after having removed the
         * association with the block cgroup controller.
         */
        bdi_put(q->backing_dev_info);
}


/**
 * __blk_release_queue - release a request queue
 * @work: pointer to the release_work member of the request queue to be released
 *
 * Description:
 *     This function is called when a block device is being unregistered. The
 *     process of releasing a request queue starts with blk_cleanup_queue, which
 *     sets the appropriate flags and then calls blk_put_queue, which decrements
 *     the reference counter of the request queue. Once the reference counter
 *     of the request queue reaches zero, blk_release_queue is called to release
 *     all allocated resources of the request queue.
 */
static void __blk_release_queue(struct work_struct *work)
{
        struct request_queue *q = container_of(work, typeof(*q), release_work);

        if (test_bit(QUEUE_FLAG_POLL_STATS, &q->queue_flags))
                blk_stat_remove_callback(q, q->poll_cb);
        blk_stat_free_callback(q->poll_cb);

        blk_free_queue_stats(q->stats);

        if (queue_is_mq(q))
                cancel_delayed_work_sync(&q->requeue_work);

        blk_exit_queue(q);

        blk_queue_free_zone_bitmaps(q);

        if (queue_is_mq(q))
                blk_mq_release(q);

        blk_trace_shutdown(q);

        if (queue_is_mq(q))
                blk_mq_debugfs_unregister(q);

        bioset_exit(&q->bio_split);

        ida_simple_remove(&blk_queue_ida, q->id);
        call_rcu(&q->rcu_head, blk_free_queue_rcu);
}

static void blk_release_queue(struct kobject *kobj)
{
        struct request_queue *q =
                container_of(kobj, struct request_queue, kobj);

        INIT_WORK(&q->release_work, __blk_release_queue);
        schedule_work(&q->release_work);
}
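
/*
 * blk_release_queue() is the kobject ->release() callback and may be
 * invoked from a context where blocking is not allowed, so the actual
 * teardown is deferred to a workqueue: __blk_release_queue() is then
 * free to use sleeping primitives such as cancel_delayed_work_sync()
 * and bioset_exit(). The final kmem_cache_free() is deferred once more
 * through call_rcu(), so that lockless readers still holding an RCU
 * reference to the queue never see it freed under them.
 */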

static const struct sysfs_ops queue_sysfs_ops = {
        .show   = queue_attr_show,
        .store  = queue_attr_store,
};

struct kobj_type blk_queue_ktype = {
        .sysfs_ops      = &queue_sysfs_ops,
        .release        = blk_release_queue,
};

/**
 * blk_register_queue - register a block layer queue with sysfs
 * @disk: Disk of which the request queue should be registered with sysfs.
 */
int blk_register_queue(struct gendisk *disk)
{
        int ret;
        struct device *dev = disk_to_dev(disk);
        struct request_queue *q = disk->queue;

        if (WARN_ON(!q))
                return -ENXIO;

        WARN_ONCE(test_bit(QUEUE_FLAG_REGISTERED, &q->queue_flags),
                  "%s is registering an already registered queue\n",
                  kobject_name(&dev->kobj));
        blk_queue_flag_set(QUEUE_FLAG_REGISTERED, q);

        /*
         * SCSI probing may synchronously create and destroy a lot of
         * request_queues for non-existent devices.  Shutting down a fully
         * functional queue takes measurable wallclock time as RCU grace
         * periods are involved.  To avoid excessive latency in these
         * cases, a request_queue starts out in a degraded mode which is
         * faster to shut down and is made fully functional here as
         * request_queues for non-existent devices never get registered.
         */
        if (!blk_queue_init_done(q)) {
                blk_queue_flag_set(QUEUE_FLAG_INIT_DONE, q);
                percpu_ref_switch_to_percpu(&q->q_usage_counter);
        }

        ret = blk_trace_init_sysfs(dev);
        if (ret)
                return ret;

        /* Prevent changes through sysfs until registration is completed. */
        mutex_lock(&q->sysfs_lock);

        ret = kobject_add(&q->kobj, kobject_get(&dev->kobj), "%s", "queue");
        if (ret < 0) {
                blk_trace_remove_sysfs(dev);
                goto unlock;
        }

        ret = sysfs_create_group(&q->kobj, &queue_attr_group);
        if (ret) {
                blk_trace_remove_sysfs(dev);
                kobject_del(&q->kobj);
                kobject_put(&dev->kobj);
                goto unlock;
        }

        if (queue_is_mq(q)) {
                __blk_mq_register_dev(dev, q);
                blk_mq_debugfs_register(q);
        }

        kobject_uevent(&q->kobj, KOBJ_ADD);

        wbt_enable_default(q);

        blk_throtl_register_queue(q);

        if (q->elevator) {
                ret = elv_register_queue(q);
                if (ret) {
                        mutex_unlock(&q->sysfs_lock);
                        kobject_uevent(&q->kobj, KOBJ_REMOVE);
                        kobject_del(&q->kobj);
                        blk_trace_remove_sysfs(dev);
                        kobject_put(&dev->kobj);
                        return ret;
                }
        }
        ret = 0;
unlock:
        mutex_unlock(&q->sysfs_lock);
        return ret;
}
EXPORT_SYMBOL_GPL(blk_register_queue);
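
/*
 * Drivers rarely call blk_register_queue() directly; it is normally
 * invoked on their behalf by device_add_disk() when the gendisk is
 * added. An illustrative driver-side sequence (error handling omitted):
 *
 *      disk->queue = q;
 *      device_add_disk(parent, disk, NULL);
 *
 * after which the queue directory appears as /sys/block/<disk>/queue.
 */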

/**
 * blk_unregister_queue - counterpart of blk_register_queue()
 * @disk: Disk of which the request queue should be unregistered from sysfs.
 *
 * Note: the caller is responsible for guaranteeing that this function is called
 * after blk_register_queue() has finished.
 */
void blk_unregister_queue(struct gendisk *disk)
{
        struct request_queue *q = disk->queue;

        if (WARN_ON(!q))
                return;

        /* Return early if disk->queue was never registered. */
        if (!test_bit(QUEUE_FLAG_REGISTERED, &q->queue_flags))
                return;

        /*
         * Since sysfs_remove_dir() prevents adding new directory entries
         * before removal of existing entries starts, protect against
         * concurrent elv_iosched_store() calls.
         */
        mutex_lock(&q->sysfs_lock);

        blk_queue_flag_clear(QUEUE_FLAG_REGISTERED, q);

        /*
         * Remove the sysfs attributes before unregistering the queue data
         * structures that can be modified through sysfs.
         */
        if (queue_is_mq(q))
                blk_mq_unregister_dev(disk_to_dev(disk), q);
        mutex_unlock(&q->sysfs_lock);

        kobject_uevent(&q->kobj, KOBJ_REMOVE);
        kobject_del(&q->kobj);
        blk_trace_remove_sysfs(disk_to_dev(disk));

        mutex_lock(&q->sysfs_lock);
        if (q->elevator)
                elv_unregister_queue(q);
        mutex_unlock(&q->sysfs_lock);

        kobject_put(&disk_to_dev(disk)->kobj);
}
