TOMOYO Linux Cross Reference
Linux/block/blk-mq-sysfs.c

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/backing-dev.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/mm.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/workqueue.h>
#include <linux/smp.h>

#include <linux/blk-mq.h>
#include "blk.h"
#include "blk-mq.h"
#include "blk-mq-tag.h"

/*
 * Release callbacks: each object here embeds a kobject, and container_of()
 * recovers the owning structure once the last reference is dropped.
 */
static void blk_mq_sysfs_release(struct kobject *kobj)
{
        struct blk_mq_ctxs *ctxs = container_of(kobj, struct blk_mq_ctxs, kobj);

        /* Free the per-CPU software contexts and their container. */
        free_percpu(ctxs->queue_ctx);
        kfree(ctxs);
}

static void blk_mq_ctx_sysfs_release(struct kobject *kobj)
{
        struct blk_mq_ctx *ctx = container_of(kobj, struct blk_mq_ctx, kobj);

        /* ctx->ctxs won't be released until all ctx are freed */
        kobject_put(&ctx->ctxs->kobj);
}

static void blk_mq_hw_sysfs_release(struct kobject *kobj)
{
        struct blk_mq_hw_ctx *hctx = container_of(kobj, struct blk_mq_hw_ctx,
                                                  kobj);

        /* Blocking drivers get an SRCU domain; tear it down with the hctx. */
        if (hctx->flags & BLK_MQ_F_BLOCKING)
                cleanup_srcu_struct(hctx->srcu);
        blk_free_flush_queue(hctx->fq);
        sbitmap_free(&hctx->ctx_map);
        free_cpumask_var(hctx->cpumask);
        kfree(hctx->ctxs);
        kfree(hctx);
}

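These three release callbacks are the standard embedded-kobject idiom: container_of() recovers the owning structure from the kobject member, and the callback frees it only when the reference count reaches zero. A minimal, self-contained sketch of the same idiom; the names here are hypothetical, not blk-mq symbols:

/* Sketch of the embedded-kobject release pattern; "my_obj", "my_release"
 * and "my_ktype" are illustrative names, not part of this file. */
#include <linux/kobject.h>
#include <linux/slab.h>

struct my_obj {
        int payload;
        struct kobject kobj;            /* the kobject owns the lifetime */
};

static void my_release(struct kobject *kobj)
{
        /* Recover the container from the embedded member... */
        struct my_obj *obj = container_of(kobj, struct my_obj, kobj);

        /* ...and free it only now, when the refcount has hit zero. */
        kfree(obj);
}

static struct kobj_type my_ktype = {
        .release        = my_release,
};
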
/* Typed attribute wrappers: pair a generic sysfs attribute with show/store
 * handlers that take the specific context type. */
struct blk_mq_ctx_sysfs_entry {
        struct attribute attr;
        ssize_t (*show)(struct blk_mq_ctx *, char *);
        ssize_t (*store)(struct blk_mq_ctx *, const char *, size_t);
};

struct blk_mq_hw_ctx_sysfs_entry {
        struct attribute attr;
        ssize_t (*show)(struct blk_mq_hw_ctx *, char *);
        ssize_t (*store)(struct blk_mq_hw_ctx *, const char *, size_t);
};

/*
 * Show/store dispatchers for the software-context directory: recover the
 * typed entry from the generic attribute, then run its handler under
 * q->sysfs_lock, refusing access once the queue is marked dying.
 */
static ssize_t blk_mq_sysfs_show(struct kobject *kobj, struct attribute *attr,
                                 char *page)
{
        struct blk_mq_ctx_sysfs_entry *entry;
        struct blk_mq_ctx *ctx;
        struct request_queue *q;
        ssize_t res;

        entry = container_of(attr, struct blk_mq_ctx_sysfs_entry, attr);
        ctx = container_of(kobj, struct blk_mq_ctx, kobj);
        q = ctx->queue;

        if (!entry->show)
                return -EIO;

        res = -ENOENT;
        mutex_lock(&q->sysfs_lock);
        if (!blk_queue_dying(q))
                res = entry->show(ctx, page);
        mutex_unlock(&q->sysfs_lock);
        return res;
}

static ssize_t blk_mq_sysfs_store(struct kobject *kobj, struct attribute *attr,
                                  const char *page, size_t length)
{
        struct blk_mq_ctx_sysfs_entry *entry;
        struct blk_mq_ctx *ctx;
        struct request_queue *q;
        ssize_t res;

        entry = container_of(attr, struct blk_mq_ctx_sysfs_entry, attr);
        ctx = container_of(kobj, struct blk_mq_ctx, kobj);
        q = ctx->queue;

        if (!entry->store)
                return -EIO;

        res = -ENOENT;
        mutex_lock(&q->sysfs_lock);
        if (!blk_queue_dying(q))
                res = entry->store(ctx, page, length);
        mutex_unlock(&q->sysfs_lock);
        return res;
}

/* The hardware-context directory uses the same dispatch pattern. */
static ssize_t blk_mq_hw_sysfs_show(struct kobject *kobj,
                                    struct attribute *attr, char *page)
{
        struct blk_mq_hw_ctx_sysfs_entry *entry;
        struct blk_mq_hw_ctx *hctx;
        struct request_queue *q;
        ssize_t res;

        entry = container_of(attr, struct blk_mq_hw_ctx_sysfs_entry, attr);
        hctx = container_of(kobj, struct blk_mq_hw_ctx, kobj);
        q = hctx->queue;

        if (!entry->show)
                return -EIO;

        res = -ENOENT;
        mutex_lock(&q->sysfs_lock);
        if (!blk_queue_dying(q))
                res = entry->show(hctx, page);
        mutex_unlock(&q->sysfs_lock);
        return res;
}

static ssize_t blk_mq_hw_sysfs_store(struct kobject *kobj,
                                     struct attribute *attr, const char *page,
                                     size_t length)
{
        struct blk_mq_hw_ctx_sysfs_entry *entry;
        struct blk_mq_hw_ctx *hctx;
        struct request_queue *q;
        ssize_t res;

        entry = container_of(attr, struct blk_mq_hw_ctx_sysfs_entry, attr);
        hctx = container_of(kobj, struct blk_mq_hw_ctx, kobj);
        q = hctx->queue;

        if (!entry->store)
                return -EIO;

        res = -ENOENT;
        mutex_lock(&q->sysfs_lock);
        if (!blk_queue_dying(q))
                res = entry->store(hctx, page, length);
        mutex_unlock(&q->sysfs_lock);
        return res;
}

/* Per-hctx attribute bodies: tag counts and the CPUs this hctx serves. */
static ssize_t blk_mq_hw_sysfs_nr_tags_show(struct blk_mq_hw_ctx *hctx,
                                            char *page)
{
        return sprintf(page, "%u\n", hctx->tags->nr_tags);
}

static ssize_t blk_mq_hw_sysfs_nr_reserved_tags_show(struct blk_mq_hw_ctx *hctx,
                                                     char *page)
{
        return sprintf(page, "%u\n", hctx->tags->nr_reserved_tags);
}

static ssize_t blk_mq_hw_sysfs_cpus_show(struct blk_mq_hw_ctx *hctx, char *page)
{
        unsigned int i, first = 1;
        ssize_t ret = 0;

        /* Emit a comma-separated list, e.g. "0, 2, 4\n". */
        for_each_cpu(i, hctx->cpumask) {
                if (first)
                        ret += sprintf(ret + page, "%u", i);
                else
                        ret += sprintf(ret + page, ", %u", i);

                first = 0;
        }

        ret += sprintf(ret + page, "\n");
        return ret;
}

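These handlers back the three read-only files in each hardware-context directory; on kernels of this era they appear under /sys/block/<disk>/mq/<n>/ as nr_tags, nr_reserved_tags and cpu_list. A small userspace sketch that reads one of them; the disk name "sda" and hctx index 0 are assumptions for illustration:

/* Hypothetical userspace reader for one of the attributes above. */
#include <stdio.h>

int main(void)
{
        char buf[256];
        FILE *f = fopen("/sys/block/sda/mq/0/cpu_list", "r"); /* path assumed */

        if (!f) {
                perror("fopen");
                return 1;
        }
        /* blk_mq_hw_sysfs_cpus_show() emits e.g. "0, 1, 2, 3\n". */
        if (fgets(buf, sizeof(buf), f))
                printf("hctx 0 serves CPUs: %s", buf);
        fclose(f);
        return 0;
}
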
/* The software-context directories currently expose no attributes. */
static struct attribute *default_ctx_attrs[] = {
        NULL,
};

static struct blk_mq_hw_ctx_sysfs_entry blk_mq_hw_sysfs_nr_tags = {
        .attr = {.name = "nr_tags", .mode = 0444 },
        .show = blk_mq_hw_sysfs_nr_tags_show,
};
static struct blk_mq_hw_ctx_sysfs_entry blk_mq_hw_sysfs_nr_reserved_tags = {
        .attr = {.name = "nr_reserved_tags", .mode = 0444 },
        .show = blk_mq_hw_sysfs_nr_reserved_tags_show,
};
static struct blk_mq_hw_ctx_sysfs_entry blk_mq_hw_sysfs_cpus = {
        .attr = {.name = "cpu_list", .mode = 0444 },
        .show = blk_mq_hw_sysfs_cpus_show,
};

static struct attribute *default_hw_ctx_attrs[] = {
        &blk_mq_hw_sysfs_nr_tags.attr,
        &blk_mq_hw_sysfs_nr_reserved_tags.attr,
        &blk_mq_hw_sysfs_cpus.attr,
        NULL,
};

static const struct sysfs_ops blk_mq_sysfs_ops = {
        .show   = blk_mq_sysfs_show,
        .store  = blk_mq_sysfs_store,
};

static const struct sysfs_ops blk_mq_hw_sysfs_ops = {
        .show   = blk_mq_hw_sysfs_show,
        .store  = blk_mq_hw_sysfs_store,
};

/* Each kobj_type ties a dispatcher, a default attribute set and a release
 * callback to one kind of object: the mq container, a software ctx, or a
 * hardware ctx. */
static struct kobj_type blk_mq_ktype = {
        .sysfs_ops      = &blk_mq_sysfs_ops,
        .release        = blk_mq_sysfs_release,
};

static struct kobj_type blk_mq_ctx_ktype = {
        .sysfs_ops      = &blk_mq_sysfs_ops,
        .default_attrs  = default_ctx_attrs,
        .release        = blk_mq_ctx_sysfs_release,
};

static struct kobj_type blk_mq_hw_ktype = {
        .sysfs_ops      = &blk_mq_hw_sysfs_ops,
        .default_attrs  = default_hw_ctx_attrs,
        .release        = blk_mq_hw_sysfs_release,
};

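The kobj_type definitions are where the pieces meet: sysfs routes every read and write through sysfs_ops, kobject_add() materializes default_attrs as files, and release runs when the refcount hits zero. A minimal sketch of the attribute-to-typed-handler dispatch that blk_mq_sysfs_show() and friends implement; the names are hypothetical, not blk-mq symbols:

/* Illustrative dispatch from a generic attribute to a typed handler,
 * mirroring blk_mq_sysfs_show(); "my_entry" etc. are made-up names. */
#include <linux/kobject.h>
#include <linux/sysfs.h>

struct my_entry {
        struct attribute attr;
        ssize_t (*show)(struct kobject *, char *);
};

static ssize_t my_ops_show(struct kobject *kobj, struct attribute *attr,
                           char *page)
{
        /* The attribute passed in is embedded in a typed entry... */
        struct my_entry *entry = container_of(attr, struct my_entry, attr);

        /* ...so recover it and bounce to the per-attribute handler. */
        return entry->show ? entry->show(kobj, page) : -EIO;
}

static const struct sysfs_ops my_sysfs_ops = {
        .show   = my_ops_show,
};
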
/* Remove a hardware context and its per-CPU children from sysfs. */
static void blk_mq_unregister_hctx(struct blk_mq_hw_ctx *hctx)
{
        struct blk_mq_ctx *ctx;
        int i;

        if (!hctx->nr_ctx)
                return;

        hctx_for_each_ctx(hctx, ctx, i)
                kobject_del(&ctx->kobj);

        kobject_del(&hctx->kobj);
}

/* Publish one hctx as mq/<queue_num>/ with a cpu<N>/ child per ctx. */
static int blk_mq_register_hctx(struct blk_mq_hw_ctx *hctx)
{
        struct request_queue *q = hctx->queue;
        struct blk_mq_ctx *ctx;
        int i, ret;

        if (!hctx->nr_ctx)
                return 0;

        ret = kobject_add(&hctx->kobj, q->mq_kobj, "%u", hctx->queue_num);
        if (ret)
                return ret;

        hctx_for_each_ctx(hctx, ctx, i) {
                ret = kobject_add(&ctx->kobj, &hctx->kobj, "cpu%u", ctx->cpu);
                if (ret)
                        break;
        }

        return ret;
}

/* Tear down the whole mq/ hierarchy when the disk device goes away. */
void blk_mq_unregister_dev(struct device *dev, struct request_queue *q)
{
        struct blk_mq_hw_ctx *hctx;
        int i;

        lockdep_assert_held(&q->sysfs_lock);

        queue_for_each_hw_ctx(q, hctx, i)
                blk_mq_unregister_hctx(hctx);

        kobject_uevent(q->mq_kobj, KOBJ_REMOVE);
        kobject_del(q->mq_kobj);
        /* Drop the device reference taken in __blk_mq_register_dev(). */
        kobject_put(&dev->kobj);

        q->mq_sysfs_init_done = false;
}

void blk_mq_hctx_kobj_init(struct blk_mq_hw_ctx *hctx)
{
        kobject_init(&hctx->kobj, &blk_mq_hw_ktype);
}

void blk_mq_sysfs_deinit(struct request_queue *q)
{
        struct blk_mq_ctx *ctx;
        int cpu;

        for_each_possible_cpu(cpu) {
                ctx = per_cpu_ptr(q->queue_ctx, cpu);
                kobject_put(&ctx->kobj);
        }
        kobject_put(q->mq_kobj);
}

void blk_mq_sysfs_init(struct request_queue *q)
{
        struct blk_mq_ctx *ctx;
        int cpu;

        kobject_init(q->mq_kobj, &blk_mq_ktype);

        for_each_possible_cpu(cpu) {
                ctx = per_cpu_ptr(q->queue_ctx, cpu);

                /* Each ctx pins mq_kobj; see blk_mq_ctx_sysfs_release(). */
                kobject_get(q->mq_kobj);
                kobject_init(&ctx->kobj, &blk_mq_ctx_ktype);
        }
}

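Note the reference pairing in the two functions above: blk_mq_sysfs_init() takes one mq_kobj reference per possible CPU, and blk_mq_ctx_sysfs_release() returns one each time a ctx kobject dies, so the container freed in blk_mq_sysfs_release() cannot go away while any ctx survives. A condensed sketch of this parent-pinning pattern; the names are hypothetical:

/* Parent-pinning sketch: children hold a shared parent alive; the names
 * "shared_parent", "child_init" and "child_release" are illustrative. */
#include <linux/kobject.h>

static struct kobject *shared_parent;   /* stands in for q->mq_kobj */

static void child_init(struct kobject *child, struct kobj_type *ktype)
{
        kobject_get(shared_parent);     /* pin the parent first... */
        kobject_init(child, ktype);
}

static void child_release(struct kobject *child)  /* ktype->release */
{
        kobject_put(shared_parent);     /* ...unpin when the child dies */
}
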
/*
 * Create the mq/ directory under the disk device and register every
 * hardware context beneath it, unwinding on failure.
 */
int __blk_mq_register_dev(struct device *dev, struct request_queue *q)
{
        struct blk_mq_hw_ctx *hctx;
        int ret, i;

        WARN_ON_ONCE(!q->kobj.parent);
        lockdep_assert_held(&q->sysfs_lock);

        ret = kobject_add(q->mq_kobj, kobject_get(&dev->kobj), "%s", "mq");
        if (ret < 0)
                goto out;

        kobject_uevent(q->mq_kobj, KOBJ_ADD);

        queue_for_each_hw_ctx(q, hctx, i) {
                ret = blk_mq_register_hctx(hctx);
                if (ret)
                        goto unreg;
        }

        q->mq_sysfs_init_done = true;

out:
        return ret;

unreg:
        /* Roll back the hctxs registered so far, then the mq dir itself. */
        while (--i >= 0)
                blk_mq_unregister_hctx(q->queue_hw_ctx[i]);

        kobject_uevent(q->mq_kobj, KOBJ_REMOVE);
        kobject_del(q->mq_kobj);
        kobject_put(&dev->kobj);
        return ret;
}

int blk_mq_register_dev(struct device *dev, struct request_queue *q)
{
        int ret;

        mutex_lock(&q->sysfs_lock);
        ret = __blk_mq_register_dev(dev, q);
        mutex_unlock(&q->sysfs_lock);

        return ret;
}

/*
 * Used while the hctx<->ctx mapping is being rebuilt (e.g. when the number
 * of hardware queues changes): drop or re-create only the per-hctx
 * directories, leaving mq/ itself in place. Both are no-ops until the
 * initial registration has completed.
 */
void blk_mq_sysfs_unregister(struct request_queue *q)
{
        struct blk_mq_hw_ctx *hctx;
        int i;

        mutex_lock(&q->sysfs_lock);
        if (!q->mq_sysfs_init_done)
                goto unlock;

        queue_for_each_hw_ctx(q, hctx, i)
                blk_mq_unregister_hctx(hctx);

unlock:
        mutex_unlock(&q->sysfs_lock);
}

int blk_mq_sysfs_register(struct request_queue *q)
{
        struct blk_mq_hw_ctx *hctx;
        int i, ret = 0;

        mutex_lock(&q->sysfs_lock);
        if (!q->mq_sysfs_init_done)
                goto unlock;

        queue_for_each_hw_ctx(q, hctx, i) {
                ret = blk_mq_register_hctx(hctx);
                if (ret)
                        break;
        }

unlock:
        mutex_unlock(&q->sysfs_lock);

        return ret;
}

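For context, these entry points are driven by the block core rather than by individual drivers: on kernels of this era, blk_register_queue() registers the mq/ hierarchy when a disk is added, while blk_mq_sysfs_unregister()/blk_mq_sysfs_register() bracket hardware-queue remapping. A condensed, paraphrased sketch of that calling pattern; "example_add_disk_sysfs" is a hypothetical name, not a kernel function:

/* Paraphrased view of how the block core uses this file; illustrative only. */
static int example_add_disk_sysfs(struct device *disk_dev,
                                  struct request_queue *q)
{
        /* Adding a disk publishes /sys/block/<disk>/mq and its children. */
        int ret = blk_mq_register_dev(disk_dev, q);

        if (ret)
                return ret;

        /* Later, when hw queues are remapped, only the per-hctx dirs are
         * cycled; the mq/ directory itself stays in place. */
        blk_mq_sysfs_unregister(q);
        /* ... rebuild the hctx<->ctx mapping here ... */
        return blk_mq_sysfs_register(q);
}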
