/*
 *  Deadline i/o scheduler.
 *
 *  Copyright (C) 2002 Jens Axboe <axboe@kernel.dk>
 */
#include <linux/kernel.h>
#include <linux/fs.h>
#include <linux/blkdev.h>
#include <linux/elevator.h>
#include <linux/bio.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/compiler.h>
#include <linux/rbtree.h>

/*
 * See Documentation/block/deadline-iosched.txt
 */
static const int read_expire = HZ / 2;	/* max time before a read is submitted. */
static const int write_expire = 5 * HZ;	/* ditto for writes, these limits are SOFT! */
static const int writes_starved = 2;	/* max times reads can starve a write */
static const int fifo_batch = 16;	/* # of sequential requests treated as one
					   by the above parameters. For throughput. */

struct deadline_data {
	/*
	 * run time data
	 */

	/*
	 * requests are present on both sort_list (sector-sorted) and
	 * fifo_list (deadline-ordered)
	 */
	struct rb_root sort_list[2];
	struct list_head fifo_list[2];

	/*
	 * next request in sector-sorted order; read, write or both may be NULL
	 */
	struct request *next_rq[2];
	unsigned int batching;		/* number of sequential requests made */
	unsigned int starved;		/* times reads have starved writes */

	/*
	 * settings that change how the i/o scheduler behaves
	 */
	int fifo_expire[2];
	int fifo_batch;
	int writes_starved;
	int front_merges;
};

static void deadline_move_request(struct deadline_data *, struct request *);

static inline struct rb_root *
deadline_rb_root(struct deadline_data *dd, struct request *rq)
{
	return &dd->sort_list[rq_data_dir(rq)];
}

/*
 * get the request after `rq' in sector-sorted order
 */
static inline struct request *
deadline_latter_request(struct request *rq)
{
	struct rb_node *node = rb_next(&rq->rb_node);

	if (node)
		return rb_entry_rq(node);

	return NULL;
}

static void
deadline_add_rq_rb(struct deadline_data *dd, struct request *rq)
{
	struct rb_root *root = deadline_rb_root(dd, rq);

	elv_rb_add(root, rq);
}

static inline void
deadline_del_rq_rb(struct deadline_data *dd, struct request *rq)
{
	const int data_dir = rq_data_dir(rq);

	if (dd->next_rq[data_dir] == rq)
		dd->next_rq[data_dir] = deadline_latter_request(rq);

	elv_rb_del(deadline_rb_root(dd, rq), rq);
}

/*
 * add rq to rbtree and fifo
 */
static void
deadline_add_request(struct request_queue *q, struct request *rq)
{
	struct deadline_data *dd = q->elevator->elevator_data;
	const int data_dir = rq_data_dir(rq);

	deadline_add_rq_rb(dd, rq);

	/*
	 * set expire time and add to fifo list
	 */
	rq->fifo_time = jiffies + dd->fifo_expire[data_dir];
	list_add_tail(&rq->queuelist, &dd->fifo_list[data_dir]);
}
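/*
 * Illustrative sketch (editor's addition, not part of the original file):
 * a queued request lives on two structures at once.  For a hypothetical
 * write request "rq", deadline_add_request() above effectively does:
 *
 *	elv_rb_add(&dd->sort_list[WRITE], rq);		sector-sorted rbtree,
 *							used for batching
 *	rq->fifo_time = jiffies + dd->fifo_expire[WRITE];
 *	list_add_tail(&rq->queuelist,			insertion-ordered FIFO,
 *		      &dd->fifo_list[WRITE]);		used for expiry checks
 *
 * Because fifo_expire is a constant per direction, insertion order equals
 * expiry order, so the head of each fifo_list is always the request with
 * the earliest deadline.
 */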
/*
 * remove rq from rbtree and fifo.
 */
static void deadline_remove_request(struct request_queue *q, struct request *rq)
{
	struct deadline_data *dd = q->elevator->elevator_data;

	rq_fifo_clear(rq);
	deadline_del_rq_rb(dd, rq);
}

static int
deadline_merge(struct request_queue *q, struct request **req, struct bio *bio)
{
	struct deadline_data *dd = q->elevator->elevator_data;
	struct request *__rq;
	int ret;

	/*
	 * check for front merge
	 */
	if (dd->front_merges) {
		sector_t sector = bio_end_sector(bio);

		__rq = elv_rb_find(&dd->sort_list[bio_data_dir(bio)], sector);
		if (__rq) {
			BUG_ON(sector != blk_rq_pos(__rq));

			if (elv_rq_merge_ok(__rq, bio)) {
				ret = ELEVATOR_FRONT_MERGE;
				goto out;
			}
		}
	}

	return ELEVATOR_NO_MERGE;
out:
	*req = __rq;
	return ret;
}

static void deadline_merged_request(struct request_queue *q,
				    struct request *req, int type)
{
	struct deadline_data *dd = q->elevator->elevator_data;

	/*
	 * if the merge was a front merge, we need to reposition request
	 */
	if (type == ELEVATOR_FRONT_MERGE) {
		elv_rb_del(deadline_rb_root(dd, req), req);
		deadline_add_rq_rb(dd, req);
	}
}

static void
deadline_merged_requests(struct request_queue *q, struct request *req,
			 struct request *next)
{
	/*
	 * if next expires before rq, assign its expire time to rq
	 * and move into next position (next will be deleted) in fifo
	 */
	if (!list_empty(&req->queuelist) && !list_empty(&next->queuelist)) {
		if (time_before(next->fifo_time, req->fifo_time)) {
			list_move(&req->queuelist, &next->queuelist);
			req->fifo_time = next->fifo_time;
		}
	}

	/*
	 * kill knowledge of next, this one is a goner
	 */
	deadline_remove_request(q, next);
}

/*
 * move request from sort list to dispatch queue.
 */
static inline void
deadline_move_to_dispatch(struct deadline_data *dd, struct request *rq)
{
	struct request_queue *q = rq->q;

	deadline_remove_request(q, rq);
	elv_dispatch_add_tail(q, rq);
}

/*
 * move an entry to dispatch queue
 */
static void
deadline_move_request(struct deadline_data *dd, struct request *rq)
{
	const int data_dir = rq_data_dir(rq);

	dd->next_rq[READ] = NULL;
	dd->next_rq[WRITE] = NULL;
	dd->next_rq[data_dir] = deadline_latter_request(rq);

	/*
	 * take it off the sort and fifo list, move
	 * to dispatch queue
	 */
	deadline_move_to_dispatch(dd, rq);
}
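/*
 * Illustrative note (editor's addition): a front merge candidate is found
 * by looking up bio_end_sector(bio) in the sector-sorted rbtree.  If a
 * hypothetical bio covers sectors 100-107 and a queued request starts at
 * sector 108, then
 *
 *	elv_rb_find(&dd->sort_list[dir], 108)
 *
 * returns that request and the bio can be prepended to it, after which
 * deadline_merged_request() repositions the request in the rbtree because
 * its start sector has changed.
 */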
/*
 * deadline_check_fifo returns 0 if there are no expired requests on the fifo,
 * 1 otherwise. Requires !list_empty(&dd->fifo_list[ddir])
 */
static inline int deadline_check_fifo(struct deadline_data *dd, int ddir)
{
	struct request *rq = rq_entry_fifo(dd->fifo_list[ddir].next);

	/*
	 * rq is expired!
	 */
	if (time_after_eq(jiffies, rq->fifo_time))
		return 1;

	return 0;
}

/*
 * deadline_dispatch_requests selects the best request according to
 * read/write expire, fifo_batch, etc
 */
static int deadline_dispatch_requests(struct request_queue *q, int force)
{
	struct deadline_data *dd = q->elevator->elevator_data;
	const int reads = !list_empty(&dd->fifo_list[READ]);
	const int writes = !list_empty(&dd->fifo_list[WRITE]);
	struct request *rq;
	int data_dir;

	/*
	 * batches are currently reads XOR writes
	 */
	if (dd->next_rq[WRITE])
		rq = dd->next_rq[WRITE];
	else
		rq = dd->next_rq[READ];

	if (rq && dd->batching < dd->fifo_batch)
		/* we have a next request and are still entitled to batch */
		goto dispatch_request;

	/*
	 * at this point we are not running a batch. select the appropriate
	 * data direction (read / write)
	 */

	if (reads) {
		BUG_ON(RB_EMPTY_ROOT(&dd->sort_list[READ]));

		if (writes && (dd->starved++ >= dd->writes_starved))
			goto dispatch_writes;

		data_dir = READ;

		goto dispatch_find_request;
	}

	/*
	 * there are either no reads or writes have been starved
	 */

	if (writes) {
dispatch_writes:
		BUG_ON(RB_EMPTY_ROOT(&dd->sort_list[WRITE]));

		dd->starved = 0;

		data_dir = WRITE;

		goto dispatch_find_request;
	}

	return 0;

dispatch_find_request:
	/*
	 * we are not running a batch, find best request for selected data_dir
	 */
	if (deadline_check_fifo(dd, data_dir) || !dd->next_rq[data_dir]) {
		/*
		 * A deadline has expired, the last request was in the other
		 * direction, or we have run out of higher-sectored requests.
		 * Start again from the request with the earliest expiry time.
		 */
		rq = rq_entry_fifo(dd->fifo_list[data_dir].next);
	} else {
		/*
		 * The last req was the same dir and we have a next request in
		 * sort order. No expired requests so continue on from here.
		 */
		rq = dd->next_rq[data_dir];
	}

	dd->batching = 0;

dispatch_request:
	/*
	 * rq is the selected appropriate request.
	 */
	dd->batching++;
	deadline_move_request(dd, rq);

	return 1;
}

static void deadline_exit_queue(struct elevator_queue *e)
{
	struct deadline_data *dd = e->elevator_data;

	BUG_ON(!list_empty(&dd->fifo_list[READ]));
	BUG_ON(!list_empty(&dd->fifo_list[WRITE]));

	kfree(dd);
}
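/*
 * Illustrative summary (editor's addition): deadline_dispatch_requests()
 * above picks the next request in this priority order:
 *
 *	1. If a batch is in progress (batching < fifo_batch) and the next
 *	   request in sector order is cached in next_rq[], keep going.
 *	2. Otherwise pick a direction: reads win, unless writes have
 *	   already been passed over writes_starved times in a row.
 *	3. Within that direction, start from the fifo head if its deadline
 *	   has expired (or no successor is cached); otherwise continue in
 *	   sector order from next_rq[].
 *
 * The deadlines are soft: an expired request only gets priority once the
 * current batch ends; it never preempts a running batch.
 */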
/*
 * initialize elevator private data (deadline_data).
 */
static int deadline_init_queue(struct request_queue *q, struct elevator_type *e)
{
	struct deadline_data *dd;
	struct elevator_queue *eq;

	eq = elevator_alloc(q, e);
	if (!eq)
		return -ENOMEM;

	dd = kzalloc_node(sizeof(*dd), GFP_KERNEL, q->node);
	if (!dd) {
		kobject_put(&eq->kobj);
		return -ENOMEM;
	}
	eq->elevator_data = dd;

	INIT_LIST_HEAD(&dd->fifo_list[READ]);
	INIT_LIST_HEAD(&dd->fifo_list[WRITE]);
	dd->sort_list[READ] = RB_ROOT;
	dd->sort_list[WRITE] = RB_ROOT;
	dd->fifo_expire[READ] = read_expire;
	dd->fifo_expire[WRITE] = write_expire;
	dd->writes_starved = writes_starved;
	dd->front_merges = 1;
	dd->fifo_batch = fifo_batch;

	spin_lock_irq(q->queue_lock);
	q->elevator = eq;
	spin_unlock_irq(q->queue_lock);
	return 0;
}

/*
 * sysfs parts below
 */

static ssize_t
deadline_var_show(int var, char *page)
{
	return sprintf(page, "%d\n", var);
}

static ssize_t
deadline_var_store(int *var, const char *page, size_t count)
{
	char *p = (char *) page;

	*var = simple_strtol(p, &p, 10);
	return count;
}

#define SHOW_FUNCTION(__FUNC, __VAR, __CONV) \
static ssize_t __FUNC(struct elevator_queue *e, char *page) \
{ \
	struct deadline_data *dd = e->elevator_data; \
	int __data = __VAR; \
	if (__CONV) \
		__data = jiffies_to_msecs(__data); \
	return deadline_var_show(__data, (page)); \
}
SHOW_FUNCTION(deadline_read_expire_show, dd->fifo_expire[READ], 1);
SHOW_FUNCTION(deadline_write_expire_show, dd->fifo_expire[WRITE], 1);
SHOW_FUNCTION(deadline_writes_starved_show, dd->writes_starved, 0);
SHOW_FUNCTION(deadline_front_merges_show, dd->front_merges, 0);
SHOW_FUNCTION(deadline_fifo_batch_show, dd->fifo_batch, 0);
#undef SHOW_FUNCTION

#define STORE_FUNCTION(__FUNC, __PTR, MIN, MAX, __CONV) \
static ssize_t __FUNC(struct elevator_queue *e, const char *page, size_t count) \
{ \
	struct deadline_data *dd = e->elevator_data; \
	int __data; \
	int ret = deadline_var_store(&__data, (page), count); \
	if (__data < (MIN)) \
		__data = (MIN); \
	else if (__data > (MAX)) \
		__data = (MAX); \
	if (__CONV) \
		*(__PTR) = msecs_to_jiffies(__data); \
	else \
		*(__PTR) = __data; \
	return ret; \
}
STORE_FUNCTION(deadline_read_expire_store, &dd->fifo_expire[READ], 0, INT_MAX, 1);
STORE_FUNCTION(deadline_write_expire_store, &dd->fifo_expire[WRITE], 0, INT_MAX, 1);
STORE_FUNCTION(deadline_writes_starved_store, &dd->writes_starved, INT_MIN, INT_MAX, 0);
STORE_FUNCTION(deadline_front_merges_store, &dd->front_merges, 0, 1, 0);
STORE_FUNCTION(deadline_fifo_batch_store, &dd->fifo_batch, 0, INT_MAX, 0);
#undef STORE_FUNCTION

#define DD_ATTR(name) \
	__ATTR(name, S_IRUGO|S_IWUSR, deadline_##name##_show, \
				      deadline_##name##_store)

static struct elv_fs_entry deadline_attrs[] = {
	DD_ATTR(read_expire),
	DD_ATTR(write_expire),
	DD_ATTR(writes_starved),
	DD_ATTR(front_merges),
	DD_ATTR(fifo_batch),
	__ATTR_NULL
};
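/*
 * Usage sketch (editor's addition): with deadline active on a disk, the
 * attributes above appear under the queue's iosched directory, e.g. for
 * a hypothetical device sda:
 *
 *	# cat /sys/block/sda/queue/iosched/read_expire
 *	500
 *	# echo 250 > /sys/block/sda/queue/iosched/read_expire
 *
 * read_expire and write_expire are shown and stored in milliseconds
 * (__CONV = 1 converts to and from jiffies); the other three tunables
 * are plain integers.
 */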
static struct elevator_type iosched_deadline = {
	.ops = {
		.elevator_merge_fn =		deadline_merge,
		.elevator_merged_fn =		deadline_merged_request,
		.elevator_merge_req_fn =	deadline_merged_requests,
		.elevator_dispatch_fn =		deadline_dispatch_requests,
		.elevator_add_req_fn =		deadline_add_request,
		.elevator_former_req_fn =	elv_rb_former_request,
		.elevator_latter_req_fn =	elv_rb_latter_request,
		.elevator_init_fn =		deadline_init_queue,
		.elevator_exit_fn =		deadline_exit_queue,
	},

	.elevator_attrs = deadline_attrs,
	.elevator_name = "deadline",
	.elevator_owner = THIS_MODULE,
};

static int __init deadline_init(void)
{
	return elv_register(&iosched_deadline);
}

static void __exit deadline_exit(void)
{
	elv_unregister(&iosched_deadline);
}

module_init(deadline_init);
module_exit(deadline_exit);

MODULE_AUTHOR("Jens Axboe");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("deadline IO scheduler");
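/*
 * Usage sketch (editor's addition): once registered via elv_register(),
 * the scheduler can be selected per device at runtime on kernels of this
 * era (exact listing varies by configuration):
 *
 *	# echo deadline > /sys/block/sda/queue/scheduler
 *	# cat /sys/block/sda/queue/scheduler
 *	noop [deadline] cfq
 *
 * or made the system-wide default with the "elevator=deadline" boot
 * parameter.
 */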