1 /* 2 * Copyright (C) 1992, 1998-2006 Linus Torvalds, Ingo Molnar 3 * Copyright (C) 2005-2006, Thomas Gleixner, Russell King 4 * 5 * This file contains the interrupt descriptor management code 6 * 7 * Detailed information is available in Documentation/DocBook/genericirq 8 * 9 */ 10 #include <linux/irq.h> 11 #include <linux/slab.h> 12 #include <linux/export.h> 13 #include <linux/interrupt.h> 14 #include <linux/kernel_stat.h> 15 #include <linux/radix-tree.h> 16 #include <linux/bitmap.h> 17 #include <linux/irqdomain.h> 18 #include <linux/sysfs.h> 19 20 #include "internals.h" 21 22 /* 23 * lockdep: we want to handle all irq_desc locks as a single lock-class: 24 */ 25 static struct lock_class_key irq_desc_lock_class; 26 27 #if defined(CONFIG_SMP) 28 static int __init irq_affinity_setup(char *str) 29 { 30 zalloc_cpumask_var(&irq_default_affinity, GFP_NOWAIT); 31 cpulist_parse(str, irq_default_affinity); 32 /* 33 * Set at least the boot cpu. We don't want to end up with 34 * bugreports caused by random comandline masks 35 */ 36 cpumask_set_cpu(smp_processor_id(), irq_default_affinity); 37 return 1; 38 } 39 __setup("irqaffinity=", irq_affinity_setup); 40 41 static void __init init_irq_default_affinity(void) 42 { 43 #ifdef CONFIG_CPUMASK_OFFSTACK 44 if (!irq_default_affinity) 45 zalloc_cpumask_var(&irq_default_affinity, GFP_NOWAIT); 46 #endif 47 if (cpumask_empty(irq_default_affinity)) 48 cpumask_setall(irq_default_affinity); 49 } 50 #else 51 static void __init init_irq_default_affinity(void) 52 { 53 } 54 #endif 55 56 #ifdef CONFIG_SMP 57 static int alloc_masks(struct irq_desc *desc, gfp_t gfp, int node) 58 { 59 if (!zalloc_cpumask_var_node(&desc->irq_common_data.affinity, 60 gfp, node)) 61 return -ENOMEM; 62 63 #ifdef CONFIG_GENERIC_PENDING_IRQ 64 if (!zalloc_cpumask_var_node(&desc->pending_mask, gfp, node)) { 65 free_cpumask_var(desc->irq_common_data.affinity); 66 return -ENOMEM; 67 } 68 #endif 69 return 0; 70 } 71 72 static void desc_smp_init(struct irq_desc *desc, int node, 73 const struct cpumask *affinity) 74 { 75 if (!affinity) 76 affinity = irq_default_affinity; 77 cpumask_copy(desc->irq_common_data.affinity, affinity); 78 79 #ifdef CONFIG_GENERIC_PENDING_IRQ 80 cpumask_clear(desc->pending_mask); 81 #endif 82 #ifdef CONFIG_NUMA 83 desc->irq_common_data.node = node; 84 #endif 85 } 86 87 #else 88 static inline int 89 alloc_masks(struct irq_desc *desc, gfp_t gfp, int node) { return 0; } 90 static inline void 91 desc_smp_init(struct irq_desc *desc, int node, const struct cpumask *affinity) { } 92 #endif 93 94 static void desc_set_defaults(unsigned int irq, struct irq_desc *desc, int node, 95 const struct cpumask *affinity, struct module *owner) 96 { 97 int cpu; 98 99 desc->irq_common_data.handler_data = NULL; 100 desc->irq_common_data.msi_desc = NULL; 101 102 desc->irq_data.common = &desc->irq_common_data; 103 desc->irq_data.irq = irq; 104 desc->irq_data.chip = &no_irq_chip; 105 desc->irq_data.chip_data = NULL; 106 irq_settings_clr_and_set(desc, ~0, _IRQ_DEFAULT_INIT_FLAGS); 107 irqd_set(&desc->irq_data, IRQD_IRQ_DISABLED); 108 desc->handle_irq = handle_bad_irq; 109 desc->depth = 1; 110 desc->irq_count = 0; 111 desc->irqs_unhandled = 0; 112 desc->tot_count = 0; 113 desc->name = NULL; 114 desc->owner = owner; 115 for_each_possible_cpu(cpu) 116 *per_cpu_ptr(desc->kstat_irqs, cpu) = 0; 117 desc_smp_init(desc, node, affinity); 118 } 119 120 int nr_irqs = NR_IRQS; 121 EXPORT_SYMBOL_GPL(nr_irqs); 122 123 static DEFINE_MUTEX(sparse_irq_lock); 124 static DECLARE_BITMAP(allocated_irqs, IRQ_BITMAP_BITS); 125 126 #ifdef CONFIG_SPARSE_IRQ 127 128 static void irq_kobj_release(struct kobject *kobj); 129 130 #ifdef CONFIG_SYSFS 131 static struct kobject *irq_kobj_base; 132 133 #define IRQ_ATTR_RO(_name) \ 134 static struct kobj_attribute _name##_attr = __ATTR_RO(_name) 135 136 static ssize_t per_cpu_count_show(struct kobject *kobj, 137 struct kobj_attribute *attr, char *buf) 138 { 139 struct irq_desc *desc = container_of(kobj, struct irq_desc, kobj); 140 int cpu, irq = desc->irq_data.irq; 141 ssize_t ret = 0; 142 char *p = ""; 143 144 for_each_possible_cpu(cpu) { 145 unsigned int c = kstat_irqs_cpu(irq, cpu); 146 147 ret += scnprintf(buf + ret, PAGE_SIZE - ret, "%s%u", p, c); 148 p = ","; 149 } 150 151 ret += scnprintf(buf + ret, PAGE_SIZE - ret, "\n"); 152 return ret; 153 } 154 IRQ_ATTR_RO(per_cpu_count); 155 156 static ssize_t chip_name_show(struct kobject *kobj, 157 struct kobj_attribute *attr, char *buf) 158 { 159 struct irq_desc *desc = container_of(kobj, struct irq_desc, kobj); 160 ssize_t ret = 0; 161 162 raw_spin_lock_irq(&desc->lock); 163 if (desc->irq_data.chip && desc->irq_data.chip->name) { 164 ret = scnprintf(buf, PAGE_SIZE, "%s\n", 165 desc->irq_data.chip->name); 166 } 167 raw_spin_unlock_irq(&desc->lock); 168 169 return ret; 170 } 171 IRQ_ATTR_RO(chip_name); 172 173 static ssize_t hwirq_show(struct kobject *kobj, 174 struct kobj_attribute *attr, char *buf) 175 { 176 struct irq_desc *desc = container_of(kobj, struct irq_desc, kobj); 177 ssize_t ret = 0; 178 179 raw_spin_lock_irq(&desc->lock); 180 if (desc->irq_data.domain) 181 ret = sprintf(buf, "%d\n", (int)desc->irq_data.hwirq); 182 raw_spin_unlock_irq(&desc->lock); 183 184 return ret; 185 } 186 IRQ_ATTR_RO(hwirq); 187 188 static ssize_t type_show(struct kobject *kobj, 189 struct kobj_attribute *attr, char *buf) 190 { 191 struct irq_desc *desc = container_of(kobj, struct irq_desc, kobj); 192 ssize_t ret = 0; 193 194 raw_spin_lock_irq(&desc->lock); 195 ret = sprintf(buf, "%s\n", 196 irqd_is_level_type(&desc->irq_data) ? "level" : "edge"); 197 raw_spin_unlock_irq(&desc->lock); 198 199 return ret; 200 201 } 202 IRQ_ATTR_RO(type); 203 204 static ssize_t name_show(struct kobject *kobj, 205 struct kobj_attribute *attr, char *buf) 206 { 207 struct irq_desc *desc = container_of(kobj, struct irq_desc, kobj); 208 ssize_t ret = 0; 209 210 raw_spin_lock_irq(&desc->lock); 211 if (desc->name) 212 ret = scnprintf(buf, PAGE_SIZE, "%s\n", desc->name); 213 raw_spin_unlock_irq(&desc->lock); 214 215 return ret; 216 } 217 IRQ_ATTR_RO(name); 218 219 static ssize_t actions_show(struct kobject *kobj, 220 struct kobj_attribute *attr, char *buf) 221 { 222 struct irq_desc *desc = container_of(kobj, struct irq_desc, kobj); 223 struct irqaction *action; 224 ssize_t ret = 0; 225 char *p = ""; 226 227 raw_spin_lock_irq(&desc->lock); 228 for (action = desc->action; action != NULL; action = action->next) { 229 ret += scnprintf(buf + ret, PAGE_SIZE - ret, "%s%s", 230 p, action->name); 231 p = ","; 232 } 233 raw_spin_unlock_irq(&desc->lock); 234 235 if (ret) 236 ret += scnprintf(buf + ret, PAGE_SIZE - ret, "\n"); 237 238 return ret; 239 } 240 IRQ_ATTR_RO(actions); 241 242 static struct attribute *irq_attrs[] = { 243 &per_cpu_count_attr.attr, 244 &chip_name_attr.attr, 245 &hwirq_attr.attr, 246 &type_attr.attr, 247 &name_attr.attr, 248 &actions_attr.attr, 249 NULL 250 }; 251 252 static struct kobj_type irq_kobj_type = { 253 .release = irq_kobj_release, 254 .sysfs_ops = &kobj_sysfs_ops, 255 .default_attrs = irq_attrs, 256 }; 257 258 static void irq_sysfs_add(int irq, struct irq_desc *desc) 259 { 260 if (irq_kobj_base) { 261 /* 262 * Continue even in case of failure as this is nothing 263 * crucial. 264 */ 265 if (kobject_add(&desc->kobj, irq_kobj_base, "%d", irq)) 266 pr_warn("Failed to add kobject for irq %d\n", irq); 267 } 268 } 269 270 static void irq_sysfs_del(struct irq_desc *desc) 271 { 272 /* 273 * If irq_sysfs_init() has not yet been invoked (early boot), then 274 * irq_kobj_base is NULL and the descriptor was never added. 275 * kobject_del() complains about a object with no parent, so make 276 * it conditional. 277 */ 278 if (irq_kobj_base) 279 kobject_del(&desc->kobj); 280 } 281 282 static int __init irq_sysfs_init(void) 283 { 284 struct irq_desc *desc; 285 int irq; 286 287 /* Prevent concurrent irq alloc/free */ 288 irq_lock_sparse(); 289 290 irq_kobj_base = kobject_create_and_add("irq", kernel_kobj); 291 if (!irq_kobj_base) { 292 irq_unlock_sparse(); 293 return -ENOMEM; 294 } 295 296 /* Add the already allocated interrupts */ 297 for_each_irq_desc(irq, desc) 298 irq_sysfs_add(irq, desc); 299 irq_unlock_sparse(); 300 301 return 0; 302 } 303 postcore_initcall(irq_sysfs_init); 304 305 #else /* !CONFIG_SYSFS */ 306 307 static struct kobj_type irq_kobj_type = { 308 .release = irq_kobj_release, 309 }; 310 311 static void irq_sysfs_add(int irq, struct irq_desc *desc) {} 312 static void irq_sysfs_del(struct irq_desc *desc) {} 313 314 #endif /* CONFIG_SYSFS */ 315 316 static RADIX_TREE(irq_desc_tree, GFP_KERNEL); 317 318 static void irq_insert_desc(unsigned int irq, struct irq_desc *desc) 319 { 320 radix_tree_insert(&irq_desc_tree, irq, desc); 321 } 322 323 struct irq_desc *irq_to_desc(unsigned int irq) 324 { 325 return radix_tree_lookup(&irq_desc_tree, irq); 326 } 327 EXPORT_SYMBOL(irq_to_desc); 328 329 static void delete_irq_desc(unsigned int irq) 330 { 331 radix_tree_delete(&irq_desc_tree, irq); 332 } 333 334 #ifdef CONFIG_SMP 335 static void free_masks(struct irq_desc *desc) 336 { 337 #ifdef CONFIG_GENERIC_PENDING_IRQ 338 free_cpumask_var(desc->pending_mask); 339 #endif 340 free_cpumask_var(desc->irq_common_data.affinity); 341 } 342 #else 343 static inline void free_masks(struct irq_desc *desc) { } 344 #endif 345 346 void irq_lock_sparse(void) 347 { 348 mutex_lock(&sparse_irq_lock); 349 } 350 351 void irq_unlock_sparse(void) 352 { 353 mutex_unlock(&sparse_irq_lock); 354 } 355 356 static struct irq_desc *alloc_desc(int irq, int node, unsigned int flags, 357 const struct cpumask *affinity, 358 struct module *owner) 359 { 360 struct irq_desc *desc; 361 gfp_t gfp = GFP_KERNEL; 362 363 desc = kzalloc_node(sizeof(*desc), gfp, node); 364 if (!desc) 365 return NULL; 366 /* allocate based on nr_cpu_ids */ 367 desc->kstat_irqs = alloc_percpu(unsigned int); 368 if (!desc->kstat_irqs) 369 goto err_desc; 370 371 if (alloc_masks(desc, gfp, node)) 372 goto err_kstat; 373 374 raw_spin_lock_init(&desc->lock); 375 lockdep_set_class(&desc->lock, &irq_desc_lock_class); 376 init_rcu_head(&desc->rcu); 377 378 desc_set_defaults(irq, desc, node, affinity, owner); 379 irqd_set(&desc->irq_data, flags); 380 kobject_init(&desc->kobj, &irq_kobj_type); 381 382 return desc; 383 384 err_kstat: 385 free_percpu(desc->kstat_irqs); 386 err_desc: 387 kfree(desc); 388 return NULL; 389 } 390 391 static void irq_kobj_release(struct kobject *kobj) 392 { 393 struct irq_desc *desc = container_of(kobj, struct irq_desc, kobj); 394 395 free_masks(desc); 396 free_percpu(desc->kstat_irqs); 397 kfree(desc); 398 } 399 400 static void delayed_free_desc(struct rcu_head *rhp) 401 { 402 struct irq_desc *desc = container_of(rhp, struct irq_desc, rcu); 403 404 kobject_put(&desc->kobj); 405 } 406 407 static void free_desc(unsigned int irq) 408 { 409 struct irq_desc *desc = irq_to_desc(irq); 410 411 unregister_irq_proc(irq, desc); 412 413 /* 414 * sparse_irq_lock protects also show_interrupts() and 415 * kstat_irq_usr(). Once we deleted the descriptor from the 416 * sparse tree we can free it. Access in proc will fail to 417 * lookup the descriptor. 418 * 419 * The sysfs entry must be serialized against a concurrent 420 * irq_sysfs_init() as well. 421 */ 422 irq_sysfs_del(desc); 423 delete_irq_desc(irq); 424 425 /* 426 * We free the descriptor, masks and stat fields via RCU. That 427 * allows demultiplex interrupts to do rcu based management of 428 * the child interrupts. 429 */ 430 call_rcu(&desc->rcu, delayed_free_desc); 431 } 432 433 static int alloc_descs(unsigned int start, unsigned int cnt, int node, 434 const struct cpumask *affinity, struct module *owner) 435 { 436 const struct cpumask *mask = NULL; 437 struct irq_desc *desc; 438 unsigned int flags; 439 int i; 440 441 /* Validate affinity mask(s) */ 442 if (affinity) { 443 for (i = 0, mask = affinity; i < cnt; i++, mask++) { 444 if (cpumask_empty(mask)) 445 return -EINVAL; 446 } 447 } 448 449 flags = affinity ? IRQD_AFFINITY_MANAGED : 0; 450 mask = NULL; 451 452 for (i = 0; i < cnt; i++) { 453 if (affinity) { 454 node = cpu_to_node(cpumask_first(affinity)); 455 mask = affinity; 456 affinity++; 457 } 458 desc = alloc_desc(start + i, node, flags, mask, owner); 459 if (!desc) 460 goto err; 461 irq_insert_desc(start + i, desc); 462 irq_sysfs_add(start + i, desc); 463 } 464 bitmap_set(allocated_irqs, start, cnt); 465 return start; 466 467 err: 468 for (i--; i >= 0; i--) 469 free_desc(start + i); 470 return -ENOMEM; 471 } 472 473 static int irq_expand_nr_irqs(unsigned int nr) 474 { 475 if (nr > IRQ_BITMAP_BITS) 476 return -ENOMEM; 477 nr_irqs = nr; 478 return 0; 479 } 480 481 int __init early_irq_init(void) 482 { 483 int i, initcnt, node = first_online_node; 484 struct irq_desc *desc; 485 486 init_irq_default_affinity(); 487 488 /* Let arch update nr_irqs and return the nr of preallocated irqs */ 489 initcnt = arch_probe_nr_irqs(); 490 printk(KERN_INFO "NR_IRQS:%d nr_irqs:%d %d\n", NR_IRQS, nr_irqs, initcnt); 491 492 if (WARN_ON(nr_irqs > IRQ_BITMAP_BITS)) 493 nr_irqs = IRQ_BITMAP_BITS; 494 495 if (WARN_ON(initcnt > IRQ_BITMAP_BITS)) 496 initcnt = IRQ_BITMAP_BITS; 497 498 if (initcnt > nr_irqs) 499 nr_irqs = initcnt; 500 501 for (i = 0; i < initcnt; i++) { 502 desc = alloc_desc(i, node, 0, NULL, NULL); 503 set_bit(i, allocated_irqs); 504 irq_insert_desc(i, desc); 505 } 506 return arch_early_irq_init(); 507 } 508 509 #else /* !CONFIG_SPARSE_IRQ */ 510 511 struct irq_desc irq_desc[NR_IRQS] __cacheline_aligned_in_smp = { 512 [0 ... NR_IRQS-1] = { 513 .handle_irq = handle_bad_irq, 514 .depth = 1, 515 .lock = __RAW_SPIN_LOCK_UNLOCKED(irq_desc->lock), 516 } 517 }; 518 519 int __init early_irq_init(void) 520 { 521 int count, i, node = first_online_node; 522 struct irq_desc *desc; 523 524 init_irq_default_affinity(); 525 526 printk(KERN_INFO "NR_IRQS:%d\n", NR_IRQS); 527 528 desc = irq_desc; 529 count = ARRAY_SIZE(irq_desc); 530 531 for (i = 0; i < count; i++) { 532 desc[i].kstat_irqs = alloc_percpu(unsigned int); 533 alloc_masks(&desc[i], GFP_KERNEL, node); 534 raw_spin_lock_init(&desc[i].lock); 535 lockdep_set_class(&desc[i].lock, &irq_desc_lock_class); 536 desc_set_defaults(i, &desc[i], node, NULL, NULL); 537 } 538 return arch_early_irq_init(); 539 } 540 541 struct irq_desc *irq_to_desc(unsigned int irq) 542 { 543 return (irq < NR_IRQS) ? irq_desc + irq : NULL; 544 } 545 EXPORT_SYMBOL(irq_to_desc); 546 547 static void free_desc(unsigned int irq) 548 { 549 struct irq_desc *desc = irq_to_desc(irq); 550 unsigned long flags; 551 552 raw_spin_lock_irqsave(&desc->lock, flags); 553 desc_set_defaults(irq, desc, irq_desc_get_node(desc), NULL, NULL); 554 raw_spin_unlock_irqrestore(&desc->lock, flags); 555 } 556 557 static inline int alloc_descs(unsigned int start, unsigned int cnt, int node, 558 const struct cpumask *affinity, 559 struct module *owner) 560 { 561 u32 i; 562 563 for (i = 0; i < cnt; i++) { 564 struct irq_desc *desc = irq_to_desc(start + i); 565 566 desc->owner = owner; 567 } 568 bitmap_set(allocated_irqs, start, cnt); 569 return start; 570 } 571 572 static int irq_expand_nr_irqs(unsigned int nr) 573 { 574 return -ENOMEM; 575 } 576 577 void irq_mark_irq(unsigned int irq) 578 { 579 mutex_lock(&sparse_irq_lock); 580 bitmap_set(allocated_irqs, irq, 1); 581 mutex_unlock(&sparse_irq_lock); 582 } 583 584 #ifdef CONFIG_GENERIC_IRQ_LEGACY 585 void irq_init_desc(unsigned int irq) 586 { 587 free_desc(irq); 588 } 589 #endif 590 591 #endif /* !CONFIG_SPARSE_IRQ */ 592 593 /** 594 * generic_handle_irq - Invoke the handler for a particular irq 595 * @irq: The irq number to handle 596 * 597 */ 598 int generic_handle_irq(unsigned int irq) 599 { 600 struct irq_desc *desc = irq_to_desc(irq); 601 602 if (!desc) 603 return -EINVAL; 604 generic_handle_irq_desc(desc); 605 return 0; 606 } 607 EXPORT_SYMBOL_GPL(generic_handle_irq); 608 609 #ifdef CONFIG_HANDLE_DOMAIN_IRQ 610 /** 611 * __handle_domain_irq - Invoke the handler for a HW irq belonging to a domain 612 * @domain: The domain where to perform the lookup 613 * @hwirq: The HW irq number to convert to a logical one 614 * @lookup: Whether to perform the domain lookup or not 615 * @regs: Register file coming from the low-level handling code 616 * 617 * Returns: 0 on success, or -EINVAL if conversion has failed 618 */ 619 int __handle_domain_irq(struct irq_domain *domain, unsigned int hwirq, 620 bool lookup, struct pt_regs *regs) 621 { 622 struct pt_regs *old_regs = set_irq_regs(regs); 623 unsigned int irq = hwirq; 624 int ret = 0; 625 626 irq_enter(); 627 628 #ifdef CONFIG_IRQ_DOMAIN 629 if (lookup) 630 irq = irq_find_mapping(domain, hwirq); 631 #endif 632 633 /* 634 * Some hardware gives randomly wrong interrupts. Rather 635 * than crashing, do something sensible. 636 */ 637 if (unlikely(!irq || irq >= nr_irqs)) { 638 ack_bad_irq(irq); 639 ret = -EINVAL; 640 } else { 641 generic_handle_irq(irq); 642 } 643 644 irq_exit(); 645 set_irq_regs(old_regs); 646 return ret; 647 } 648 #endif 649 650 /* Dynamic interrupt handling */ 651 652 /** 653 * irq_free_descs - free irq descriptors 654 * @from: Start of descriptor range 655 * @cnt: Number of consecutive irqs to free 656 */ 657 void irq_free_descs(unsigned int from, unsigned int cnt) 658 { 659 int i; 660 661 if (from >= nr_irqs || (from + cnt) > nr_irqs) 662 return; 663 664 mutex_lock(&sparse_irq_lock); 665 for (i = 0; i < cnt; i++) 666 free_desc(from + i); 667 668 bitmap_clear(allocated_irqs, from, cnt); 669 mutex_unlock(&sparse_irq_lock); 670 } 671 EXPORT_SYMBOL_GPL(irq_free_descs); 672 673 /** 674 * irq_alloc_descs - allocate and initialize a range of irq descriptors 675 * @irq: Allocate for specific irq number if irq >= 0 676 * @from: Start the search from this irq number 677 * @cnt: Number of consecutive irqs to allocate. 678 * @node: Preferred node on which the irq descriptor should be allocated 679 * @owner: Owning module (can be NULL) 680 * @affinity: Optional pointer to an affinity mask array of size @cnt which 681 * hints where the irq descriptors should be allocated and which 682 * default affinities to use 683 * 684 * Returns the first irq number or error code 685 */ 686 int __ref 687 __irq_alloc_descs(int irq, unsigned int from, unsigned int cnt, int node, 688 struct module *owner, const struct cpumask *affinity) 689 { 690 int start, ret; 691 692 if (!cnt) 693 return -EINVAL; 694 695 if (irq >= 0) { 696 if (from > irq) 697 return -EINVAL; 698 from = irq; 699 } else { 700 /* 701 * For interrupts which are freely allocated the 702 * architecture can force a lower bound to the @from 703 * argument. x86 uses this to exclude the GSI space. 704 */ 705 from = arch_dynirq_lower_bound(from); 706 } 707 708 mutex_lock(&sparse_irq_lock); 709 710 start = bitmap_find_next_zero_area(allocated_irqs, IRQ_BITMAP_BITS, 711 from, cnt, 0); 712 ret = -EEXIST; 713 if (irq >=0 && start != irq) 714 goto unlock; 715 716 if (start + cnt > nr_irqs) { 717 ret = irq_expand_nr_irqs(start + cnt); 718 if (ret) 719 goto unlock; 720 } 721 ret = alloc_descs(start, cnt, node, affinity, owner); 722 unlock: 723 mutex_unlock(&sparse_irq_lock); 724 return ret; 725 } 726 EXPORT_SYMBOL_GPL(__irq_alloc_descs); 727 728 #ifdef CONFIG_GENERIC_IRQ_LEGACY_ALLOC_HWIRQ 729 /** 730 * irq_alloc_hwirqs - Allocate an irq descriptor and initialize the hardware 731 * @cnt: number of interrupts to allocate 732 * @node: node on which to allocate 733 * 734 * Returns an interrupt number > 0 or 0, if the allocation fails. 735 */ 736 unsigned int irq_alloc_hwirqs(int cnt, int node) 737 { 738 int i, irq = __irq_alloc_descs(-1, 0, cnt, node, NULL, NULL); 739 740 if (irq < 0) 741 return 0; 742 743 for (i = irq; cnt > 0; i++, cnt--) { 744 if (arch_setup_hwirq(i, node)) 745 goto err; 746 irq_clear_status_flags(i, _IRQ_NOREQUEST); 747 } 748 return irq; 749 750 err: 751 for (i--; i >= irq; i--) { 752 irq_set_status_flags(i, _IRQ_NOREQUEST | _IRQ_NOPROBE); 753 arch_teardown_hwirq(i); 754 } 755 irq_free_descs(irq, cnt); 756 return 0; 757 } 758 EXPORT_SYMBOL_GPL(irq_alloc_hwirqs); 759 760 /** 761 * irq_free_hwirqs - Free irq descriptor and cleanup the hardware 762 * @from: Free from irq number 763 * @cnt: number of interrupts to free 764 * 765 */ 766 void irq_free_hwirqs(unsigned int from, int cnt) 767 { 768 int i, j; 769 770 for (i = from, j = cnt; j > 0; i++, j--) { 771 irq_set_status_flags(i, _IRQ_NOREQUEST | _IRQ_NOPROBE); 772 arch_teardown_hwirq(i); 773 } 774 irq_free_descs(from, cnt); 775 } 776 EXPORT_SYMBOL_GPL(irq_free_hwirqs); 777 #endif 778 779 /** 780 * irq_get_next_irq - get next allocated irq number 781 * @offset: where to start the search 782 * 783 * Returns next irq number after offset or nr_irqs if none is found. 784 */ 785 unsigned int irq_get_next_irq(unsigned int offset) 786 { 787 return find_next_bit(allocated_irqs, nr_irqs, offset); 788 } 789 790 struct irq_desc * 791 __irq_get_desc_lock(unsigned int irq, unsigned long *flags, bool bus, 792 unsigned int check) 793 { 794 struct irq_desc *desc = irq_to_desc(irq); 795 796 if (desc) { 797 if (check & _IRQ_DESC_CHECK) { 798 if ((check & _IRQ_DESC_PERCPU) && 799 !irq_settings_is_per_cpu_devid(desc)) 800 return NULL; 801 802 if (!(check & _IRQ_DESC_PERCPU) && 803 irq_settings_is_per_cpu_devid(desc)) 804 return NULL; 805 } 806 807 if (bus) 808 chip_bus_lock(desc); 809 raw_spin_lock_irqsave(&desc->lock, *flags); 810 } 811 return desc; 812 } 813 814 void __irq_put_desc_unlock(struct irq_desc *desc, unsigned long flags, bool bus) 815 { 816 raw_spin_unlock_irqrestore(&desc->lock, flags); 817 if (bus) 818 chip_bus_sync_unlock(desc); 819 } 820 821 int irq_set_percpu_devid_partition(unsigned int irq, 822 const struct cpumask *affinity) 823 { 824 struct irq_desc *desc = irq_to_desc(irq); 825 826 if (!desc) 827 return -EINVAL; 828 829 if (desc->percpu_enabled) 830 return -EINVAL; 831 832 desc->percpu_enabled = kzalloc(sizeof(*desc->percpu_enabled), GFP_KERNEL); 833 834 if (!desc->percpu_enabled) 835 return -ENOMEM; 836 837 if (affinity) 838 desc->percpu_affinity = affinity; 839 else 840 desc->percpu_affinity = cpu_possible_mask; 841 842 irq_set_percpu_devid_flags(irq); 843 return 0; 844 } 845 846 int irq_set_percpu_devid(unsigned int irq) 847 { 848 return irq_set_percpu_devid_partition(irq, NULL); 849 } 850 851 int irq_get_percpu_devid_partition(unsigned int irq, struct cpumask *affinity) 852 { 853 struct irq_desc *desc = irq_to_desc(irq); 854 855 if (!desc || !desc->percpu_enabled) 856 return -EINVAL; 857 858 if (affinity) 859 cpumask_copy(affinity, desc->percpu_affinity); 860 861 return 0; 862 } 863 864 void kstat_incr_irq_this_cpu(unsigned int irq) 865 { 866 kstat_incr_irqs_this_cpu(irq_to_desc(irq)); 867 } 868 869 /** 870 * kstat_irqs_cpu - Get the statistics for an interrupt on a cpu 871 * @irq: The interrupt number 872 * @cpu: The cpu number 873 * 874 * Returns the sum of interrupt counts on @cpu since boot for 875 * @irq. The caller must ensure that the interrupt is not removed 876 * concurrently. 877 */ 878 unsigned int kstat_irqs_cpu(unsigned int irq, int cpu) 879 { 880 struct irq_desc *desc = irq_to_desc(irq); 881 882 return desc && desc->kstat_irqs ? 883 *per_cpu_ptr(desc->kstat_irqs, cpu) : 0; 884 } 885 886 /** 887 * kstat_irqs - Get the statistics for an interrupt 888 * @irq: The interrupt number 889 * 890 * Returns the sum of interrupt counts on all cpus since boot for 891 * @irq. The caller must ensure that the interrupt is not removed 892 * concurrently. 893 */ 894 unsigned int kstat_irqs(unsigned int irq) 895 { 896 struct irq_desc *desc = irq_to_desc(irq); 897 unsigned int sum = 0; 898 int cpu; 899 900 if (!desc || !desc->kstat_irqs) 901 return 0; 902 if (!irq_settings_is_per_cpu_devid(desc) && 903 !irq_settings_is_per_cpu(desc)) 904 return desc->tot_count; 905 906 for_each_possible_cpu(cpu) 907 sum += *per_cpu_ptr(desc->kstat_irqs, cpu); 908 return sum; 909 } 910 911 /** 912 * kstat_irqs_usr - Get the statistics for an interrupt 913 * @irq: The interrupt number 914 * 915 * Returns the sum of interrupt counts on all cpus since boot for 916 * @irq. Contrary to kstat_irqs() this can be called from any 917 * preemptible context. It's protected against concurrent removal of 918 * an interrupt descriptor when sparse irqs are enabled. 919 */ 920 unsigned int kstat_irqs_usr(unsigned int irq) 921 { 922 unsigned int sum; 923 924 irq_lock_sparse(); 925 sum = kstat_irqs(irq); 926 irq_unlock_sparse(); 927 return sum; 928 } 929
Linux® is a registered trademark of Linus Torvalds in the United States and other countries.
TOMOYO® is a registered trademark of NTT DATA CORPORATION.