
TOMOYO Linux Cross Reference
Linux/arch/powerpc/kernel/vio.c


/*
 * IBM PowerPC Virtual I/O Infrastructure Support.
 *
 *    Copyright (c) 2003,2008 IBM Corp.
 *     Dave Engebretsen engebret@us.ibm.com
 *     Santiago Leon santil@us.ibm.com
 *     Hollis Blanchard <hollisb@us.ibm.com>
 *     Stephen Rothwell
 *     Robert Jennings <rcjenn@us.ibm.com>
 *
 *      This program is free software; you can redistribute it and/or
 *      modify it under the terms of the GNU General Public License
 *      as published by the Free Software Foundation; either version
 *      2 of the License, or (at your option) any later version.
 */

#include <linux/cpu.h>
#include <linux/types.h>
#include <linux/delay.h>
#include <linux/stat.h>
#include <linux/device.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/console.h>
#include <linux/export.h>
#include <linux/mm.h>
#include <linux/dma-mapping.h>
#include <linux/kobject.h>

#include <asm/iommu.h>
#include <asm/dma.h>
#include <asm/vio.h>
#include <asm/prom.h>
#include <asm/firmware.h>
#include <asm/tce.h>
#include <asm/page.h>
#include <asm/hvcall.h>

static struct vio_dev vio_bus_device = { /* fake "parent" device */
        .name = "vio",
        .type = "",
        .dev.init_name = "vio",
        .dev.bus = &vio_bus_type,
};

#ifdef CONFIG_PPC_SMLPAR
/**
 * vio_cmo_pool - A pool of IO memory for CMO use
 *
 * @size: The size of the pool in bytes
 * @free: The amount of free memory in the pool
 */
struct vio_cmo_pool {
        size_t size;
        size_t free;
};

/* How many ms to delay queued balance work */
#define VIO_CMO_BALANCE_DELAY 100

/* Portion out IO memory to CMO devices by this chunk size */
#define VIO_CMO_BALANCE_CHUNK 131072

/**
 * vio_cmo_dev_entry - A device that is CMO-enabled and requires entitlement
 *
 * @viodev: struct vio_dev pointer
 * @list: pointer to other devices on bus that are being tracked
 */
struct vio_cmo_dev_entry {
        struct vio_dev *viodev;
        struct list_head list;
};

/**
 * vio_cmo - VIO bus accounting structure for CMO entitlement
 *
 * @lock: spinlock for entire structure
 * @balance_q: work queue for balancing system entitlement
 * @device_list: list of CMO-enabled devices requiring entitlement
 * @entitled: total system entitlement in bytes
 * @reserve: pool of memory from which devices reserve entitlement, incl. spare
 * @excess: pool of excess entitlement not needed for device reserves or spare
 * @spare: IO memory for device hotplug functionality
 * @min: minimum necessary for system operation
 * @desired: desired memory for system operation
 * @curr: bytes currently allocated
 * @high: high water mark for IO data usage
 */
struct vio_cmo {
        spinlock_t lock;
        struct delayed_work balance_q;
        struct list_head device_list;
        size_t entitled;
        struct vio_cmo_pool reserve;
        struct vio_cmo_pool excess;
        size_t spare;
        size_t min;
        size_t desired;
        size_t curr;
        size_t high;
} vio_cmo;

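/*
 * Note: vio_cmo_balance() recomputes the pools so that
 * vio_cmo.reserve.size + vio_cmo.excess.size == vio_cmo.entitled, with the
 * spare pool accounted for inside the reserve pool; the helpers below move
 * entitlement between the pools and then schedule a balance operation to
 * restore that relationship.
 */
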
/**
 * vio_cmo_num_OF_devs - Count the number of OF devices that have DMA windows
 */
static int vio_cmo_num_OF_devs(void)
{
        struct device_node *node_vroot;
        int count = 0;

        /*
         * Count the number of vdevice entries with an
         * ibm,my-dma-window OF property
         */
        node_vroot = of_find_node_by_name(NULL, "vdevice");
        if (node_vroot) {
                struct device_node *of_node;
                struct property *prop;

                for_each_child_of_node(node_vroot, of_node) {
                        prop = of_find_property(of_node, "ibm,my-dma-window",
                                                NULL);
                        if (prop)
                                count++;
                }
        }
        of_node_put(node_vroot);
        return count;
}

/**
 * vio_cmo_alloc - allocate IO memory for CMO-enabled devices
 *
 * @viodev: VIO device requesting IO memory
 * @size: size of allocation requested
 *
 * Allocations come from memory reserved for the devices and any excess
 * IO memory available to all devices.  The spare pool used to service
 * hotplug must be equal to %VIO_CMO_MIN_ENT for the excess pool to be
 * made available.
 *
 * Return codes:
 *  0 for successful allocation and -ENOMEM for a failure
 */
static inline int vio_cmo_alloc(struct vio_dev *viodev, size_t size)
{
        unsigned long flags;
        size_t reserve_free = 0;
        size_t excess_free = 0;
        int ret = -ENOMEM;

        spin_lock_irqsave(&vio_cmo.lock, flags);

        /* Determine the amount of free entitlement available in reserve */
        if (viodev->cmo.entitled > viodev->cmo.allocated)
                reserve_free = viodev->cmo.entitled - viodev->cmo.allocated;

        /* If spare is not fulfilled, the excess pool can not be used. */
        if (vio_cmo.spare >= VIO_CMO_MIN_ENT)
                excess_free = vio_cmo.excess.free;

        /* The request can be satisfied */
        if ((reserve_free + excess_free) >= size) {
                vio_cmo.curr += size;
                if (vio_cmo.curr > vio_cmo.high)
                        vio_cmo.high = vio_cmo.curr;
                viodev->cmo.allocated += size;
                size -= min(reserve_free, size);
                vio_cmo.excess.free -= size;
                ret = 0;
        }

        spin_unlock_irqrestore(&vio_cmo.lock, flags);
        return ret;
}
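
/*
 * For example (illustrative numbers only): with reserve_free = 4096,
 * excess_free = 8192 and a request of size = 6144, the request is
 * satisfied: 4096 bytes are charged against the device's reserved
 * entitlement and the remaining 2048 bytes come out of
 * vio_cmo.excess.free.
 */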

/**
 * vio_cmo_dealloc - deallocate IO memory from CMO-enabled devices
 * @viodev: VIO device freeing IO memory
 * @size: size of deallocation
 *
 * IO memory is freed by the device back to the correct memory pools.
 * The spare pool is replenished first from either memory pool, then
 * the reserve pool is used to reduce device entitlement, the excess
 * pool is used to increase the reserve pool toward the desired entitlement
 * target, and then the remaining memory is returned to the pools.
 */
static inline void vio_cmo_dealloc(struct vio_dev *viodev, size_t size)
{
        unsigned long flags;
        size_t spare_needed = 0;
        size_t excess_freed = 0;
        size_t reserve_freed = size;
        size_t tmp;
        int balance = 0;

        spin_lock_irqsave(&vio_cmo.lock, flags);
        vio_cmo.curr -= size;

        /* Amount of memory freed from the excess pool */
        if (viodev->cmo.allocated > viodev->cmo.entitled) {
                excess_freed = min(reserve_freed, (viodev->cmo.allocated -
                                                   viodev->cmo.entitled));
                reserve_freed -= excess_freed;
        }

        /* Remove allocation from device */
        viodev->cmo.allocated -= (reserve_freed + excess_freed);

        /* Spare is a subset of the reserve pool, replenish it first. */
        spare_needed = VIO_CMO_MIN_ENT - vio_cmo.spare;

        /*
         * Replenish the spare in the reserve pool from the excess pool.
         * This moves entitlement into the reserve pool.
         */
        if (spare_needed && excess_freed) {
                tmp = min(excess_freed, spare_needed);
                vio_cmo.excess.size -= tmp;
                vio_cmo.reserve.size += tmp;
                vio_cmo.spare += tmp;
                excess_freed -= tmp;
                spare_needed -= tmp;
                balance = 1;
        }

        /*
         * Replenish the spare in the reserve pool from the reserve pool.
         * This removes entitlement from the device down to VIO_CMO_MIN_ENT,
         * if needed, and gives it to the spare pool. The amount of used
         * memory in this pool does not change.
         */
        if (spare_needed && reserve_freed) {
                tmp = min3(spare_needed, reserve_freed,
                           (viodev->cmo.entitled - VIO_CMO_MIN_ENT));

                vio_cmo.spare += tmp;
                viodev->cmo.entitled -= tmp;
                reserve_freed -= tmp;
                spare_needed -= tmp;
                balance = 1;
        }

        /*
         * Increase the reserve pool until the desired allocation is met.
         * Move an allocation freed from the excess pool into the reserve
         * pool and schedule a balance operation.
         */
        if (excess_freed && (vio_cmo.desired > vio_cmo.reserve.size)) {
                tmp = min(excess_freed, (vio_cmo.desired - vio_cmo.reserve.size));

                vio_cmo.excess.size -= tmp;
                vio_cmo.reserve.size += tmp;
                excess_freed -= tmp;
                balance = 1;
        }

        /* Return any remaining freed entitlement to the excess pool */
        if (excess_freed)
                vio_cmo.excess.free += excess_freed;

        if (balance)
                schedule_delayed_work(&vio_cmo.balance_q, VIO_CMO_BALANCE_DELAY);
        spin_unlock_irqrestore(&vio_cmo.lock, flags);
}

/**
 * vio_cmo_entitlement_update - Manage system entitlement changes
 *
 * @new_entitlement: new system entitlement to attempt to accommodate
 *
 * Increases in entitlement will be used to fulfill the spare entitlement
 * and the rest is given to the excess pool.  Decreases, if they are
 * possible, come from the excess pool and from unused device entitlement.
 *
 * Returns: 0 on success, -ENOMEM when the change cannot be made
 */
int vio_cmo_entitlement_update(size_t new_entitlement)
{
        struct vio_dev *viodev;
        struct vio_cmo_dev_entry *dev_ent;
        unsigned long flags;
        size_t avail, delta, tmp;

        spin_lock_irqsave(&vio_cmo.lock, flags);

        /* Entitlement increases */
        if (new_entitlement > vio_cmo.entitled) {
                delta = new_entitlement - vio_cmo.entitled;

                /* Fulfill spare allocation */
                if (vio_cmo.spare < VIO_CMO_MIN_ENT) {
                        tmp = min(delta, (VIO_CMO_MIN_ENT - vio_cmo.spare));
                        vio_cmo.spare += tmp;
                        vio_cmo.reserve.size += tmp;
                        delta -= tmp;
                }

                /* Remaining new allocation goes to the excess pool */
                vio_cmo.entitled += delta;
                vio_cmo.excess.size += delta;
                vio_cmo.excess.free += delta;

                goto out;
        }

        /* Entitlement decreases */
        delta = vio_cmo.entitled - new_entitlement;
        avail = vio_cmo.excess.free;

        /*
         * Need to check how much unused entitlement each device can
         * sacrifice to fulfill entitlement change.
         */
        list_for_each_entry(dev_ent, &vio_cmo.device_list, list) {
                if (avail >= delta)
                        break;

                viodev = dev_ent->viodev;
                if ((viodev->cmo.entitled > viodev->cmo.allocated) &&
                    (viodev->cmo.entitled > VIO_CMO_MIN_ENT))
                        avail += viodev->cmo.entitled -
                                 max_t(size_t, viodev->cmo.allocated,
                                       VIO_CMO_MIN_ENT);
        }

        if (delta <= avail) {
                vio_cmo.entitled -= delta;

                /* Take entitlement from the excess pool first */
                tmp = min(vio_cmo.excess.free, delta);
                vio_cmo.excess.size -= tmp;
                vio_cmo.excess.free -= tmp;
                delta -= tmp;

                /*
                 * Remove all but VIO_CMO_MIN_ENT bytes from devices
                 * until entitlement change is served
                 */
                list_for_each_entry(dev_ent, &vio_cmo.device_list, list) {
                        if (!delta)
                                break;

                        viodev = dev_ent->viodev;
                        tmp = 0;
                        if ((viodev->cmo.entitled > viodev->cmo.allocated) &&
                            (viodev->cmo.entitled > VIO_CMO_MIN_ENT))
                                tmp = viodev->cmo.entitled -
                                      max_t(size_t, viodev->cmo.allocated,
                                            VIO_CMO_MIN_ENT);
                        viodev->cmo.entitled -= min(tmp, delta);
                        delta -= min(tmp, delta);
                }
        } else {
                spin_unlock_irqrestore(&vio_cmo.lock, flags);
                return -ENOMEM;
        }

out:
        schedule_delayed_work(&vio_cmo.balance_q, 0);
        spin_unlock_irqrestore(&vio_cmo.lock, flags);
        return 0;
}

/**
 * vio_cmo_balance - Balance entitlement among devices
 *
 * @work: work queue structure for this operation
 *
 * Any system entitlement above the minimum needed for devices, or
 * already allocated to devices, can be distributed to the devices.
 * The list of devices is iterated through to recalculate the desired
 * entitlement level and to determine how much entitlement above the
 * minimum entitlement is allocated to devices.
 *
 * Small chunks of the available entitlement are given to devices until
 * their requirements are fulfilled or there is no entitlement left to give.
 * Upon completion sizes of the reserve and excess pools are calculated.
 *
 * The system minimum entitlement level is also recalculated here.
 * Entitlement will be reserved for devices even after vio_bus_remove to
 * accommodate reloading the driver.  The OF tree is walked to count the
 * number of devices present and this will remove entitlement for devices
 * that have actually left the system after having vio_bus_remove called.
 */
static void vio_cmo_balance(struct work_struct *work)
{
        struct vio_cmo *cmo;
        struct vio_dev *viodev;
        struct vio_cmo_dev_entry *dev_ent;
        unsigned long flags;
        size_t avail = 0, level, chunk, need;
        int devcount = 0, fulfilled;

        cmo = container_of(work, struct vio_cmo, balance_q.work);

        spin_lock_irqsave(&vio_cmo.lock, flags);

        /* Calculate minimum entitlement and fulfill spare */
        cmo->min = vio_cmo_num_OF_devs() * VIO_CMO_MIN_ENT;
        BUG_ON(cmo->min > cmo->entitled);
        cmo->spare = min_t(size_t, VIO_CMO_MIN_ENT, (cmo->entitled - cmo->min));
        cmo->min += cmo->spare;
        cmo->desired = cmo->min;

        /*
         * Determine how much entitlement is available and reset device
         * entitlements
         */
        avail = cmo->entitled - cmo->spare;
        list_for_each_entry(dev_ent, &vio_cmo.device_list, list) {
                viodev = dev_ent->viodev;
                devcount++;
                viodev->cmo.entitled = VIO_CMO_MIN_ENT;
                cmo->desired += (viodev->cmo.desired - VIO_CMO_MIN_ENT);
                avail -= max_t(size_t, viodev->cmo.allocated, VIO_CMO_MIN_ENT);
        }

        /*
         * Having provided each device with the minimum entitlement, loop
         * over the devices portioning out the remaining entitlement
         * until there is nothing left.
         */
        level = VIO_CMO_MIN_ENT;
        while (avail) {
                fulfilled = 0;
                list_for_each_entry(dev_ent, &vio_cmo.device_list, list) {
                        viodev = dev_ent->viodev;

                        if (viodev->cmo.desired <= level) {
                                fulfilled++;
                                continue;
                        }

                        /*
                         * Give the device up to VIO_CMO_BALANCE_CHUNK
                         * bytes of entitlement, but do not exceed the
                         * desired level of entitlement for the device.
                         */
                        chunk = min_t(size_t, avail, VIO_CMO_BALANCE_CHUNK);
                        chunk = min(chunk, (viodev->cmo.desired -
                                            viodev->cmo.entitled));
                        viodev->cmo.entitled += chunk;

                        /*
                         * If the memory for this entitlement increase was
                         * already allocated to the device it does not come
                         * from the available pool being portioned out.
                         */
                        need = max(viodev->cmo.allocated, viodev->cmo.entitled) -
                               max(viodev->cmo.allocated, level);
                        avail -= need;

                }
                if (fulfilled == devcount)
                        break;
                level += VIO_CMO_BALANCE_CHUNK;
        }

        /* Calculate new reserve and excess pool sizes */
        cmo->reserve.size = cmo->min;
        cmo->excess.free = 0;
        cmo->excess.size = 0;
        need = 0;
        list_for_each_entry(dev_ent, &vio_cmo.device_list, list) {
                viodev = dev_ent->viodev;
                /* Calculated reserve size above the minimum entitlement */
                if (viodev->cmo.entitled)
                        cmo->reserve.size += (viodev->cmo.entitled -
                                              VIO_CMO_MIN_ENT);
                /* Calculated used excess entitlement */
                if (viodev->cmo.allocated > viodev->cmo.entitled)
                        need += viodev->cmo.allocated - viodev->cmo.entitled;
        }
        cmo->excess.size = cmo->entitled - cmo->reserve.size;
        cmo->excess.free = cmo->excess.size - need;

        cancel_delayed_work(to_delayed_work(work));
        spin_unlock_irqrestore(&vio_cmo.lock, flags);
}

static void *vio_dma_iommu_alloc_coherent(struct device *dev, size_t size,
                                          dma_addr_t *dma_handle, gfp_t flag,
                                          struct dma_attrs *attrs)
{
        struct vio_dev *viodev = to_vio_dev(dev);
        void *ret;

        if (vio_cmo_alloc(viodev, roundup(size, PAGE_SIZE))) {
                atomic_inc(&viodev->cmo.allocs_failed);
                return NULL;
        }

        ret = dma_iommu_ops.alloc(dev, size, dma_handle, flag, attrs);
        if (unlikely(ret == NULL)) {
                vio_cmo_dealloc(viodev, roundup(size, PAGE_SIZE));
                atomic_inc(&viodev->cmo.allocs_failed);
        }

        return ret;
}

static void vio_dma_iommu_free_coherent(struct device *dev, size_t size,
                                        void *vaddr, dma_addr_t dma_handle,
                                        struct dma_attrs *attrs)
{
        struct vio_dev *viodev = to_vio_dev(dev);

        dma_iommu_ops.free(dev, size, vaddr, dma_handle, attrs);

        vio_cmo_dealloc(viodev, roundup(size, PAGE_SIZE));
}

static dma_addr_t vio_dma_iommu_map_page(struct device *dev, struct page *page,
                                         unsigned long offset, size_t size,
                                         enum dma_data_direction direction,
                                         struct dma_attrs *attrs)
{
        struct vio_dev *viodev = to_vio_dev(dev);
        struct iommu_table *tbl;
        dma_addr_t ret = DMA_ERROR_CODE;

        tbl = get_iommu_table_base(dev);
        if (vio_cmo_alloc(viodev, roundup(size, IOMMU_PAGE_SIZE(tbl)))) {
                atomic_inc(&viodev->cmo.allocs_failed);
                return ret;
        }

        ret = dma_iommu_ops.map_page(dev, page, offset, size, direction, attrs);
        if (unlikely(dma_mapping_error(dev, ret))) {
                vio_cmo_dealloc(viodev, roundup(size, IOMMU_PAGE_SIZE(tbl)));
                atomic_inc(&viodev->cmo.allocs_failed);
        }

        return ret;
}

static void vio_dma_iommu_unmap_page(struct device *dev, dma_addr_t dma_handle,
                                     size_t size,
                                     enum dma_data_direction direction,
                                     struct dma_attrs *attrs)
{
        struct vio_dev *viodev = to_vio_dev(dev);
        struct iommu_table *tbl;

        tbl = get_iommu_table_base(dev);
        dma_iommu_ops.unmap_page(dev, dma_handle, size, direction, attrs);

        vio_cmo_dealloc(viodev, roundup(size, IOMMU_PAGE_SIZE(tbl)));
}

static int vio_dma_iommu_map_sg(struct device *dev, struct scatterlist *sglist,
                                int nelems, enum dma_data_direction direction,
                                struct dma_attrs *attrs)
{
        struct vio_dev *viodev = to_vio_dev(dev);
        struct iommu_table *tbl;
        struct scatterlist *sgl;
        int ret, count = 0;
        size_t alloc_size = 0;

        tbl = get_iommu_table_base(dev);
        for (sgl = sglist; count < nelems; count++, sgl++)
                alloc_size += roundup(sgl->length, IOMMU_PAGE_SIZE(tbl));

        if (vio_cmo_alloc(viodev, alloc_size)) {
                atomic_inc(&viodev->cmo.allocs_failed);
                return 0;
        }

        ret = dma_iommu_ops.map_sg(dev, sglist, nelems, direction, attrs);

        if (unlikely(!ret)) {
                vio_cmo_dealloc(viodev, alloc_size);
                atomic_inc(&viodev->cmo.allocs_failed);
                return ret;
        }

        for (sgl = sglist, count = 0; count < ret; count++, sgl++)
                alloc_size -= roundup(sgl->dma_length, IOMMU_PAGE_SIZE(tbl));
        if (alloc_size)
                vio_cmo_dealloc(viodev, alloc_size);

        return ret;
}

static void vio_dma_iommu_unmap_sg(struct device *dev,
                struct scatterlist *sglist, int nelems,
                enum dma_data_direction direction,
                struct dma_attrs *attrs)
{
        struct vio_dev *viodev = to_vio_dev(dev);
        struct iommu_table *tbl;
        struct scatterlist *sgl;
        size_t alloc_size = 0;
        int count = 0;

        tbl = get_iommu_table_base(dev);
        for (sgl = sglist; count < nelems; count++, sgl++)
                alloc_size += roundup(sgl->dma_length, IOMMU_PAGE_SIZE(tbl));

        dma_iommu_ops.unmap_sg(dev, sglist, nelems, direction, attrs);

        vio_cmo_dealloc(viodev, alloc_size);
}

static int vio_dma_iommu_dma_supported(struct device *dev, u64 mask)
{
        return dma_iommu_ops.dma_supported(dev, mask);
}

static u64 vio_dma_get_required_mask(struct device *dev)
{
        return dma_iommu_ops.get_required_mask(dev);
}

struct dma_map_ops vio_dma_mapping_ops = {
        .alloc             = vio_dma_iommu_alloc_coherent,
        .free              = vio_dma_iommu_free_coherent,
        .mmap              = dma_direct_mmap_coherent,
        .map_sg            = vio_dma_iommu_map_sg,
        .unmap_sg          = vio_dma_iommu_unmap_sg,
        .map_page          = vio_dma_iommu_map_page,
        .unmap_page        = vio_dma_iommu_unmap_page,
        .dma_supported     = vio_dma_iommu_dma_supported,
        .get_required_mask = vio_dma_get_required_mask,
};
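
/*
 * Illustrative only: once vio_cmo_set_dma_ops() below installs
 * vio_dma_mapping_ops for a device, an ordinary DMA allocation in a
 * driver is charged against that device's entitlement, e.g.
 *
 *        buf = dma_alloc_coherent(&viodev->dev, size, &handle, GFP_KERNEL);
 *
 * reaches vio_dma_iommu_alloc_coherent() and therefore
 * vio_cmo_alloc(viodev, roundup(size, PAGE_SIZE)) before the real
 * allocation is attempted.
 */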

/**
 * vio_cmo_set_dev_desired - Set desired entitlement for a device
 *
 * @viodev: struct vio_dev for device to alter
 * @desired: new desired entitlement level in bytes
 *
 * For use by devices to request a change to their entitlement at runtime or
 * through sysfs.  The desired entitlement level is changed and a balancing
 * of system resources is scheduled to run in the future.
 */
void vio_cmo_set_dev_desired(struct vio_dev *viodev, size_t desired)
{
        unsigned long flags;
        struct vio_cmo_dev_entry *dev_ent;
        int found = 0;

        if (!firmware_has_feature(FW_FEATURE_CMO))
                return;

        spin_lock_irqsave(&vio_cmo.lock, flags);
        if (desired < VIO_CMO_MIN_ENT)
                desired = VIO_CMO_MIN_ENT;

        /*
         * Changes will not be made for devices not in the device list.
         * If it is not in the device list, then no driver is loaded
         * for the device and it can not receive entitlement.
         */
        list_for_each_entry(dev_ent, &vio_cmo.device_list, list)
                if (viodev == dev_ent->viodev) {
                        found = 1;
                        break;
                }
        if (!found) {
                spin_unlock_irqrestore(&vio_cmo.lock, flags);
                return;
        }

        /* Increase/decrease in desired device entitlement */
        if (desired >= viodev->cmo.desired) {
                /* Just bump the bus and device values prior to a balance */
                vio_cmo.desired += desired - viodev->cmo.desired;
                viodev->cmo.desired = desired;
        } else {
                /* Decrease bus and device values for desired entitlement */
                vio_cmo.desired -= viodev->cmo.desired - desired;
                viodev->cmo.desired = desired;
                /*
                 * If less entitlement is desired than current entitlement, move
                 * any reserve memory in the change region to the excess pool.
                 */
                if (viodev->cmo.entitled > desired) {
                        vio_cmo.reserve.size -= viodev->cmo.entitled - desired;
                        vio_cmo.excess.size += viodev->cmo.entitled - desired;
                        /*
                         * If entitlement moving from the reserve pool to the
                         * excess pool is currently unused, add to the excess
                         * free counter.
                         */
                        if (viodev->cmo.allocated < viodev->cmo.entitled)
                                vio_cmo.excess.free += viodev->cmo.entitled -
                                                       max(viodev->cmo.allocated, desired);
                        viodev->cmo.entitled = desired;
                }
        }
        schedule_delayed_work(&vio_cmo.balance_q, 0);
        spin_unlock_irqrestore(&vio_cmo.lock, flags);
}
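
/*
 * This is also reachable from user space through the cmo_desired sysfs
 * attribute defined below, e.g. (device path illustrative):
 *
 *        echo 1048576 > /sys/bus/vio/devices/30000002/cmo_desired
 *
 * which ends up in viodev_cmo_desired_set() and in turn here.
 */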

/**
 * vio_cmo_bus_probe - Handle CMO specific bus probe activities
 *
 * @viodev - Pointer to struct vio_dev for device
 *
 * Determine the device's IO memory entitlement needs, attempting
 * to satisfy the system minimum entitlement at first and scheduling
 * a balance operation to take care of the rest at a later time.
 *
 * Returns: 0 on success, -EINVAL when device doesn't support CMO, and
 *          -ENOMEM when entitlement is not available for device or
 *          device entry.
 */
static int vio_cmo_bus_probe(struct vio_dev *viodev)
{
        struct vio_cmo_dev_entry *dev_ent;
        struct device *dev = &viodev->dev;
        struct iommu_table *tbl;
        struct vio_driver *viodrv = to_vio_driver(dev->driver);
        unsigned long flags;
        size_t size;
        bool dma_capable = false;

        tbl = get_iommu_table_base(dev);

        /* A device requires entitlement if it has a DMA window property */
        switch (viodev->family) {
        case VDEVICE:
                if (of_get_property(viodev->dev.of_node,
                                        "ibm,my-dma-window", NULL))
                        dma_capable = true;
                break;
        case PFO:
                dma_capable = false;
                break;
        default:
                dev_warn(dev, "unknown device family: %d\n", viodev->family);
                BUG();
                break;
        }

        /* Configure entitlement for the device. */
        if (dma_capable) {
                /* Check that the driver is CMO enabled and get desired DMA */
                if (!viodrv->get_desired_dma) {
                        dev_err(dev, "%s: device driver does not support CMO\n",
                                __func__);
                        return -EINVAL;
                }

                viodev->cmo.desired =
                        IOMMU_PAGE_ALIGN(viodrv->get_desired_dma(viodev), tbl);
                if (viodev->cmo.desired < VIO_CMO_MIN_ENT)
                        viodev->cmo.desired = VIO_CMO_MIN_ENT;
                size = VIO_CMO_MIN_ENT;

                dev_ent = kmalloc(sizeof(struct vio_cmo_dev_entry),
                                  GFP_KERNEL);
                if (!dev_ent)
                        return -ENOMEM;

                dev_ent->viodev = viodev;
                spin_lock_irqsave(&vio_cmo.lock, flags);
                list_add(&dev_ent->list, &vio_cmo.device_list);
        } else {
                viodev->cmo.desired = 0;
                size = 0;
                spin_lock_irqsave(&vio_cmo.lock, flags);
        }

        /*
         * If the needs for vio_cmo.min have not changed since they
         * were last set, the number of devices in the OF tree has
         * been constant and the IO memory for this is already in
         * the reserve pool.
         */
        if (vio_cmo.min == ((vio_cmo_num_OF_devs() + 1) *
                            VIO_CMO_MIN_ENT)) {
                /* Update desired entitlement if the device requires it */
                if (size)
                        vio_cmo.desired += (viodev->cmo.desired -
                                        VIO_CMO_MIN_ENT);
        } else {
                size_t tmp;

                tmp = vio_cmo.spare + vio_cmo.excess.free;
                if (tmp < size) {
                        dev_err(dev, "%s: insufficient free "
                                "entitlement to add device. "
                                "Need %lu, have %lu\n", __func__,
                                size, tmp);
                        spin_unlock_irqrestore(&vio_cmo.lock, flags);
                        return -ENOMEM;
                }

                /* Use excess pool first to fulfill request */
                tmp = min(size, vio_cmo.excess.free);
                vio_cmo.excess.free -= tmp;
                vio_cmo.excess.size -= tmp;
                vio_cmo.reserve.size += tmp;

                /* Use spare if excess pool was insufficient */
                vio_cmo.spare -= size - tmp;

                /* Update bus accounting */
                vio_cmo.min += size;
                vio_cmo.desired += viodev->cmo.desired;
        }
        spin_unlock_irqrestore(&vio_cmo.lock, flags);
        return 0;
}

/**
 * vio_cmo_bus_remove - Handle CMO specific bus removal activities
 *
 * @viodev - Pointer to struct vio_dev for device
 *
 * Remove the device from the cmo device list.  The minimum entitlement
 * will be reserved for the device as long as it is in the system.  The
 * rest of the entitlement the device had been allocated will be returned
 * to the system.
 */
static void vio_cmo_bus_remove(struct vio_dev *viodev)
{
        struct vio_cmo_dev_entry *dev_ent;
        unsigned long flags;
        size_t tmp;

        spin_lock_irqsave(&vio_cmo.lock, flags);
        if (viodev->cmo.allocated) {
                dev_err(&viodev->dev, "%s: device had %lu bytes of IO "
                        "allocated after remove operation.\n",
                        __func__, viodev->cmo.allocated);
                BUG();
        }

        /*
         * Remove the device from the device list being maintained for
         * CMO enabled devices.
         */
        list_for_each_entry(dev_ent, &vio_cmo.device_list, list)
                if (viodev == dev_ent->viodev) {
                        list_del(&dev_ent->list);
                        kfree(dev_ent);
                        break;
                }

        /*
         * Devices may not require any entitlement and they do not need
         * to be processed.  Otherwise, return the device's entitlement
         * back to the pools.
         */
        if (viodev->cmo.entitled) {
                /*
                 * This device has not yet left the OF tree, its
                 * minimum entitlement remains in vio_cmo.min and
                 * vio_cmo.desired
                 */
                vio_cmo.desired -= (viodev->cmo.desired - VIO_CMO_MIN_ENT);

                /*
                 * Save min allocation for device in reserve as long
                 * as it exists in OF tree as determined by later
                 * balance operation
                 */
                viodev->cmo.entitled -= VIO_CMO_MIN_ENT;

                /* Replenish spare from freed reserve pool */
                if (viodev->cmo.entitled && (vio_cmo.spare < VIO_CMO_MIN_ENT)) {
                        tmp = min(viodev->cmo.entitled, (VIO_CMO_MIN_ENT -
                                                         vio_cmo.spare));
                        vio_cmo.spare += tmp;
                        viodev->cmo.entitled -= tmp;
                }

                /* Remaining reserve goes to excess pool */
                vio_cmo.excess.size += viodev->cmo.entitled;
                vio_cmo.excess.free += viodev->cmo.entitled;
                vio_cmo.reserve.size -= viodev->cmo.entitled;

                /*
                 * Until the device is removed it will keep a
                 * minimum entitlement; this guarantees that a
                 * module unload/load will succeed.
                 */
                viodev->cmo.entitled = VIO_CMO_MIN_ENT;
                viodev->cmo.desired = VIO_CMO_MIN_ENT;
                atomic_set(&viodev->cmo.allocs_failed, 0);
        }

        spin_unlock_irqrestore(&vio_cmo.lock, flags);
}

static void vio_cmo_set_dma_ops(struct vio_dev *viodev)
{
        set_dma_ops(&viodev->dev, &vio_dma_mapping_ops);
}

/**
 * vio_cmo_bus_init - CMO entitlement initialization at bus init time
 *
 * Set up the reserve and excess entitlement pools based on available
 * system entitlement and the number of devices in the OF tree that
 * require entitlement in the reserve pool.
 */
static void vio_cmo_bus_init(void)
{
        struct hvcall_mpp_data mpp_data;
        int err;

        memset(&vio_cmo, 0, sizeof(struct vio_cmo));
        spin_lock_init(&vio_cmo.lock);
        INIT_LIST_HEAD(&vio_cmo.device_list);
        INIT_DELAYED_WORK(&vio_cmo.balance_q, vio_cmo_balance);

        /* Get current system entitlement */
        err = h_get_mpp(&mpp_data);

        /*
         * On failure, continue with entitlement set to 0, will panic()
         * later when spare is reserved.
         */
        if (err != H_SUCCESS) {
                printk(KERN_ERR "%s: unable to determine system IO "
                       "entitlement. (%d)\n", __func__, err);
                vio_cmo.entitled = 0;
        } else {
                vio_cmo.entitled = mpp_data.entitled_mem;
        }

        /* Set reservation and check against entitlement */
        vio_cmo.spare = VIO_CMO_MIN_ENT;
        vio_cmo.reserve.size = vio_cmo.spare;
        vio_cmo.reserve.size += (vio_cmo_num_OF_devs() *
                                 VIO_CMO_MIN_ENT);
        if (vio_cmo.reserve.size > vio_cmo.entitled) {
                printk(KERN_ERR "%s: insufficient system entitlement\n",
                       __func__);
                panic("%s: Insufficient system entitlement", __func__);
        }

        /* Set the remaining accounting variables */
        vio_cmo.excess.size = vio_cmo.entitled - vio_cmo.reserve.size;
        vio_cmo.excess.free = vio_cmo.excess.size;
        vio_cmo.min = vio_cmo.reserve.size;
        vio_cmo.desired = vio_cmo.reserve.size;
}

/* sysfs device functions and data structures for CMO */

#define viodev_cmo_rd_attr(name)                                        \
static ssize_t viodev_cmo_##name##_show(struct device *dev,             \
                                        struct device_attribute *attr,  \
                                        char *buf)                      \
{                                                                       \
        return sprintf(buf, "%lu\n", to_vio_dev(dev)->cmo.name);        \
}

static ssize_t viodev_cmo_allocs_failed_show(struct device *dev,
                struct device_attribute *attr, char *buf)
{
        struct vio_dev *viodev = to_vio_dev(dev);
        return sprintf(buf, "%d\n", atomic_read(&viodev->cmo.allocs_failed));
}

static ssize_t viodev_cmo_allocs_failed_reset(struct device *dev,
                struct device_attribute *attr, const char *buf, size_t count)
{
        struct vio_dev *viodev = to_vio_dev(dev);
        atomic_set(&viodev->cmo.allocs_failed, 0);
        return count;
}

static ssize_t viodev_cmo_desired_set(struct device *dev,
                struct device_attribute *attr, const char *buf, size_t count)
{
        struct vio_dev *viodev = to_vio_dev(dev);
        size_t new_desired;
        int ret;

        ret = kstrtoul(buf, 10, &new_desired);
        if (ret)
                return ret;

        vio_cmo_set_dev_desired(viodev, new_desired);
        return count;
}

viodev_cmo_rd_attr(desired);
viodev_cmo_rd_attr(entitled);
viodev_cmo_rd_attr(allocated);

static ssize_t name_show(struct device *, struct device_attribute *, char *);
static ssize_t devspec_show(struct device *, struct device_attribute *, char *);
static ssize_t modalias_show(struct device *dev, struct device_attribute *attr,
                             char *buf);
static struct device_attribute vio_cmo_dev_attrs[] = {
        __ATTR_RO(name),
        __ATTR_RO(devspec),
        __ATTR_RO(modalias),
        __ATTR(cmo_desired,       S_IWUSR|S_IRUSR|S_IWGRP|S_IRGRP|S_IROTH,
               viodev_cmo_desired_show, viodev_cmo_desired_set),
        __ATTR(cmo_entitled,      S_IRUGO, viodev_cmo_entitled_show,      NULL),
        __ATTR(cmo_allocated,     S_IRUGO, viodev_cmo_allocated_show,     NULL),
        __ATTR(cmo_allocs_failed, S_IWUSR|S_IRUSR|S_IWGRP|S_IRGRP|S_IROTH,
               viodev_cmo_allocs_failed_show, viodev_cmo_allocs_failed_reset),
        __ATTR_NULL
};

/* sysfs bus functions and data structures for CMO */

#define viobus_cmo_rd_attr(name)                                        \
static ssize_t cmo_##name##_show(struct bus_type *bt, char *buf)        \
{                                                                       \
        return sprintf(buf, "%lu\n", vio_cmo.name);                     \
}                                                                       \
static BUS_ATTR_RO(cmo_##name)

#define viobus_cmo_pool_rd_attr(name, var)                              \
static ssize_t                                                          \
cmo_##name##_##var##_show(struct bus_type *bt, char *buf)               \
{                                                                       \
        return sprintf(buf, "%lu\n", vio_cmo.name.var);                 \
}                                                                       \
static BUS_ATTR_RO(cmo_##name##_##var)

viobus_cmo_rd_attr(entitled);
viobus_cmo_rd_attr(spare);
viobus_cmo_rd_attr(min);
viobus_cmo_rd_attr(desired);
viobus_cmo_rd_attr(curr);
viobus_cmo_pool_rd_attr(reserve, size);
viobus_cmo_pool_rd_attr(excess, size);
viobus_cmo_pool_rd_attr(excess, free);

static ssize_t cmo_high_show(struct bus_type *bt, char *buf)
{
        return sprintf(buf, "%lu\n", vio_cmo.high);
}

static ssize_t cmo_high_store(struct bus_type *bt, const char *buf,
                              size_t count)
{
        unsigned long flags;

        spin_lock_irqsave(&vio_cmo.lock, flags);
        vio_cmo.high = vio_cmo.curr;
        spin_unlock_irqrestore(&vio_cmo.lock, flags);

        return count;
}
static BUS_ATTR_RW(cmo_high);

static struct attribute *vio_bus_attrs[] = {
        &bus_attr_cmo_entitled.attr,
        &bus_attr_cmo_spare.attr,
        &bus_attr_cmo_min.attr,
        &bus_attr_cmo_desired.attr,
        &bus_attr_cmo_curr.attr,
        &bus_attr_cmo_high.attr,
        &bus_attr_cmo_reserve_size.attr,
        &bus_attr_cmo_excess_size.attr,
        &bus_attr_cmo_excess_free.attr,
        NULL,
};
ATTRIBUTE_GROUPS(vio_bus);

static void vio_cmo_sysfs_init(void)
{
        vio_bus_type.dev_attrs = vio_cmo_dev_attrs;
        vio_bus_type.bus_groups = vio_bus_groups;
}
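
/*
 * With the attributes above, each CMO-capable device exposes cmo_desired,
 * cmo_entitled, cmo_allocated and cmo_allocs_failed under
 * /sys/bus/vio/devices/<name>/, and the bus exposes cmo_entitled,
 * cmo_spare, cmo_min, cmo_desired, cmo_curr, cmo_high, cmo_reserve_size,
 * cmo_excess_size and cmo_excess_free under /sys/bus/vio/ (exact paths
 * depend on the running kernel's sysfs layout).
 */
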
#else /* CONFIG_PPC_SMLPAR */
int vio_cmo_entitlement_update(size_t new_entitlement) { return 0; }
void vio_cmo_set_dev_desired(struct vio_dev *viodev, size_t desired) {}
static int vio_cmo_bus_probe(struct vio_dev *viodev) { return 0; }
static void vio_cmo_bus_remove(struct vio_dev *viodev) {}
static void vio_cmo_set_dma_ops(struct vio_dev *viodev) {}
static void vio_cmo_bus_init(void) {}
static void vio_cmo_sysfs_init(void) {}
#endif /* CONFIG_PPC_SMLPAR */
EXPORT_SYMBOL(vio_cmo_entitlement_update);
EXPORT_SYMBOL(vio_cmo_set_dev_desired);


/*
 * Platform Facilities Option (PFO) support
 */

/**
 * vio_h_cop_sync - Perform a synchronous PFO co-processor operation
 *
 * @vdev - Pointer to a struct vio_dev for device
 * @op - Pointer to a struct vio_pfo_op for the operation parameters
 *
 * Calls the hypervisor to synchronously perform the PFO operation
 * described in @op.  In the case of a busy response from the hypervisor,
 * the operation will be re-submitted indefinitely unless a non-zero timeout
 * is specified or an error occurs. The timeout places a limit on when to
 * stop re-submitting an operation; the total time can be exceeded if an
 * operation is in progress.
 *
 * op->hcall_err will be set to the return from the last h_cop_op call,
 * or to 0 if an error not involving the h_call was encountered.
 *
 * Returns:
 *      0 on success,
 *      -EINVAL if the h_call fails due to an invalid parameter,
 *      -E2BIG if the h_call can not be performed synchronously,
 *      -EBUSY if a timeout is specified and has elapsed,
 *      -EACCES if the memory area for data/status has been rescinded, or
 *      -EPERM if a hardware fault has been indicated
 */
int vio_h_cop_sync(struct vio_dev *vdev, struct vio_pfo_op *op)
{
        struct device *dev = &vdev->dev;
        unsigned long deadline = 0;
        long hret = 0;
        int ret = 0;

        if (op->timeout)
                deadline = jiffies + msecs_to_jiffies(op->timeout);

        while (true) {
                hret = plpar_hcall_norets(H_COP, op->flags,
                                vdev->resource_id,
                                op->in, op->inlen, op->out,
                                op->outlen, op->csbcpb);

                if (hret == H_SUCCESS ||
                    (hret != H_NOT_ENOUGH_RESOURCES &&
                     hret != H_BUSY && hret != H_RESOURCE) ||
                    (op->timeout && time_after(jiffies, deadline)))
                        break;

                dev_dbg(dev, "%s: hcall ret(%ld), retrying.\n", __func__, hret);
        }

        switch (hret) {
        case H_SUCCESS:
                ret = 0;
                break;
        case H_OP_MODE:
        case H_TOO_BIG:
                ret = -E2BIG;
                break;
        case H_RESCINDED:
                ret = -EACCES;
                break;
        case H_HARDWARE:
                ret = -EPERM;
                break;
        case H_NOT_ENOUGH_RESOURCES:
        case H_RESOURCE:
        case H_BUSY:
                ret = -EBUSY;
                break;
        default:
                ret = -EINVAL;
                break;
        }

        if (ret)
                dev_dbg(dev, "%s: Sync h_cop_op failure (ret:%d) (hret:%ld)\n",
                                __func__, ret, hret);

        op->hcall_err = hret;
        return ret;
}
EXPORT_SYMBOL(vio_h_cop_sync);
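
/*
 * Illustrative only: a caller fills in a struct vio_pfo_op with the fields
 * consumed above (buffer addresses are DMA addresses obtained by the
 * caller; all names here are placeholders):
 *
 *        struct vio_pfo_op op = {
 *                .flags   = op_flags,
 *                .in      = inbuf_dma,
 *                .inlen   = inbuf_len,
 *                .out     = outbuf_dma,
 *                .outlen  = outbuf_len,
 *                .csbcpb  = csbcpb_dma,
 *                .timeout = 100,         (in ms; 0 means retry indefinitely)
 *        };
 *        rc = vio_h_cop_sync(vdev, &op);
 *
 * On failure, op.hcall_err holds the return code of the last H_COP hcall.
 */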

static struct iommu_table *vio_build_iommu_table(struct vio_dev *dev)
{
        const __be32 *dma_window;
        struct iommu_table *tbl;
        unsigned long offset, size;

        dma_window = of_get_property(dev->dev.of_node,
                                     "ibm,my-dma-window", NULL);
        if (!dma_window)
                return NULL;

        tbl = kzalloc(sizeof(*tbl), GFP_KERNEL);
        if (tbl == NULL)
                return NULL;

        of_parse_dma_window(dev->dev.of_node, dma_window,
                            &tbl->it_index, &offset, &size);

        /* TCE table size - measured in tce entries */
        tbl->it_page_shift = IOMMU_PAGE_SHIFT_4K;
        tbl->it_size = size >> tbl->it_page_shift;
        /* offset for VIO should always be 0 */
        tbl->it_offset = offset >> tbl->it_page_shift;
        tbl->it_busno = 0;
        tbl->it_type = TCE_VB;
        tbl->it_blocksize = 16;

        return iommu_init_table(tbl, -1);
}
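
/*
 * For example, a 256MB DMA window with 4K IOMMU pages gives
 * it_size = 0x10000000 >> 12 = 65536 TCE entries, with it_offset
 * normally 0 for VIO as noted above.
 */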

/**
 * vio_match_device: - Tell if a VIO device has a matching
 *                      VIO device id structure.
 * @ids:        array of VIO device id structures to search in
 * @dev:        the VIO device structure to match against
 *
 * Used by a driver to check whether a VIO device present in the
 * system is in its list of supported devices. Returns the matching
 * vio_device_id structure or NULL if there is no match.
 */
static const struct vio_device_id *vio_match_device(
                const struct vio_device_id *ids, const struct vio_dev *dev)
{
        while (ids->type[0] != '\0') {
                if ((strncmp(dev->type, ids->type, strlen(ids->type)) == 0) &&
                    of_device_is_compatible(dev->dev.of_node,
                                            ids->compat))
                        return ids;
                ids++;
        }
        return NULL;
}

/*
 * Convert from struct device to struct vio_dev and pass to driver.
 * dev->driver has already been set by generic code because vio_bus_match
 * succeeded.
 */
static int vio_bus_probe(struct device *dev)
{
        struct vio_dev *viodev = to_vio_dev(dev);
        struct vio_driver *viodrv = to_vio_driver(dev->driver);
        const struct vio_device_id *id;
        int error = -ENODEV;

        if (!viodrv->probe)
                return error;

        id = vio_match_device(viodrv->id_table, viodev);
        if (id) {
                memset(&viodev->cmo, 0, sizeof(viodev->cmo));
                if (firmware_has_feature(FW_FEATURE_CMO)) {
                        error = vio_cmo_bus_probe(viodev);
                        if (error)
                                return error;
                }
                error = viodrv->probe(viodev, id);
                if (error && firmware_has_feature(FW_FEATURE_CMO))
                        vio_cmo_bus_remove(viodev);
        }

        return error;
}

/* convert from struct device to struct vio_dev and pass to driver. */
static int vio_bus_remove(struct device *dev)
{
        struct vio_dev *viodev = to_vio_dev(dev);
        struct vio_driver *viodrv = to_vio_driver(dev->driver);
        struct device *devptr;
        int ret = 1;

        /*
         * Hold a reference to the device after the remove function is called
         * to allow for CMO accounting cleanup for the device.
         */
        devptr = get_device(dev);

        if (viodrv->remove)
                ret = viodrv->remove(viodev);

        if (!ret && firmware_has_feature(FW_FEATURE_CMO))
                vio_cmo_bus_remove(viodev);

        put_device(devptr);
        return ret;
}

/**
 * vio_register_driver: - Register a new vio driver
 * @viodrv:     The vio_driver structure to be registered.
 */
int __vio_register_driver(struct vio_driver *viodrv, struct module *owner,
                          const char *mod_name)
{
        pr_debug("%s: driver %s registering\n", __func__, viodrv->name);

        /* fill in 'struct driver' fields */
        viodrv->driver.name = viodrv->name;
        viodrv->driver.pm = viodrv->pm;
        viodrv->driver.bus = &vio_bus_type;
        viodrv->driver.owner = owner;
        viodrv->driver.mod_name = mod_name;

        return driver_register(&viodrv->driver);
}
EXPORT_SYMBOL(__vio_register_driver);
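
/*
 * Illustrative only: a client driver typically registers through the
 * vio_register_driver() wrapper (which supplies THIS_MODULE and the
 * module name), with an id table terminated by an empty entry as
 * expected by vio_match_device() above:
 *
 *        static const struct vio_device_id ex_ids[] = {
 *                { "network", "IBM,l-lan" },
 *                { "", "" }
 *        };
 *
 *        static struct vio_driver ex_driver = {
 *                .id_table = ex_ids,
 *                .probe    = ex_probe,
 *                .remove   = ex_remove,
 *                .name     = "ex_vio",
 *        };
 *
 *        rc = vio_register_driver(&ex_driver);
 */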

/**
 * vio_unregister_driver - Remove registration of vio driver.
 * @viodrv:     The vio_driver struct to be removed from registration
 */
void vio_unregister_driver(struct vio_driver *viodrv)
{
        driver_unregister(&viodrv->driver);
}
EXPORT_SYMBOL(vio_unregister_driver);

/* vio_dev refcount hit 0 */
static void vio_dev_release(struct device *dev)
{
        struct iommu_table *tbl = get_iommu_table_base(dev);

        if (tbl)
                iommu_free_table(tbl, of_node_full_name(dev->of_node));
        of_node_put(dev->of_node);
        kfree(to_vio_dev(dev));
}

/**
 * vio_register_device_node: - Register a new vio device.
 * @of_node:    The OF node for this device.
 *
 * Creates and initializes a vio_dev structure from the data in
 * of_node and adds it to the list of virtual devices.
 * Returns a pointer to the created vio_dev or NULL if node has
 * NULL device_type or compatible fields.
 */
struct vio_dev *vio_register_device_node(struct device_node *of_node)
{
        struct vio_dev *viodev;
        struct device_node *parent_node;
        const __be32 *prop;
        enum vio_dev_family family;
        const char *of_node_name = of_node->name ? of_node->name : "<unknown>";

        /*
         * Determine if this node is under the /vdevice node or under the
         * /ibm,platform-facilities node.  This decides the device's family.
         */
1342         parent_node = of_get_parent(of_node);
1343         if (parent_node) {
1344                 if (!strcmp(parent_node->full_name, "/ibm,platform-facilities"))
1345                         family = PFO;
1346                 else if (!strcmp(parent_node->full_name, "/vdevice"))
1347                         family = VDEVICE;
1348                 else {
1349                         pr_warn("%s: parent(%s) of %s not recognized.\n",
1350                                         __func__,
1351                                         parent_node->full_name,
1352                                         of_node_name);
1353                         of_node_put(parent_node);
1354                         return NULL;
1355                 }
1356                 of_node_put(parent_node);
1357         } else {
1358                 pr_warn("%s: could not determine the parent of node %s.\n",
1359                                 __func__, of_node_name);
1360                 return NULL;
1361         }
1362 
1363         if (family == PFO) {
1364                 if (of_get_property(of_node, "interrupt-controller", NULL)) {
1365                         pr_debug("%s: Skipping the interrupt controller %s.\n",
1366                                         __func__, of_node_name);
1367                         return NULL;
1368                 }
1369         }
1370 
1371         /* allocate a vio_dev for this node */
1372         viodev = kzalloc(sizeof(struct vio_dev), GFP_KERNEL);
1373         if (viodev == NULL) {
1374                 pr_warn("%s: allocation failure for VIO device.\n", __func__);
1375                 return NULL;
1376         }
1377 
1378         /* we need the 'device_type' property in order to match with drivers */
1379         viodev->family = family;
1380         if (viodev->family == VDEVICE) {
1381                 unsigned int unit_address;
1382 
1383                 if (of_node->type != NULL)
1384                         viodev->type = of_node->type;
1385                 else {
1386                         pr_warn("%s: node %s is missing the 'device_type' "
1387                                         "property.\n", __func__, of_node_name);
1388                         goto out;
1389                 }
1390 
1391                 prop = of_get_property(of_node, "reg", NULL);
1392                 if (prop == NULL) {
1393                         pr_warn("%s: node %s missing 'reg'\n",
1394                                         __func__, of_node_name);
1395                         goto out;
1396                 }
1397                 unit_address = of_read_number(prop, 1);
1398                 dev_set_name(&viodev->dev, "%x", unit_address);
1399                 viodev->irq = irq_of_parse_and_map(of_node, 0);
1400                 viodev->unit_address = unit_address;
1401         } else {
1402                 /* PFO devices need their resource_id for submitting COP_OPs.
1403                  * This is an optional field for devices, but is required when
1404                  * performing synchronous ops */
1405                 prop = of_get_property(of_node, "ibm,resource-id", NULL);
1406                 if (prop != NULL)
1407                         viodev->resource_id = of_read_number(prop, 1);
1408 
1409                 dev_set_name(&viodev->dev, "%s", of_node_name);
1410                 viodev->type = of_node_name;
1411                 viodev->irq = 0;
1412         }
1413 
1414         viodev->name = of_node->name;
1415         viodev->dev.of_node = of_node_get(of_node);
1416 
1417         set_dev_node(&viodev->dev, of_node_to_nid(of_node));
1418 
1419         /* init generic 'struct device' fields: */
1420         viodev->dev.parent = &vio_bus_device.dev;
1421         viodev->dev.bus = &vio_bus_type;
1422         viodev->dev.release = vio_dev_release;
1423 
1424         if (of_get_property(viodev->dev.of_node, "ibm,my-dma-window", NULL)) {
1425                 if (firmware_has_feature(FW_FEATURE_CMO))
1426                         vio_cmo_set_dma_ops(viodev);
1427                 else
1428                         set_dma_ops(&viodev->dev, &dma_iommu_ops);
1429 
1430                 set_iommu_table_base(&viodev->dev,
1431                                      vio_build_iommu_table(viodev));
1432 
1433                 /* needed to ensure proper operation of coherent allocations
1434                  * later, in case the driver doesn't set it explicitly */
1435                 viodev->dev.coherent_dma_mask = DMA_BIT_MASK(64);
1436                 viodev->dev.dma_mask = &viodev->dev.coherent_dma_mask;
1437         }
1438 
1439         /* register with generic device framework */
1440         if (device_register(&viodev->dev)) {
1441                 printk(KERN_ERR "%s: failed to register device %s\n",
1442                                 __func__, dev_name(&viodev->dev));
1443                 put_device(&viodev->dev);
1444                 return NULL;
1445         }
1446 
1447         return viodev;
1448 
1449 out:    /* Use this exit point for any return prior to device_register */
1450         kfree(viodev);
1451 
1452         return NULL;
1453 }
1454 EXPORT_SYMBOL(vio_register_device_node);
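     /*
      * Editor's illustration -- not part of the original source. The shape of
      * a /vdevice child that vio_register_device_node() consumes; names and
      * values here are hypothetical:
      *
      *      vdevice {
      *              v-scsi@30000003 {
      *                      device_type = "vscsi";          <- viodev->type
      *                      reg = <0x30000003>;             <- unit_address; dev name "30000003"
      *                      compatible = "IBM,v-scsi";      <- driver matching / modalias
      *                      ibm,my-dma-window = < ... >;    <- enables the IOMMU/DMA setup above
      *              };
      *      };
      *
      * Children of /ibm,platform-facilities (family PFO) are instead named
      * after the node itself and may carry an optional "ibm,resource-id".
      */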
1455 
1456 /*
1457  * vio_bus_scan_register_devices - Scan OF and register each child device
1458  * @root_name - OF node name for the root of the subtree to search.
1459  *              This must be non-NULL.
1460  *
1461  * Starting from the root node provided, register the device node for
1462  * each child beneath the root.
1463  */
1464 static void vio_bus_scan_register_devices(char *root_name)
1465 {
1466         struct device_node *node_root, *node_child;
1467 
1468         if (!root_name)
1469                 return;
1470 
1471         node_root = of_find_node_by_name(NULL, root_name);
1472         if (node_root) {
1473 
1474                 /*
1475                  * Create struct vio_devices for each virtual device in
1476                  * the device tree. Drivers will associate with them later.
1477                  */
1478                 node_child = of_get_next_child(node_root, NULL);
1479                 while (node_child) {
1480                         vio_register_device_node(node_child);
1481                         node_child = of_get_next_child(node_root, node_child);
1482                 }
1483                 of_node_put(node_root);
1484         }
1485 }
1486 
1487 /**
1488  * vio_bus_init - Initialize the virtual IO bus
1489  */
1490 static int __init vio_bus_init(void)
1491 {
1492         int err;
1493 
1494         if (firmware_has_feature(FW_FEATURE_CMO))
1495                 vio_cmo_sysfs_init();
1496 
1497         err = bus_register(&vio_bus_type);
1498         if (err) {
1499                 printk(KERN_ERR "failed to register VIO bus\n");
1500                 return err;
1501         }
1502 
1503         /*
1504          * The fake parent of all vio devices, just to give us
1505          * a nice directory
1506          */
1507         err = device_register(&vio_bus_device.dev);
1508         if (err) {
1509                 printk(KERN_WARNING "%s: device_register returned %i\n",
1510                                 __func__, err);
1511                 return err;
1512         }
1513 
1514         if (firmware_has_feature(FW_FEATURE_CMO))
1515                 vio_cmo_bus_init();
1516 
1517         return 0;
1518 }
1519 postcore_initcall(vio_bus_init);
1520 
1521 static int __init vio_device_init(void)
1522 {
1523         vio_bus_scan_register_devices("vdevice");
1524         vio_bus_scan_register_devices("ibm,platform-facilities");
1525 
1526         return 0;
1527 }
1528 device_initcall(vio_device_init);
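     /*
      * Editor's note: vio_bus_init() is a postcore_initcall, so the bus type
      * and the fake "vio" parent device exist before any driver registers;
      * vio_device_init() is a device_initcall, so the scan that creates a
      * vio_dev for each child of /vdevice and /ibm,platform-facilities runs
      * strictly after the bus itself is up.
      */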
1529 
1530 static ssize_t name_show(struct device *dev,
1531                 struct device_attribute *attr, char *buf)
1532 {
1533         return sprintf(buf, "%s\n", to_vio_dev(dev)->name);
1534 }
1535 
1536 static ssize_t devspec_show(struct device *dev,
1537                 struct device_attribute *attr, char *buf)
1538 {
1539         struct device_node *of_node = dev->of_node;
1540 
1541         return sprintf(buf, "%s\n", of_node_full_name(of_node));
1542 }
1543 
1544 static ssize_t modalias_show(struct device *dev, struct device_attribute *attr,
1545                              char *buf)
1546 {
1547         const struct vio_dev *vio_dev = to_vio_dev(dev);
1548         struct device_node *dn;
1549         const char *cp;
1550 
1551         dn = dev->of_node;
1552         if (!dn) {
1553                 strcpy(buf, "\n");
1554                 return strlen(buf);
1555         }
1556         cp = of_get_property(dn, "compatible", NULL);
1557         if (!cp) {
1558                 strcpy(buf, "\n");
1559                 return strlen(buf);
1560         }
1561 
1562         return sprintf(buf, "vio:T%sS%s\n", vio_dev->type, cp);
1563 }
1564 
1565 static struct device_attribute vio_dev_attrs[] = {
1566         __ATTR_RO(name),
1567         __ATTR_RO(devspec),
1568         __ATTR_RO(modalias),
1569         __ATTR_NULL
1570 };
1571 
1572 void vio_unregister_device(struct vio_dev *viodev)
1573 {
1574         device_unregister(&viodev->dev);
1575 }
1576 EXPORT_SYMBOL(vio_unregister_device);
1577 
1578 static int vio_bus_match(struct device *dev, struct device_driver *drv)
1579 {
1580         const struct vio_dev *vio_dev = to_vio_dev(dev);
1581         struct vio_driver *vio_drv = to_vio_driver(drv);
1582         const struct vio_device_id *ids = vio_drv->id_table;
1583 
1584         return (ids != NULL) && (vio_match_device(ids, vio_dev) != NULL);
1585 }
1586 
1587 static int vio_hotplug(struct device *dev, struct kobj_uevent_env *env)
1588 {
1589         const struct vio_dev *vio_dev = to_vio_dev(dev);
1590         struct device_node *dn;
1591         const char *cp;
1592 
1593         dn = dev->of_node;
1594         if (!dn)
1595                 return -ENODEV;
1596         cp = of_get_property(dn, "compatible", NULL);
1597         if (!cp)
1598                 return -ENODEV;
1599 
1600         add_uevent_var(env, "MODALIAS=vio:T%sS%s", vio_dev->type, cp);
1601         return 0;
1602 }
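     /*
      * Editor's illustration: for a hypothetical device of type "vscsi" whose
      * first "compatible" value is "IBM,v-scsi", modalias_show() and
      * vio_hotplug() above both emit the same string,
      *
      *      vio:TvscsiSIBM,v-scsi
      *
      * which userspace matches against the module aliases generated from each
      * driver's vio_device_id table.
      */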
1603 
1604 struct bus_type vio_bus_type = {
1605         .name = "vio",
1606         .dev_attrs = vio_dev_attrs,
1607         .uevent = vio_hotplug,
1608         .match = vio_bus_match,
1609         .probe = vio_bus_probe,
1610         .remove = vio_bus_remove,
1611 };
1612 
1613 /**
1614  * vio_get_attribute - get attribute for virtual device
1615  * @vdev:       The vio device whose property is requested.
1616  * @which:      The property/attribute to be extracted.
1617  * @length:     Pointer to length of returned data size (unused if NULL).
1618  *
1619  * Calls prom.c's of_get_property() to return the value of the
1620  * attribute specified by @which.
1621  */
1622 const void *vio_get_attribute(struct vio_dev *vdev, char *which, int *length)
1623 {
1624         return of_get_property(vdev->dev.of_node, which, length);
1625 }
1626 EXPORT_SYMBOL(vio_get_attribute);
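     /*
      * Editor's sketch -- not part of the original source. A hypothetical
      * caller; "example_get_window" and the choice of property are only
      * illustrative.
      */
     static const __be32 *example_get_window(struct vio_dev *vdev, int *len)
     {
             /* raw property data, or NULL if absent; *len receives its size */
             return vio_get_attribute(vdev, "ibm,my-dma-window", len);
     }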
1627 
1628 #ifdef CONFIG_PPC_PSERIES
1629 /* vio_find_name() - internal because only vio.c knows how we formatted the
1630  * kobject name
1631  */
1632 static struct vio_dev *vio_find_name(const char *name)
1633 {
1634         struct device *found;
1635 
1636         found = bus_find_device_by_name(&vio_bus_type, NULL, name);
1637         if (!found)
1638                 return NULL;
1639 
1640         return to_vio_dev(found);
1641 }
1642 
1643 /**
1644  * vio_find_node - find an already-registered vio_dev
1645  * @vnode: device_node of the virtual device we're looking for
1646  */
1647 struct vio_dev *vio_find_node(struct device_node *vnode)
1648 {
1649         char kobj_name[20];
1650         struct device_node *vnode_parent;
1651         const char *dev_type;
1652 
1653         vnode_parent = of_get_parent(vnode);
1654         if (!vnode_parent)
1655                 return NULL;
1656 
1657         dev_type = of_get_property(vnode_parent, "device_type", NULL);
1658         of_node_put(vnode_parent);
1659         if (!dev_type)
1660                 return NULL;
1661 
1662         /* construct the kobject name from the device node */
1663         if (!strcmp(dev_type, "vdevice")) {
1664                 const __be32 *prop;
1665 
1666                 prop = of_get_property(vnode, "reg", NULL);
1667                 if (!prop)
1668                         return NULL;
1669                 snprintf(kobj_name, sizeof(kobj_name), "%x",
1670                          (uint32_t)of_read_number(prop, 1));
1671         } else if (!strcmp(dev_type, "ibm,platform-facilities"))
1672                 snprintf(kobj_name, sizeof(kobj_name), "%s", vnode->name);
1673         else
1674                 return NULL;
1675 
1676         return vio_find_name(kobj_name);
1677 }
1678 EXPORT_SYMBOL(vio_find_node);
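     /*
      * Editor's note: the kobject name built above mirrors dev_set_name() in
      * vio_register_device_node() -- a vdevice child with reg = <0x30000003>
      * is looked up as "30000003", while a platform-facilities child is
      * looked up by its node name.
      */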
1679 
1680 int vio_enable_interrupts(struct vio_dev *dev)
1681 {
1682         int rc = h_vio_signal(dev->unit_address, VIO_IRQ_ENABLE);
1683         if (rc != H_SUCCESS)
1684                 printk(KERN_ERR "vio: Error 0x%x enabling interrupts\n", rc);
1685         return rc;
1686 }
1687 EXPORT_SYMBOL(vio_enable_interrupts);
1688 
1689 int vio_disable_interrupts(struct vio_dev *dev)
1690 {
1691         int rc = h_vio_signal(dev->unit_address, VIO_IRQ_DISABLE);
1692         if (rc != H_SUCCESS)
1693                 printk(KERN_ERR "vio: Error 0x%x disabling interrupts\n", rc);
1694         return rc;
1695 }
1696 EXPORT_SYMBOL(vio_disable_interrupts);
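     /*
      * Editor's sketch -- not part of the original source, assuming
      * <linux/interrupt.h>. The usual pattern in pseries vio drivers: mask
      * the device's virtual interrupt while draining queued events, then
      * re-arm it. "example_irq" and the drain step are hypothetical.
      */
     static irqreturn_t example_irq(int irq, void *data)
     {
             struct vio_dev *vdev = data;

             vio_disable_interrupts(vdev);   /* H_VIO_SIGNAL(..., DISABLE) */
             /* ... process queued events from the device here ... */
             vio_enable_interrupts(vdev);    /* H_VIO_SIGNAL(..., ENABLE)  */

             return IRQ_HANDLED;
     }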
1697 #endif /* CONFIG_PPC_PSERIES */
1698 
