~ [ source navigation ] ~ [ diff markup ] ~ [ identifier search ] ~

TOMOYO Linux Cross Reference
Linux/arch/ia64/sn/io/sn2/pcibr/pcibr_dvr.c

Version: ~ [ linux-5.1-rc1 ] ~ [ linux-5.0.3 ] ~ [ linux-4.20.17 ] ~ [ linux-4.19.30 ] ~ [ linux-4.18.20 ] ~ [ linux-4.17.19 ] ~ [ linux-4.16.18 ] ~ [ linux-4.15.18 ] ~ [ linux-4.14.107 ] ~ [ linux-4.13.16 ] ~ [ linux-4.12.14 ] ~ [ linux-4.11.12 ] ~ [ linux-4.10.17 ] ~ [ linux-4.9.164 ] ~ [ linux-4.8.17 ] ~ [ linux-4.7.10 ] ~ [ linux-4.6.7 ] ~ [ linux-4.5.7 ] ~ [ linux-4.4.176 ] ~ [ linux-4.3.6 ] ~ [ linux-4.2.8 ] ~ [ linux-4.1.52 ] ~ [ linux-4.0.9 ] ~ [ linux-3.19.8 ] ~ [ linux-3.18.136 ] ~ [ linux-3.17.8 ] ~ [ linux-3.16.63 ] ~ [ linux-3.15.10 ] ~ [ linux-3.14.79 ] ~ [ linux-3.13.11 ] ~ [ linux-3.12.74 ] ~ [ linux-3.11.10 ] ~ [ linux-3.10.108 ] ~ [ linux-3.9.11 ] ~ [ linux-3.8.13 ] ~ [ linux-3.7.10 ] ~ [ linux-3.6.11 ] ~ [ linux-3.5.7 ] ~ [ linux-3.4.113 ] ~ [ linux-3.3.8 ] ~ [ linux-3.2.102 ] ~ [ linux-3.1.10 ] ~ [ linux-3.0.101 ] ~ [ linux-2.6.39.4 ] ~ [ linux-2.6.38.8 ] ~ [ linux-2.6.37.6 ] ~ [ linux-2.6.36.4 ] ~ [ linux-2.6.35.14 ] ~ [ linux-2.6.34.15 ] ~ [ linux-2.6.33.20 ] ~ [ linux-2.6.32.71 ] ~ [ linux-2.6.0 ] ~ [ linux-2.4.37.11 ] ~ [ unix-v6-master ] ~ [ ccs-tools-1.8.5 ] ~ [ policy-sample ] ~
Architecture: ~ [ i386 ] ~ [ alpha ] ~ [ m68k ] ~ [ mips ] ~ [ ppc ] ~ [ sparc ] ~ [ sparc64 ] ~

  1 /*
  2  *
  3  * This file is subject to the terms and conditions of the GNU General Public
  4  * License.  See the file "COPYING" in the main directory of this archive
  5  * for more details.
  6  *
  7  * Copyright (C) 2001-2003 Silicon Graphics, Inc. All rights reserved.
  8  */
  9 
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/string.h>
#include <linux/interrupt.h>
#include <linux/ioport.h>
#include <linux/errno.h>
#include <asm/sn/sgi.h>
#include <asm/sn/sn_sal.h>
#include <asm/sn/sn_cpuid.h>
#include <asm/sn/addrs.h>
#include <asm/sn/arch.h>
#include <asm/sn/iograph.h>
#include <asm/sn/invent.h>
#include <asm/sn/hcl.h>
#include <asm/sn/labelcl.h>
#include <asm/sn/klconfig.h>
#include <asm/sn/xtalk/xwidget.h>
#include <asm/sn/pci/bridge.h>
#include <asm/sn/pci/pciio.h>
#include <asm/sn/pci/pcibr.h>
#include <asm/sn/pci/pcibr_private.h>
#include <asm/sn/pci/pci_defs.h>
#include <asm/sn/prio.h>
#include <asm/sn/xtalk/xbow.h>
#include <asm/sn/io.h>
#include <asm/sn/sn_private.h>
 36 
 37 /*
 38  * global variables to toggle the different levels of pcibr debugging.  
 39  *   -pcibr_debug_mask is the mask of the different types of debugging
 40  *    you want to enable.  See sys/PCI/pcibr_private.h 
 41  *   -pcibr_debug_module is the module you want to trace.  By default
 42  *    all modules are trace.  For IP35 this value has the format of
 43  *    something like "001c10".  For IP27 this value is a node number,
 44  *    i.e. "1", "2"...  For IP30 this is undefined and should be set to
 45  *    'all'.
 46  *   -pcibr_debug_widget is the widget you want to trace.  For IP27
 47  *    the widget isn't exposed in the hwpath so use the xio slot num.
 48  *    i.e. for 'io2' set pcibr_debug_widget to "2".
 49  *   -pcibr_debug_slot is the pci slot you want to trace.
 50  */
 51 uint32_t pcibr_debug_mask = 0x0;        /* 0x00000000 to disable */
 52 char      *pcibr_debug_module = "all";          /* 'all' for all modules */
 53 int        pcibr_debug_widget = -1;             /* '-1' for all widgets  */
 54 int        pcibr_debug_slot = -1;               /* '-1' for all slots    */
 55 
 56 /*
 57  * Macros related to the Lucent USS 302/312 usb timeout workaround.  It
 58  * appears that if the lucent part can get into a retry loop if it sees a
 59  * DAC on the bus during a pio read retry.  The loop is broken after about
 60  * 1ms, so we need to set up bridges holding this part to allow at least
 61  * 1ms for pio.
 62  */
 63 
 64 #define USS302_TIMEOUT_WAR
 65 
 66 #ifdef USS302_TIMEOUT_WAR
 67 #define LUCENT_USBHC_VENDOR_ID_NUM      0x11c1
 68 #define LUCENT_USBHC302_DEVICE_ID_NUM   0x5801
 69 #define LUCENT_USBHC312_DEVICE_ID_NUM   0x5802
 70 #define USS302_BRIDGE_TIMEOUT_HLD       4
 71 #endif
 72 
/* kbrick widgetnum-to-bus layout */
/* Maps an xtalk widget number (the array index, 0x0 - 0xf) to the PCI
 * bus number it carries on a kbrick.  Widgets that carry no PCI bus map
 * to 0.  NOTE(review): assumes MAX_PORT_NUM covers widget ids 0x0-0xf;
 * confirm against its definition.
 */
int p_busnum[MAX_PORT_NUM] = {                  /* widget#      */
        0, 0, 0, 0, 0, 0, 0, 0,                 /* 0x0 - 0x7    */
        2,                                      /* 0x8          */
        1,                                      /* 0x9          */
        0, 0,                                   /* 0xa - 0xb    */
        5,                                      /* 0xc          */
        6,                                      /* 0xd          */
        4,                                      /* 0xe          */
        3,                                      /* 0xf          */
};
 84 
 85 #if PCIBR_SOFT_LIST
 86 pcibr_list_p            pcibr_list = 0;
 87 #endif
 88 
 89 extern int              hwgraph_vertex_name_get(vertex_hdl_t vhdl, char *buf, uint buflen);
 90 extern long             atoi(register char *p);
 91 extern cnodeid_t        nodevertex_to_cnodeid(vertex_hdl_t vhdl);
 92 extern char             *dev_to_name(vertex_hdl_t dev, char *buf, uint buflen);
 93 extern struct map       *atemapalloc(uint64_t);
 94 extern void             atefree(struct map *, size_t, uint64_t);
 95 extern void             atemapfree(struct map *);
 96 extern pciio_dmamap_t   get_free_pciio_dmamap(vertex_hdl_t);
 97 extern void             free_pciio_dmamap(pcibr_dmamap_t);
 98 extern void             xwidget_error_register(vertex_hdl_t, error_handler_f *, error_handler_arg_t);
 99 
100 #define ATE_WRITE()    ate_write(pcibr_soft, ate_ptr, ate_count, ate)
101 #if PCIBR_FREEZE_TIME
102 #define ATE_FREEZE()    s = ate_freeze(pcibr_dmamap, &freeze_time, cmd_regs)
103 #else
104 #define ATE_FREEZE()    s = ate_freeze(pcibr_dmamap, cmd_regs)
105 #endif /* PCIBR_FREEZE_TIME */
106 
107 #if PCIBR_FREEZE_TIME
108 #define ATE_THAW()      ate_thaw(pcibr_dmamap, ate_index, ate, ate_total, freeze_time, cmd_regs, s)
109 #else
110 #define ATE_THAW()      ate_thaw(pcibr_dmamap, ate_index, cmd_regs, s)
111 #endif
112 
113 /* =====================================================================
114  *    Function Table of Contents
115  *
116  *      The order of functions in this file has stopped
117  *      making much sense. We might want to take a look
118  *      at it some time and bring back some sanity, or
119  *      perhaps bust this file into smaller chunks.
120  */
121 
122 extern int               do_pcibr_rrb_free_all(pcibr_soft_t, bridge_t *, pciio_slot_t);
123 extern void              do_pcibr_rrb_autoalloc(pcibr_soft_t, int, int, int);
124 
125 extern int               pcibr_wrb_flush(vertex_hdl_t);
126 extern int               pcibr_rrb_alloc(vertex_hdl_t, int *, int *);
127 extern void              pcibr_rrb_flush(vertex_hdl_t);
128 
129 static int                pcibr_try_set_device(pcibr_soft_t, pciio_slot_t, unsigned, bridgereg_t);
130 void                     pcibr_release_device(pcibr_soft_t, pciio_slot_t, bridgereg_t);
131 
132 extern void              pcibr_setwidint(xtalk_intr_t);
133 extern void              pcibr_clearwidint(bridge_t *);
134 
135 extern iopaddr_t         pcibr_bus_addr_alloc(pcibr_soft_t, pciio_win_info_t,
136                                               pciio_space_t, int, int, int);
137 
138 int                      pcibr_attach(vertex_hdl_t);
139 int                      pcibr_attach2(vertex_hdl_t, bridge_t *, vertex_hdl_t,
140                                        int, pcibr_soft_t *);
141 int                      pcibr_detach(vertex_hdl_t);
142 int                      pcibr_pcix_rbars_calc(pcibr_soft_t);
143 extern int               pcibr_init_ext_ate_ram(bridge_t *);
144 extern int               pcibr_ate_alloc(pcibr_soft_t, int);
145 extern void              pcibr_ate_free(pcibr_soft_t, int, int);
146 extern int               pcibr_widget_to_bus(vertex_hdl_t pcibr_vhdl);
147 
148 extern unsigned ate_freeze(pcibr_dmamap_t pcibr_dmamap,
149 #if PCIBR_FREEZE_TIME
150                          unsigned *freeze_time_ptr,
151 #endif
152                          unsigned *cmd_regs);
153 extern void ate_write(pcibr_soft_t pcibr_soft, bridge_ate_p ate_ptr, int ate_count, bridge_ate_t ate);
154 extern void ate_thaw(pcibr_dmamap_t pcibr_dmamap, int ate_index,
155 #if PCIBR_FREEZE_TIME
156                         bridge_ate_t ate,
157                         int ate_total,
158                         unsigned freeze_time_start,
159 #endif
160                         unsigned *cmd_regs,
161                         unsigned s);
162 
163 pcibr_info_t      pcibr_info_get(vertex_hdl_t);
164 
165 static iopaddr_t         pcibr_addr_pci_to_xio(vertex_hdl_t, pciio_slot_t, pciio_space_t, iopaddr_t, size_t, unsigned);
166 
167 pcibr_piomap_t          pcibr_piomap_alloc(vertex_hdl_t, device_desc_t, pciio_space_t, iopaddr_t, size_t, size_t, unsigned);
168 void                    pcibr_piomap_free(pcibr_piomap_t);
169 caddr_t                 pcibr_piomap_addr(pcibr_piomap_t, iopaddr_t, size_t);
170 void                    pcibr_piomap_done(pcibr_piomap_t);
171 caddr_t                 pcibr_piotrans_addr(vertex_hdl_t, device_desc_t, pciio_space_t, iopaddr_t, size_t, unsigned);
172 iopaddr_t               pcibr_piospace_alloc(vertex_hdl_t, device_desc_t, pciio_space_t, size_t, size_t);
173 void                    pcibr_piospace_free(vertex_hdl_t, pciio_space_t, iopaddr_t, size_t);
174 
175 static iopaddr_t         pcibr_flags_to_d64(unsigned, pcibr_soft_t);
176 extern bridge_ate_t     pcibr_flags_to_ate(unsigned);
177 
178 pcibr_dmamap_t          pcibr_dmamap_alloc(vertex_hdl_t, device_desc_t, size_t, unsigned);
179 void                    pcibr_dmamap_free(pcibr_dmamap_t);
180 extern bridge_ate_p     pcibr_ate_addr(pcibr_soft_t, int);
181 static iopaddr_t         pcibr_addr_xio_to_pci(pcibr_soft_t, iopaddr_t, size_t);
182 iopaddr_t               pcibr_dmamap_addr(pcibr_dmamap_t, paddr_t, size_t);
183 void                    pcibr_dmamap_done(pcibr_dmamap_t);
184 cnodeid_t               pcibr_get_dmatrans_node(vertex_hdl_t);
185 iopaddr_t               pcibr_dmatrans_addr(vertex_hdl_t, device_desc_t, paddr_t, size_t, unsigned);
186 void                    pcibr_dmamap_drain(pcibr_dmamap_t);
187 void                    pcibr_dmaaddr_drain(vertex_hdl_t, paddr_t, size_t);
188 void                    pcibr_dmalist_drain(vertex_hdl_t, alenlist_t);
189 iopaddr_t               pcibr_dmamap_pciaddr_get(pcibr_dmamap_t);
190 
191 extern unsigned         pcibr_intr_bits(pciio_info_t info, 
192                                         pciio_intr_line_t lines, int nslots);
193 extern pcibr_intr_t     pcibr_intr_alloc(vertex_hdl_t, device_desc_t, pciio_intr_line_t, vertex_hdl_t);
194 extern void             pcibr_intr_free(pcibr_intr_t);
195 extern void             pcibr_setpciint(xtalk_intr_t);
196 extern int              pcibr_intr_connect(pcibr_intr_t, intr_func_t, intr_arg_t);
197 extern void             pcibr_intr_disconnect(pcibr_intr_t);
198 
199 extern vertex_hdl_t     pcibr_intr_cpu_get(pcibr_intr_t);
200 extern void             pcibr_intr_func(intr_arg_t);
201 
202 extern void             print_bridge_errcmd(uint32_t, char *);
203 
204 extern void             pcibr_error_dump(pcibr_soft_t);
205 extern uint32_t       pcibr_errintr_group(uint32_t);
206 extern void             pcibr_pioerr_check(pcibr_soft_t);
207 extern void             pcibr_error_intr_handler(int, void *, struct pt_regs *);
208 
209 extern int              pcibr_addr_toslot(pcibr_soft_t, iopaddr_t, pciio_space_t *, iopaddr_t *, pciio_function_t *);
210 extern void             pcibr_error_cleanup(pcibr_soft_t, int);
211 extern void                    pcibr_device_disable(pcibr_soft_t, int);
212 extern int              pcibr_pioerror(pcibr_soft_t, int, ioerror_mode_t, ioerror_t *);
213 extern int              pcibr_dmard_error(pcibr_soft_t, int, ioerror_mode_t, ioerror_t *);
214 extern int              pcibr_dmawr_error(pcibr_soft_t, int, ioerror_mode_t, ioerror_t *);
215 extern int              pcibr_error_handler(error_handler_arg_t, int, ioerror_mode_t, ioerror_t *);
216 extern int              pcibr_error_handler_wrapper(error_handler_arg_t, int, ioerror_mode_t, ioerror_t *);
217 void                    pcibr_provider_startup(vertex_hdl_t);
218 void                    pcibr_provider_shutdown(vertex_hdl_t);
219 
220 int                     pcibr_reset(vertex_hdl_t);
221 pciio_endian_t          pcibr_endian_set(vertex_hdl_t, pciio_endian_t, pciio_endian_t);
222 int                     pcibr_priority_bits_set(pcibr_soft_t, pciio_slot_t, pciio_priority_t);
223 pciio_priority_t        pcibr_priority_set(vertex_hdl_t, pciio_priority_t);
224 int                     pcibr_device_flags_set(vertex_hdl_t, pcibr_device_flags_t);
225 
226 extern cfg_p            pcibr_config_addr(vertex_hdl_t, unsigned);
227 extern uint64_t         pcibr_config_get(vertex_hdl_t, unsigned, unsigned);
228 extern void             pcibr_config_set(vertex_hdl_t, unsigned, unsigned, uint64_t);
229 
230 extern pcibr_hints_t    pcibr_hints_get(vertex_hdl_t, int);
231 extern void             pcibr_hints_fix_rrbs(vertex_hdl_t);
232 extern void             pcibr_hints_dualslot(vertex_hdl_t, pciio_slot_t, pciio_slot_t);
233 extern void             pcibr_hints_intr_bits(vertex_hdl_t, pcibr_intr_bits_f *);
234 extern void             pcibr_set_rrb_callback(vertex_hdl_t, rrb_alloc_funct_t);
235 extern void             pcibr_hints_handsoff(vertex_hdl_t);
236 extern void             pcibr_hints_subdevs(vertex_hdl_t, pciio_slot_t, uint64_t);
237 
238 extern int              pcibr_slot_info_init(vertex_hdl_t,pciio_slot_t);
239 extern int              pcibr_slot_info_free(vertex_hdl_t,pciio_slot_t);
240 extern int              pcibr_slot_info_return(pcibr_soft_t, pciio_slot_t,
241                                                pcibr_slot_info_resp_t);
242 extern void             pcibr_slot_func_info_return(pcibr_info_h, int,
243                                                     pcibr_slot_func_info_resp_t);
244 extern int              pcibr_slot_addr_space_init(vertex_hdl_t,pciio_slot_t);
245 extern int              pcibr_slot_pcix_rbar_init(pcibr_soft_t, pciio_slot_t);
246 extern int              pcibr_slot_device_init(vertex_hdl_t, pciio_slot_t);
247 extern int              pcibr_slot_guest_info_init(vertex_hdl_t,pciio_slot_t);
248 extern int              pcibr_slot_call_device_attach(vertex_hdl_t,
249                                                       pciio_slot_t, int);
250 extern int              pcibr_slot_call_device_detach(vertex_hdl_t,
251                                                       pciio_slot_t, int);
252 extern int              pcibr_slot_attach(vertex_hdl_t, pciio_slot_t, int, 
253                                                       char *, int *);
254 extern int              pcibr_slot_detach(vertex_hdl_t, pciio_slot_t, int,
255                                                       char *, int *);
256 
257 extern int              pcibr_slot_initial_rrb_alloc(vertex_hdl_t, pciio_slot_t);
258 extern int              pcibr_initial_rrb(vertex_hdl_t, pciio_slot_t, pciio_slot_t);
259 
260 /* =====================================================================
261  *    Device(x) register management
262  */
263 
/* pcibr_try_set_device: attempt to modify Device(x)
 * for the specified slot on the specified bridge
 * as requested in flags, limited to the specified
 * bits. Returns which BRIDGE bits were in conflict,
 * or ZERO if everything went OK.
 *
 * NOTE(review): this function acquires pcibr_lock itself (see the
 * pcibr_lock() call below), so the historical note that the caller
 * must hold pcibr_lock was stale -- callers must NOT hold it on entry.
 */
static int
pcibr_try_set_device(pcibr_soft_t pcibr_soft,
                     pciio_slot_t slot,
                     unsigned flags,
                     bridgereg_t mask)
{
    bridge_t               *bridge;
    pcibr_soft_slot_t       slotp;
    bridgereg_t             old;        /* current Device(x) shadow value */
    bridgereg_t             new;        /* desired Device(x) value */
    bridgereg_t             chg;        /* bits that would actually change */
    bridgereg_t             bad;        /* changing bits in conflict with users */
    bridgereg_t             badpmu;
    bridgereg_t             badd32;
    bridgereg_t             badd64;
    bridgereg_t             fix;
    unsigned long           s;          /* saved state from pcibr_lock */
    bridgereg_t             xmask;      /* mask, widened for XBridge/PIC */

    /* XBridge and PIC define wider PMU/D64 control fields than the
     * original Bridge, so widen the caller's mask to match.
     */
    xmask = mask;
    if (IS_XBRIDGE_OR_PIC_SOFT(pcibr_soft)) {
        if (mask == BRIDGE_DEV_PMU_BITS)
                xmask = XBRIDGE_DEV_PMU_BITS;
        if (mask == BRIDGE_DEV_D64_BITS)
                xmask = XBRIDGE_DEV_D64_BITS;
    }

    slotp = &pcibr_soft->bs_slot[slot];

    s = pcibr_lock(pcibr_soft);

    bridge = pcibr_soft->bs_base;

    old = slotp->bss_device;

    /* figure out what the desired
     * Device(x) bits are based on
     * the flags specified.
     */

    new = old;

    /* Currently, we inherit anything that
     * the new caller has not specified in
     * one way or another, unless we take
     * action here to not inherit.
     *
     * This is needed for the "swap" stuff,
     * since it could have been set via
     * pcibr_endian_set -- altho note that
     * any explicit PCIBR_BYTE_STREAM or
     * PCIBR_WORD_VALUES will freely override
     * the effect of that call (and vice
     * versa, no protection either way).
     *
     * I want to get rid of pcibr_endian_set
     * in favor of tracking DMA endianness
     * using the flags specified when DMA
     * channels are created.
     */

#define BRIDGE_DEV_WRGA_BITS    (BRIDGE_DEV_PMU_WRGA_EN | BRIDGE_DEV_DIR_WRGA_EN)
#define BRIDGE_DEV_SWAP_BITS    (BRIDGE_DEV_SWAP_PMU | BRIDGE_DEV_SWAP_DIR)

    /* Do not use Barrier, Write Gather,
     * or Prefetch unless asked.
     * Leave everything else as it
     * was from the last time.
     */
    new = new
        & ~BRIDGE_DEV_BARRIER
        & ~BRIDGE_DEV_WRGA_BITS
        & ~BRIDGE_DEV_PREF
        ;

    /* Generic macro flags
     */
    if (flags & PCIIO_DMA_DATA) {
        new = (new
            & ~BRIDGE_DEV_BARRIER)      /* barrier off */
            | BRIDGE_DEV_PREF;          /* prefetch on */

    }
    if (flags & PCIIO_DMA_CMD) {
        new = ((new
            & ~BRIDGE_DEV_PREF)         /* prefetch off */
            & ~BRIDGE_DEV_WRGA_BITS)    /* write gather off */
            | BRIDGE_DEV_BARRIER;       /* barrier on */
    }
    /* Generic detail flags
     */
    if (flags & PCIIO_WRITE_GATHER)
        new |= BRIDGE_DEV_WRGA_BITS;
    if (flags & PCIIO_NOWRITE_GATHER)
        new &= ~BRIDGE_DEV_WRGA_BITS;

    if (flags & PCIIO_PREFETCH)
        new |= BRIDGE_DEV_PREF;
    if (flags & PCIIO_NOPREFETCH)
        new &= ~BRIDGE_DEV_PREF;

    if (flags & PCIBR_WRITE_GATHER)
        new |= BRIDGE_DEV_WRGA_BITS;
    if (flags & PCIBR_NOWRITE_GATHER)
        new &= ~BRIDGE_DEV_WRGA_BITS;

    /* On XBridge/PIC only the "direct" swap bit is toggled; on the
     * original Bridge both PMU and direct swap bits move together.
     */
    if (flags & PCIIO_BYTE_STREAM)
        new |= (IS_XBRIDGE_OR_PIC_SOFT(pcibr_soft)) ? 
                        BRIDGE_DEV_SWAP_DIR : BRIDGE_DEV_SWAP_BITS;
    if (flags & PCIIO_WORD_VALUES)
        new &= (IS_XBRIDGE_OR_PIC_SOFT(pcibr_soft)) ? 
                        ~BRIDGE_DEV_SWAP_DIR : ~BRIDGE_DEV_SWAP_BITS;

    /* Provider-specific flags
     */
    if (flags & PCIBR_PREFETCH)
        new |= BRIDGE_DEV_PREF;
    if (flags & PCIBR_NOPREFETCH)
        new &= ~BRIDGE_DEV_PREF;

    if (flags & PCIBR_PRECISE)
        new |= BRIDGE_DEV_PRECISE;
    if (flags & PCIBR_NOPRECISE)
        new &= ~BRIDGE_DEV_PRECISE;

    if (flags & PCIBR_BARRIER)
        new |= BRIDGE_DEV_BARRIER;
    if (flags & PCIBR_NOBARRIER)
        new &= ~BRIDGE_DEV_BARRIER;

    if (flags & PCIBR_64BIT)
        new |= BRIDGE_DEV_DEV_SIZE;
    if (flags & PCIBR_NO64BIT)
        new &= ~BRIDGE_DEV_DEV_SIZE;

    /*
     * PIC BRINGUP WAR (PV# 855271):
     * Allow setting BRIDGE_DEV_VIRTUAL_EN on PIC iff we're a 64-bit
     * device.  The bit is only intended for 64-bit devices and, on
     * PIC, can cause problems for 32-bit devices.
     */
    if (IS_PIC_SOFT(pcibr_soft) && mask == BRIDGE_DEV_D64_BITS &&
                                PCIBR_WAR_ENABLED(PV855271, pcibr_soft)) {
        if (flags & PCIBR_VCHAN1) {
                new |= BRIDGE_DEV_VIRTUAL_EN;
                xmask |= BRIDGE_DEV_VIRTUAL_EN;
        }
    }


    chg = old ^ new;                            /* what are we changing, */
    chg &= xmask;                               /* of the interesting bits */

    if (chg) {

        /* A changing bit is "bad" (in conflict) when another DMA
         * channel class (PMU / D32 / D64) already has active users
         * (non-zero use counter) that depend on its current setting.
         */
        badd32 = slotp->bss_d32_uctr ? (BRIDGE_DEV_D32_BITS & chg) : 0;
        if (IS_XBRIDGE_OR_PIC_SOFT(pcibr_soft)) {
                badpmu = slotp->bss_pmu_uctr ? (XBRIDGE_DEV_PMU_BITS & chg) : 0;
                badd64 = slotp->bss_d64_uctr ? (XBRIDGE_DEV_D64_BITS & chg) : 0;
        } else {
                badpmu = slotp->bss_pmu_uctr ? (BRIDGE_DEV_PMU_BITS & chg) : 0;
                badd64 = slotp->bss_d64_uctr ? (BRIDGE_DEV_D64_BITS & chg) : 0;
        }
        bad = badpmu | badd32 | badd64;

        if (bad) {

            /* some conflicts can be resolved by
             * forcing the bit on. this may cause
             * some performance degredation in
             * the stream(s) that want the bit off,
             * but the alternative is not allowing
             * the new stream at all.
             */
            if ( (fix = bad & (BRIDGE_DEV_PRECISE |
                             BRIDGE_DEV_BARRIER)) ) {
                bad &= ~fix;
                /* don't change these bits if
                 * they are already set in "old"
                 */
                chg &= ~(fix & old);
            }
            /* some conflicts can be resolved by
             * forcing the bit off. this may cause
             * some performance degredation in
             * the stream(s) that want the bit on,
             * but the alternative is not allowing
             * the new stream at all.
             */
            if ( (fix = bad & (BRIDGE_DEV_WRGA_BITS |
                             BRIDGE_DEV_PREF)) ) {
                bad &= ~fix;
                /* don't change these bits if
                 * we wanted to turn them on.
                 */
                chg &= ~(fix & new);
            }
            /* conflicts in other bits mean
             * we can not establish this DMA
             * channel while the other(s) are
             * still present.
             */
            if (bad) {
                pcibr_unlock(pcibr_soft, s);
                return bad;
            }
        }
    }
    /* No unresolvable conflict: record this caller as a user of the
     * DMA channel class identified by the (unwidened) mask.
     */
    if (mask == BRIDGE_DEV_PMU_BITS)
        slotp->bss_pmu_uctr++;
    if (mask == BRIDGE_DEV_D32_BITS)
        slotp->bss_d32_uctr++;
    if (mask == BRIDGE_DEV_D64_BITS)
        slotp->bss_d64_uctr++;

    /* the value we want to write is the
     * original value, with the bits for
     * our selected changes flipped, and
     * with any disabled features turned off.
     */
    new = old ^ chg;                    /* only change what we want to change */

    if (slotp->bss_device == new) {
        pcibr_unlock(pcibr_soft, s);
        return 0;
    }
    /* Write the new Device(x) value to the hardware and update the
     * software shadow; the b_wid_tflush read forces the PIO write to
     * complete before we drop the lock.
     */
    if ( IS_PIC_SOFT(pcibr_soft) ) {
        bridge->b_device[slot].reg = new;
        slotp->bss_device = new;
        bridge->b_wid_tflush;           /* wait until Bridge PIO complete */
    }
    else {
        if (io_get_sh_swapper(NASID_GET(bridge))) {
                BRIDGE_REG_SET32((&bridge->b_device[slot].reg)) = __swab32(new);
                slotp->bss_device = new;
                BRIDGE_REG_GET32((&bridge->b_wid_tflush));  /* wait until Bridge PIO complete */
        } else {
                bridge->b_device[slot].reg = new;
                slotp->bss_device = new;
                bridge->b_wid_tflush;               /* wait until Bridge PIO complete */
        }
    }
    pcibr_unlock(pcibr_soft, s);

    /* NOTE(review): unconditional printk on every successful update
     * looks like leftover debug output -- consider gating it behind
     * pcibr_debug_mask.
     */
    printk("pcibr_try_set_device: Device(%d): %x\n", slot, new);
    return 0;
}
519 
520 void
521 pcibr_release_device(pcibr_soft_t pcibr_soft,
522                      pciio_slot_t slot,
523                      bridgereg_t mask)
524 {
525     pcibr_soft_slot_t       slotp;
526     unsigned long           s;
527 
528     slotp = &pcibr_soft->bs_slot[slot];
529 
530     s = pcibr_lock(pcibr_soft);
531 
532     if (mask == BRIDGE_DEV_PMU_BITS)
533         slotp->bss_pmu_uctr--;
534     if (mask == BRIDGE_DEV_D32_BITS)
535         slotp->bss_d32_uctr--;
536     if (mask == BRIDGE_DEV_D64_BITS)
537         slotp->bss_d64_uctr--;
538 
539     pcibr_unlock(pcibr_soft, s);
540 }
541 
542 /*
543  * flush write gather buffer for slot
544  */
545 static void
546 pcibr_device_write_gather_flush(pcibr_soft_t pcibr_soft,
547               pciio_slot_t slot)
548 {
549     bridge_t               *bridge;
550     unsigned long          s;
551     volatile uint32_t     wrf;
552     s = pcibr_lock(pcibr_soft);
553     bridge = pcibr_soft->bs_base;
554 
555     if ( IS_PIC_SOFT(pcibr_soft) ) {
556         wrf = bridge->b_wr_req_buf[slot].reg;
557     }
558     else {
559         if (io_get_sh_swapper(NASID_GET(bridge))) {
560                 wrf = BRIDGE_REG_GET32((&bridge->b_wr_req_buf[slot].reg));
561         } else {
562                 wrf = bridge->b_wr_req_buf[slot].reg;
563         }
564     }
565     pcibr_unlock(pcibr_soft, s);
566 }
567 
568 /* =====================================================================
569  *    Bridge (pcibr) "Device Driver" entry points
570  */
571 
572 
573 static int
574 pcibr_mmap(struct file * file, struct vm_area_struct * vma)
575 {
576         vertex_hdl_t            pcibr_vhdl = file->f_dentry->d_fsdata;
577         pcibr_soft_t            pcibr_soft;
578         bridge_t               *bridge;
579         unsigned long           phys_addr;
580         int                     error = 0;
581 
582         pcibr_soft = pcibr_soft_get(pcibr_vhdl);
583         bridge = pcibr_soft->bs_base;
584         phys_addr = (unsigned long)bridge & ~0xc000000000000000; /* Mask out the Uncache bits */
585         vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
586         vma->vm_flags |= VM_RESERVED | VM_IO;
587         error = io_remap_page_range(vma, phys_addr, vma->vm_start,
588                                     vma->vm_end - vma->vm_start,
589                                     vma->vm_page_prot);
590         return(error);
591 }
592 
593 /*
594  * This is the file operation table for the pcibr driver.
595  * As each of the functions are implemented, put the
596  * appropriate function name below.
597  */
598 static int pcibr_mmap(struct file * file, struct vm_area_struct * vma);
599 struct file_operations pcibr_fops = {
600         .owner          = THIS_MODULE,
601         .mmap           = pcibr_mmap,
602 };
603 
604 
605 /* This is special case code used by grio. There are plans to make
606  * this a bit more general in the future, but till then this should
607  * be sufficient.
608  */
609 pciio_slot_t
610 pcibr_device_slot_get(vertex_hdl_t dev_vhdl)
611 {
612     char                    devname[MAXDEVNAME];
613     vertex_hdl_t            tdev;
614     pciio_info_t            pciio_info;
615     pciio_slot_t            slot = PCIIO_SLOT_NONE;
616 
617     vertex_to_name(dev_vhdl, devname, MAXDEVNAME);
618 
619     /* run back along the canonical path
620      * until we find a PCI connection point.
621      */
622     tdev = hwgraph_connectpt_get(dev_vhdl);
623     while (tdev != GRAPH_VERTEX_NONE) {
624         pciio_info = pciio_info_chk(tdev);
625         if (pciio_info) {
626             slot = PCIBR_INFO_SLOT_GET_INT(pciio_info);
627             break;
628         }
629         hwgraph_vertex_unref(tdev);
630         tdev = hwgraph_connectpt_get(tdev);
631     }
632     hwgraph_vertex_unref(tdev);
633 
634     return slot;
635 }
636 
637 pcibr_info_t
638 pcibr_info_get(vertex_hdl_t vhdl)
639 {
640     return (pcibr_info_t) pciio_info_get(vhdl);
641 }
642 
/*
 * pcibr_device_info_new: allocate and initialize the pcibr-level info
 * structure for the PCI device (vendor/device id) at the given internal
 * slot and function.  Records the bus number and the default INTA-D
 * interrupt-bit mapping, and stores the info in the slot's sparse
 * function-info array when the function index fits.  Returns the new
 * pcibr_info.
 */
pcibr_info_t
pcibr_device_info_new(
                         pcibr_soft_t pcibr_soft,
                         pciio_slot_t slot,
                         pciio_function_t rfunc,
                         pciio_vendor_id_t vendor,
                         pciio_device_id_t device)
{
    pcibr_info_t            pcibr_info;
    pciio_function_t        func;
    int                     ibit;

    /* treat "no function" as function 0 for the sparse array index */
    func = (rfunc == PCIIO_FUNC_NONE) ? 0 : rfunc;

    /*
     * Create a pciio_info_s for this device.  pciio_device_info_new()
     * will set the c_slot (which is suppose to represent the external
     * slot (i.e the slot number silk screened on the back of the I/O
     * brick)).  So for PIC we need to adjust this "internal slot" num
     * passed into us, into its external representation.  See comment
     * for the PCIBR_DEVICE_TO_SLOT macro for more information.
     */
    /* NOTE(review): allocation result is not checked here; NEW() is
     * presumed to panic or guarantee success -- confirm its definition.
     */
    NEW(pcibr_info);
    pciio_device_info_new(&pcibr_info->f_c, pcibr_soft->bs_vhdl,
                          PCIBR_DEVICE_TO_SLOT(pcibr_soft, slot),
                          rfunc, vendor, device);
    /* f_dev keeps the INTERNAL slot number, unlike c_slot above */
    pcibr_info->f_dev = slot;

    /* Set PCI bus number */
    pcibr_info->f_bus = pcibr_widget_to_bus(pcibr_soft->bs_vhdl);

    if (slot != PCIIO_SLOT_NONE) {

        /*
         * Currently favored mapping from PCI
         * slot number and INTA/B/C/D to Bridge
         * PCI Interrupt Bit Number:
         *
         *     SLOT     A B C D
         *      0       0 4 0 4
         *      1       1 5 1 5
         *      2       2 6 2 6
         *      3       3 7 3 7
         *      4       4 0 4 0
         *      5       5 1 5 1
         *      6       6 2 6 2
         *      7       7 3 7 3
         *
         * XXX- allow pcibr_hints to override default
         * XXX- allow ADMIN to override pcibr_hints
         */
        for (ibit = 0; ibit < 4; ++ibit)
            pcibr_info->f_ibit[ibit] =
                (slot + 4 * ibit) & 7;

        /*
         * Record the info in the sparse func info space.
         */
        /* out-of-range functions are silently not recorded */
        if (func < pcibr_soft->bs_slot[slot].bss_ninfo)
            pcibr_soft->bs_slot[slot].bss_infos[func] = pcibr_info;
    }
    return pcibr_info;
}
706 
707 
708 /*
709  * pcibr_device_unregister
710  *      This frees up any hardware resources reserved for this PCI device
711  *      and removes any PCI infrastructural information setup for it.
712  *      This is usually used at the time of shutting down of the PCI card.
713  */
int
pcibr_device_unregister(vertex_hdl_t pconn_vhdl)
{
    pciio_info_t         pciio_info;
    vertex_hdl_t         pcibr_vhdl;
    pciio_slot_t         slot;
    pcibr_soft_t         pcibr_soft;
    bridge_t            *bridge;
    int                  count_vchan0, count_vchan1;
    unsigned             s;                 /* saved interrupt/lock state from pcibr_lock() */
    int                  error_call;
    int                  error = 0;         /* last non-zero error from the teardown steps */

    /* Resolve the device's info struct, its owning bridge vertex and slot */
    pciio_info = pciio_info_get(pconn_vhdl);

    pcibr_vhdl = pciio_info_master_get(pciio_info);
    slot = PCIBR_INFO_SLOT_GET_INT(pciio_info);

    pcibr_soft = pcibr_soft_get(pcibr_vhdl);
    bridge = pcibr_soft->bs_base;

    /* Clear all the hardware xtalk resources for this device */
    xtalk_widgetdev_shutdown(pcibr_soft->bs_conn, slot);

    /* Flush all the rrbs */
    pcibr_rrb_flush(pconn_vhdl);

    /*
     * If the RRB configuration for this slot has changed, set it 
     * back to the boot-time default
     */
    if (pcibr_soft->bs_rrb_valid_dflt[slot][VCHAN0] >= 0) {

        s = pcibr_lock(pcibr_soft);

        /* PIC NOTE: If this is a BRIDGE, VCHAN2 & VCHAN3 will be zero so
         * no need to conditionalize this (ie. "if (IS_PIC_SOFT())" ).
         */
        /* Fold every RRB currently assigned to this slot (all virtual
         * channels) back into the slot's reserved count before freeing.
         */
        pcibr_soft->bs_rrb_res[slot] = pcibr_soft->bs_rrb_res[slot] +
                                       pcibr_soft->bs_rrb_valid[slot][VCHAN0] +
                                       pcibr_soft->bs_rrb_valid[slot][VCHAN1] +
                                       pcibr_soft->bs_rrb_valid[slot][VCHAN2] +
                                       pcibr_soft->bs_rrb_valid[slot][VCHAN3];

        /* Free the rrbs allocated to this slot, both the normal & virtual */
        do_pcibr_rrb_free_all(pcibr_soft, bridge, slot);

        /* Snapshot the boot-time defaults while still under the lock */
        count_vchan0 = pcibr_soft->bs_rrb_valid_dflt[slot][VCHAN0];
        count_vchan1 = pcibr_soft->bs_rrb_valid_dflt[slot][VCHAN1];

        pcibr_unlock(pcibr_soft, s);

        /* Re-allocate the boot-time RRB counts (takes the lock itself) */
        pcibr_rrb_alloc(pconn_vhdl, &count_vchan0, &count_vchan1);

    }

    /* Flush the write buffers !! */
    error_call = pcibr_wrb_flush(pconn_vhdl);

    if (error_call)
        error = error_call;

    /* Clear the information specific to the slot */
    error_call = pcibr_slot_info_free(pcibr_vhdl, slot);

    if (error_call)
        error = error_call;

    return(error);
    
}
785 
786 /*
787  * pcibr_driver_reg_callback
788  *      CDL will call this function for each device found in the PCI
789  *      registry that matches the vendor/device IDs supported by 
790  *      the driver being registered.  The device's connection vertex
791  *      and the driver's attach function return status enable the
792  *      slot's device status to be set.
793  */
794 void
795 pcibr_driver_reg_callback(vertex_hdl_t pconn_vhdl,
796                           int key1, int key2, int error)
797 {
798     pciio_info_t         pciio_info;
799     pcibr_info_t         pcibr_info;
800     vertex_hdl_t         pcibr_vhdl;
801     pciio_slot_t         slot;
802     pcibr_soft_t         pcibr_soft;
803 
804     /* Do not set slot status for vendor/device ID wildcard drivers */
805     if ((key1 == -1) || (key2 == -1))
806         return;
807 
808     pciio_info = pciio_info_get(pconn_vhdl);
809     pcibr_info = pcibr_info_get(pconn_vhdl);
810 
811     pcibr_vhdl = pciio_info_master_get(pciio_info);
812     slot = PCIBR_INFO_SLOT_GET_INT(pciio_info);
813 
814     pcibr_soft = pcibr_soft_get(pcibr_vhdl);
815     pcibr_info->f_att_det_error = error;
816     pcibr_soft->bs_slot[slot].slot_status &= ~SLOT_STATUS_MASK;
817 
818     if (error) {
819         pcibr_soft->bs_slot[slot].slot_status |= SLOT_STARTUP_INCMPLT;
820     } else {
821         pcibr_soft->bs_slot[slot].slot_status |= SLOT_STARTUP_CMPLT;
822     }
823 }
824 
825 /*
826  * pcibr_driver_unreg_callback
827  *      CDL will call this function for each device found in the PCI
828  *      registry that matches the vendor/device IDs supported by 
829  *      the driver being unregistered.  The device's connection vertex
830  *      and the driver's detach function return status enable the
831  *      slot's device status to be set.
832  */
833 void
834 pcibr_driver_unreg_callback(vertex_hdl_t pconn_vhdl, 
835                             int key1, int key2, int error)
836 {
837     pciio_info_t         pciio_info;
838     pcibr_info_t         pcibr_info;
839     vertex_hdl_t         pcibr_vhdl;
840     pciio_slot_t         slot;
841     pcibr_soft_t         pcibr_soft;
842 
843     /* Do not set slot status for vendor/device ID wildcard drivers */
844     if ((key1 == -1) || (key2 == -1))
845         return;
846 
847     pciio_info = pciio_info_get(pconn_vhdl);
848     pcibr_info = pcibr_info_get(pconn_vhdl);
849 
850     pcibr_vhdl = pciio_info_master_get(pciio_info);
851     slot = PCIBR_INFO_SLOT_GET_INT(pciio_info);
852 
853     pcibr_soft = pcibr_soft_get(pcibr_vhdl);
854     pcibr_info->f_att_det_error = error;
855     pcibr_soft->bs_slot[slot].slot_status &= ~SLOT_STATUS_MASK;
856 
857     if (error) {
858         pcibr_soft->bs_slot[slot].slot_status |= SLOT_SHUTDOWN_INCMPLT;
859     } else {
860         pcibr_soft->bs_slot[slot].slot_status |= SLOT_SHUTDOWN_CMPLT;
861     }
862 }
863 
864 /* 
865  * build a convenience link path in the
866  * form of ".../<iobrick>/bus/<busnum>"
867  * 
868  * returns 1 on success, 0 otherwise
869  *
870  * depends on hwgraph separator == '/'
871  */
872 int
873 pcibr_bus_cnvlink(vertex_hdl_t f_c)
874 {
875         char dst[MAXDEVNAME];
876         char *dp = dst;
877         char *cp, *xp;
878         int widgetnum;
879         char pcibus[8];
880         vertex_hdl_t nvtx, svtx;
881         int rv;
882 
883         PCIBR_DEBUG_ALWAYS((PCIBR_DEBUG_ATTACH, f_c, "pcibr_bus_cnvlink\n"));
884 
885         if (GRAPH_SUCCESS != hwgraph_vertex_name_get(f_c, dst, MAXDEVNAME))
886                 return 0;
887 
888         /* dst example == /hw/module/001c02/Pbrick/xtalk/8/pci/direct */
889 
890         /* find the widget number */
891         xp = strstr(dst, "/"EDGE_LBL_XTALK"/");
892         if (xp == NULL)
893                 return 0;
894         widgetnum = simple_strtoul(xp+7, NULL, 0);
895         if (widgetnum < XBOW_PORT_8 || widgetnum > XBOW_PORT_F)
896                 return 0;
897 
898         /* remove "/pci/direct" from path */
899         cp = strstr(dst, "/" EDGE_LBL_PCI "/" EDGE_LBL_DIRECT);
900         if (cp == NULL)
901                 return 0;
902         *cp = (char)NULL;
903 
904         /* get the vertex for the widget */
905         if (GRAPH_SUCCESS != hwgraph_traverse(NULL, dp, &svtx)) 
906                 return 0;
907 
908         *xp = (char)NULL;               /* remove "/xtalk/..." from path */
909 
910         /* dst example now == /hw/module/001c02/Pbrick */
911 
912         /* get the bus number */
913         strcat(dst, "/");
914         strcat(dst, EDGE_LBL_BUS);
915         sprintf(pcibus, "%d", p_busnum[widgetnum]);
916 
917         /* link to bus to widget */
918         rv = hwgraph_path_add(NULL, dp, &nvtx);
919         if (GRAPH_SUCCESS == rv)
920                 rv = hwgraph_edge_add(nvtx, svtx, pcibus);
921 
922         return (rv == GRAPH_SUCCESS);
923 }
924 
925 
926 /*
927  *    pcibr_attach: called every time the crosstalk
928  *      infrastructure is asked to initialize a widget
929  *      that matches the part number we handed to the
930  *      registration routine above.
931  */
932 /*ARGSUSED */
int
pcibr_attach(vertex_hdl_t xconn_vhdl)
{
    /* REFERENCED */
    graph_error_t           rc;
    vertex_hdl_t            pcibr_vhdl;
    bridge_t               *bridge;

    PCIBR_DEBUG_ALWAYS((PCIBR_DEBUG_ATTACH, xconn_vhdl, "pcibr_attach\n"));

    /* Map the Bridge's register space through a crosstalk PIO
     * translation so we can touch the chip directly.
     * NOTE(review): the return value is not checked here; presumably
     * piotrans cannot fail for the widget's own register window --
     * confirm before relying on this in new code.
     */
    bridge = (bridge_t *)
        xtalk_piotrans_addr(xconn_vhdl, NULL,
                            0, sizeof(bridge_t), 0);
    /*
     * Create the vertex for the PCI bus, which we
     * will also use to hold the pcibr_soft and
     * which will be the "master" vertex for all the
     * pciio connection points we will hang off it.
     * This needs to happen before we call nic_bridge_vertex_info
     * as we are some of the *_vmc functions need access to the edges.
     *
     * Opening this vertex will provide access to
     * the Bridge registers themselves.
     */
    rc = hwgraph_path_add(xconn_vhdl, EDGE_LBL_PCI, &pcibr_vhdl);
    ASSERT(rc == GRAPH_SUCCESS);

    /* Register this vertex as a pciio provider and start it up
     * before handing off to the common attach path.
     */
    pciio_provider_register(pcibr_vhdl, &pcibr_provider);
    pciio_provider_startup(pcibr_vhdl);

    /* Common attach path shared with multi-bus (PIC) widgets:
     * bus number 0, no soft-state pointer returned to the caller.
     */
    return pcibr_attach2(xconn_vhdl, bridge, pcibr_vhdl, 0, NULL);
}
965 
966 
967 /*ARGSUSED */
968 int
969 pcibr_attach2(vertex_hdl_t xconn_vhdl, bridge_t *bridge, 
970               vertex_hdl_t pcibr_vhdl, int busnum, pcibr_soft_t *ret_softp)
971 {
972     /* REFERENCED */
973     vertex_hdl_t            ctlr_vhdl;
974     bridgereg_t             id;
975     int                     rev;
976     pcibr_soft_t            pcibr_soft;
977     pcibr_info_t            pcibr_info;
978     xwidget_info_t          info;
979     xtalk_intr_t            xtalk_intr;
980     int                     slot;
981     int                     ibit;
982     vertex_hdl_t            noslot_conn;
983     char                    devnm[MAXDEVNAME], *s;
984     pcibr_hints_t           pcibr_hints;
985     uint64_t              int_enable;
986     bridgereg_t             int_enable_32;
987     picreg_t                int_enable_64;
988     unsigned                rrb_fixed = 0;
989 
990     int                     spl_level;
991 
992 #if PCI_FBBE
993     int                     fast_back_to_back_enable;
994 #endif
995     nasid_t                 nasid;
996     int                     iobrick_type_get_nasid(nasid_t nasid);
997     int                     iobrick_module_get_nasid(nasid_t nasid);
998 
999     PCIBR_DEBUG_ALWAYS((PCIBR_DEBUG_ATTACH, pcibr_vhdl,
1000                 "pcibr_attach2: bridge=0x%p, busnum=%d\n", bridge, busnum));
1001 
1002     ctlr_vhdl = NULL;
1003     ctlr_vhdl = hwgraph_register(pcibr_vhdl, EDGE_LBL_CONTROLLER, 0, 
1004                 0, 0, 0,
1005                 S_IFCHR | S_IRUSR | S_IWUSR | S_IRGRP, 0, 0, 
1006                 (struct file_operations *)&pcibr_fops, (void *)pcibr_vhdl);
1007     ASSERT(ctlr_vhdl != NULL);
1008 
1009     /*
1010      * Get the hint structure; if some NIC callback
1011      * marked this vertex as "hands-off" then we
1012      * just return here, before doing anything else.
1013      */
1014     pcibr_hints = pcibr_hints_get(xconn_vhdl, 0);
1015 
1016     if (pcibr_hints && pcibr_hints->ph_hands_off)
1017         return -1;                      /* generic operations disabled */
1018 
1019     id = bridge->b_wid_id;
1020     rev = XWIDGET_PART_REV_NUM(id);
1021 
1022     hwgraph_info_add_LBL(pcibr_vhdl, INFO_LBL_PCIBR_ASIC_REV, (arbitrary_info_t) rev);
1023 
1024     /*
1025      * allocate soft state structure, fill in some
1026      * fields, and hook it up to our vertex.
1027      */
1028     NEW(pcibr_soft);
1029     if (ret_softp)
1030         *ret_softp = pcibr_soft;
1031     BZERO(pcibr_soft, sizeof *pcibr_soft);
1032     pcibr_soft_set(pcibr_vhdl, pcibr_soft);
1033     pcibr_soft->bs_conn = xconn_vhdl;
1034     pcibr_soft->bs_vhdl = pcibr_vhdl;
1035     pcibr_soft->bs_base = bridge;
1036     pcibr_soft->bs_rev_num = rev;
1037     pcibr_soft->bs_intr_bits = (pcibr_intr_bits_f *)pcibr_intr_bits;
1038 
1039     pcibr_soft->bs_min_slot = 0;                /* lowest possible slot# */
1040     pcibr_soft->bs_max_slot = 7;                /* highest possible slot# */
1041     pcibr_soft->bs_busnum = busnum;
1042     pcibr_soft->bs_bridge_type = PCIBR_BRIDGETYPE_PIC;
1043     switch(pcibr_soft->bs_bridge_type) {
1044     case PCIBR_BRIDGETYPE_BRIDGE:
1045         pcibr_soft->bs_int_ate_size = BRIDGE_INTERNAL_ATES;
1046         pcibr_soft->bs_bridge_mode = 0; /* speed is not available in bridge */
1047         break;
1048     case PCIBR_BRIDGETYPE_PIC:
1049         pcibr_soft->bs_min_slot = 0;
1050         pcibr_soft->bs_max_slot = 3;
1051         pcibr_soft->bs_int_ate_size = XBRIDGE_INTERNAL_ATES;
1052         pcibr_soft->bs_bridge_mode = 
1053            (((bridge->p_wid_stat_64 & PIC_STAT_PCIX_SPEED) >> 33) |
1054             ((bridge->p_wid_stat_64 & PIC_STAT_PCIX_ACTIVE) >> 33));
1055 
1056         /* We have to clear PIC's write request buffer to avoid parity
1057          * errors.  See PV#854845.
1058          */
1059         {
1060         int i;
1061 
1062         for (i=0; i < PIC_WR_REQ_BUFSIZE; i++) {
1063                 bridge->p_wr_req_lower[i] = 0;
1064                 bridge->p_wr_req_upper[i] = 0;
1065                 bridge->p_wr_req_parity[i] = 0;
1066         }
1067         }
1068 
1069         break;
1070     case PCIBR_BRIDGETYPE_XBRIDGE:
1071         pcibr_soft->bs_int_ate_size = XBRIDGE_INTERNAL_ATES;
1072         pcibr_soft->bs_bridge_mode = 
1073            ((bridge->b_wid_control & BRIDGE_CTRL_PCI_SPEED) >> 3);
1074         break;
1075     }
1076 
1077     PCIBR_DEBUG_ALWAYS((PCIBR_DEBUG_ATTACH, pcibr_vhdl,
1078                 "pcibr_attach2: pcibr_soft=0x%x, mode=0x%x\n",
1079                 pcibr_soft, pcibr_soft->bs_bridge_mode));
1080     pcibr_soft->bsi_err_intr = 0;
1081 
1082     /* Bridges up through REV C
1083      * are unable to set the direct
1084      * byteswappers to BYTE_STREAM.
1085      */
1086     if (pcibr_soft->bs_rev_num <= BRIDGE_PART_REV_C) {
1087         pcibr_soft->bs_pio_end_io = PCIIO_WORD_VALUES;
1088         pcibr_soft->bs_pio_end_mem = PCIIO_WORD_VALUES;
1089     }
1090 #if PCIBR_SOFT_LIST
1091     /*
1092      * link all the pcibr_soft structs
1093      */
1094     {
1095         pcibr_list_p            self;
1096 
1097         NEW(self);
1098         self->bl_soft = pcibr_soft;
1099         self->bl_vhdl = pcibr_vhdl;
1100         self->bl_next = pcibr_list;
1101         pcibr_list = self;
1102     }
1103 #endif /* PCIBR_SOFT_LIST */
1104 
1105     /*
1106      * get the name of this bridge vertex and keep the info. Use this
1107      * only where it is really needed now: like error interrupts.
1108      */
1109     s = dev_to_name(pcibr_vhdl, devnm, MAXDEVNAME);
1110     pcibr_soft->bs_name = kmalloc(strlen(s) + 1, GFP_KERNEL);
1111     strcpy(pcibr_soft->bs_name, s);
1112 
1113     PCIBR_DEBUG_ALWAYS((PCIBR_DEBUG_ATTACH, pcibr_vhdl,
1114                 "pcibr_attach2: %s ASIC: rev %s (code=0x%x)\n",
1115                 IS_XBRIDGE_SOFT(pcibr_soft) ? "XBridge" :
1116                         IS_PIC_SOFT(pcibr_soft) ? "PIC" : "Bridge", 
1117                 (rev == BRIDGE_PART_REV_A) ? "A" : 
1118                 (rev == BRIDGE_PART_REV_B) ? "B" :
1119                 (rev == BRIDGE_PART_REV_C) ? "C" :
1120                 (rev == BRIDGE_PART_REV_D) ? "D" :
1121                 (rev == XBRIDGE_PART_REV_A) ? "A" :
1122                 (rev == XBRIDGE_PART_REV_B) ? "B" :
1123                 (IS_PIC_PART_REV_A(rev)) ? "A" : 
1124                 "unknown", rev, pcibr_soft->bs_name));
1125 
1126     info = xwidget_info_get(xconn_vhdl);
1127     pcibr_soft->bs_xid = xwidget_info_id_get(info);
1128     pcibr_soft->bs_master = xwidget_info_master_get(info);
1129     pcibr_soft->bs_mxid = xwidget_info_masterid_get(info);
1130 
1131     pcibr_soft->bs_first_slot = pcibr_soft->bs_min_slot;
1132     pcibr_soft->bs_last_slot = pcibr_soft->bs_max_slot;
1133     /*
1134      * Bridge can only reset slots 0, 1, 2, and 3.  Ibrick internal
1135      * slots 4, 5, 6, and 7 must be reset as a group, so do not
1136      * reset them.
1137      */
1138     pcibr_soft->bs_last_reset = 3;
1139 
1140     nasid = NASID_GET(bridge);
1141 
1142     if ((pcibr_soft->bs_bricktype = iobrick_type_get_nasid(nasid)) < 0)
1143         printk(KERN_WARNING "0x%p: Unknown bricktype : 0x%x\n", (void *)xconn_vhdl,
1144                                 (unsigned int)pcibr_soft->bs_bricktype);
1145 
1146     pcibr_soft->bs_moduleid = iobrick_module_get_nasid(nasid);
1147 
1148     if (pcibr_soft->bs_bricktype > 0) {
1149         switch (pcibr_soft->bs_bricktype) {
1150         case MODULE_PXBRICK:
1151         case MODULE_IXBRICK:
1152             pcibr_soft->bs_first_slot = 0;
1153             pcibr_soft->bs_last_slot = 1;
1154             pcibr_soft->bs_last_reset = 1;
1155 
1156             /* If Bus 1 has IO9 then there are 4 devices in that bus.  Note
1157              * we figure this out from klconfig since the kernel has yet to 
1158              * probe
1159              */
1160             if (pcibr_widget_to_bus(pcibr_vhdl) == 1) {
1161                 lboard_t *brd = (lboard_t *)KL_CONFIG_INFO(nasid);
1162 
1163                 while (brd) {
1164                     if (brd->brd_flags & LOCAL_MASTER_IO6) {
1165                         pcibr_soft->bs_last_slot = 3;
1166                         pcibr_soft->bs_last_reset = 3;
1167                     }
1168                     brd = KLCF_NEXT(brd);
1169                 }
1170             }
1171             break;
1172         case MODULE_PBRICK:
1173             pcibr_soft->bs_first_slot = 1;
1174             pcibr_soft->bs_last_slot = 2;
1175             pcibr_soft->bs_last_reset = 2;
1176             break;
1177 
1178         case MODULE_IBRICK:
1179             /*
1180              * Here's the current baseio layout for SN1 style systems:
1181              *
1182              *    0    1    2    3    4    5    6    7          slot#
1183              *
1184              *    x    scsi x    x    ioc3 usb  x    x          O300 Ibrick
1185              *
1186              * x == never occupied
1187              * E == external (add-in) slot
1188              *
1189              */
1190             pcibr_soft->bs_first_slot = 1;      /* Ibrick first slot == 1 */
1191             if (pcibr_soft->bs_xid == 0xe) { 
1192                 pcibr_soft->bs_last_slot = 2;
1193                 pcibr_soft->bs_last_reset = 2;
1194             } else {
1195                 pcibr_soft->bs_last_slot = 6;
1196             }
1197             break;
1198         default:
1199             break;
1200         }
1201 
1202         PCIBR_DEBUG_ALWAYS((PCIBR_DEBUG_ATTACH, pcibr_vhdl,
1203                     "pcibr_attach2: %cbrick, slots %d-%d\n",
1204                     MODULE_GET_BTCHAR(pcibr_soft->bs_moduleid),
1205                     pcibr_soft->bs_first_slot, pcibr_soft->bs_last_slot));
1206     }
1207 
1208     /*
1209      * Initialize bridge and bus locks
1210      */
1211     spin_lock_init(&pcibr_soft->bs_lock);
1212     /*
1213      * If we have one, process the hints structure.
1214      */
1215     if (pcibr_hints) {
1216         PCIBR_DEBUG_ALWAYS((PCIBR_DEBUG_HINTS, pcibr_vhdl,
1217                     "pcibr_attach2: pcibr_hints=0x%x\n", pcibr_hints));
1218 
1219         rrb_fixed = pcibr_hints->ph_rrb_fixed;
1220 
1221         pcibr_soft->bs_rrb_fixed = rrb_fixed;
1222 
1223         if (pcibr_hints->ph_intr_bits) {
1224             pcibr_soft->bs_intr_bits = pcibr_hints->ph_intr_bits;
1225         }
1226 
1227         for (slot = pcibr_soft->bs_min_slot; 
1228                                 slot < PCIBR_NUM_SLOTS(pcibr_soft); ++slot) {
1229             int hslot = pcibr_hints->ph_host_slot[slot] - 1;
1230 
1231             if (hslot < 0) {
1232                 pcibr_soft->bs_slot[slot].host_slot = slot;
1233             } else {
1234                 pcibr_soft->bs_slot[slot].has_host = 1;
1235                 pcibr_soft->bs_slot[slot].host_slot = hslot;
1236             }
1237         }
1238     }
1239     /*
1240      * Set-up initial values for state fields
1241      */
1242     for (slot = pcibr_soft->bs_min_slot; 
1243                                 slot < PCIBR_NUM_SLOTS(pcibr_soft); ++slot) {
1244         pcibr_soft->bs_slot[slot].bss_devio.bssd_space = PCIIO_SPACE_NONE;
1245         pcibr_soft->bs_slot[slot].bss_devio.bssd_ref_cnt = 0;
1246         pcibr_soft->bs_slot[slot].bss_d64_base = PCIBR_D64_BASE_UNSET;
1247         pcibr_soft->bs_slot[slot].bss_d32_base = PCIBR_D32_BASE_UNSET;
1248         pcibr_soft->bs_slot[slot].bss_ext_ates_active = ATOMIC_INIT(0);
1249         pcibr_soft->bs_rrb_valid_dflt[slot][VCHAN0] = -1;
1250     }
1251 
1252     for (ibit = 0; ibit < 8; ++ibit) {
1253         pcibr_soft->bs_intr[ibit].bsi_xtalk_intr = 0;
1254         pcibr_soft->bs_intr[ibit].bsi_pcibr_intr_wrap.iw_soft = pcibr_soft;
1255         pcibr_soft->bs_intr[ibit].bsi_pcibr_intr_wrap.iw_list = NULL;
1256         pcibr_soft->bs_intr[ibit].bsi_pcibr_intr_wrap.iw_stat = 
1257                                                         &(bridge->b_int_status);
1258         pcibr_soft->bs_intr[ibit].bsi_pcibr_intr_wrap.iw_ibit = ibit;
1259         pcibr_soft->bs_intr[ibit].bsi_pcibr_intr_wrap.iw_hdlrcnt = 0;
1260         pcibr_soft->bs_intr[ibit].bsi_pcibr_intr_wrap.iw_shared = 0;
1261         pcibr_soft->bs_intr[ibit].bsi_pcibr_intr_wrap.iw_connected = 0;
1262     }
1263 
1264     /*
1265      * connect up our error handler.  PIC has 2 busses (thus resulting in 2
1266      * pcibr_soft structs under 1 widget), so only register a xwidget error
1267      * handler for PIC's bus0.  NOTE: for PIC pcibr_error_handler_wrapper()
1268      * is a wrapper routine we register that will call the real error handler
1269      * pcibr_error_handler() with the correct pcibr_soft struct.
1270      */
1271     if (IS_PIC_SOFT(pcibr_soft)) {
1272         if (busnum == 0) {
1273             xwidget_error_register(xconn_vhdl, pcibr_error_handler_wrapper, pcibr_soft);
1274         }
1275     } else {
1276         xwidget_error_register(xconn_vhdl, pcibr_error_handler, pcibr_soft);
1277     }
1278 
1279     /*
1280      * Initialize various Bridge registers.
1281      */
1282   
1283     /*
1284      * On pre-Rev.D bridges, set the PCI_RETRY_CNT
1285      * to zero to avoid dropping stores. (#475347)
1286      */
1287     if (rev < BRIDGE_PART_REV_D)
1288         bridge->b_bus_timeout &= ~BRIDGE_BUS_PCI_RETRY_MASK;
1289 
1290     /*
1291      * Clear all pending interrupts.
1292      */
1293     bridge->b_int_rst_stat = (BRIDGE_IRR_ALL_CLR);
1294 
1295     /* Initialize some PIC specific registers. */
1296     if (IS_PIC_SOFT(pcibr_soft)) {
1297         picreg_t pic_ctrl_reg = bridge->p_wid_control_64;
1298 
1299         /* Bridges Requester ID: bus = busnum, dev = 0, func = 0 */
1300         pic_ctrl_reg &= ~PIC_CTRL_BUS_NUM_MASK;
1301         pic_ctrl_reg |= PIC_CTRL_BUS_NUM(busnum);
1302         pic_ctrl_reg &= ~PIC_CTRL_DEV_NUM_MASK;
1303         pic_ctrl_reg &= ~PIC_CTRL_FUN_NUM_MASK;
1304 
1305         pic_ctrl_reg &= ~PIC_CTRL_NO_SNOOP;
1306         pic_ctrl_reg &= ~PIC_CTRL_RELAX_ORDER;
1307 
1308         /* enable parity checking on PICs internal RAM */
1309         pic_ctrl_reg |= PIC_CTRL_PAR_EN_RESP;
1310         pic_ctrl_reg |= PIC_CTRL_PAR_EN_ATE;
1311         /* PIC BRINGUP WAR (PV# 862253): dont enable write request
1312          * parity checking.
1313          */
1314         if (!PCIBR_WAR_ENABLED(PV862253, pcibr_soft)) {
1315             pic_ctrl_reg |= PIC_CTRL_PAR_EN_REQ;
1316         }
1317 
1318         bridge->p_wid_control_64 = pic_ctrl_reg;
1319     }
1320 
1321     /*
1322      * Until otherwise set up,
1323      * assume all interrupts are
1324      * from slot 7(Bridge/Xbridge) or 3(PIC).
1325      * XXX. Not sure why we're doing this, made change for PIC
1326      * just to avoid setting reserved bits.
1327      */
1328     if (IS_PIC_SOFT(pcibr_soft))
1329         bridge->b_int_device = (uint32_t) 0x006db6db;
1330     else
1331         bridge->b_int_device = (uint32_t) 0xffffffff;
1332 
1333     {
1334         bridgereg_t             dirmap;
1335         paddr_t                 paddr;
1336         iopaddr_t               xbase;
1337         xwidgetnum_t            xport;
1338         iopaddr_t               offset;
1339         int                     num_entries = 0;
1340         int                     entry;
1341         cnodeid_t               cnodeid;
1342         nasid_t                 nasid;
1343 
1344         /* Set the Bridge's 32-bit PCI to XTalk
1345          * Direct Map register to the most useful
1346          * value we can determine.  Note that we
1347          * must use a single xid for all of:
1348          *      direct-mapped 32-bit DMA accesses
1349          *      direct-mapped 64-bit DMA accesses
1350          *      DMA accesses through the PMU
1351          *      interrupts
1352          * This is the only way to guarantee that
1353          * completion interrupts will reach a CPU
1354          * after all DMA data has reached memory.
1355          * (Of course, there may be a few special
1356          * drivers/controlers that explicitly manage
1357          * this ordering problem.)
1358          */
1359 
1360         cnodeid = 0;  /* default node id */
1361         nasid = COMPACT_TO_NASID_NODEID(cnodeid);
1362         paddr = NODE_OFFSET(nasid) + 0;
1363 
1364         /* currently, we just assume that if we ask
1365          * for a DMA mapping to "zero" the XIO
1366          * host will transmute this into a request
1367          * for the lowest hunk of memory.
1368          */
1369         xbase = xtalk_dmatrans_addr(xconn_vhdl, 0,
1370                                     paddr, _PAGESZ, 0);
1371 
1372         if (xbase != XIO_NOWHERE) {
1373             if (XIO_PACKED(xbase)) {
1374                 xport = XIO_PORT(xbase);
1375                 xbase = XIO_ADDR(xbase);
1376             } else
1377                 xport = pcibr_soft->bs_mxid;
1378 
1379             offset = xbase & ((1ull << BRIDGE_DIRMAP_OFF_ADDRSHFT) - 1ull);
1380             xbase >>= BRIDGE_DIRMAP_OFF_ADDRSHFT;
1381 
1382             dirmap = xport << BRIDGE_DIRMAP_W_ID_SHFT;
1383 
1384             if (xbase)
1385                 dirmap |= BRIDGE_DIRMAP_OFF & xbase;
1386             else if (offset >= (512 << 20))
1387                 dirmap |= BRIDGE_DIRMAP_ADD512;
1388 
1389             bridge->b_dir_map = dirmap;
1390         }
1391         /*
1392          * Set bridge's idea of page size according to the system's
1393          * idea of "IO page size".  TBD: The idea of IO page size
1394          * should really go away.
1395          */
1396         /*
1397          * ensure that we write and read without any interruption.
1398          * The read following the write is required for the Bridge war
1399          */
1400         spl_level = splhi();
1401 #if IOPGSIZE == 4096
1402         if (IS_PIC_SOFT(pcibr_soft)) {
1403             bridge->p_wid_control_64 &= ~BRIDGE_CTRL_PAGE_SIZE;
1404         } else {
1405             bridge->b_wid_control &= ~BRIDGE_CTRL_PAGE_SIZE;
1406         }
1407 #elif IOPGSIZE == 16384
1408         if (IS_PIC_SOFT(pcibr_soft)) {
1409             bridge->p_wid_control_64 |= BRIDGE_CTRL_PAGE_SIZE;
1410         } else {
1411             bridge->b_wid_control |= BRIDGE_CTRL_PAGE_SIZE;
1412         }
1413 #else
1414         <<<Unable to deal with IOPGSIZE >>>;
1415 #endif
1416         bridge->b_wid_control;          /* inval addr bug war */
1417         splx(spl_level);
1418 
1419         /* Initialize internal mapping entries */
1420         for (entry = 0; entry < pcibr_soft->bs_int_ate_size; entry++) {
1421             bridge->b_int_ate_ram[entry].wr = 0;
1422         }
1423 
1424         /*
1425          * Determine if there's external mapping SSRAM on this
1426          * bridge.  Set up Bridge control register appropriately,
1427          * inititlize SSRAM, and set software up to manage RAM
1428          * entries as an allocatable resource.
1429          *
1430          * Currently, we just use the rm* routines to manage ATE
1431          * allocation.  We should probably replace this with a
1432          * Best Fit allocator.
1433          *
1434          * For now, if we have external SSRAM, avoid using
1435          * the internal ssram: we can't turn PREFETCH on
1436          * when we use the internal SSRAM; and besides,
1437          * this also guarantees that no allocation will
1438          * straddle the internal/external line, so we
1439          * can increment ATE write addresses rather than
1440          * recomparing against BRIDGE_INTERNAL_ATES every
1441          * time.
1442          */
1443 
1444         if (IS_XBRIDGE_OR_PIC_SOFT(pcibr_soft))
1445                 num_entries = 0;
1446         else
1447                 num_entries = pcibr_init_ext_ate_ram(bridge);
1448 
1449         /* we always have 128 ATEs (512 for Xbridge) inside the chip
1450          * even if disabled for debugging.
1451          */
1452         pcibr_soft->bs_int_ate_resource.start = 0;
1453         pcibr_soft->bs_int_ate_resource.end = pcibr_soft->bs_int_ate_size - 1;
1454 
1455         if (num_entries > pcibr_soft->bs_int_ate_size) {
1456 #if PCIBR_ATE_NOTBOTH                   /* for debug -- forces us to use external ates */
1457             printk("pcibr_attach: disabling internal ATEs.\n");
1458             pcibr_ate_alloc(pcibr_soft, pcibr_soft->bs_int_ate_size);
1459 #endif
1460            pcibr_soft->bs_ext_ate_resource.start = pcibr_soft->bs_int_ate_size;
1461            pcibr_soft->bs_ext_ate_resource.end = num_entries;
1462         }
1463 
1464         pcibr_soft->bs_allocated_ate_res = (void *) kmalloc(pcibr_soft->bs_int_ate_size * sizeof(unsigned long), GFP_KERNEL);
1465         memset(pcibr_soft->bs_allocated_ate_res, 0x0, pcibr_soft->bs_int_ate_size * sizeof(unsigned long));
1466 
1467         PCIBR_DEBUG_ALWAYS((PCIBR_DEBUG_ATE, pcibr_vhdl,
1468                     "pcibr_attach2: %d ATEs, %d internal & %d external\n",
1469                     num_entries ? num_entries : pcibr_soft->bs_int_ate_size,
1470                     pcibr_soft->bs_int_ate_size,
1471                     num_entries ? num_entries-pcibr_soft->bs_int_ate_size : 0));
1472     }
1473 
1474     {
1475         bridgereg_t             dirmap;
1476         iopaddr_t               xbase;
1477 
1478         /*
1479          * now figure the *real* xtalk base address
1480          * that dirmap sends us to.
1481          */
1482         dirmap = bridge->b_dir_map;
1483         if (dirmap & BRIDGE_DIRMAP_OFF)
1484             xbase = (iopaddr_t)(dirmap & BRIDGE_DIRMAP_OFF)
1485                         << BRIDGE_DIRMAP_OFF_ADDRSHFT;
1486         else if (dirmap & BRIDGE_DIRMAP_ADD512)
1487             xbase = 512 << 20;
1488         else
1489             xbase = 0;
1490 
1491         pcibr_soft->bs_dir_xbase = xbase;
1492 
1493         /* it is entirely possible that we may, at this
1494          * point, have our dirmap pointing somewhere
1495          * other than our "master" port.
1496          */
1497         pcibr_soft->bs_dir_xport =
1498             (dirmap & BRIDGE_DIRMAP_W_ID) >> BRIDGE_DIRMAP_W_ID_SHFT;
1499     }
1500 
1501     /* pcibr sources an error interrupt;
1502      * figure out where to send it.
1503      *
1504      * If any interrupts are enabled in bridge,
1505      * then the prom set us up and our interrupt
1506      * has already been reconnected in mlreset
1507      * above.
1508      *
1509      * Need to set the D_INTR_ISERR flag
1510      * in the dev_desc used for allocating the
1511      * error interrupt, so our interrupt will
1512      * be properly routed and prioritized.
1513      *
1514      * If our crosstalk provider wants to
1515      * fix widget error interrupts to specific
1516      * destinations, D_INTR_ISERR is how it
1517      * knows to do this.
1518      */
1519 
1520     xtalk_intr = xtalk_intr_alloc(xconn_vhdl, (device_desc_t)0, pcibr_vhdl);
1521         {
1522                 int irq = ((hub_intr_t)xtalk_intr)->i_bit;
1523                 int cpu = ((hub_intr_t)xtalk_intr)->i_cpuid;
1524 
1525                 intr_unreserve_level(cpu, irq);
1526                 ((hub_intr_t)xtalk_intr)->i_bit = SGI_PCIBR_ERROR;
1527         }
1528     ASSERT(xtalk_intr != NULL);
1529 
1530     pcibr_soft->bsi_err_intr = xtalk_intr;
1531 
1532     /*
1533      * On IP35 with XBridge, we do some extra checks in pcibr_setwidint
1534      * in order to work around some addressing limitations.  In order
1535      * for that fire wall to work properly, we need to make sure we
1536      * start from a known clean state.
1537      */
1538     pcibr_clearwidint(bridge);
1539 
1540     xtalk_intr_connect(xtalk_intr, (intr_func_t) pcibr_error_intr_handler,
1541                 (intr_arg_t) pcibr_soft, (xtalk_intr_setfunc_t)pcibr_setwidint, (void *)bridge);
1542 
1543     request_irq(SGI_PCIBR_ERROR, (void *)pcibr_error_intr_handler, SA_SHIRQ, "PCIBR error",
1544                                         (intr_arg_t) pcibr_soft);
1545 
1546     PCIBR_DEBUG_ALWAYS((PCIBR_DEBUG_INTR_ALLOC, pcibr_vhdl,
1547                 "pcibr_setwidint: b_wid_int_upper=0x%x, b_wid_int_lower=0x%x\n",
1548                 bridge->b_wid_int_upper, bridge->b_wid_int_lower));
1549 
1550     /*
1551      * now we can start handling error interrupts;
1552      * enable all of them.
1553      * NOTE: some PCI ints may already be enabled.
1554      */
1555     /* We read the INT_ENABLE register as a 64bit picreg_t for PIC and a
1556      * 32bit bridgereg_t for BRIDGE, but always process the result as a
1557      * 64bit value so the code can be "common" for both PIC and BRIDGE...
1558      */
1559     if (IS_PIC_SOFT(pcibr_soft)) {
1560         int_enable_64 = bridge->p_int_enable_64 | BRIDGE_ISR_ERRORS;
1561         int_enable = (uint64_t)int_enable_64;
1562 #ifdef PFG_TEST
1563         int_enable = (uint64_t)0x7ffffeff7ffffeff;
1564 #endif
1565     } else {
1566         int_enable_32 = bridge->b_int_enable | (BRIDGE_ISR_ERRORS & 0xffffffff);
1567         int_enable = ((uint64_t)int_enable_32 & 0xffffffff);
1568 #ifdef PFG_TEST
1569         int_enable = (uint64_t)0x7ffffeff;
1570 #endif
1571     }
1572 
1573 
1574 #if BRIDGE_ERROR_INTR_WAR
1575     if (pcibr_soft->bs_rev_num == BRIDGE_PART_REV_A) {
1576         /*
1577          * We commonly get master timeouts when talking to ql.
1578          * We also see RESP_XTALK_ERROR and LLP_TX_RETRY interrupts.
         * Ensure that these are all disabled for now.
1580          */
1581         int_enable &= ~(BRIDGE_IMR_PCI_MST_TIMEOUT |
1582                         BRIDGE_ISR_RESP_XTLK_ERR |
1583                         BRIDGE_ISR_LLP_TX_RETRY);
1584     }
1585     if (pcibr_soft->bs_rev_num < BRIDGE_PART_REV_C) {
1586         int_enable &= ~BRIDGE_ISR_BAD_XRESP_PKT;
1587     }
1588 #endif                          /* BRIDGE_ERROR_INTR_WAR */
1589 
1590 #ifdef QL_SCSI_CTRL_WAR                 /* for IP30 only */
1591     /* Really a QL rev A issue, but all newer hearts have newer QLs.
1592      * Forces all IO6/MSCSI to be new.
1593      */
1594     if (heart_rev() == HEART_REV_A)
1595         int_enable &= ~BRIDGE_IMR_PCI_MST_TIMEOUT;
1596 #endif
1597 
1598 #ifdef BRIDGE1_TIMEOUT_WAR
1599     if (pcibr_soft->bs_rev_num == BRIDGE_PART_REV_A) {
1600         /*
1601          * Turn off these interrupts.  They can't be trusted in bridge 1
1602          */
1603         int_enable &= ~(BRIDGE_IMR_XREAD_REQ_TIMEOUT |
1604                         BRIDGE_IMR_UNEXP_RESP);
1605     }
1606 #endif
1607 
1608     /* PIC BRINGUP WAR (PV# 856864 & 856865): allow the tnums that are
1609      * locked out to be freed up sooner (by timing out) so that the
1610      * read tnums are never completely used up.
1611      */
1612     if (IS_PIC_SOFT(pcibr_soft) && PCIBR_WAR_ENABLED(PV856864, pcibr_soft)) {
1613         int_enable &= ~PIC_ISR_PCIX_REQ_TOUT;
1614         int_enable &= ~BRIDGE_ISR_XREAD_REQ_TIMEOUT;
1615 
1616         bridge->b_wid_req_timeout = 0x750;
1617     }
1618 
1619     /*
1620      * PIC BRINGUP WAR (PV# 856866, 859504, 861476, 861478): Don't use
1621      * RRB0, RRB8, RRB1, and RRB9.  Assign them to DEVICE[2|3]--VCHAN3
1622      * so they are not used
1623      */
1624     if (IS_PIC_SOFT(pcibr_soft) && PCIBR_WAR_ENABLED(PV856866, pcibr_soft)) {
1625         bridge->b_even_resp |= 0x000f000f;
1626         bridge->b_odd_resp |= 0x000f000f;
1627     }
1628 
1629     if (IS_PIC_SOFT(pcibr_soft)) {
1630         bridge->p_int_enable_64 = (picreg_t)int_enable;
1631     } else {
1632         bridge->b_int_enable = (bridgereg_t)int_enable;
1633     }
1634     bridge->b_int_mode = 0;             /* do not send "clear interrupt" packets */
1635 
1636     bridge->b_wid_tflush;               /* wait until Bridge PIO complete */
1637 
1638     /*
1639      * Depending on the rev of bridge, disable certain features.
1640      * Easiest way seems to be to force the PCIBR_NOwhatever
1641      * flag to be on for all DMA calls, which overrides any
1642      * PCIBR_whatever flag or even the setting of whatever
1643      * from the PCIIO_DMA_class flags (or even from the other
1644      * PCIBR flags, since NO overrides YES).
1645      */
1646     pcibr_soft->bs_dma_flags = 0;
1647 
1648     /* PREFETCH:
1649      * Always completely disabled for REV.A;
1650      * at "pcibr_prefetch_enable_rev", anyone
1651      * asking for PCIIO_PREFETCH gets it.
1652      * Between these two points, you have to ask
1653      * for PCIBR_PREFETCH, which promises that
1654      * your driver knows about known Bridge WARs.
1655      */
1656     if (pcibr_soft->bs_rev_num < BRIDGE_PART_REV_B)
1657         pcibr_soft->bs_dma_flags |= PCIBR_NOPREFETCH;
1658     else if (pcibr_soft->bs_rev_num < 
1659                 (BRIDGE_WIDGET_PART_NUM << 4))
1660         pcibr_soft->bs_dma_flags |= PCIIO_NOPREFETCH;
1661 
1662     /* WRITE_GATHER: Disabled */
1663     if (pcibr_soft->bs_rev_num < 
1664                 (BRIDGE_WIDGET_PART_NUM << 4))
1665         pcibr_soft->bs_dma_flags |= PCIBR_NOWRITE_GATHER;
1666 
1667     /* PIC only supports 64-bit direct mapping in PCI-X mode.  Since
1668      * all PCI-X devices that initiate memory transactions must be
     * capable of generating 64-bit addresses, we force 64-bit DMAs.
1670      */
1671     if (IS_PCIX(pcibr_soft)) {
1672         pcibr_soft->bs_dma_flags |= PCIIO_DMA_A64;
1673     }
1674 
1675     {
1676 
1677     iopaddr_t               prom_base_addr = pcibr_soft->bs_xid << 24;
1678     int                     prom_base_size = 0x1000000;
1679     int                     status;
1680     struct resource         *res;
1681 
1682     /* Allocate resource maps based on bus page size; for I/O and memory
1683      * space, free all pages except those in the base area and in the
1684      * range set by the PROM. 
1685      *
1686      * PROM creates BAR addresses in this format: 0x0ws00000 where w is
1687      * the widget number and s is the device register offset for the slot.
1688      */
1689 
1690     /* Setup the Bus's PCI IO Root Resource. */
1691     pcibr_soft->bs_io_win_root_resource.start = PCIBR_BUS_IO_BASE;
1692     pcibr_soft->bs_io_win_root_resource.end = 0xffffffff;
1693     res = (struct resource *) kmalloc( sizeof(struct resource), KM_NOSLEEP);
1694     if (!res)
1695         panic("PCIBR:Unable to allocate resource structure\n");
1696 
1697     /* Block off the range used by PROM. */
1698     res->start = prom_base_addr;
1699     res->end = prom_base_addr + (prom_base_size - 1);
1700     status = request_resource(&pcibr_soft->bs_io_win_root_resource, res);
1701     if (status)
1702         panic("PCIBR:Unable to request_resource()\n");
1703 
1704     /* Setup the Small Window Root Resource */
1705     pcibr_soft->bs_swin_root_resource.start = _PAGESZ;
1706     pcibr_soft->bs_swin_root_resource.end = 0x000FFFFF;
1707 
1708     /* Setup the Bus's PCI Memory Root Resource */
1709     pcibr_soft->bs_mem_win_root_resource.start = 0x200000;
1710     pcibr_soft->bs_mem_win_root_resource.end = 0xffffffff;
1711     res = (struct resource *) kmalloc( sizeof(struct resource), KM_NOSLEEP);
1712     if (!res)
1713         panic("PCIBR:Unable to allocate resource structure\n");
1714 
1715     /* Block off the range used by PROM. */
1716     res->start = prom_base_addr;
1717     res->end = prom_base_addr + (prom_base_size - 1);;
1718     status = request_resource(&pcibr_soft->bs_mem_win_root_resource, res);
1719     if (status)
1720         panic("PCIBR:Unable to request_resource()\n");
1721 
1722     }
1723 
1724     /* build "no-slot" connection point
1725      */
1726     pcibr_info = pcibr_device_info_new
1727         (pcibr_soft, PCIIO_SLOT_NONE, PCIIO_FUNC_NONE,
1728          PCIIO_VENDOR_ID_NONE, PCIIO_DEVICE_ID_NONE);
1729     noslot_conn = pciio_device_info_register
1730         (pcibr_vhdl, &pcibr_info->f_c);
1731 
1732     /* Remember the no slot connection point info for tearing it
1733      * down during detach.
1734      */
1735     pcibr_soft->bs_noslot_conn = noslot_conn;
1736     pcibr_soft->bs_noslot_info = pcibr_info;
1737 #if PCI_FBBE
1738     fast_back_to_back_enable = 1;
1739 #endif
1740 
1741 #if PCI_FBBE
1742     if (fast_back_to_back_enable) {
1743         /*
1744          * All devices on the bus are capable of fast back to back, so
1745          * we need to set the fast back to back bit in all devices on
1746          * the bus that are capable of doing such accesses.
1747          */
1748     }
1749 #endif
1750 
1751     for (slot = pcibr_soft->bs_min_slot; 
1752                                 slot < PCIBR_NUM_SLOTS(pcibr_soft); ++slot) {
1753         /* Find out what is out there */
1754         (void)pcibr_slot_info_init(pcibr_vhdl,slot);
1755     }
1756     for (slot = pcibr_soft->bs_min_slot; 
1757                                 slot < PCIBR_NUM_SLOTS(pcibr_soft); ++slot)
1758         /* Set up the address space for this slot in the PCI land */
1759         (void)pcibr_slot_addr_space_init(pcibr_vhdl, slot);
1760 
1761     for (slot = pcibr_soft->bs_min_slot; 
1762                                 slot < PCIBR_NUM_SLOTS(pcibr_soft); ++slot)
1763         /* Setup the device register */
1764         (void)pcibr_slot_device_init(pcibr_vhdl, slot);
1765 
1766     if (IS_PCIX(pcibr_soft)) {
1767         pcibr_soft->bs_pcix_rbar_inuse = 0;
1768         pcibr_soft->bs_pcix_rbar_avail = NUM_RBAR;
1769         pcibr_soft->bs_pcix_rbar_percent_allowed = 
1770                                         pcibr_pcix_rbars_calc(pcibr_soft);
1771 
1772         for (slot = pcibr_soft->bs_min_slot; 
1773                                 slot < PCIBR_NUM_SLOTS(pcibr_soft); ++slot)
1774             /* Setup the PCI-X Read Buffer Attribute Registers (RBARs) */
1775             (void)pcibr_slot_pcix_rbar_init(pcibr_soft, slot);
1776     }
1777 
1778     /* Set up convenience links */
1779     if (IS_XBRIDGE_OR_PIC_SOFT(pcibr_soft))
1780         pcibr_bus_cnvlink(pcibr_soft->bs_vhdl);
1781 
1782     for (slot = pcibr_soft->bs_min_slot; 
1783                                 slot < PCIBR_NUM_SLOTS(pcibr_soft); ++slot)
1784         /* Setup host/guest relations */
1785         (void)pcibr_slot_guest_info_init(pcibr_vhdl, slot);
1786 
1787     /* Handle initial RRB management for Bridge and Xbridge */
1788     pcibr_initial_rrb(pcibr_vhdl, 
1789                       pcibr_soft->bs_first_slot, pcibr_soft->bs_last_slot);
1790     
1791 {  /* Before any drivers get called that may want to re-allocate
1792     * RRB's, let's get some special cases pre-allocated. Drivers
1793     * may override these pre-allocations, but by doing pre-allocations
1794     * now we're assured not to step all over what the driver intended.
1795     *
1796     * Note: Someday this should probably be moved over to pcibr_rrb.c
1797     */
1798     /*
1799      * Each Pbrick PCI bus only has slots 1 and 2.   Similarly for
1800      * widget 0xe on Ibricks.  Allocate RRB's accordingly.
1801      */
1802     if (pcibr_soft->bs_bricktype > 0) {
1803         switch (pcibr_soft->bs_bricktype) {
1804         case MODULE_PBRICK:
1805                 do_pcibr_rrb_autoalloc(pcibr_soft, 1, VCHAN0, 8);
1806                 do_pcibr_rrb_autoalloc(pcibr_soft, 2, VCHAN0, 8);
1807                 break;
1808         case MODULE_IBRICK:
1809                 /* port 0xe on the Ibrick only has slots 1 and 2 */
1810                 if (pcibr_soft->bs_xid == 0xe) {
1811                         do_pcibr_rrb_autoalloc(pcibr_soft, 1, VCHAN0, 8);
1812                         do_pcibr_rrb_autoalloc(pcibr_soft, 2, VCHAN0, 8);
1813                 }
1814                 else {
1815                         /* allocate one RRB for the serial port */
1816                         do_pcibr_rrb_autoalloc(pcibr_soft, 0, VCHAN0, 1);
1817                 }
1818                 break;
1819         case MODULE_PXBRICK:
1820         case MODULE_IXBRICK:
1821                 /* 
1822                  * If the IO9 is in the PXBrick (bus1, slot1) allocate
1823                  * RRBs to all the devices
1824                  */
1825                 if ((pcibr_widget_to_bus(pcibr_vhdl) == 1) &&
1826                     (pcibr_soft->bs_slot[0].bss_vendor_id == 0x10A9) &&
1827                     (pcibr_soft->bs_slot[0].bss_device_id == 0x100A)) {
1828                         do_pcibr_rrb_autoalloc(pcibr_soft, 0, VCHAN0, 4);
1829                         do_pcibr_rrb_autoalloc(pcibr_soft, 1, VCHAN0, 4);
1830                         do_pcibr_rrb_autoalloc(pcibr_soft, 2, VCHAN0, 4);
1831                         do_pcibr_rrb_autoalloc(pcibr_soft, 3, VCHAN0, 4);
1832                 } else {
1833                         do_pcibr_rrb_autoalloc(pcibr_soft, 0, VCHAN0, 8);
1834                         do_pcibr_rrb_autoalloc(pcibr_soft, 1, VCHAN0, 8);
1835                 }
1836                 break;
1837         } /* switch */
1838     }
1839 
1840 #ifdef LATER
1841     if (strstr(nicinfo, XTALK_PCI_PART_NUM)) {
1842         do_pcibr_rrb_autoalloc(pcibr_soft, 1, VCHAN0, 8);
1843     }
1844 #endif
1845 }  /* OK Special RRB allocations are done. */
1846 
1847     for (slot = pcibr_soft->bs_min_slot; 
1848                                 slot < PCIBR_NUM_SLOTS(pcibr_soft); ++slot)
1849         /* Call the device attach */
1850         (void)pcibr_slot_call_device_attach(pcibr_vhdl, slot, 0);
1851 
1852     pciio_device_attach(noslot_conn, (int)0);
1853 
1854     return 0;
1855 }
1856 
/*
 * pcibr_detach:
 *      Detach the bridge device from the hwgraph after cleaning out all the 
 *      underlying vertices.
 *
 *      xconn: crosstalk connection point vertex for the bridge.
 *      Returns 0 on success, 1 if no PCI vertex hangs off "xconn".
 */

int
pcibr_detach(vertex_hdl_t xconn)
{
    pciio_slot_t        slot;
    vertex_hdl_t        pcibr_vhdl;
    pcibr_soft_t        pcibr_soft;
    bridge_t            *bridge;
    unsigned             s;         /* lock state saved by pcibr_lock() */

    PCIBR_DEBUG_ALWAYS((PCIBR_DEBUG_DETACH, xconn, "pcibr_detach\n"));

    /* Get the bridge vertex from its xtalk connection point */
    if (hwgraph_traverse(xconn, EDGE_LBL_PCI, &pcibr_vhdl) != GRAPH_SUCCESS)
        return(1);

    pcibr_soft = pcibr_soft_get(pcibr_vhdl);
    bridge = pcibr_soft->bs_base;


    s = pcibr_lock(pcibr_soft);
    /* Disable the interrupts from the bridge.
     * PIC exposes a 64-bit interrupt-enable register;
     * Bridge/Xbridge exposes a 32-bit one.
     */
    if (IS_PIC_SOFT(pcibr_soft)) {
        bridge->p_int_enable_64 = 0;
    } else {
        bridge->b_int_enable = 0;
    }
    pcibr_unlock(pcibr_soft, s);

    /* Detach all the PCI devices talking to this bridge */
    for (slot = pcibr_soft->bs_min_slot; 
                                slot < PCIBR_NUM_SLOTS(pcibr_soft); ++slot) {
        pcibr_slot_detach(pcibr_vhdl, slot, 0, (char *)NULL, (int *)NULL);
    }

    /* Unregister the no-slot connection point (created at attach time) */
    pciio_device_info_unregister(pcibr_vhdl,
                                 &(pcibr_soft->bs_noslot_info->f_c));

    spin_lock_destroy(&pcibr_soft->bs_lock);
    kfree(pcibr_soft->bs_name);
    
    /* Disconnect the error interrupt and free the xtalk resources 
     * associated with it.
     * NOTE(review): the SGI_PCIBR_ERROR irq requested at attach time
     * does not appear to be released here — verify against attach path.
     */
    xtalk_intr_disconnect(pcibr_soft->bsi_err_intr);
    xtalk_intr_free(pcibr_soft->bsi_err_intr);

    /* Clear the software state maintained by the bridge driver for this
     * bridge.
     */
    DEL(pcibr_soft);
    /* Remove the Bridge revision labelled info */
    (void)hwgraph_info_remove_LBL(pcibr_vhdl, INFO_LBL_PCIBR_ASIC_REV, NULL);
    /* Remove the character device associated with this bridge */
    (void)hwgraph_edge_remove(pcibr_vhdl, EDGE_LBL_CONTROLLER, NULL);
    /* Remove the PCI bridge vertex */
    (void)hwgraph_edge_remove(xconn, EDGE_LBL_PCI, NULL);

    return(0);
}
1923 
1924 int
1925 pcibr_asic_rev(vertex_hdl_t pconn_vhdl)
1926 {
1927     vertex_hdl_t          pcibr_vhdl;
1928     int                     tmp_vhdl;
1929     arbitrary_info_t        ainfo;
1930 
1931     if (GRAPH_SUCCESS !=
1932         hwgraph_traverse(pconn_vhdl, EDGE_LBL_MASTER, &pcibr_vhdl))
1933         return -1;
1934 
1935     tmp_vhdl = hwgraph_info_get_LBL(pcibr_vhdl, INFO_LBL_PCIBR_ASIC_REV, &ainfo);
1936 
1937     /*
1938      * Any hwgraph function that returns a vertex handle will implicity
1939      * increment that vertex's reference count.  The caller must explicity
1940      * decrement the vertex's referece count after the last reference to
1941      * that vertex.
1942      *
1943      * Decrement reference count incremented by call to hwgraph_traverse().
1944      *
1945      */
1946     hwgraph_vertex_unref(pcibr_vhdl);
1947 
1948     if (tmp_vhdl != GRAPH_SUCCESS) 
1949         return -1;
1950     return (int) ainfo;
1951 }
1952 
1953 int
1954 pcibr_write_gather_flush(vertex_hdl_t pconn_vhdl)
1955 {
1956     pciio_info_t  pciio_info = pciio_info_get(pconn_vhdl);
1957     pcibr_soft_t  pcibr_soft = (pcibr_soft_t) pciio_info_mfast_get(pciio_info);
1958     pciio_slot_t  slot;
1959     slot = PCIBR_INFO_SLOT_GET_INT(pciio_info);
1960     pcibr_device_write_gather_flush(pcibr_soft, slot);
1961     return 0;
1962 }
1963 
1964 /* =====================================================================
1965  *    PIO MANAGEMENT
1966  */
1967 
1968 static iopaddr_t
1969 pcibr_addr_pci_to_xio(vertex_hdl_t pconn_vhdl,
1970                       pciio_slot_t slot,
1971                       pciio_space_t space,
1972                       iopaddr_t pci_addr,
1973                       size_t req_size,
1974                       unsigned flags)
1975 {
1976     pcibr_info_t            pcibr_info = pcibr_info_get(pconn_vhdl);
1977     pciio_info_t            pciio_info = &pcibr_info->f_c;
1978     pcibr_soft_t            pcibr_soft = (pcibr_soft_t) pciio_info_mfast_get(pciio_info);
1979     bridge_t               *bridge = pcibr_soft->bs_base;
1980 
1981     unsigned                bar;        /* which BASE reg on device is decoding */
1982     iopaddr_t               xio_addr = XIO_NOWHERE;
1983     iopaddr_t               base;       /* base of devio(x) mapped area on PCI */
1984     iopaddr_t               limit;      /* base of devio(x) mapped area on PCI */
1985 
1986     pciio_space_t           wspace;     /* which space device is decoding */
1987     iopaddr_t               wbase;      /* base of device decode on PCI */
1988     size_t                  wsize;      /* size of device decode on PCI */
1989 
1990     int                     try;        /* DevIO(x) window scanning order control */
1991     int                     maxtry, halftry;
1992     int                     win;        /* which DevIO(x) window is being used */
1993     pciio_space_t           mspace;     /* target space for devio(x) register */
1994     iopaddr_t               mbase;      /* base of devio(x) mapped area on PCI */
1995     size_t                  msize;      /* size of devio(x) mapped area on PCI */
1996     size_t                  mmask;      /* addr bits stored in Device(x) */
1997     char                    tmp_str[512];
1998 
1999     unsigned long           s;
2000 
2001     s = pcibr_lock(pcibr_soft);
2002 
2003     if (pcibr_soft->bs_slot[slot].has_host) {
2004         slot = pcibr_soft->bs_slot[slot].host_slot;
2005         pcibr_info = pcibr_soft->bs_slot[slot].bss_infos[0];
2006 
2007         /*
2008          * Special case for dual-slot pci devices such as ioc3 on IP27
2009          * baseio.  In these cases, pconn_vhdl should never be for a pci
2010          * function on a subordiate PCI bus, so we can safely reset pciio_info
2011          * to be the info struct embedded in pcibr_info.  Failure to do this
2012          * results in using a bogus pciio_info_t for calculations done later
2013          * in this routine.
2014          */
2015 
2016         pciio_info = &pcibr_info->f_c;
2017     }
2018     if (space == PCIIO_SPACE_NONE)
2019         goto done;
2020 
2021     if (space == PCIIO_SPACE_CFG) {
2022         /*
2023          * Usually, the first mapping
2024          * established to a PCI device
2025          * is to its config space.
2026          *
2027          * In any case, we definitely
2028          * do NOT need to worry about
2029          * PCI BASE registers, and
2030          * MUST NOT attempt to point
2031          * the DevIO(x) window at
2032          * this access ...
2033          */
2034         if (((flags & PCIIO_BYTE_STREAM) == 0) &&
2035             ((pci_addr + req_size) <= BRIDGE_TYPE0_CFG_FUNC_OFF))
2036             xio_addr = pci_addr + PCIBR_TYPE0_CFG_DEV(pcibr_soft, slot);
2037 
2038         goto done;
2039     }
2040     if (space == PCIIO_SPACE_ROM) {
2041         /* PIO to the Expansion Rom.
2042          * Driver is responsible for
2043          * enabling and disabling
2044          * decodes properly.
2045          */
2046         wbase = pciio_info->c_rbase;
2047         wsize = pciio_info->c_rsize;
2048 
2049         /*
2050          * While the driver should know better
2051          * than to attempt to map more space
2052          * than the device is decoding, he might
2053          * do it; better to bail out here.
2054          */
2055         if ((pci_addr + req_size) > wsize)
2056             goto done;
2057 
2058         pci_addr += wbase;
2059         space = PCIIO_SPACE_MEM;
2060     }
2061     /*
2062      * reduce window mappings to raw
2063      * space mappings (maybe allocating
2064      * windows), and try for DevIO(x)
2065      * usage (setting it if it is available).
2066      */
2067     bar = space - PCIIO_SPACE_WIN0;
2068     if (bar < 6) {
2069         wspace = pciio_info->c_window[bar].w_space;
2070         if (wspace == PCIIO_SPACE_NONE)
2071             goto done;
2072 
2073         /* get PCI base and size */
2074         wbase = pciio_info->c_window[bar].w_base;
2075         wsize = pciio_info->c_window[bar].w_size;
2076 
2077         /*
2078          * While the driver should know better
2079          * than to attempt to map more space
2080          * than the device is decoding, he might
2081          * do it; better to bail out here.
2082          */
2083         if ((pci_addr + req_size) > wsize)
2084             goto done;
2085 
2086         /* shift from window relative to
2087          * decoded space relative.
2088          */
2089         pci_addr += wbase;
2090         space = wspace;
2091     } else
2092         bar = -1;
2093 
2094     /* Scan all the DevIO(x) windows twice looking for one
2095      * that can satisfy our request. The first time through,
2096      * only look at assigned windows; the second time, also
2097      * look at PCIIO_SPACE_NONE windows. Arrange the order
2098      * so we always look at our own window first.
2099      *
2100      * We will not attempt to satisfy a single request
2101      * by concatinating multiple windows.
2102      */
2103     maxtry = PCIBR_NUM_SLOTS(pcibr_soft) * 2;
2104     halftry = PCIBR_NUM_SLOTS(pcibr_soft) - 1;
2105     for (try = 0; try < maxtry; ++try) {
2106         bridgereg_t             devreg;
2107         unsigned                offset;
2108 
2109         /* calculate win based on slot, attempt, and max possible
2110            devices on bus */
2111         win = (try + slot) % PCIBR_NUM_SLOTS(pcibr_soft);
2112 
2113         /* If this DevIO(x) mapping area can provide
2114          * a mapping to this address, use it.
2115          */
2116         msize = (win < 2) ? 0x200000 : 0x100000;
2117         mmask = -msize;
2118         if (space != PCIIO_SPACE_IO)
2119             mmask &= 0x3FFFFFFF;
2120 
2121         offset = pci_addr & (msize - 1);
2122 
2123         /* If this window can't possibly handle that request,
2124          * go on to the next window.
2125          */
2126         if (((pci_addr & (msize - 1)) + req_size) > msize)
2127             continue;
2128 
2129         devreg = pcibr_soft->bs_slot[win].bss_device;
2130 
2131         /* Is this window "nailed down"?
2132          * If not, maybe we can use it.
2133          * (only check this the second time through)
2134          */
2135         mspace = pcibr_soft->bs_slot[win].bss_devio.bssd_space;
2136         if ((try > halftry) && (mspace == PCIIO_SPACE_NONE)) {
2137 
2138             /* If this is the primary DevIO(x) window
2139              * for some other device, skip it.
2140              */
2141             if ((win != slot) &&
2142                 (PCIIO_VENDOR_ID_NONE !=
2143                  pcibr_soft->bs_slot[win].bss_vendor_id))
2144                 continue;
2145 
2146             /* It's a free window, and we fit in it.
2147              * Set up Device(win) to our taste.
2148              */
2149             mbase = pci_addr & mmask;
2150 
2151             /* check that we would really get from
2152              * here to there.
2153              */
2154             if ((mbase | offset) != pci_addr)
2155                 continue;
2156 
2157             devreg &= ~BRIDGE_DEV_OFF_MASK;
2158             if (space != PCIIO_SPACE_IO)
2159                 devreg |= BRIDGE_DEV_DEV_IO_MEM;
2160             else
2161                 devreg &= ~BRIDGE_DEV_DEV_IO_MEM;
2162             devreg |= (mbase >> 20) & BRIDGE_DEV_OFF_MASK;
2163 
2164             /* default is WORD_VALUES.
2165              * if you specify both,
2166              * operation is undefined.
2167              */
2168             if (flags & PCIIO_BYTE_STREAM)
2169                 devreg |= BRIDGE_DEV_DEV_SWAP;
2170             else
2171                 devreg &= ~BRIDGE_DEV_DEV_SWAP;
2172 
2173             if (pcibr_soft->bs_slot[win].bss_device != devreg) {
2174                 if ( IS_PIC_SOFT(pcibr_soft) ) {
2175                         bridge->b_device[win].reg = devreg;
2176                         pcibr_soft->bs_slot[win].bss_device = devreg;
2177                         bridge->b_wid_tflush;   /* wait until Bridge PIO complete */
2178                 }
2179                 else {
2180                         if (io_get_sh_swapper(NASID_GET(bridge))) {
2181                                 BRIDGE_REG_SET32((&bridge->b_device[win].reg)) = __swab32(devreg);
2182                                 pcibr_soft->bs_slot[win].bss_device = devreg;
2183                                 BRIDGE_REG_GET32((&bridge->b_wid_tflush)); /* wait until Bridge PIO complete */
2184                         } else {
2185                                 bridge->b_device[win].reg = devreg;
2186                                 pcibr_soft->bs_slot[win].bss_device = devreg;
2187                                 bridge->b_wid_tflush;   /* wait until Bridge PIO complete */
2188                         }
2189                 }
2190 
2191 #ifdef PCI_LATER
2192                 PCIBR_DEBUG((PCIBR_DEBUG_DEVREG, pconn_vhdl, 
2193                             "pcibr_addr_pci_to_xio: Device(%d): %x\n",
2194                             win, devreg, device_bits));
2195 #endif
2196             }
2197             pcibr_soft->bs_slot[win].bss_devio.bssd_space = space;
2198             pcibr_soft->bs_slot[win].bss_devio.bssd_base = mbase;
2199             xio_addr = PCIBR_BRIDGE_DEVIO(pcibr_soft, win) + (pci_addr - mbase);
2200 
2201             /* Increment this DevIO's use count */
2202             pcibr_soft->bs_slot[win].bss_devio.bssd_ref_cnt++;
2203 
2204             /* Save the DevIO register index used to access this BAR */
2205             if (bar != -1)
2206                 pcibr_info->f_window[bar].w_devio_index = win;
2207 
2208             /*
2209              * The kernel only allows functions to have so many variable args,
2210              * attempting to call PCIBR_DEBUG_ALWAYS() with more than 5 printk
2211              * arguments fails so sprintf() it into a temporary string.
2212              */
2213             if (pcibr_debug_mask & PCIBR_DEBUG_PIOMAP) {
2214                 sprintf(tmp_str, "pcibr_addr_pci_to_xio: map to [%lx..%lx] for "
2215                         "slot %d allocates DevIO(%d) Device(%d) set to %lx\n",
2216                         (unsigned long)pci_addr, (unsigned long)(pci_addr + req_size - 1),
2217                         (unsigned int)slot, win, win, (unsigned long)devreg);
2218                 PCIBR_DEBUG((PCIBR_DEBUG_PIOMAP, pconn_vhdl, "%s", tmp_str));
2219             }
2220             goto done;
2221         }                               /* endif DevIO(x) not pointed */
2222         mbase = pcibr_soft->bs_slot[win].bss_devio.bssd_base;
2223 
2224         /* Now check for request incompat with DevIO(x)
2225          */
2226         if ((mspace != space) ||
2227             (pci_addr < mbase) ||
2228             ((pci_addr + req_size) > (mbase + msize)) ||
2229             ((flags & PCIIO_BYTE_STREAM) && !(devreg & BRIDGE_DEV_DEV_SWAP)) ||
2230             (!(flags & PCIIO_BYTE_STREAM) && (devreg & BRIDGE_DEV_DEV_SWAP)))
2231             continue;
2232 
2233         /* DevIO(x) window is pointed at PCI space
2234          * that includes our target. Calculate the
2235          * final XIO address, release the lock and
2236          * return.
2237          */
2238         xio_addr = PCIBR_BRIDGE_DEVIO(pcibr_soft, win) + (pci_addr - mbase);
2239 
2240         /* Increment this DevIO's use count */
2241         pcibr_soft->bs_slot[win].bss_devio.bssd_ref_cnt++;
2242 
2243         /* Save the DevIO register index used to access this BAR */
2244         if (bar != -1)
2245             pcibr_info->f_window[bar].w_devio_index = win;
2246 
2247         if (pcibr_debug_mask & PCIBR_DEBUG_PIOMAP) {
2248             PCIBR_DEBUG((PCIBR_DEBUG_PIOMAP, pconn_vhdl, "%s", tmp_str));
2249         }
2250         goto done;
2251     }
2252 
2253     switch (space) {
2254         /*
2255          * Accesses to device decode
         * areas that do not fit
2257          * within the DevIO(x) space are
2258          * modified to be accesses via
2259          * the direct mapping areas.
2260          *
2261          * If necessary, drivers can
2262          * explicitly ask for mappings
2263          * into these address spaces,
2264          * but this should never be needed.
2265          */
2266     case PCIIO_SPACE_MEM:               /* "mem space" */
2267     case PCIIO_SPACE_MEM32:             /* "mem, use 32-bit-wide bus" */
2268         if (IS_PIC_BUSNUM_SOFT(pcibr_soft, 0)) {        /* PIC bus 0 */
2269                 base = PICBRIDGE0_PCI_MEM32_BASE;
2270                 limit = PICBRIDGE0_PCI_MEM32_LIMIT;
2271         } else if (IS_PIC_BUSNUM_SOFT(pcibr_soft, 1)) { /* PIC bus 1 */
2272                 base = PICBRIDGE1_PCI_MEM32_BASE;
2273                 limit = PICBRIDGE1_PCI_MEM32_LIMIT;
2274         } else {                                        /* Bridge/Xbridge */
2275                 base = BRIDGE_PCI_MEM32_BASE;
2276                 limit = BRIDGE_PCI_MEM32_LIMIT;
2277         }
2278 
2279         if ((pci_addr + base + req_size - 1) <= limit)
2280             xio_addr = pci_addr + base;
2281         break;
2282 
2283     case PCIIO_SPACE_MEM64:             /* "mem, use 64-bit-wide bus" */
2284         if (IS_PIC_BUSNUM_SOFT(pcibr_soft, 0)) {        /* PIC bus 0 */
2285                 base = PICBRIDGE0_PCI_MEM64_BASE;
2286                 limit = PICBRIDGE0_PCI_MEM64_LIMIT;
2287         } else if (IS_PIC_BUSNUM_SOFT(pcibr_soft, 1)) { /* PIC bus 1 */
2288                 base = PICBRIDGE1_PCI_MEM64_BASE;
2289                 limit = PICBRIDGE1_PCI_MEM64_LIMIT;
2290         } else {                                        /* Bridge/Xbridge */
2291                 base = BRIDGE_PCI_MEM64_BASE;
2292                 limit = BRIDGE_PCI_MEM64_LIMIT;
2293         }
2294 
2295         if ((pci_addr + base + req_size - 1) <= limit)
2296             xio_addr = pci_addr + base;
2297         break;
2298 
2299     case PCIIO_SPACE_IO:                /* "i/o space" */
2300         /*
2301          * PIC bridges do not support big-window aliases into PCI I/O space
2302          */
2303         if (IS_PIC_SOFT(pcibr_soft)) {
2304                 xio_addr = XIO_NOWHERE;
2305                 break;
2306         }
2307 
2308         /* Bridge Hardware Bug WAR #482741:
2309          * The 4G area that maps directly from
2310          * XIO space to PCI I/O space is busted
2311          * until Bridge Rev D.
2312          */
2313         if ((pcibr_soft->bs_rev_num > BRIDGE_PART_REV_C) &&
2314             ((pci_addr + BRIDGE_PCI_IO_BASE + req_size - 1) <=
2315              BRIDGE_PCI_IO_LIMIT))
2316             xio_addr = pci_addr + BRIDGE_PCI_IO_BASE;
2317         break;
2318     }
2319 
2320     /* Check that "Direct PIO" byteswapping matches,
2321      * try to change it if it does not.
2322      */
2323     if (xio_addr != XIO_NOWHERE) {
2324         unsigned                bst;    /* nonzero to set bytestream */
2325         unsigned               *bfp;    /* addr of record of how swapper is set */
2326         unsigned                swb;    /* which control bit to mung */
2327         unsigned                bfo;    /* current swapper setting */
2328         unsigned                bfn;    /* desired swapper setting */
2329 
2330         bfp = ((space == PCIIO_SPACE_IO)
2331                ? (&pcibr_soft->bs_pio_end_io)
2332                : (&pcibr_soft->bs_pio_end_mem));
2333 
2334         bfo = *bfp;
2335 
2336         bst = flags & PCIIO_BYTE_STREAM;
2337 
2338         bfn = bst ? PCIIO_BYTE_STREAM : PCIIO_WORD_VALUES;
2339 
2340         if (bfn == bfo) {               /* we already match. */
2341             ;
2342         } else if (bfo != 0) {          /* we have a conflict. */
2343             if (pcibr_debug_mask & PCIBR_DEBUG_PIOMAP) {
2344                 PCIBR_DEBUG((PCIBR_DEBUG_PIOMAP, pconn_vhdl, "%s", tmp_str));
2345             }
2346             xio_addr = XIO_NOWHERE;
2347         } else {                        /* OK to make the change. */
2348             swb = (space == PCIIO_SPACE_IO) ? BRIDGE_CTRL_IO_SWAP : BRIDGE_CTRL_MEM_SWAP;
2349             if ( IS_PIC_SOFT(pcibr_soft) ) {
2350                 picreg_t             octl, nctl;
2351                 octl = bridge->p_wid_control_64;
2352                 nctl = bst ? octl | (uint64_t)swb : octl & ((uint64_t)~swb);
2353 
2354                 if (octl != nctl)               /* make the change if any */
2355                         bridge->b_wid_control = nctl;
2356             }
2357             else {
2358                 picreg_t             octl, nctl;
2359                 if (io_get_sh_swapper(NASID_GET(bridge))) {
2360                         octl = BRIDGE_REG_GET32((&bridge->b_wid_control));
2361                         nctl = bst ? octl | swb : octl & ~swb;
2362 
2363                         if (octl != nctl)           /* make the change if any */
2364                                 BRIDGE_REG_SET32((&bridge->b_wid_control)) = __swab32(nctl);
2365                 } else {
2366                         octl = bridge->b_wid_control;
2367                         nctl = bst ? octl | swb : octl & ~swb;
2368 
2369                         if (octl != nctl)           /* make the change if any */
2370                                 bridge->b_wid_control = nctl;
2371                 }
2372             }
2373             *bfp = bfn;                 /* record the assignment */
2374 
2375             if (pcibr_debug_mask & PCIBR_DEBUG_PIOMAP) {
2376                 PCIBR_DEBUG((PCIBR_DEBUG_PIOMAP, pconn_vhdl, "%s", tmp_str));
2377             }
2378         }
2379     }
2380   done:
2381     pcibr_unlock(pcibr_soft, s);
2382     return xio_addr;
2383 }
2384 
2385 /*ARGSUSED6 */
2386 pcibr_piomap_t
2387 pcibr_piomap_alloc(vertex_hdl_t pconn_vhdl,
2388                    device_desc_t dev_desc,
2389                    pciio_space_t space,
2390                    iopaddr_t pci_addr,
2391                    size_t req_size,
2392                    size_t req_size_max,
2393                    unsigned flags)
2394 {
2395     pcibr_info_t            pcibr_info = pcibr_info_get(pconn_vhdl);
2396     pciio_info_t            pciio_info = &pcibr_info->f_c;
2397     pciio_slot_t            pciio_slot = PCIBR_INFO_SLOT_GET_INT(pciio_info);
2398     pcibr_soft_t            pcibr_soft = (pcibr_soft_t) pciio_info_mfast_get(pciio_info);
2399     vertex_hdl_t            xconn_vhdl = pcibr_soft->bs_conn;
2400 
2401     pcibr_piomap_t         *mapptr;
2402     pcibr_piomap_t          maplist;
2403     pcibr_piomap_t          pcibr_piomap;
2404     iopaddr_t               xio_addr;
2405     xtalk_piomap_t          xtalk_piomap;
2406     unsigned long           s;
2407 
2408     /* Make sure that the req sizes are non-zero */
2409     if ((req_size < 1) || (req_size_max < 1)) {
2410         PCIBR_DEBUG_ALWAYS((PCIBR_DEBUG_PIOMAP, pconn_vhdl,
2411                     "pcibr_piomap_alloc: req_size | req_size_max < 1\n"));
2412         return NULL;
2413     }
2414 
2415     /*
2416      * Code to translate slot/space/addr
2417      * into xio_addr is common between
2418      * this routine and pcibr_piotrans_addr.
2419      */
2420     xio_addr = pcibr_addr_pci_to_xio(pconn_vhdl, pciio_slot, space, pci_addr, req_size, flags);
2421 
2422     if (xio_addr == XIO_NOWHERE) {
2423         PCIBR_DEBUG_ALWAYS((PCIBR_DEBUG_PIOMAP, pconn_vhdl,
2424                     "pcibr_piomap_alloc: xio_addr == XIO_NOWHERE\n"));
2425         return NULL;
2426     }
2427 
2428     /* Check the piomap list to see if there is already an allocated
2429      * piomap entry but not in use. If so use that one. Otherwise
2430      * allocate a new piomap entry and add it to the piomap list
2431      */
2432     mapptr = &(pcibr_info->f_piomap);
2433 
2434     s = pcibr_lock(pcibr_soft);
2435     for (pcibr_piomap = *mapptr;
2436          pcibr_piomap != NULL;
2437          pcibr_piomap = pcibr_piomap->bp_next) {
2438         if (pcibr_piomap->bp_mapsz == 0)
2439             break;
2440     }
2441 
2442     if (pcibr_piomap)
2443         mapptr = NULL;
2444     else {
2445         pcibr_unlock(pcibr_soft, s);
2446         NEW(pcibr_piomap);
2447     }
2448 
2449     pcibr_piomap->bp_dev = pconn_vhdl;
2450     pcibr_piomap->bp_slot = PCIBR_DEVICE_TO_SLOT(pcibr_soft, pciio_slot);
2451     pcibr_piomap->bp_flags = flags;
2452     pcibr_piomap->bp_space = space;
2453     pcibr_piomap->bp_pciaddr = pci_addr;
2454     pcibr_piomap->bp_mapsz = req_size;
2455     pcibr_piomap->bp_soft = pcibr_soft;
2456     pcibr_piomap->bp_toc[0] = ATOMIC_INIT(0);
2457 
2458     if (mapptr) {
2459         s = pcibr_lock(pcibr_soft);
2460         maplist = *mapptr;
2461         pcibr_piomap->bp_next = maplist;
2462         *mapptr = pcibr_piomap;
2463     }
2464     pcibr_unlock(pcibr_soft, s);
2465 
2466 
2467     if (pcibr_piomap) {
2468         xtalk_piomap =
2469             xtalk_piomap_alloc(xconn_vhdl, 0,
2470                                xio_addr,
2471                                req_size, req_size_max,
2472                                flags & PIOMAP_FLAGS);
2473         if (xtalk_piomap) {
2474             pcibr_piomap->bp_xtalk_addr = xio_addr;
2475             pcibr_piomap->bp_xtalk_pio = xtalk_piomap;
2476         } else {
2477             pcibr_piomap->bp_mapsz = 0;
2478             pcibr_piomap = 0;
2479         }
2480     }
2481     
2482     PCIBR_DEBUG((PCIBR_DEBUG_PIOMAP, pconn_vhdl,
2483                 "pcibr_piomap_alloc: map=0x%x\n", pcibr_piomap));
2484 
2485     return pcibr_piomap;
2486 }
2487 
2488 /*ARGSUSED */
2489 void
2490 pcibr_piomap_free(pcibr_piomap_t pcibr_piomap)
2491 {
2492     PCIBR_DEBUG((PCIBR_DEBUG_PIOMAP, pcibr_piomap->bp_dev,
2493                 "pcibr_piomap_free: map=0x%x\n", pcibr_piomap));
2494 
2495     xtalk_piomap_free(pcibr_piomap->bp_xtalk_pio);
2496     pcibr_piomap->bp_xtalk_pio = 0;
2497     pcibr_piomap->bp_mapsz = 0;
2498 }
2499 
2500 /*ARGSUSED */
2501 caddr_t
2502 pcibr_piomap_addr(pcibr_piomap_t pcibr_piomap,
2503                   iopaddr_t pci_addr,
2504                   size_t req_size)
2505 {
2506     caddr_t     addr;
2507     addr = xtalk_piomap_addr(pcibr_piomap->bp_xtalk_pio,
2508                              pcibr_piomap->bp_xtalk_addr +
2509                              pci_addr - pcibr_piomap->bp_pciaddr,
2510                              req_size);
2511     PCIBR_DEBUG((PCIBR_DEBUG_PIOMAP, pcibr_piomap->bp_dev,
2512                 "pcibr_piomap_free: map=0x%x, addr=0x%x\n", 
2513                 pcibr_piomap, addr));
2514 
2515     return(addr);
2516 }
2517 
2518 /*ARGSUSED */
2519 void
2520 pcibr_piomap_done(pcibr_piomap_t pcibr_piomap)
2521 {
2522     PCIBR_DEBUG((PCIBR_DEBUG_PIOMAP, pcibr_piomap->bp_dev,
2523                 "pcibr_piomap_done: map=0x%x\n", pcibr_piomap));
2524     xtalk_piomap_done(pcibr_piomap->bp_xtalk_pio);
2525 }
2526 
2527 /*ARGSUSED */
2528 caddr_t
2529 pcibr_piotrans_addr(vertex_hdl_t pconn_vhdl,
2530                     device_desc_t dev_desc,
2531                     pciio_space_t space,
2532                     iopaddr_t pci_addr,
2533                     size_t req_size,
2534                     unsigned flags)
2535 {
2536     pciio_info_t            pciio_info = pciio_info_get(pconn_vhdl);
2537     pciio_slot_t            pciio_slot = PCIBR_INFO_SLOT_GET_INT(pciio_info);
2538     pcibr_soft_t            pcibr_soft = (pcibr_soft_t) pciio_info_mfast_get(pciio_info);
2539     vertex_hdl_t            xconn_vhdl = pcibr_soft->bs_conn;
2540 
2541     iopaddr_t               xio_addr;
2542     caddr_t                 addr;
2543 
2544     xio_addr = pcibr_addr_pci_to_xio(pconn_vhdl, pciio_slot, space, pci_addr, req_size, flags);
2545 
2546     if (xio_addr == XIO_NOWHERE) {
2547         PCIBR_DEBUG_ALWAYS((PCIBR_DEBUG_PIODIR, pconn_vhdl,
2548                     "pcibr_piotrans_addr: xio_addr == XIO_NOWHERE\n"));
2549         return NULL;
2550     }
2551 
2552     addr = xtalk_piotrans_addr(xconn_vhdl, 0, xio_addr, req_size, flags & PIOMAP_FLAGS);
2553     PCIBR_DEBUG((PCIBR_DEBUG_PIODIR, pconn_vhdl,
2554                 "pcibr_piotrans_addr: xio_addr=0x%x, addr=0x%x\n",
2555                 xio_addr, addr));
2556     return(addr);
2557 }
2558 
2559 /*
2560  * PIO Space allocation and management.
2561  *      Allocate and Manage the PCI PIO space (mem and io space)
2562  *      This routine is pretty simplistic at this time, and
2563  *      does pretty trivial management of allocation and freeing.
 *      The current scheme is prone to fragmentation.
2565  *      Change the scheme to use bitmaps.
2566  */
2567 
2568 /*ARGSUSED */
2569 iopaddr_t
2570 pcibr_piospace_alloc(vertex_hdl_t pconn_vhdl,
2571                      device_desc_t dev_desc,
2572                      pciio_space_t space,
2573                      size_t req_size,
2574                      size_t alignment)
2575 {
2576     pcibr_info_t            pcibr_info = pcibr_info_get(pconn_vhdl);
2577     pciio_info_t            pciio_info = &pcibr_info->f_c;
2578     pcibr_soft_t            pcibr_soft = (pcibr_soft_t) pciio_info_mfast_get(pciio_info);
2579 
2580     pciio_piospace_t        piosp;
2581     unsigned long           s;
2582 
2583     iopaddr_t               start_addr;
2584     size_t                  align_mask;
2585 
2586     /*
2587      * Check for proper alignment
2588      */
2589     ASSERT(alignment >= NBPP);
2590     ASSERT((alignment & (alignment - 1)) == 0);
2591 
2592     align_mask = alignment - 1;
2593     s = pcibr_lock(pcibr_soft);
2594 
2595     /*
2596      * First look if a previously allocated chunk exists.
2597      */
2598     if ((piosp = pcibr_info->f_piospace)) {
2599         /*
2600          * Look through the list for a right sized free chunk.
2601          */
2602         do {
2603             if (piosp->free &&
2604                 (piosp->space == space) &&
2605                 (piosp->count >= req_size) &&
2606                 !(piosp->start & align_mask)) {
2607                 piosp->free = 0;
2608                 pcibr_unlock(pcibr_soft, s);
2609                 return piosp->start;
2610             }
2611             piosp = piosp->next;
2612         } while (piosp);
2613     }
2614     ASSERT(!piosp);
2615 
2616     /*
2617      * Allocate PCI bus address, usually for the Universe chip driver;
2618      * do not pass window info since the actual PCI bus address
2619      * space will never be freed.  The space may be reused after it
2620      * is logically released by pcibr_piospace_free().
2621      */
2622     switch (space) {
2623     case PCIIO_SPACE_IO:
2624         start_addr = pcibr_bus_addr_alloc(pcibr_soft, NULL,
2625                                           PCIIO_SPACE_IO,
2626                                           0, req_size, alignment);
2627         break;
2628 
2629     case PCIIO_SPACE_MEM:
2630     case PCIIO_SPACE_MEM32:
2631         start_addr = pcibr_bus_addr_alloc(pcibr_soft, NULL,
2632                                           PCIIO_SPACE_MEM32,
2633                                           0, req_size, alignment);
2634         break;
2635 
2636     default:
2637         ASSERT(0);
2638         pcibr_unlock(pcibr_soft, s);
2639         PCIBR_DEBUG_ALWAYS((PCIBR_DEBUG_PIOMAP, pconn_vhdl,
2640                     "pcibr_piospace_alloc: unknown space %d\n", space));
2641         return 0;
2642     }
2643 
2644     /*
2645      * If too big a request, reject it.
2646      */
2647     if (!start_addr) {
2648         pcibr_unlock(pcibr_soft, s);
2649         PCIBR_DEBUG_ALWAYS((PCIBR_DEBUG_PIOMAP, pconn_vhdl,
2650                     "pcibr_piospace_alloc: request 0x%x to big\n", req_size));
2651         return 0;
2652     }
2653 
2654     NEW(piosp);
2655     piosp->free = 0;
2656     piosp->space = space;
2657     piosp->start = start_addr;
2658     piosp->count = req_size;
2659     piosp->next = pcibr_info->f_piospace;
2660     pcibr_info->f_piospace = piosp;
2661 
2662     pcibr_unlock(pcibr_soft, s);
2663 
2664     PCIBR_DEBUG((PCIBR_DEBUG_PIOMAP, pconn_vhdl,
2665                 "pcibr_piospace_alloc: piosp=0x%x\n", piosp));
2666 
2667     return start_addr;
2668 }
2669 
2670 /*ARGSUSED */
2671 void
2672 pcibr_piospace_free(vertex_hdl_t pconn_vhdl,
2673                     pciio_space_t space,
2674                     iopaddr_t pciaddr,
2675                     size_t req_size)
2676 {
2677     pcibr_info_t            pcibr_info = pcibr_info_get(pconn_vhdl);
2678     pciio_piospace_t        piosp;
2679     unsigned long           s;
2680     char                    name[1024];
2681 
2682     /*
2683      * Look through the bridge data structures for the pciio_piospace_t
2684      * structure corresponding to  'pciaddr'
2685      */
2686     s = pcibr_lock(pcibr_soft);
2687     piosp = pcibr_info->f_piospace;
2688     while (piosp) {
2689         /*
2690          * Piospace free can only be for the complete
2691          * chunk and not parts of it..
2692          */
2693         if (piosp->start == pciaddr) {
2694             if (piosp->count == req_size)
2695                 break;
2696             /*
2697              * Improper size passed for freeing..
2698              * Print a message and break;
2699              */
2700             hwgraph_vertex_name_get(pconn_vhdl, name, 1024);
2701             printk(KERN_WARNING  "pcibr_piospace_free: error");
2702             printk(KERN_WARNING  "Device %s freeing size (0x%lx) different than allocated (0x%lx)",
2703                                         name, req_size, piosp->count);
2704             printk(KERN_WARNING  "Freeing 0x%lx instead", piosp->count);
2705             break;
2706         }
2707         piosp = piosp->next;
2708     }
2709 
2710     if (!piosp) {
2711         printk(KERN_WARNING  
2712                 "pcibr_piospace_free: Address 0x%lx size 0x%lx - No match\n",
2713                 pciaddr, req_size);
2714         pcibr_unlock(pcibr_soft, s);
2715         return;
2716     }
2717     piosp->free = 1;
2718     pcibr_unlock(pcibr_soft, s);
2719 
2720     PCIBR_DEBUG((PCIBR_DEBUG_PIOMAP, pconn_vhdl,
2721                 "pcibr_piospace_free: piosp=0x%x\n", piosp));
2722     return;
2723 }
2724 
2725 /* =====================================================================
2726  *    DMA MANAGEMENT
2727  *
2728  *      The Bridge ASIC provides three methods of doing
2729  *      DMA: via a "direct map" register available in
2730  *      32-bit PCI space (which selects a contiguous 2G
2731  *      address space on some other widget), via
2732  *      "direct" addressing via 64-bit PCI space (all
2733  *      destination information comes from the PCI
2734  *      address, including transfer attributes), and via
2735  *      a "mapped" region that allows a bunch of
2736  *      different small mappings to be established with
2737  *      the PMU.
2738  *
2739  *      For efficiency, we most prefer to use the 32-bit
2740  *      direct mapping facility, since it requires no
2741  *      resource allocations. The advantage of using the
2742  *      PMU over the 64-bit direct is that single-cycle
2743  *      PCI addressing can be used; the advantage of
2744  *      using 64-bit direct over PMU addressing is that
2745  *      we do not have to allocate entries in the PMU.
2746  */
2747 
2748 /*
2749  * Convert PCI-generic software flags and Bridge-specific software flags
2750  * into Bridge-specific Direct Map attribute bits.
2751  */
2752 static iopaddr_t
2753 pcibr_flags_to_d64(unsigned flags, pcibr_soft_t pcibr_soft)
2754 {
2755     iopaddr_t               attributes = 0;
2756 
2757     /* Sanity check: Bridge only allows use of VCHAN1 via 64-bit addrs */
2758 #ifdef LATER
2759     ASSERT_ALWAYS(!(flags & PCIBR_VCHAN1) || (flags & PCIIO_DMA_A64));
2760 #endif
2761 
2762     /* Generic macro flags
2763      */
2764     if (flags & PCIIO_DMA_DATA) {       /* standard data channel */
2765         attributes &= ~PCI64_ATTR_BAR;  /* no barrier bit */
2766         attributes |= PCI64_ATTR_PREF;  /* prefetch on */
2767     }
2768     if (flags & PCIIO_DMA_CMD) {        /* standard command channel */
2769         attributes |= PCI64_ATTR_BAR;   /* barrier bit on */
2770         attributes &= ~PCI64_ATTR_PREF; /* disable prefetch */
2771     }
2772     /* Generic detail flags
2773      */
2774     if (flags & PCIIO_PREFETCH)
2775         attributes |= PCI64_ATTR_PREF;
2776     if (flags & PCIIO_NOPREFETCH)
2777         attributes &= ~PCI64_ATTR_PREF;
2778 
2779     /* the swap bit is in the address attributes for xbridge */
2780     if (IS_XBRIDGE_OR_PIC_SOFT(pcibr_soft)) {
2781         if (flags & PCIIO_BYTE_STREAM)
2782                 attributes |= PCI64_ATTR_SWAP;
2783         if (flags & PCIIO_WORD_VALUES)
2784                 attributes &= ~PCI64_ATTR_SWAP;
2785     }
2786 
2787     /* Provider-specific flags
2788      */
2789     if (flags & PCIBR_BARRIER)
2790         attributes |= PCI64_ATTR_BAR;
2791     if (flags & PCIBR_NOBARRIER)
2792         attributes &= ~PCI64_ATTR_BAR;
2793 
2794     if (flags & PCIBR_PREFETCH)
2795         attributes |= PCI64_ATTR_PREF;
2796     if (flags & PCIBR_NOPREFETCH)
2797         attributes &= ~PCI64_ATTR_PREF;
2798 
2799     if (flags & PCIBR_PRECISE)
2800         attributes |= PCI64_ATTR_PREC;
2801     if (flags & PCIBR_NOPRECISE)
2802         attributes &= ~PCI64_ATTR_PREC;
2803 
2804     if (flags & PCIBR_VCHAN1)
2805         attributes |= PCI64_ATTR_VIRTUAL;
2806     if (flags & PCIBR_VCHAN0)
2807         attributes &= ~PCI64_ATTR_VIRTUAL;
2808 
2809     /* PIC in PCI-X mode only supports barrier & swap */
2810     if (IS_PCIX(pcibr_soft)) {
2811         attributes &= (PCI64_ATTR_BAR | PCI64_ATTR_SWAP);
2812     }
2813 
2814     return (attributes);
2815 }
2816 
2817 /*ARGSUSED */
2818 pcibr_dmamap_t
2819 pcibr_dmamap_alloc(vertex_hdl_t pconn_vhdl,
2820                    device_desc_t dev_desc,
2821                    size_t req_size_max,
2822                    unsigned flags)
2823 {
2824     pciio_info_t            pciio_info = pciio_info_get(pconn_vhdl);
2825     pcibr_soft_t            pcibr_soft = (pcibr_soft_t) pciio_info_mfast_get(pciio_info);
2826     vertex_hdl_t            xconn_vhdl = pcibr_soft->bs_conn;
2827     pciio_slot_t            slot;
2828     xwidgetnum_t            xio_port;
2829 
2830     xtalk_dmamap_t          xtalk_dmamap;
2831     pcibr_dmamap_t          pcibr_dmamap;
2832     int                     ate_count;
2833     int                     ate_index;
2834     int                     vchan = VCHAN0;
2835 
2836     /* merge in forced flags */
2837     flags |= pcibr_soft->bs_dma_flags;
2838 
2839     /*
2840      * On SNIA64, these maps are pre-allocated because pcibr_dmamap_alloc()
2841      * can be called within an interrupt thread.
2842      */
2843     pcibr_dmamap = (pcibr_dmamap_t)get_free_pciio_dmamap(pcibr_soft->bs_vhdl);
2844 
2845     if (!pcibr_dmamap)
2846         return 0;
2847 
2848     xtalk_dmamap = xtalk_dmamap_alloc(xconn_vhdl, dev_desc, req_size_max,
2849                                       flags & DMAMAP_FLAGS);
2850     if (!xtalk_dmamap) {
2851         PCIBR_DEBUG_ALWAYS((PCIBR_DEBUG_DMAMAP, pconn_vhdl,
2852                     "pcibr_dmamap_alloc: xtalk_dmamap_alloc failed\n"));
2853         free_pciio_dmamap(pcibr_dmamap);
2854         return 0;
2855     }
2856     xio_port = pcibr_soft->bs_mxid;
2857     slot = PCIBR_INFO_SLOT_GET_INT(pciio_info);
2858 
2859     pcibr_dmamap->bd_dev = pconn_vhdl;
2860     pcibr_dmamap->bd_slot = PCIBR_DEVICE_TO_SLOT(pcibr_soft, slot);
2861     pcibr_dmamap->bd_soft = pcibr_soft;
2862     pcibr_dmamap->bd_xtalk = xtalk_dmamap;
2863     pcibr_dmamap->bd_max_size = req_size_max;
2864     pcibr_dmamap->bd_xio_port = xio_port;
2865 
2866     if (flags & PCIIO_DMA_A64) {
2867         if (!pcibr_try_set_device(pcibr_soft, slot, flags, BRIDGE_DEV_D64_BITS)) {
2868             iopaddr_t               pci_addr;
2869             int                     have_rrbs;
2870             int                     min_rrbs;
2871 
2872             /* Device is capable of A64 operations,
2873              * and the attributes of the DMA are
2874              * consistent with any previous DMA
2875              * mappings using shared resources.
2876              */
2877 
2878             pci_addr = pcibr_flags_to_d64(flags, pcibr_soft);
2879 
2880             pcibr_dmamap->bd_flags = flags;
2881             pcibr_dmamap->bd_xio_addr = 0;
2882             pcibr_dmamap->bd_pci_addr = pci_addr;
2883 
2884             /* If in PCI mode, make sure we have an RRB (or two). 
2885              */
2886             if (IS_PCI(pcibr_soft) && 
2887                 !(pcibr_soft->bs_rrb_fixed & (1 << slot))) {
2888                 if (flags & PCIBR_VCHAN1)
2889                     vchan = VCHAN1;
2890                 have_rrbs = pcibr_soft->bs_rrb_valid[slot][vchan];
2891                 if (have_rrbs < 2) {
2892                     if (pci_addr & PCI64_ATTR_PREF)
2893                         min_rrbs = 2;
2894                     else
2895                         min_rrbs = 1;
2896                     if (have_rrbs < min_rrbs)
2897                         do_pcibr_rrb_autoalloc(pcibr_soft, slot, vchan,
2898                                                min_rrbs - have_rrbs);
2899                 }
2900             }
2901             PCIBR_DEBUG((PCIBR_DEBUG_DMAMAP | PCIBR_DEBUG_DMADIR, pconn_vhdl,
2902                         "pcibr_dmamap_alloc: using direct64, map=0x%x\n",
2903                         pcibr_dmamap));
2904             return pcibr_dmamap;
2905         }
2906         PCIBR_DEBUG_ALWAYS((PCIBR_DEBUG_DMAMAP | PCIBR_DEBUG_DMADIR, pconn_vhdl,
2907                     "pcibr_dmamap_alloc: unable to use direct64\n"));
2908 
2909         /* PIC only supports 64-bit direct mapping in PCI-X mode. */
2910         if (IS_PCIX(pcibr_soft)) {
2911             DEL(pcibr_dmamap);
2912             return 0;
2913         }
2914 
2915         flags &= ~PCIIO_DMA_A64;
2916     }
2917     if (flags & PCIIO_FIXED) {
2918         /* warning: mappings may fail later,
2919          * if direct32 can't get to the address.
2920          */
2921         if (!pcibr_try_set_device(pcibr_soft, slot, flags, BRIDGE_DEV_D32_BITS)) {
2922             /* User desires DIRECT A32 operations,
2923              * and the attributes of the DMA are
2924              * consistent with any previous DMA
2925              * mappings using shared resources.
2926              * Mapping calls may fail if target
2927              * is outside the direct32 range.
2928              */
2929             PCIBR_DEBUG((PCIBR_DEBUG_DMAMAP | PCIBR_DEBUG_DMADIR, pconn_vhdl,
2930                         "pcibr_dmamap_alloc: using direct32, map=0x%x\n", 
2931                         pcibr_dmamap));
2932             pcibr_dmamap->bd_flags = flags;
2933             pcibr_dmamap->bd_xio_addr = pcibr_soft->bs_dir_xbase;
2934             pcibr_dmamap->bd_pci_addr = PCI32_DIRECT_BASE;
2935             return pcibr_dmamap;
2936         }
2937         PCIBR_DEBUG_ALWAYS((PCIBR_DEBUG_DMAMAP | PCIBR_DEBUG_DMADIR, pconn_vhdl,
2938                     "pcibr_dmamap_alloc: unable to use direct32\n"));
2939 
2940         /* If the user demands FIXED and we can't
2941          * give it to him, fail.
2942          */
2943         xtalk_dmamap_free(xtalk_dmamap);
2944         free_pciio_dmamap(pcibr_dmamap);
2945         return 0;
2946     }
2947     /*
2948      * Allocate Address Translation Entries from the mapping RAM.
2949      * Unless the PCIBR_NO_ATE_ROUNDUP flag is specified,
2950      * the maximum number of ATEs is based on the worst-case
2951      * scenario, where the requested target is in the
2952      * last byte of an ATE; thus, mapping IOPGSIZE+2
2953      * does end up requiring three ATEs.
2954      */
2955     if (!(flags & PCIBR_NO_ATE_ROUNDUP)) {
2956         ate_count = IOPG((IOPGSIZE - 1) /* worst case start offset */
2957                      +req_size_max      /* max mapping bytes */
2958                      - 1) + 1;          /* round UP */
2959     } else {    /* assume requested target is page aligned */
2960         ate_count = IOPG(req_size_max   /* max mapping bytes */
2961                      - 1) + 1;          /* round UP */
2962     }
2963 
2964     ate_index = pcibr_ate_alloc(pcibr_soft, ate_count);
2965 
2966     if (ate_index != -1) {
2967         if (!pcibr_try_set_device(pcibr_soft, slot, flags, BRIDGE_DEV_PMU_BITS)) {
2968             bridge_ate_t            ate_proto;
2969             int                     have_rrbs;
2970             int                     min_rrbs;
2971 
2972             PCIBR_DEBUG((PCIBR_DEBUG_DMAMAP, pconn_vhdl,
2973                         "pcibr_dmamap_alloc: using PMU, ate_index=%d, "
2974                         "pcibr_dmamap=0x%x\n", ate_index, pcibr_dmamap));
2975 
2976             ate_proto = pcibr_flags_to_ate(flags);
2977 
2978             pcibr_dmamap->bd_flags = flags;
2979             pcibr_dmamap->bd_pci_addr =
2980                 PCI32_MAPPED_BASE + IOPGSIZE * ate_index;
2981             /*
2982              * for xbridge the byte-swap bit == bit 29 of PCI address
2983              */
2984             if (IS_XBRIDGE_OR_PIC_SOFT(pcibr_soft)) {
2985                     if (flags & PCIIO_BYTE_STREAM)
2986                             ATE_SWAP_ON(pcibr_dmamap->bd_pci_addr);
2987                     /*
2988                      * If swap was set in bss_device in pcibr_endian_set()
2989                      * we need to change the address bit.
2990                      */
2991                     if (pcibr_soft->bs_slot[slot].bss_device & 
2992                                                         BRIDGE_DEV_SWAP_PMU)
2993                             ATE_SWAP_ON(pcibr_dmamap->bd_pci_addr);
2994                     if (flags & PCIIO_WORD_VALUES)
2995                             ATE_SWAP_OFF(pcibr_dmamap->bd_pci_addr);
2996             }
2997             pcibr_dmamap->bd_xio_addr = 0;
2998             pcibr_dmamap->bd_ate_ptr = pcibr_ate_addr(pcibr_soft, ate_index);
2999             pcibr_dmamap->bd_ate_index = ate_index;
3000             pcibr_dmamap->bd_ate_count = ate_count;
3001             pcibr_dmamap->bd_ate_proto = ate_proto;
3002 
3003             /* Make sure we have an RRB (or two).
3004              */
3005             if (!(pcibr_soft->bs_rrb_fixed & (1 << slot))) {
3006                 have_rrbs = pcibr_soft->bs_rrb_valid[slot][vchan];
3007                 if (have_rrbs < 2) {
3008                     if (ate_proto & ATE_PREF)
3009                         min_rrbs = 2;
3010                     else
3011                         min_rrbs = 1;
3012                     if (have_rrbs < min_rrbs)
3013                         do_pcibr_rrb_autoalloc(pcibr_soft, slot, vchan,
3014                                                min_rrbs - have_rrbs);
3015                 }
3016             }
3017             if (ate_index >= pcibr_soft->bs_int_ate_size && 
3018                                 !IS_XBRIDGE_OR_PIC_SOFT(pcibr_soft)) {
3019                 bridge_t               *bridge = pcibr_soft->bs_base;
3020                 volatile unsigned      *cmd_regp;
3021                 unsigned                cmd_reg;
3022                 unsigned long           s;
3023 
3024                 pcibr_dmamap->bd_flags |= PCIBR_DMAMAP_SSRAM;
3025 
3026                 s = pcibr_lock(pcibr_soft);
3027                 cmd_regp = pcibr_slot_config_addr(bridge, slot, 
3028                                                 PCI_CFG_COMMAND/4);
3029                 if ( IS_PIC_SOFT(pcibr_soft) ) {
3030                         cmd_reg = pcibr_slot_config_get(bridge, slot, PCI_CFG_COMMAND/4);
3031                 }
3032                 else {
3033                         if (io_get_sh_swapper(NASID_GET(bridge))) {
3034                                 BRIDGE_REG_SET32((&cmd_reg)) = __swab32(*cmd_regp);
3035                         } else {
3036                                 cmd_reg = pcibr_slot_config_get(bridge, slot, PCI_CFG_COMMAND/4);
3037                         }
3038                 }
3039                 pcibr_soft->bs_slot[slot].bss_cmd_pointer = cmd_regp;
3040                 pcibr_soft->bs_slot[slot].bss_cmd_shadow = cmd_reg;
3041                 pcibr_unlock(pcibr_soft, s);
3042             }
3043             return pcibr_dmamap;
3044         }
3045         PCIBR_DEBUG_ALWAYS((PCIBR_DEBUG_DMAMAP, pconn_vhdl,
3046                     "pcibr_dmamap_alloc: PMU use failed, ate_index=%d\n",
3047                     ate_index));
3048 
3049         pcibr_ate_free(pcibr_soft, ate_index, ate_count);
3050     }
3051     /* total failure: sorry, you just can't
3052      * get from here to there that way.
3053      */
3054     PCIBR_DEBUG_ALWAYS((PCIBR_DEBUG_DMAMAP, pconn_vhdl,
3055                 "pcibr_dmamap_alloc: complete failure.\n"));
3056     xtalk_dmamap_free(xtalk_dmamap);
3057     free_pciio_dmamap(pcibr_dmamap);
3058     return 0;
3059 }
3060 
3061 /*ARGSUSED */
3062 void
3063 pcibr_dmamap_free(pcibr_dmamap_t pcibr_dmamap)
3064 {
3065     pcibr_soft_t            pcibr_soft = pcibr_dmamap->bd_soft;
3066     pciio_slot_t            slot = PCIBR_SLOT_TO_DEVICE(pcibr_soft,
3067                                                         pcibr_dmamap->bd_slot);
3068 
3069     unsigned                flags = pcibr_dmamap->bd_flags;
3070 
3071     /* Make sure that bss_ext_ates_active
3072      * is properly kept up to date.
3073      */
3074 
3075     if (PCIBR_DMAMAP_BUSY & flags)
3076         if (PCIBR_DMAMAP_SSRAM & flags)
3077             atomic_dec(&(pcibr_soft->bs_slot[slot]. bss_ext_ates_active));
3078 
3079     xtalk_dmamap_free(pcibr_dmamap->bd_xtalk);
3080 
3081     if (pcibr_dmamap->bd_flags & PCIIO_DMA_A64) {
3082         pcibr_release_device(pcibr_soft, slot, BRIDGE_DEV_D64_BITS);
3083     }
3084     if (pcibr_dmamap->bd_ate_count) {
3085         pcibr_ate_free(pcibr_dmamap->bd_soft,
3086                        pcibr_dmamap->bd_ate_index,
3087                        pcibr_dmamap->bd_ate_count);
3088         pcibr_release_device(pcibr_soft, slot, BRIDGE_DEV_PMU_BITS);
3089     }
3090 
3091     PCIBR_DEBUG((PCIBR_DEBUG_DMAMAP, pcibr_dmamap->bd_dev,
3092                 "pcibr_dmamap_free: pcibr_dmamap=0x%x\n", pcibr_dmamap));
3093 
3094     free_pciio_dmamap(pcibr_dmamap);
3095 }
3096 
3097 /*
3098  *    pcibr_addr_xio_to_pci: given a PIO range, hand
3099  *      back the corresponding base PCI MEM address;
3100  *      this is used to short-circuit DMA requests that
3101  *      loop back onto this PCI bus.
3102  */
3103 static iopaddr_t
3104 pcibr_addr_xio_to_pci(pcibr_soft_t soft,
3105                       iopaddr_t xio_addr,
3106                       size_t req_size)
3107 {
3108     iopaddr_t               xio_lim = xio_addr + req_size - 1;
3109     iopaddr_t               pci_addr;
3110     pciio_slot_t            slot;
3111 
3112     if (IS_PIC_BUSNUM_SOFT(soft, 0)) {
3113         if ((xio_addr >= PICBRIDGE0_PCI_MEM32_BASE) &&
3114             (xio_lim <= PICBRIDGE0_PCI_MEM32_LIMIT)) {
3115             pci_addr = xio_addr - PICBRIDGE0_PCI_MEM32_BASE;
3116             return pci_addr;
3117         }
3118         if ((xio_addr >= PICBRIDGE0_PCI_MEM64_BASE) &&
3119             (xio_lim <= PICBRIDGE0_PCI_MEM64_LIMIT)) {
3120             pci_addr = xio_addr - PICBRIDGE0_PCI_MEM64_BASE;
3121             return pci_addr;
3122         }
3123     } else if (IS_PIC_BUSNUM_SOFT(soft, 1)) {
3124         if ((xio_addr >= PICBRIDGE1_PCI_MEM32_BASE) &&
3125             (xio_lim <= PICBRIDGE1_PCI_MEM32_LIMIT)) {
3126             pci_addr = xio_addr - PICBRIDGE1_PCI_MEM32_BASE;
3127             return pci_addr;
3128         }
3129         if ((xio_addr >= PICBRIDGE1_PCI_MEM64_BASE) &&
3130             (xio_lim <= PICBRIDGE1_PCI_MEM64_LIMIT)) {
3131             pci_addr = xio_addr - PICBRIDGE1_PCI_MEM64_BASE;
3132             return pci_addr;
3133         }
3134     } else {
3135     if ((xio_addr >= BRIDGE_PCI_MEM32_BASE) &&
3136         (xio_lim <= BRIDGE_PCI_MEM32_LIMIT)) {
3137         pci_addr = xio_addr - BRIDGE_PCI_MEM32_BASE;
3138         return pci_addr;
3139     }
3140     if ((xio_addr >= BRIDGE_PCI_MEM64_BASE) &&
3141         (xio_lim <= BRIDGE_PCI_MEM64_LIMIT)) {
3142         pci_addr = xio_addr - BRIDGE_PCI_MEM64_BASE;
3143         return pci_addr;
3144     }
3145     }
3146     for (slot = soft->bs_min_slot; slot < PCIBR_NUM_SLOTS(soft); ++slot)
3147         if ((xio_addr >= PCIBR_BRIDGE_DEVIO(soft, slot)) &&
3148             (xio_lim < PCIBR_BRIDGE_DEVIO(soft, slot + 1))) {
3149             bridgereg_t             dev;
3150 
3151             dev = soft->bs_slot[slot].bss_device;
3152             pci_addr = dev & BRIDGE_DEV_OFF_MASK;
3153             pci_addr <<= BRIDGE_DEV_OFF_ADDR_SHFT;
3154             pci_addr += xio_addr - PCIBR_BRIDGE_DEVIO(soft, slot);
3155             return (dev & BRIDGE_DEV_DEV_IO_MEM) ? pci_addr : PCI_NOWHERE;
3156         }
3157     return 0;
3158 }
3159 
3160 /*ARGSUSED */
/*
 * pcibr_dmamap_addr: point a previously-allocated DMA map at the
 * target range [paddr .. paddr+req_size-1] and return the PCI bus
 * address the device should use for the transfer.  Three strategies,
 * chosen by the map's flags and target: loop-back to this bridge's
 * own PCI MEM space, 64-bit direct (PCIIO_DMA_A64), 32-bit fixed
 * direct (PCIIO_FIXED), or page-mapped via Bridge ATEs.  Returns 0
 * on failure.
 */
iopaddr_t
pcibr_dmamap_addr(pcibr_dmamap_t pcibr_dmamap,
                  paddr_t paddr,
                  size_t req_size)
{
    pcibr_soft_t            pcibr_soft;
    iopaddr_t               xio_addr;
    xwidgetnum_t            xio_port;
    iopaddr_t               pci_addr;
    unsigned                flags;

    ASSERT(pcibr_dmamap != NULL);
    ASSERT(req_size > 0);
    ASSERT(req_size <= pcibr_dmamap->bd_max_size);

    pcibr_soft = pcibr_dmamap->bd_soft;

    flags = pcibr_dmamap->bd_flags;

    /* Translate to a crosstalk (XIO) address; a "packed" result
     * carries the target widget port in-band, otherwise the port
     * recorded at map allocation time applies.
     */
    xio_addr = xtalk_dmamap_addr(pcibr_dmamap->bd_xtalk, paddr, req_size);
    if (XIO_PACKED(xio_addr)) {
        xio_port = XIO_PORT(xio_addr);
        xio_addr = XIO_ADDR(xio_addr);
    } else
        xio_port = pcibr_dmamap->bd_xio_port;

    /* If this DMA is to an address that
     * refers back to this Bridge chip,
     * reduce it back to the correct
     * PCI MEM address.
     */
    if (xio_port == pcibr_soft->bs_xid) {
        pci_addr = pcibr_addr_xio_to_pci(pcibr_soft, xio_addr, req_size);
    } else if (flags & PCIIO_DMA_A64) {
        /* A64 DMA:
         * always use 64-bit direct mapping,
         * which always works.
         * Device(x) was set up during
         * dmamap allocation.
         */

        /* attributes are already bundled up into bd_pci_addr.
         */
        pci_addr = pcibr_dmamap->bd_pci_addr
            | ((uint64_t) xio_port << PCI64_ATTR_TARG_SHFT)
            | xio_addr;

        /* Bridge Hardware WAR #482836:
         * If the transfer is not cache aligned
         * and the Bridge Rev is <= B, force
         * prefetch to be off.
         */
        if (flags & PCIBR_NOPREFETCH)
            pci_addr &= ~PCI64_ATTR_PREF;

        PCIBR_DEBUG((PCIBR_DEBUG_DMAMAP | PCIBR_DEBUG_DMADIR, 
                    pcibr_dmamap->bd_dev,
                    "pcibr_dmamap_addr: (direct64): wanted paddr [0x%x..0x%x] "
                    "XIO port 0x%x offset 0x%x, returning PCI 0x%x\n",
                    paddr, paddr + req_size - 1, xio_port, xio_addr, pci_addr));

    } else if (flags & PCIIO_FIXED) {
        /* A32 direct DMA:
         * always use 32-bit direct mapping,
         * which may fail.
         * Device(x) was set up during
         * dmamap allocation.
         */

        if (xio_port != pcibr_soft->bs_dir_xport)
            pci_addr = 0;               /* wrong DIDN */
        else if (xio_addr < pcibr_dmamap->bd_xio_addr)
            pci_addr = 0;               /* out of range */
        else if ((xio_addr + req_size) >
                 (pcibr_dmamap->bd_xio_addr + BRIDGE_DMA_DIRECT_SIZE))
            pci_addr = 0;               /* out of range */
        else
            pci_addr = pcibr_dmamap->bd_pci_addr +
                xio_addr - pcibr_dmamap->bd_xio_addr;

        PCIBR_DEBUG((PCIBR_DEBUG_DMAMAP | PCIBR_DEBUG_DMADIR, 
                    pcibr_dmamap->bd_dev,
                    "pcibr_dmamap_addr (direct32): wanted paddr [0x%x..0x%x] "
                    "XIO port 0x%x offset 0x%x, returning PCI 0x%x\n",
                    paddr, paddr + req_size - 1, xio_port, xio_addr, pci_addr));

    } else {
        /* PMU (page-mapped) case: program Bridge ATEs covering the
         * requested pages.  NOTE(review): cmd_regs, s, ate_total and
         * freeze_time look like scratch referenced by name from the
         * ATE_FREEZE/ATE_WRITE/ATE_THAW macros — confirm against the
         * macro definitions before renaming any of these locals.
         */
        bridge_t               *bridge = pcibr_soft->bs_base;
        iopaddr_t               offset = IOPGOFF(xio_addr);
        bridge_ate_t            ate_proto = pcibr_dmamap->bd_ate_proto;
        int                     ate_count = IOPG(offset + req_size - 1) + 1;

        int                     ate_index = pcibr_dmamap->bd_ate_index;
        unsigned                cmd_regs[8];
        unsigned                s;

#if PCIBR_FREEZE_TIME
        int                     ate_total = ate_count;
        unsigned                freeze_time;
#endif
        bridge_ate_p            ate_ptr = pcibr_dmamap->bd_ate_ptr;
        bridge_ate_t            ate;

        /* Bridge Hardware WAR #482836:
         * If the transfer is not cache aligned
         * and the Bridge Rev is <= B, force
         * prefetch to be off.
         */
        if (flags & PCIBR_NOPREFETCH)
            ate_proto &= ~ATE_PREF;

        ate = ate_proto
            | (xio_port << ATE_TIDSHIFT)
            | (xio_addr - offset);

        pci_addr = pcibr_dmamap->bd_pci_addr + offset;

        /* Fill in our mapping registers
         * with the appropriate xtalk data,
         * and hand back the PCI address.
         */

        ASSERT(ate_count > 0);
        if (ate_count <= pcibr_dmamap->bd_ate_count) {
                ATE_FREEZE();
                ATE_WRITE();
                ATE_THAW();
                if ( IS_PIC_SOFT(pcibr_soft) ) {
                        bridge->b_wid_tflush;   /* wait until Bridge PIO complete */
                }
                else {
                        if (io_get_sh_swapper(NASID_GET(bridge))) {
                                BRIDGE_REG_GET32((&bridge->b_wid_tflush));
                        } else {
                                bridge->b_wid_tflush;
                        }
                }
                PCIBR_DEBUG((PCIBR_DEBUG_DMAMAP, pcibr_dmamap->bd_dev,
                            "pcibr_dmamap_addr (PMU) : wanted paddr "
                            "[0x%x..0x%x] returning PCI 0x%x\n", 
                            paddr, paddr + req_size - 1, pci_addr));

        } else {
                /* The number of ATE's required is greater than the number
                 * allocated for this map. One way this can happen is if
                 * pcibr_dmamap_alloc() was called with the PCIBR_NO_ATE_ROUNDUP
                 * flag, and then when that map is used (right now), the
                 * target address tells us we really did need to roundup.
                 * The other possibility is that the map is just plain too
                 * small to handle the requested target area.
                 */
                PCIBR_DEBUG_ALWAYS((PCIBR_DEBUG_DMAMAP, pcibr_dmamap->bd_dev, 
                            "pcibr_dmamap_addr (PMU) : wanted paddr "
                            "[0x%x..0x%x] ate_count 0x%x bd_ate_count 0x%x "
                            "ATE's required > number allocated\n",
                             paddr, paddr + req_size - 1,
                             ate_count, pcibr_dmamap->bd_ate_count));
                pci_addr = 0;
        }

    }
    return pci_addr;
}
3324 
3325 /*ARGSUSED */
3326 void
3327 pcibr_dmamap_done(pcibr_dmamap_t pcibr_dmamap)
3328 {
3329     /*
3330      * We could go through and invalidate ATEs here;
3331      * for performance reasons, we don't.
3332      * We also don't enforce the strict alternation
3333      * between _addr/_list and _done, but Hub does.
3334      */
3335 
3336     if (pcibr_dmamap->bd_flags & PCIBR_DMAMAP_BUSY) {
3337         pcibr_dmamap->bd_flags &= ~PCIBR_DMAMAP_BUSY;
3338 
3339         if (pcibr_dmamap->bd_flags & PCIBR_DMAMAP_SSRAM)
3340             atomic_dec(&(pcibr_dmamap->bd_soft->bs_slot[pcibr_dmamap->bd_slot]. bss_ext_ates_active));
3341     }
3342     xtalk_dmamap_done(pcibr_dmamap->bd_xtalk);
3343 
3344     PCIBR_DEBUG((PCIBR_DEBUG_DMAMAP, pcibr_dmamap->bd_dev,
3345                 "pcibr_dmamap_done: pcibr_dmamap=0x%x\n", pcibr_dmamap));
3346 }
3347 
3348 
3349 /*
3350  * For each bridge, the DIR_OFF value in the Direct Mapping Register
3351  * determines the PCI to Crosstalk memory mapping to be used for all
3352  * 32-bit Direct Mapping memory accesses. This mapping can be to any
3353  * node in the system. This function will return that compact node id.
3354  */
3355 
3356 /*ARGSUSED */
3357 cnodeid_t
3358 pcibr_get_dmatrans_node(vertex_hdl_t pconn_vhdl)
3359 {
3360 
3361         pciio_info_t    pciio_info = pciio_info_get(pconn_vhdl);
3362         pcibr_soft_t    pcibr_soft = (pcibr_soft_t) pciio_info_mfast_get(pciio_info);
3363 
3364         return(NASID_TO_COMPACT_NODEID(NASID_GET(pcibr_soft->bs_dir_xbase)));
3365 }
3366 
3367 /*ARGSUSED */
/*
 * pcibr_dmatrans_addr: translate [paddr .. paddr+req_size-1] into a
 * PCI bus address using only the bridge's direct-mapped resources
 * (no per-map ATEs).  Tries, in order: loop-back to this bridge's
 * own PCI MEM space, 64-bit direct mapping (if the caller allows
 * PCIIO_DMA_A64), then the 32-bit DIR_OFF window.  Returns 0 when
 * no direct mapping can satisfy the request.
 */
iopaddr_t
pcibr_dmatrans_addr(vertex_hdl_t pconn_vhdl,
                    device_desc_t dev_desc,
                    paddr_t paddr,
                    size_t req_size,
                    unsigned flags)
{
    pciio_info_t            pciio_info = pciio_info_get(pconn_vhdl);
    pcibr_soft_t            pcibr_soft = (pcibr_soft_t) pciio_info_mfast_get(pciio_info);
    vertex_hdl_t            xconn_vhdl = pcibr_soft->bs_conn;
    pciio_slot_t            pciio_slot = PCIBR_INFO_SLOT_GET_INT(pciio_info);
    pcibr_soft_slot_t       slotp = &pcibr_soft->bs_slot[pciio_slot];

    xwidgetnum_t            xio_port;
    iopaddr_t               xio_addr;
    iopaddr_t               pci_addr;

    int                     have_rrbs;
    int                     min_rrbs;
    int                     vchan = VCHAN0;

    /* merge in forced flags */
    flags |= pcibr_soft->bs_dma_flags;

    /* Translate the physical address to a crosstalk address first. */
    xio_addr = xtalk_dmatrans_addr(xconn_vhdl, 0, paddr, req_size,
                                   flags & DMAMAP_FLAGS);
    if (!xio_addr) {
        PCIBR_DEBUG_ALWAYS((PCIBR_DEBUG_DMADIR, pconn_vhdl,
                    "pcibr_dmatrans_addr: wanted paddr [0x%x..0x%x], "
                    "xtalk_dmatrans_addr failed with 0x%x\n",
                    paddr, paddr + req_size - 1, xio_addr));
        return 0;
    }
    /*
     * find which XIO port this goes to.
     */
    if (XIO_PACKED(xio_addr)) {
        if (xio_addr == XIO_NOWHERE) {
            PCIBR_DEBUG_ALWAYS((PCIBR_DEBUG_DMADIR, pconn_vhdl,
                        "pcibr_dmatrans_addr: wanted paddr [0x%x..0x%x], "
                        "xtalk_dmatrans_addr failed with XIO_NOWHERE\n",
                        paddr, paddr + req_size - 1));
            return 0;
        }
        xio_port = XIO_PORT(xio_addr);
        xio_addr = XIO_ADDR(xio_addr);

    } else
        xio_port = pcibr_soft->bs_mxid;

    /*
     * If this DMA comes back to us,
     * return the PCI MEM address on
     * which it would land, or NULL
     * if the target is something
     * on bridge other than PCI MEM.
     */
    if (xio_port == pcibr_soft->bs_xid) {
        pci_addr = pcibr_addr_xio_to_pci(pcibr_soft, xio_addr, req_size);
        PCIBR_DEBUG((PCIBR_DEBUG_DMADIR, pconn_vhdl,
                    "pcibr_dmatrans_addr:  wanted paddr [0x%x..0x%x], "
                    "xio_port=0x%x, pci_addr=0x%x\n",
                    paddr, paddr + req_size - 1, xio_port, pci_addr));
        return pci_addr;
    }
    /* If the caller can use A64, try to
     * satisfy the request with the 64-bit
     * direct map. This can fail if the
     * configuration bits in Device(x)
     * conflict with our flags.
     */

    if (flags & PCIIO_DMA_A64) {
        pci_addr = slotp->bss_d64_base;
        if (!(flags & PCIBR_VCHAN1))
            flags |= PCIBR_VCHAN0;
        /* Fast path: this slot's Device(x) is already configured with
         * exactly these flags, so just compose the address.
         */
        if ((pci_addr != PCIBR_D64_BASE_UNSET) &&
            (flags == slotp->bss_d64_flags)) {

            pci_addr |=  xio_addr
                | ((uint64_t) xio_port << PCI64_ATTR_TARG_SHFT);

#if HWG_PERF_CHECK
            if (xio_addr != 0x20000000)
#endif
                PCIBR_DEBUG((PCIBR_DEBUG_DMADIR, pconn_vhdl,
                            "pcibr_dmatrans_addr:  wanted paddr [0x%x..0x%x], "
                            "xio_port=0x%x, direct64: pci_addr=0x%x\n",
                            paddr, paddr + req_size - 1, xio_addr, pci_addr));
            return (pci_addr);
        }
        /* Slow path: try to (re)program Device(x) for these flags and
         * cache the result in the slot's soft state.
         */
        if (!pcibr_try_set_device(pcibr_soft, pciio_slot, flags, BRIDGE_DEV_D64_BITS)) {
            pci_addr = pcibr_flags_to_d64(flags, pcibr_soft);
            slotp->bss_d64_flags = flags;
            slotp->bss_d64_base = pci_addr;
            pci_addr |= xio_addr
                | ((uint64_t) xio_port << PCI64_ATTR_TARG_SHFT);

            /* If in PCI mode, make sure we have an RRB (or two).
             */
            if (IS_PCI(pcibr_soft) && 
                !(pcibr_soft->bs_rrb_fixed & (1 << pciio_slot))) {
                if (flags & PCIBR_VCHAN1)
                    vchan = VCHAN1;
                have_rrbs = pcibr_soft->bs_rrb_valid[pciio_slot][vchan];
                if (have_rrbs < 2) {
                    if (pci_addr & PCI64_ATTR_PREF)
                        min_rrbs = 2;
                    else
                        min_rrbs = 1;
                    if (have_rrbs < min_rrbs)
                        do_pcibr_rrb_autoalloc(pcibr_soft, pciio_slot, vchan,
                                               min_rrbs - have_rrbs);
                }
            }
#if HWG_PERF_CHECK
            if (xio_addr != 0x20000000)
#endif
                PCIBR_DEBUG((PCIBR_DEBUG_DMADIR, pconn_vhdl,
                            "pcibr_dmatrans_addr:  wanted paddr [0x%x..0x%x], "
                            "xio_port=0x%x, direct64: pci_addr=0x%x, "
                            "new flags: 0x%x\n", paddr, paddr + req_size - 1,
                            xio_addr, pci_addr, (uint64_t) flags));
            return (pci_addr);
        }

        PCIBR_DEBUG((PCIBR_DEBUG_DMADIR, pconn_vhdl,
                    "pcibr_dmatrans_addr:  wanted paddr [0x%x..0x%x], "
                    "xio_port=0x%x, Unable to set direct64 Device(x) bits\n",
                    paddr, paddr + req_size - 1, xio_addr));

        /* PIC only supports 64-bit direct mapping in PCI-X mode */
        if (IS_PCIX(pcibr_soft)) {
            return 0;
        }

        /* our flags conflict with Device(x). try direct32*/
        flags = flags & ~(PCIIO_DMA_A64 | PCIBR_VCHAN0);
    }
    /* Try to satisfy the request with the 32-bit direct
     * map. This can fail if the configuration bits in
     * Device(x) conflict with our flags, or if the
     * target address is outside where DIR_OFF points.
     */
    {
        size_t                  map_size = 1ULL << 31;
        iopaddr_t               xio_base = pcibr_soft->bs_dir_xbase;
        iopaddr_t               offset = xio_addr - xio_base;
        iopaddr_t               endoff = req_size + offset;

        if ((req_size > map_size) ||
            (xio_addr < xio_base) ||
            (xio_port != pcibr_soft->bs_dir_xport) ||
            (endoff > map_size)) {

            PCIBR_DEBUG((PCIBR_DEBUG_DMADIR, pconn_vhdl,
                        "pcibr_dmatrans_addr:  wanted paddr [0x%x..0x%x], "
                        "xio_port=0x%x, xio region outside direct32 target\n",
                        paddr, paddr + req_size - 1, xio_addr));
        } else {
            pci_addr = slotp->bss_d32_base;
            /* Fast path: Device(x) already configured for these flags. */
            if ((pci_addr != PCIBR_D32_BASE_UNSET) &&
                (flags == slotp->bss_d32_flags)) {

                pci_addr |= offset;

                PCIBR_DEBUG((PCIBR_DEBUG_DMADIR, pconn_vhdl,
                            "pcibr_dmatrans_addr:  wanted paddr [0x%x..0x%x], "
                            "xio_port=0x%x, direct32: pci_addr=0x%x\n",
                            paddr, paddr + req_size - 1, xio_addr, pci_addr));

                return (pci_addr);
            }
            if (!pcibr_try_set_device(pcibr_soft, pciio_slot, flags, BRIDGE_DEV_D32_BITS)) {

                pci_addr = PCI32_DIRECT_BASE;
                slotp->bss_d32_flags = flags;
                slotp->bss_d32_base = pci_addr;
                pci_addr |= offset;

                /* Make sure we have an RRB (or two).
                 */
                if (!(pcibr_soft->bs_rrb_fixed & (1 << pciio_slot))) {
                    have_rrbs = pcibr_soft->bs_rrb_valid[pciio_slot][vchan];
                    if (have_rrbs < 2) {
                        if (slotp->bss_device & BRIDGE_DEV_PREF)
                            min_rrbs = 2;
                        else
                            min_rrbs = 1;
                        if (have_rrbs < min_rrbs)
                            do_pcibr_rrb_autoalloc(pcibr_soft, pciio_slot, 
                                                   vchan, min_rrbs - have_rrbs);
                    }
                }
#if HWG_PERF_CHECK
                if (xio_addr != 0x20000000)
#endif
                    PCIBR_DEBUG((PCIBR_DEBUG_DMADIR, pconn_vhdl,
                            "pcibr_dmatrans_addr:  wanted paddr [0x%x..0x%x], "
                            "xio_port=0x%x, direct32: pci_addr=0x%x, "
                            "new flags: 0x%x\n", paddr, paddr + req_size - 1,
                            xio_addr, pci_addr, (uint64_t) flags));

                return (pci_addr);
            }
            /* our flags conflict with Device(x).
             */
            PCIBR_DEBUG((PCIBR_DEBUG_DMADIR, pconn_vhdl,
                    "pcibr_dmatrans_addr:  wanted paddr [0x%x..0x%x], "
                    "xio_port=0x%x, Unable to set direct32 Device(x) bits\n",
                    paddr, paddr + req_size - 1, xio_port));
        }
    }

    PCIBR_DEBUG((PCIBR_DEBUG_DMADIR, pconn_vhdl,
                "pcibr_dmatrans_addr:  wanted paddr [0x%x..0x%x], "
                "xio_port=0x%x, No acceptable PCI address found\n",
                paddr, paddr + req_size - 1, xio_port));

    return 0;
}
3589 
3590 void
3591 pcibr_dmamap_drain(pcibr_dmamap_t map)
3592 {
3593     xtalk_dmamap_drain(map->bd_xtalk);
3594 }
3595 
3596 void
3597 pcibr_dmaaddr_drain(vertex_hdl_t pconn_vhdl,
3598                     paddr_t paddr,
3599                     size_t bytes)
3600 {
3601     pciio_info_t            pciio_info = pciio_info_get(pconn_vhdl);
3602     pcibr_soft_t            pcibr_soft = (pcibr_soft_t) pciio_info_mfast_get(pciio_info);
3603     vertex_hdl_t            xconn_vhdl = pcibr_soft->bs_conn;
3604 
3605     xtalk_dmaaddr_drain(xconn_vhdl, paddr, bytes);
3606 }
3607 
3608 void
3609 pcibr_dmalist_drain(vertex_hdl_t pconn_vhdl,
3610                     alenlist_t list)
3611 {
3612     pciio_info_t            pciio_info = pciio_info_get(pconn_vhdl);
3613     pcibr_soft_t            pcibr_soft = (pcibr_soft_t) pciio_info_mfast_get(pciio_info);
3614     vertex_hdl_t            xconn_vhdl = pcibr_soft->bs_conn;
3615 
3616     xtalk_dmalist_drain(xconn_vhdl, list);
3617 }
3618 
3619 /*
3620  * Get the starting PCIbus address out of the given DMA map.
3621  * This function is supposed to be used by a close friend of PCI bridge
3622  * since it relies on the fact that the starting address of the map is fixed at
3623  * the allocation time in the current implementation of PCI bridge.
3624  */
3625 iopaddr_t
3626 pcibr_dmamap_pciaddr_get(pcibr_dmamap_t pcibr_dmamap)
3627 {
3628     return (pcibr_dmamap->bd_pci_addr);
3629 }
3630 
3631 /* =====================================================================
3632  *    CONFIGURATION MANAGEMENT
3633  */
3634 /*ARGSUSED */
void
pcibr_provider_startup(vertex_hdl_t pcibr)
{
        /* Intentionally empty: this provider needs no startup work. */
}
3639 
3640 /*ARGSUSED */
void
pcibr_provider_shutdown(vertex_hdl_t pcibr)
{
        /* Intentionally empty: this provider needs no shutdown work. */
}
3645 
int
pcibr_reset(vertex_hdl_t conn)
{
        /* Bus reset is unconditionally treated as a fatal programming
         * error in this port; BUG() is reached on every call.
         */
        BUG();
        return -1;      /* only reached if BUG() ever returns */
}
3652 
3653 pciio_endian_t
3654 pcibr_endian_set(vertex_hdl_t pconn_vhdl,
3655                  pciio_endian_t device_end,
3656                  pciio_endian_t desired_end)
3657 {
3658     pciio_info_t            pciio_info = pciio_info_get(pconn_vhdl);
3659     pciio_slot_t            pciio_slot = PCIBR_INFO_SLOT_GET_INT(pciio_info);
3660     pcibr_soft_t            pcibr_soft = (pcibr_soft_t) pciio_info_mfast_get(pciio_info);
3661     bridgereg_t             devreg;
3662     unsigned long           s;
3663 
3664     /*
3665      * Bridge supports hardware swapping; so we can always
3666      * arrange for the caller's desired endianness.
3667      */
3668 
3669     s = pcibr_lock(pcibr_soft);
3670     devreg = pcibr_soft->bs_slot[pciio_slot].bss_device;
3671     if (device_end != desired_end)
3672         devreg |= BRIDGE_DEV_SWAP_BITS;
3673     else
3674         devreg &= ~BRIDGE_DEV_SWAP_BITS;
3675 
3676     /* NOTE- if we ever put SWAP bits
3677      * onto the disabled list, we will
3678      * have to change the logic here.
3679      */
3680     if (pcibr_soft->bs_slot[pciio_slot].bss_device != devreg) {
3681         bridge_t               *bridge = pcibr_soft->bs_base;
3682 
3683         if ( IS_PIC_SOFT(pcibr_soft) ) {
3684                 bridge->b_device[pciio_slot].reg = devreg;
3685                 pcibr_soft->bs_slot[pciio_slot].bss_device = devreg;
3686                 bridge->b_wid_tflush;           /* wait until Bridge PIO complete */
3687         }
3688         else {
3689                 if (io_get_sh_swapper(NASID_GET(bridge))) {
3690                         BRIDGE_REG_SET32((&bridge->b_device[pciio_slot].reg)) = __swab32(devreg);
3691                         pcibr_soft->bs_slot[pciio_slot].bss_device = devreg;
3692                         BRIDGE_REG_GET32((&bridge->b_wid_tflush));/* wait until Bridge PIO complete */
3693                 } else {
3694                         bridge->b_device[pciio_slot].reg = devreg;
3695                         pcibr_soft->bs_slot[pciio_slot].bss_device = devreg;
3696                         bridge->b_wid_tflush;           /* wait until Bridge PIO complete */
3697                 }
3698         }
3699     }
3700     pcibr_unlock(pcibr_soft, s);
3701 
3702     printk("pcibr_endian_set: Device(%d): %x\n", pciio_slot, devreg);
3703     return desired_end;
3704 }
3705 
3706 /* This (re)sets the GBR and REALTIME bits and also keeps track of how
3707  * many sets are outstanding. Reset succeeds only if the number of outstanding
3708  * sets == 1.
3709  */
/*
 * pcibr_priority_bits_set: set or clear the GBR/REAL_TIME bits in
 * the slot's Device(x) register according to device_prio, keeping a
 * per-slot use counter (bss_pri_uctr) so nested HIGH requests are
 * reference-counted.  Returns PRIO_SUCCESS or PRIO_FAIL.
 */
int
pcibr_priority_bits_set(pcibr_soft_t pcibr_soft,
                        pciio_slot_t pciio_slot,
                        pciio_priority_t device_prio)
{
    unsigned long           s;
    int                    *counter;
    bridgereg_t             rtbits = 0;
    bridgereg_t             devreg;
    int                     rc = PRIO_SUCCESS;

    /* in dual-slot configurations, the host and the
     * guest have separate DMA resources, so they
     * have separate requirements for priority bits.
     */

    counter = &(pcibr_soft->bs_slot[pciio_slot].bss_pri_uctr);

    /*
     * Bridge supports PCI notions of LOW and HIGH priority
     * arbitration rings via a "REAL_TIME" bit in the per-device
     * Bridge register. The "GBR" bit controls access to the GBR
     * ring on the xbow. These two bits are (re)set together.
     *
     * XXX- Bug in Rev B Bridge Si:
     * Symptom: Prefetcher starts operating incorrectly. This happens
     * due to corruption of the address storage ram in the prefetcher
     * when a non-real time PCI request is pulled and a real-time one is
     * put in it's place. Workaround: Use only a single arbitration ring
     * on PCI bus. GBR and RR can still be uniquely used per
     * device. NETLIST MERGE DONE, WILL BE FIXED IN REV C.
     */

    /* On Rev B silicon rtbits stays 0, so a HIGH request below fails
     * (PRIO_FAIL) instead of enabling the buggy real-time ring.
     */
    if (pcibr_soft->bs_rev_num != BRIDGE_PART_REV_B)
        rtbits |= BRIDGE_DEV_RT;

    /* NOTE- if we ever put DEV_RT or DEV_GBR on
     * the disabled list, we will have to take
     * it into account here.
     */

    s = pcibr_lock(pcibr_soft);
    devreg = pcibr_soft->bs_slot[pciio_slot].bss_device;
    if (device_prio == PCI_PRIO_HIGH) {
        /* Only the first outstanding HIGH request actually flips the
         * hardware bits; later ones just bump the counter.
         */
        if ((++*counter == 1)) {
            if (rtbits)
                devreg |= rtbits;
            else
                rc = PRIO_FAIL;
        }
    } else if (device_prio == PCI_PRIO_LOW) {
        /* Reset succeeds only when it balances the last outstanding
         * HIGH request (counter drops from 1 to 0).
         */
        if (*counter <= 0)
            rc = PRIO_FAIL;
        else if (--*counter == 0)
            if (rtbits)
                devreg &= ~rtbits;
    }
    if (pcibr_soft->bs_slot[pciio_slot].bss_device != devreg) {
        bridge_t               *bridge = pcibr_soft->bs_base;

        /* Write the new Device(x) value, update the shadow copy, and
         * flush PIO; non-PIC parts may need 32-bit swapped accesses.
         */
        if ( IS_PIC_SOFT(pcibr_soft) ) {
                bridge->b_device[pciio_slot].reg = devreg;
                pcibr_soft->bs_slot[pciio_slot].bss_device = devreg;
                bridge->b_wid_tflush;           /* wait until Bridge PIO complete */
        }
        else {
                if (io_get_sh_swapper(NASID_GET(bridge))) {
                        BRIDGE_REG_SET32((&bridge->b_device[pciio_slot].reg)) = __swab32(devreg);
                        pcibr_soft->bs_slot[pciio_slot].bss_device = devreg;
                        BRIDGE_REG_GET32((&bridge->b_wid_tflush));/* wait until Bridge PIO complete */
                } else {
                        bridge->b_device[pciio_slot].reg = devreg;
                        pcibr_soft->bs_slot[pciio_slot].bss_device = devreg;
                        bridge->b_wid_tflush;           /* wait until Bridge PIO complete */
                }
        }
    }
    pcibr_unlock(pcibr_soft, s);

    return rc;
}
3791 
3792 pciio_priority_t
3793 pcibr_priority_set(vertex_hdl_t pconn_vhdl,
3794                    pciio_priority_t device_prio)
3795 {
3796     pciio_info_t            pciio_info = pciio_info_get(pconn_vhdl);
3797     pciio_slot_t            pciio_slot = PCIBR_INFO_SLOT_GET_INT(pciio_info);
3798     pcibr_soft_t            pcibr_soft = (pcibr_soft_t) pciio_info_mfast_get(pciio_info);
3799 
3800     (void) pcibr_priority_bits_set(pcibr_soft, pciio_slot, device_prio);
3801 
3802     return device_prio;
3803 }
3804 
3805 /*
3806  * Interfaces to allow special (e.g. SGI) drivers to set/clear
3807  * Bridge-specific device flags.  Many flags are modified through
3808  * PCI-generic interfaces; we don't allow them to be directly
3809  * manipulated here.  Only flags that at this point seem pretty
3810  * Bridge-specific can be set through these special interfaces.
3811  * We may add more flags as the need arises, or remove flags and
3812  * create PCI-generic interfaces as the need arises.
3813  *
3814  * Returns 0 on failure, 1 on success
3815  */
3816 int
3817 pcibr_device_flags_set(vertex_hdl_t pconn_vhdl,
3818                        pcibr_device_flags_t flags)
3819 {
3820     pciio_info_t            pciio_info = pciio_info_get(pconn_vhdl);
3821     pciio_slot_t            pciio_slot = PCIBR_INFO_SLOT_GET_INT(pciio_info);
3822     pcibr_soft_t            pcibr_soft = (pcibr_soft_t) pciio_info_mfast_get(pciio_info);
3823     bridgereg_t             set = 0;
3824     bridgereg_t             clr = 0;
3825 
3826     ASSERT((flags & PCIBR_DEVICE_FLAGS) == flags);
3827 
3828     if (flags & PCIBR_WRITE_GATHER)
3829         set |= BRIDGE_DEV_PMU_WRGA_EN;
3830     if (flags & PCIBR_NOWRITE_GATHER)
3831         clr |= BRIDGE_DEV_PMU_WRGA_EN;
3832 
3833     if (flags & PCIBR_WRITE_GATHER)
3834         set |= BRIDGE_DEV_DIR_WRGA_EN;
3835     if (flags & PCIBR_NOWRITE_GATHER)
3836         clr |= BRIDGE_DEV_DIR_WRGA_EN;
3837 
3838     if (flags & PCIBR_PREFETCH)
3839         set |= BRIDGE_DEV_PREF;
3840     if (flags & PCIBR_NOPREFETCH)
3841         clr |= BRIDGE_DEV_PREF;
3842 
3843     if (flags & PCIBR_PRECISE)
3844         set |= BRIDGE_DEV_PRECISE;
3845     if (flags & PCIBR_NOPRECISE)
3846         clr |= BRIDGE_DEV_PRECISE;
3847 
3848     if (flags & PCIBR_BARRIER)
3849         set |= BRIDGE_DEV_BARRIER;
3850     if (flags & PCIBR_NOBARRIER)
3851         clr |= BRIDGE_DEV_BARRIER;
3852 
3853     if (flags & PCIBR_64BIT)
3854         set |= BRIDGE_DEV_DEV_SIZE;
3855     if (flags & PCIBR_NO64BIT)
3856         clr |= BRIDGE_DEV_DEV_SIZE;
3857 
3858     if (set || clr) {
3859         bridgereg_t             devreg;
3860         unsigned long           s;
3861 
3862         s = pcibr_lock(pcibr_soft);
3863         devreg = pcibr_soft->bs_slot[pciio_slot].bss_device;
3864         devreg = (devreg & ~clr) | set;
3865         if (pcibr_soft->bs_slot[pciio_slot].bss_device != devreg) {
3866             bridge_t               *bridge = pcibr_soft->bs_base;
3867 
3868             if ( IS_PIC_SOFT(pcibr_soft) ) {
3869                 bridge->b_device[pciio_slot].reg = devreg;
3870                 pcibr_soft->bs_slot[pciio_slot].bss_device = devreg;
3871                 bridge->b_wid_tflush;   /* wait until Bridge PIO complete */
3872             }
3873             else {
3874                 if (io_get_sh_swapper(NASID_GET(bridge))) {
3875                         BRIDGE_REG_SET32((&bridge->b_device[pciio_slot].reg)) = __swab32(devreg);
3876                         pcibr_soft->bs_slot[pciio_slot].bss_device = devreg;
3877                         BRIDGE_REG_GET32((&bridge->b_wid_tflush));/* wait until Bridge PIO complete */
3878                 } else {
3879                         bridge->b_device[pciio_slot].reg = devreg;
3880                         pcibr_soft->bs_slot[pciio_slot].bss_device = devreg;
3881                         bridge->b_wid_tflush;       /* wait until Bridge PIO complete */
3882                 }
3883             }
3884         }
3885         pcibr_unlock(pcibr_soft, s);
3886         printk("pcibr_device_flags_set: Device(%d): %x\n", pciio_slot, devreg);
3887     }
3888     return (1);
3889 }
3890 
3891 /*
3892  * PIC has 16 RBARs per bus; meaning it can have a total of 16 outstanding 
3893  * split transactions.  If the functions on the bus have requested a total 
3894  * of 16 or less, then we can give them what they requested (ie. 100%). 
 3895  * Otherwise we have to make sure each function can get at least one buffer
3896  * and then divide the rest of the buffers up among the functions as ``A 
3897  * PERCENTAGE OF WHAT THEY REQUESTED'' (i.e. 0% - 100% of a function's
3898  * pcix_type0_status.max_out_split).  This percentage does not include the
3899  * one RBAR that all functions get by default.
3900  */
3901 int
3902 pcibr_pcix_rbars_calc(pcibr_soft_t pcibr_soft)
3903 {
3904     /* 'percent_allowed' is the percentage of requested RBARs that functions
3905      * are allowed, ***less the 1 RBAR that all functions get by default***
3906      */
3907     int percent_allowed; 
3908 
3909     if (pcibr_soft->bs_pcix_num_funcs) {
3910         if (pcibr_soft->bs_pcix_num_funcs > NUM_RBAR) {
3911             printk(KERN_WARNING
3912                 "%lx: Must oversubscribe Read Buffer Attribute Registers"
3913                 "(RBAR).  Bus has %d RBARs but %d funcs need them.\n",
3914                 (unsigned long)pcibr_soft->bs_vhdl, NUM_RBAR, pcibr_soft->bs_pcix_num_funcs);
3915             percent_allowed = 0;
3916         } else {
3917             percent_allowed = (((NUM_RBAR-pcibr_soft->bs_pcix_num_funcs)*100) /
3918                                pcibr_soft->bs_pcix_split_tot);
3919 
3920             /* +1 to percentage to solve rounding errors that occur because
3921              * we're not doing fractional math. (ie. ((3 * 66%) / 100) = 1)
3922              * but should be "2" if doing true fractional math.  NOTE: Since
3923              * the greatest number of outstanding transactions a function 
3924              * can request is 32, this "+1" will always work (i.e. we won't
3925              * accidentally oversubscribe the RBARs because of this rounding
3926              * of the percentage).
3927              */
3928             percent_allowed=(percent_allowed > 100) ? 100 : percent_allowed+1;
3929         }
3930     } else {
3931         return(ENODEV);
3932     }
3933 
3934     return(percent_allowed);
3935 }
3936 
/* Dispatch table exported to the generic pciio layer: the pcibr
 * implementation of every pciio provider operation.  Entry order is the
 * pciio_provider_t layout and must not change; unimplemented hooks are
 * explicit NULLs.
 */
pciio_provider_t        pcibr_provider =
{
    /* PIO mapping services */
    (pciio_piomap_alloc_f *) pcibr_piomap_alloc,
    (pciio_piomap_free_f *) pcibr_piomap_free,
    (pciio_piomap_addr_f *) pcibr_piomap_addr,
    (pciio_piomap_done_f *) pcibr_piomap_done,
    (pciio_piotrans_addr_f *) pcibr_piotrans_addr,
    (pciio_piospace_alloc_f *) pcibr_piospace_alloc,
    (pciio_piospace_free_f *) pcibr_piospace_free,

    /* DMA mapping and drain services */
    (pciio_dmamap_alloc_f *) pcibr_dmamap_alloc,
    (pciio_dmamap_free_f *) pcibr_dmamap_free,
    (pciio_dmamap_addr_f *) pcibr_dmamap_addr,
    (pciio_dmamap_done_f *) pcibr_dmamap_done,
    (pciio_dmatrans_addr_f *) pcibr_dmatrans_addr,
    (pciio_dmamap_drain_f *) pcibr_dmamap_drain,
    (pciio_dmaaddr_drain_f *) pcibr_dmaaddr_drain,
    (pciio_dmalist_drain_f *) pcibr_dmalist_drain,

    /* Interrupt services */
    (pciio_intr_alloc_f *) pcibr_intr_alloc,
    (pciio_intr_free_f *) pcibr_intr_free,
    (pciio_intr_connect_f *) pcibr_intr_connect,
    (pciio_intr_disconnect_f *) pcibr_intr_disconnect,
    (pciio_intr_cpu_get_f *) pcibr_intr_cpu_get,

    /* Provider lifecycle, configuration and misc services;
     * error-handling and driver-callback hooks are not implemented.
     */
    (pciio_provider_startup_f *) pcibr_provider_startup,
    (pciio_provider_shutdown_f *) pcibr_provider_shutdown,
    (pciio_reset_f *) pcibr_reset,
    (pciio_write_gather_flush_f *) pcibr_write_gather_flush,
    (pciio_endian_set_f *) pcibr_endian_set,
    (pciio_priority_set_f *) pcibr_priority_set,
    (pciio_config_get_f *) pcibr_config_get,
    (pciio_config_set_f *) pcibr_config_set,
    (pciio_error_devenable_f *) 0,
    (pciio_error_extract_f *) 0,
    (pciio_driver_reg_callback_f *) 0,
    (pciio_driver_unreg_callback_f *) 0,
    (pciio_device_unregister_f  *) pcibr_device_unregister,
    (pciio_dma_enabled_f                *) pcibr_dma_enabled,
};
3977 
3978 int
3979 pcibr_dma_enabled(vertex_hdl_t pconn_vhdl)
3980 {
3981     pciio_info_t            pciio_info = pciio_info_get(pconn_vhdl);
3982     pcibr_soft_t            pcibr_soft = (pcibr_soft_t) pciio_info_mfast_get(pciio_info);
3983         
3984 
3985     return xtalk_dma_enabled(pcibr_soft->bs_conn);
3986 }
3987 
3988 
3989 /*
3990  * pcibr_debug() is used to print pcibr debug messages to the console.  A
3991  * user enables tracing by setting the following global variables:
3992  *
3993  *    pcibr_debug_mask     -Bitmask of what to trace. see pcibr_private.h
3994  *    pcibr_debug_module   -Module to trace.  'all' means trace all modules
3995  *    pcibr_debug_widget   -Widget to trace. '-1' means trace all widgets
3996  *    pcibr_debug_slot     -Slot to trace.  '-1' means trace all slots
3997  *
3998  * 'type' is the type of debugging that the current PCIBR_DEBUG macro is
3999  * tracing.  'vhdl' (which can be NULL) is the vhdl associated with the
4000  * debug statement.  If there is a 'vhdl' associated with this debug
4001  * statement, it is parsed to obtain the module, widget, and slot.  If the
4002  * globals above match the PCIBR_DEBUG params, then the debug info in the
4003  * parameter 'format' is sent to the console.
4004  */
4005 void
4006 pcibr_debug(uint32_t type, vertex_hdl_t vhdl, char *format, ...)
4007 {
4008     char hwpath[MAXDEVNAME] = "\0";
4009     char copy_of_hwpath[MAXDEVNAME];
4010     char *module = "all";
4011     short widget = -1;
4012     short slot = -1;
4013     va_list ap;
4014 
4015     if (pcibr_debug_mask & type) {
4016         if (vhdl) {
4017             if (!hwgraph_vertex_name_get(vhdl, hwpath, MAXDEVNAME)) {
4018                 char *cp;
4019 
4020                 if (strcmp(module, pcibr_debug_module)) {
4021                     /* use a copy */
4022                     (void)strcpy(copy_of_hwpath, hwpath);
4023                     cp = strstr(copy_of_hwpath, "/module/");
4024                     if (cp) {
4025                         cp += strlen("/module");
4026                         module = strsep(&cp, "/");
4027                     }
4028                 }
4029                 if (pcibr_debug_widget != -1) {
4030                     cp = strstr(hwpath, "/xtalk/");
4031                     if (cp) {
4032                         cp += strlen("/xtalk/");
4033                         widget = simple_strtoul(cp, NULL, 0);
4034                     }
4035                 }
4036                 if (pcibr_debug_slot != -1) {
4037                     cp = strstr(hwpath, "/pci/");
4038                     if (cp) {
4039                         cp += strlen("/pci/");
4040                         slot = simple_strtoul(cp, NULL, 0);
4041                     }
4042                 }
4043             }
4044         }
4045         if ((vhdl == NULL) ||
4046             (!strcmp(module, pcibr_debug_module) &&
4047              (widget == pcibr_debug_widget) &&
4048              (slot == pcibr_debug_slot))) {
4049 #ifdef LATER
4050             printk("PCIBR_DEBUG<%d>\t: %s :", cpuid(), hwpath);
4051 #else
4052             printk("PCIBR_DEBUG\t: %s :", hwpath);
4053 #endif
4054             /*
4055              * Kernel printk translates to this 3 line sequence.
4056              * Since we have a variable length argument list, we
4057              * need to call printk this way rather than directly
4058              */
4059             {
4060                 char buffer[500];
4061 
4062                 va_start(ap, format);
4063                 vsnprintf(buffer, 500, format, ap);
4064                 va_end(ap);
4065                 buffer[499] = (char)0;  /* just to be safe */
4066                 printk("%s", buffer);
4067             }
4068         }
4069     }
4070 }
4071 
4072 int
4073 isIO9(nasid_t nasid) {
4074         lboard_t *brd = (lboard_t *)KL_CONFIG_INFO(nasid);
4075 
4076         while (brd) {
4077                 if (brd->brd_flags & LOCAL_MASTER_IO6) {
4078                         return 1;
4079                 }
4080                 brd = KLCF_NEXT(brd);
4081         }
4082         /* if it's dual ported, check the peer also */
4083         nasid = NODEPDA(NASID_TO_COMPACT_NODEID(nasid))->xbow_peer;
4084         if (nasid < 0) return 0;
4085         brd = (lboard_t *)KL_CONFIG_INFO(nasid);
4086         while (brd) {
4087                 if (brd->brd_flags & LOCAL_MASTER_IO6) {
4088                         return 1;
4089                 }
4090                 brd = KLCF_NEXT(brd);
4091         }
4092         return 0;
4093 }
4094 

~ [ source navigation ] ~ [ diff markup ] ~ [ identifier search ] ~

kernel.org | git.kernel.org | LWN.net | Project Home | Wiki (Japanese) | Wiki (English) | SVN repository | Mail admin

Linux® is a registered trademark of Linus Torvalds in the United States and other countries.
TOMOYO® is a registered trademark of NTT DATA CORPORATION.

osdn.jp