~ [ source navigation ] ~ [ diff markup ] ~ [ identifier search ] ~

TOMOYO Linux Cross Reference
Linux/include/rdma/ib_verbs.h

Version: ~ [ linux-5.13-rc2 ] ~ [ linux-5.12.4 ] ~ [ linux-5.11.21 ] ~ [ linux-5.10.37 ] ~ [ linux-5.9.16 ] ~ [ linux-5.8.18 ] ~ [ linux-5.7.19 ] ~ [ linux-5.6.19 ] ~ [ linux-5.5.19 ] ~ [ linux-5.4.119 ] ~ [ linux-5.3.18 ] ~ [ linux-5.2.21 ] ~ [ linux-5.1.21 ] ~ [ linux-5.0.21 ] ~ [ linux-4.20.17 ] ~ [ linux-4.19.190 ] ~ [ linux-4.18.20 ] ~ [ linux-4.17.19 ] ~ [ linux-4.16.18 ] ~ [ linux-4.15.18 ] ~ [ linux-4.14.232 ] ~ [ linux-4.13.16 ] ~ [ linux-4.12.14 ] ~ [ linux-4.11.12 ] ~ [ linux-4.10.17 ] ~ [ linux-4.9.268 ] ~ [ linux-4.8.17 ] ~ [ linux-4.7.10 ] ~ [ linux-4.6.7 ] ~ [ linux-4.5.7 ] ~ [ linux-4.4.268 ] ~ [ linux-4.3.6 ] ~ [ linux-4.2.8 ] ~ [ linux-4.1.52 ] ~ [ linux-4.0.9 ] ~ [ linux-3.18.140 ] ~ [ linux-3.16.85 ] ~ [ linux-3.14.79 ] ~ [ linux-3.12.74 ] ~ [ linux-3.10.108 ] ~ [ linux-2.6.32.71 ] ~ [ linux-2.6.0 ] ~ [ linux-2.4.37.11 ] ~ [ unix-v6-master ] ~ [ ccs-tools-1.8.5 ] ~ [ policy-sample ] ~
Architecture: ~ [ i386 ] ~ [ alpha ] ~ [ m68k ] ~ [ mips ] ~ [ ppc ] ~ [ sparc ] ~ [ sparc64 ] ~

  1 /*
  2  * Copyright (c) 2004 Mellanox Technologies Ltd.  All rights reserved.
  3  * Copyright (c) 2004 Infinicon Corporation.  All rights reserved.
  4  * Copyright (c) 2004 Intel Corporation.  All rights reserved.
  5  * Copyright (c) 2004 Topspin Corporation.  All rights reserved.
  6  * Copyright (c) 2004 Voltaire Corporation.  All rights reserved.
  7  * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
  8  * Copyright (c) 2005, 2006, 2007 Cisco Systems.  All rights reserved.
  9  *
 10  * This software is available to you under a choice of one of two
 11  * licenses.  You may choose to be licensed under the terms of the GNU
 12  * General Public License (GPL) Version 2, available from the file
 13  * COPYING in the main directory of this source tree, or the
 14  * OpenIB.org BSD license below:
 15  *
 16  *     Redistribution and use in source and binary forms, with or
 17  *     without modification, are permitted provided that the following
 18  *     conditions are met:
 19  *
 20  *      - Redistributions of source code must retain the above
 21  *        copyright notice, this list of conditions and the following
 22  *        disclaimer.
 23  *
 24  *      - Redistributions in binary form must reproduce the above
 25  *        copyright notice, this list of conditions and the following
 26  *        disclaimer in the documentation and/or other materials
 27  *        provided with the distribution.
 28  *
 29  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 30  * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 31  * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 32  * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 33  * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 34  * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 35  * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 36  * SOFTWARE.
 37  */
 38 
 39 #if !defined(IB_VERBS_H)
 40 #define IB_VERBS_H
 41 
 42 #include <linux/types.h>
 43 #include <linux/device.h>
 44 #include <linux/mm.h>
 45 #include <linux/dma-mapping.h>
 46 #include <linux/kref.h>
 47 #include <linux/list.h>
 48 #include <linux/rwsem.h>
 49 #include <linux/scatterlist.h>
 50 #include <linux/workqueue.h>
 51 #include <linux/socket.h>
 52 #include <linux/irq_poll.h>
 53 #include <uapi/linux/if_ether.h>
 54 #include <net/ipv6.h>
 55 #include <net/ip.h>
 56 #include <linux/string.h>
 57 #include <linux/slab.h>
 58 #include <linux/netdevice.h>
 59 
 60 #include <linux/if_link.h>
 61 #include <linux/atomic.h>
 62 #include <linux/mmu_notifier.h>
 63 #include <linux/uaccess.h>
 64 #include <linux/cgroup_rdma.h>
 65 #include <uapi/rdma/ib_user_verbs.h>
 66 
 67 extern struct workqueue_struct *ib_wq;
 68 extern struct workqueue_struct *ib_comp_wq;
 69 
 70 union ib_gid {
 71         u8      raw[16];
 72         struct {
 73                 __be64  subnet_prefix;
 74                 __be64  interface_id;
 75         } global;
 76 };
 77 
 78 extern union ib_gid zgid;
 79 
 80 enum ib_gid_type {
 81         /* If link layer is Ethernet, this is RoCE V1 */
 82         IB_GID_TYPE_IB        = 0,
 83         IB_GID_TYPE_ROCE      = 0,
 84         IB_GID_TYPE_ROCE_UDP_ENCAP = 1,
 85         IB_GID_TYPE_SIZE
 86 };
 87 
 88 #define ROCE_V2_UDP_DPORT      4791
 89 struct ib_gid_attr {
 90         enum ib_gid_type        gid_type;
 91         struct net_device       *ndev;
 92 };
 93 
 94 enum rdma_node_type {
 95         /* IB values map to NodeInfo:NodeType. */
 96         RDMA_NODE_IB_CA         = 1,
 97         RDMA_NODE_IB_SWITCH,
 98         RDMA_NODE_IB_ROUTER,
 99         RDMA_NODE_RNIC,
100         RDMA_NODE_USNIC,
101         RDMA_NODE_USNIC_UDP,
102 };
103 
104 enum {
105         /* set the local administered indication */
106         IB_SA_WELL_KNOWN_GUID   = BIT_ULL(57) | 2,
107 };
108 
109 enum rdma_transport_type {
110         RDMA_TRANSPORT_IB,
111         RDMA_TRANSPORT_IWARP,
112         RDMA_TRANSPORT_USNIC,
113         RDMA_TRANSPORT_USNIC_UDP
114 };
115 
116 enum rdma_protocol_type {
117         RDMA_PROTOCOL_IB,
118         RDMA_PROTOCOL_IBOE,
119         RDMA_PROTOCOL_IWARP,
120         RDMA_PROTOCOL_USNIC_UDP
121 };
122 
123 __attribute_const__ enum rdma_transport_type
124 rdma_node_get_transport(enum rdma_node_type node_type);
125 
126 enum rdma_network_type {
127         RDMA_NETWORK_IB,
128         RDMA_NETWORK_ROCE_V1 = RDMA_NETWORK_IB,
129         RDMA_NETWORK_IPV4,
130         RDMA_NETWORK_IPV6
131 };
132 
133 static inline enum ib_gid_type ib_network_to_gid_type(enum rdma_network_type network_type)
134 {
135         if (network_type == RDMA_NETWORK_IPV4 ||
136             network_type == RDMA_NETWORK_IPV6)
137                 return IB_GID_TYPE_ROCE_UDP_ENCAP;
138 
139         /* IB_GID_TYPE_IB same as RDMA_NETWORK_ROCE_V1 */
140         return IB_GID_TYPE_IB;
141 }
142 
143 static inline enum rdma_network_type ib_gid_to_network_type(enum ib_gid_type gid_type,
144                                                             union ib_gid *gid)
145 {
146         if (gid_type == IB_GID_TYPE_IB)
147                 return RDMA_NETWORK_IB;
148 
149         if (ipv6_addr_v4mapped((struct in6_addr *)gid))
150                 return RDMA_NETWORK_IPV4;
151         else
152                 return RDMA_NETWORK_IPV6;
153 }
154 
155 enum rdma_link_layer {
156         IB_LINK_LAYER_UNSPECIFIED,
157         IB_LINK_LAYER_INFINIBAND,
158         IB_LINK_LAYER_ETHERNET,
159 };
160 
161 enum ib_device_cap_flags {
162         IB_DEVICE_RESIZE_MAX_WR                 = (1 << 0),
163         IB_DEVICE_BAD_PKEY_CNTR                 = (1 << 1),
164         IB_DEVICE_BAD_QKEY_CNTR                 = (1 << 2),
165         IB_DEVICE_RAW_MULTI                     = (1 << 3),
166         IB_DEVICE_AUTO_PATH_MIG                 = (1 << 4),
167         IB_DEVICE_CHANGE_PHY_PORT               = (1 << 5),
168         IB_DEVICE_UD_AV_PORT_ENFORCE            = (1 << 6),
169         IB_DEVICE_CURR_QP_STATE_MOD             = (1 << 7),
170         IB_DEVICE_SHUTDOWN_PORT                 = (1 << 8),
171         IB_DEVICE_INIT_TYPE                     = (1 << 9),
172         IB_DEVICE_PORT_ACTIVE_EVENT             = (1 << 10),
173         IB_DEVICE_SYS_IMAGE_GUID                = (1 << 11),
174         IB_DEVICE_RC_RNR_NAK_GEN                = (1 << 12),
175         IB_DEVICE_SRQ_RESIZE                    = (1 << 13),
176         IB_DEVICE_N_NOTIFY_CQ                   = (1 << 14),
177 
178         /*
179          * This device supports a per-device lkey or stag that can be
180          * used without performing a memory registration for the local
181          * memory.  Note that ULPs should never check this flag, but
182          * instead of use the local_dma_lkey flag in the ib_pd structure,
183          * which will always contain a usable lkey.
184          */
185         IB_DEVICE_LOCAL_DMA_LKEY                = (1 << 15),
186         IB_DEVICE_RESERVED /* old SEND_W_INV */ = (1 << 16),
187         IB_DEVICE_MEM_WINDOW                    = (1 << 17),
188         /*
189          * Devices should set IB_DEVICE_UD_IP_SUM if they support
190          * insertion of UDP and TCP checksum on outgoing UD IPoIB
191          * messages and can verify the validity of checksum for
192          * incoming messages.  Setting this flag implies that the
193          * IPoIB driver may set NETIF_F_IP_CSUM for datagram mode.
194          */
195         IB_DEVICE_UD_IP_CSUM                    = (1 << 18),
196         IB_DEVICE_UD_TSO                        = (1 << 19),
197         IB_DEVICE_XRC                           = (1 << 20),
198 
199         /*
200          * This device supports the IB "base memory management extension",
201          * which includes support for fast registrations (IB_WR_REG_MR,
202          * IB_WR_LOCAL_INV and IB_WR_SEND_WITH_INV verbs).  This flag should
203          * also be set by any iWarp device which must support FRs to comply
204          * to the iWarp verbs spec.  iWarp devices also support the
205          * IB_WR_RDMA_READ_WITH_INV verb for RDMA READs that invalidate the
206          * stag.
207          */
208         IB_DEVICE_MEM_MGT_EXTENSIONS            = (1 << 21),
209         IB_DEVICE_BLOCK_MULTICAST_LOOPBACK      = (1 << 22),
210         IB_DEVICE_MEM_WINDOW_TYPE_2A            = (1 << 23),
211         IB_DEVICE_MEM_WINDOW_TYPE_2B            = (1 << 24),
212         IB_DEVICE_RC_IP_CSUM                    = (1 << 25),
213         /* Deprecated. Please use IB_RAW_PACKET_CAP_IP_CSUM. */
214         IB_DEVICE_RAW_IP_CSUM                   = (1 << 26),
215         /*
216          * Devices should set IB_DEVICE_CROSS_CHANNEL if they
217          * support execution of WQEs that involve synchronization
218          * of I/O operations with single completion queue managed
219          * by hardware.
220          */
221         IB_DEVICE_CROSS_CHANNEL         = (1 << 27),
222         IB_DEVICE_MANAGED_FLOW_STEERING         = (1 << 29),
223         IB_DEVICE_SIGNATURE_HANDOVER            = (1 << 30),
224         IB_DEVICE_ON_DEMAND_PAGING              = (1ULL << 31),
225         IB_DEVICE_SG_GAPS_REG                   = (1ULL << 32),
226         IB_DEVICE_VIRTUAL_FUNCTION              = (1ULL << 33),
227         /* Deprecated. Please use IB_RAW_PACKET_CAP_SCATTER_FCS. */
228         IB_DEVICE_RAW_SCATTER_FCS               = (1ULL << 34),
229         IB_DEVICE_RDMA_NETDEV_OPA_VNIC          = (1ULL << 35),
230 };
231 
232 enum ib_signature_prot_cap {
233         IB_PROT_T10DIF_TYPE_1 = 1,
234         IB_PROT_T10DIF_TYPE_2 = 1 << 1,
235         IB_PROT_T10DIF_TYPE_3 = 1 << 2,
236 };
237 
238 enum ib_signature_guard_cap {
239         IB_GUARD_T10DIF_CRC     = 1,
240         IB_GUARD_T10DIF_CSUM    = 1 << 1,
241 };
242 
243 enum ib_atomic_cap {
244         IB_ATOMIC_NONE,
245         IB_ATOMIC_HCA,
246         IB_ATOMIC_GLOB
247 };
248 
249 enum ib_odp_general_cap_bits {
250         IB_ODP_SUPPORT          = 1 << 0,
251         IB_ODP_SUPPORT_IMPLICIT = 1 << 1,
252 };
253 
254 enum ib_odp_transport_cap_bits {
255         IB_ODP_SUPPORT_SEND     = 1 << 0,
256         IB_ODP_SUPPORT_RECV     = 1 << 1,
257         IB_ODP_SUPPORT_WRITE    = 1 << 2,
258         IB_ODP_SUPPORT_READ     = 1 << 3,
259         IB_ODP_SUPPORT_ATOMIC   = 1 << 4,
260 };
261 
262 struct ib_odp_caps {
263         uint64_t general_caps;
264         struct {
265                 uint32_t  rc_odp_caps;
266                 uint32_t  uc_odp_caps;
267                 uint32_t  ud_odp_caps;
268         } per_transport_caps;
269 };
270 
271 struct ib_rss_caps {
272         /* Corresponding bit will be set if qp type from
273          * 'enum ib_qp_type' is supported, e.g.
274          * supported_qpts |= 1 << IB_QPT_UD
275          */
276         u32 supported_qpts;
277         u32 max_rwq_indirection_tables;
278         u32 max_rwq_indirection_table_size;
279 };
280 
281 enum ib_cq_creation_flags {
282         IB_CQ_FLAGS_TIMESTAMP_COMPLETION   = 1 << 0,
283         IB_CQ_FLAGS_IGNORE_OVERRUN         = 1 << 1,
284 };
285 
286 struct ib_cq_init_attr {
287         unsigned int    cqe;
288         int             comp_vector;
289         u32             flags;
290 };
291 
292 struct ib_device_attr {
293         u64                     fw_ver;
294         __be64                  sys_image_guid;
295         u64                     max_mr_size;
296         u64                     page_size_cap;
297         u32                     vendor_id;
298         u32                     vendor_part_id;
299         u32                     hw_ver;
300         int                     max_qp;
301         int                     max_qp_wr;
302         u64                     device_cap_flags;
303         int                     max_sge;
304         int                     max_sge_rd;
305         int                     max_cq;
306         int                     max_cqe;
307         int                     max_mr;
308         int                     max_pd;
309         int                     max_qp_rd_atom;
310         int                     max_ee_rd_atom;
311         int                     max_res_rd_atom;
312         int                     max_qp_init_rd_atom;
313         int                     max_ee_init_rd_atom;
314         enum ib_atomic_cap      atomic_cap;
315         enum ib_atomic_cap      masked_atomic_cap;
316         int                     max_ee;
317         int                     max_rdd;
318         int                     max_mw;
319         int                     max_raw_ipv6_qp;
320         int                     max_raw_ethy_qp;
321         int                     max_mcast_grp;
322         int                     max_mcast_qp_attach;
323         int                     max_total_mcast_qp_attach;
324         int                     max_ah;
325         int                     max_fmr;
326         int                     max_map_per_fmr;
327         int                     max_srq;
328         int                     max_srq_wr;
329         int                     max_srq_sge;
330         unsigned int            max_fast_reg_page_list_len;
331         u16                     max_pkeys;
332         u8                      local_ca_ack_delay;
333         int                     sig_prot_cap;
334         int                     sig_guard_cap;
335         struct ib_odp_caps      odp_caps;
336         uint64_t                timestamp_mask;
337         uint64_t                hca_core_clock; /* in KHZ */
338         struct ib_rss_caps      rss_caps;
339         u32                     max_wq_type_rq;
340         u32                     raw_packet_caps; /* Use ib_raw_packet_caps enum */
341 };
342 
343 enum ib_mtu {
344         IB_MTU_256  = 1,
345         IB_MTU_512  = 2,
346         IB_MTU_1024 = 3,
347         IB_MTU_2048 = 4,
348         IB_MTU_4096 = 5
349 };
350 
351 static inline int ib_mtu_enum_to_int(enum ib_mtu mtu)
352 {
353         switch (mtu) {
354         case IB_MTU_256:  return  256;
355         case IB_MTU_512:  return  512;
356         case IB_MTU_1024: return 1024;
357         case IB_MTU_2048: return 2048;
358         case IB_MTU_4096: return 4096;
359         default:          return -1;
360         }
361 }
362 
363 static inline enum ib_mtu ib_mtu_int_to_enum(int mtu)
364 {
365         if (mtu >= 4096)
366                 return IB_MTU_4096;
367         else if (mtu >= 2048)
368                 return IB_MTU_2048;
369         else if (mtu >= 1024)
370                 return IB_MTU_1024;
371         else if (mtu >= 512)
372                 return IB_MTU_512;
373         else
374                 return IB_MTU_256;
375 }
376 
377 enum ib_port_state {
378         IB_PORT_NOP             = 0,
379         IB_PORT_DOWN            = 1,
380         IB_PORT_INIT            = 2,
381         IB_PORT_ARMED           = 3,
382         IB_PORT_ACTIVE          = 4,
383         IB_PORT_ACTIVE_DEFER    = 5
384 };
385 
386 enum ib_port_cap_flags {
387         IB_PORT_SM                              = 1 <<  1,
388         IB_PORT_NOTICE_SUP                      = 1 <<  2,
389         IB_PORT_TRAP_SUP                        = 1 <<  3,
390         IB_PORT_OPT_IPD_SUP                     = 1 <<  4,
391         IB_PORT_AUTO_MIGR_SUP                   = 1 <<  5,
392         IB_PORT_SL_MAP_SUP                      = 1 <<  6,
393         IB_PORT_MKEY_NVRAM                      = 1 <<  7,
394         IB_PORT_PKEY_NVRAM                      = 1 <<  8,
395         IB_PORT_LED_INFO_SUP                    = 1 <<  9,
396         IB_PORT_SM_DISABLED                     = 1 << 10,
397         IB_PORT_SYS_IMAGE_GUID_SUP              = 1 << 11,
398         IB_PORT_PKEY_SW_EXT_PORT_TRAP_SUP       = 1 << 12,
399         IB_PORT_EXTENDED_SPEEDS_SUP             = 1 << 14,
400         IB_PORT_CM_SUP                          = 1 << 16,
401         IB_PORT_SNMP_TUNNEL_SUP                 = 1 << 17,
402         IB_PORT_REINIT_SUP                      = 1 << 18,
403         IB_PORT_DEVICE_MGMT_SUP                 = 1 << 19,
404         IB_PORT_VENDOR_CLASS_SUP                = 1 << 20,
405         IB_PORT_DR_NOTICE_SUP                   = 1 << 21,
406         IB_PORT_CAP_MASK_NOTICE_SUP             = 1 << 22,
407         IB_PORT_BOOT_MGMT_SUP                   = 1 << 23,
408         IB_PORT_LINK_LATENCY_SUP                = 1 << 24,
409         IB_PORT_CLIENT_REG_SUP                  = 1 << 25,
410         IB_PORT_IP_BASED_GIDS                   = 1 << 26,
411 };
412 
413 enum ib_port_width {
414         IB_WIDTH_1X     = 1,
415         IB_WIDTH_4X     = 2,
416         IB_WIDTH_8X     = 4,
417         IB_WIDTH_12X    = 8
418 };
419 
420 static inline int ib_width_enum_to_int(enum ib_port_width width)
421 {
422         switch (width) {
423         case IB_WIDTH_1X:  return  1;
424         case IB_WIDTH_4X:  return  4;
425         case IB_WIDTH_8X:  return  8;
426         case IB_WIDTH_12X: return 12;
427         default:          return -1;
428         }
429 }
430 
431 enum ib_port_speed {
432         IB_SPEED_SDR    = 1,
433         IB_SPEED_DDR    = 2,
434         IB_SPEED_QDR    = 4,
435         IB_SPEED_FDR10  = 8,
436         IB_SPEED_FDR    = 16,
437         IB_SPEED_EDR    = 32,
438         IB_SPEED_HDR    = 64
439 };
440 
441 /**
442  * struct rdma_hw_stats
443  * @timestamp - Used by the core code to track when the last update was
444  * @lifespan - Used by the core code to determine how old the counters
445  *   should be before being updated again.  Stored in jiffies, defaults
446  *   to 10 milliseconds, drivers can override the default be specifying
447  *   their own value during their allocation routine.
448  * @name - Array of pointers to static names used for the counters in
449  *   directory.
450  * @num_counters - How many hardware counters there are.  If name is
451  *   shorter than this number, a kernel oops will result.  Driver authors
452  *   are encouraged to leave BUILD_BUG_ON(ARRAY_SIZE(@name) < num_counters)
453  *   in their code to prevent this.
454  * @value - Array of u64 counters that are accessed by the sysfs code and
455  *   filled in by the drivers get_stats routine
456  */
457 struct rdma_hw_stats {
458         unsigned long   timestamp;
459         unsigned long   lifespan;
460         const char * const *names;
461         int             num_counters;
462         u64             value[];
463 };
464 
465 #define RDMA_HW_STATS_DEFAULT_LIFESPAN 10
466 /**
467  * rdma_alloc_hw_stats_struct - Helper function to allocate dynamic struct
468  *   for drivers.
469  * @names - Array of static const char *
470  * @num_counters - How many elements in array
471  * @lifespan - How many milliseconds between updates
472  */
473 static inline struct rdma_hw_stats *rdma_alloc_hw_stats_struct(
474                 const char * const *names, int num_counters,
475                 unsigned long lifespan)
476 {
477         struct rdma_hw_stats *stats;
478 
479         stats = kzalloc(sizeof(*stats) + num_counters * sizeof(u64),
480                         GFP_KERNEL);
481         if (!stats)
482                 return NULL;
483         stats->names = names;
484         stats->num_counters = num_counters;
485         stats->lifespan = msecs_to_jiffies(lifespan);
486 
487         return stats;
488 }
489 
490 
491 /* Define bits for the various functionality this port needs to be supported by
492  * the core.
493  */
494 /* Management                           0x00000FFF */
495 #define RDMA_CORE_CAP_IB_MAD            0x00000001
496 #define RDMA_CORE_CAP_IB_SMI            0x00000002
497 #define RDMA_CORE_CAP_IB_CM             0x00000004
498 #define RDMA_CORE_CAP_IW_CM             0x00000008
499 #define RDMA_CORE_CAP_IB_SA             0x00000010
500 #define RDMA_CORE_CAP_OPA_MAD           0x00000020
501 
502 /* Address format                       0x000FF000 */
503 #define RDMA_CORE_CAP_AF_IB             0x00001000
504 #define RDMA_CORE_CAP_ETH_AH            0x00002000
505 #define RDMA_CORE_CAP_OPA_AH            0x00004000
506 
507 /* Protocol                             0xFFF00000 */
508 #define RDMA_CORE_CAP_PROT_IB           0x00100000
509 #define RDMA_CORE_CAP_PROT_ROCE         0x00200000
510 #define RDMA_CORE_CAP_PROT_IWARP        0x00400000
511 #define RDMA_CORE_CAP_PROT_ROCE_UDP_ENCAP 0x00800000
512 #define RDMA_CORE_CAP_PROT_RAW_PACKET   0x01000000
513 #define RDMA_CORE_CAP_PROT_USNIC        0x02000000
514 
515 #define RDMA_CORE_PORT_IBA_IB          (RDMA_CORE_CAP_PROT_IB  \
516                                         | RDMA_CORE_CAP_IB_MAD \
517                                         | RDMA_CORE_CAP_IB_SMI \
518                                         | RDMA_CORE_CAP_IB_CM  \
519                                         | RDMA_CORE_CAP_IB_SA  \
520                                         | RDMA_CORE_CAP_AF_IB)
521 #define RDMA_CORE_PORT_IBA_ROCE        (RDMA_CORE_CAP_PROT_ROCE \
522                                         | RDMA_CORE_CAP_IB_MAD  \
523                                         | RDMA_CORE_CAP_IB_CM   \
524                                         | RDMA_CORE_CAP_AF_IB   \
525                                         | RDMA_CORE_CAP_ETH_AH)
526 #define RDMA_CORE_PORT_IBA_ROCE_UDP_ENCAP                       \
527                                         (RDMA_CORE_CAP_PROT_ROCE_UDP_ENCAP \
528                                         | RDMA_CORE_CAP_IB_MAD  \
529                                         | RDMA_CORE_CAP_IB_CM   \
530                                         | RDMA_CORE_CAP_AF_IB   \
531                                         | RDMA_CORE_CAP_ETH_AH)
532 #define RDMA_CORE_PORT_IWARP           (RDMA_CORE_CAP_PROT_IWARP \
533                                         | RDMA_CORE_CAP_IW_CM)
534 #define RDMA_CORE_PORT_INTEL_OPA       (RDMA_CORE_PORT_IBA_IB  \
535                                         | RDMA_CORE_CAP_OPA_MAD)
536 
537 #define RDMA_CORE_PORT_RAW_PACKET       (RDMA_CORE_CAP_PROT_RAW_PACKET)
538 
539 #define RDMA_CORE_PORT_USNIC            (RDMA_CORE_CAP_PROT_USNIC)
540 
541 struct ib_port_attr {
542         u64                     subnet_prefix;
543         enum ib_port_state      state;
544         enum ib_mtu             max_mtu;
545         enum ib_mtu             active_mtu;
546         int                     gid_tbl_len;
547         u32                     port_cap_flags;
548         u32                     max_msg_sz;
549         u32                     bad_pkey_cntr;
550         u32                     qkey_viol_cntr;
551         u16                     pkey_tbl_len;
552         u16                     lid;
553         u16                     sm_lid;
554         u8                      lmc;
555         u8                      max_vl_num;
556         u8                      sm_sl;
557         u8                      subnet_timeout;
558         u8                      init_type_reply;
559         u8                      active_width;
560         u8                      active_speed;
561         u8                      phys_state;
562         bool                    grh_required;
563 };
564 
565 enum ib_device_modify_flags {
566         IB_DEVICE_MODIFY_SYS_IMAGE_GUID = 1 << 0,
567         IB_DEVICE_MODIFY_NODE_DESC      = 1 << 1
568 };
569 
570 #define IB_DEVICE_NODE_DESC_MAX 64
571 
572 struct ib_device_modify {
573         u64     sys_image_guid;
574         char    node_desc[IB_DEVICE_NODE_DESC_MAX];
575 };
576 
577 enum ib_port_modify_flags {
578         IB_PORT_SHUTDOWN                = 1,
579         IB_PORT_INIT_TYPE               = (1<<2),
580         IB_PORT_RESET_QKEY_CNTR         = (1<<3)
581 };
582 
583 struct ib_port_modify {
584         u32     set_port_cap_mask;
585         u32     clr_port_cap_mask;
586         u8      init_type;
587 };
588 
589 enum ib_event_type {
590         IB_EVENT_CQ_ERR,
591         IB_EVENT_QP_FATAL,
592         IB_EVENT_QP_REQ_ERR,
593         IB_EVENT_QP_ACCESS_ERR,
594         IB_EVENT_COMM_EST,
595         IB_EVENT_SQ_DRAINED,
596         IB_EVENT_PATH_MIG,
597         IB_EVENT_PATH_MIG_ERR,
598         IB_EVENT_DEVICE_FATAL,
599         IB_EVENT_PORT_ACTIVE,
600         IB_EVENT_PORT_ERR,
601         IB_EVENT_LID_CHANGE,
602         IB_EVENT_PKEY_CHANGE,
603         IB_EVENT_SM_CHANGE,
604         IB_EVENT_SRQ_ERR,
605         IB_EVENT_SRQ_LIMIT_REACHED,
606         IB_EVENT_QP_LAST_WQE_REACHED,
607         IB_EVENT_CLIENT_REREGISTER,
608         IB_EVENT_GID_CHANGE,
609         IB_EVENT_WQ_FATAL,
610 };
611 
612 const char *__attribute_const__ ib_event_msg(enum ib_event_type event);
613 
614 struct ib_event {
615         struct ib_device        *device;
616         union {
617                 struct ib_cq    *cq;
618                 struct ib_qp    *qp;
619                 struct ib_srq   *srq;
620                 struct ib_wq    *wq;
621                 u8              port_num;
622         } element;
623         enum ib_event_type      event;
624 };
625 
626 struct ib_event_handler {
627         struct ib_device *device;
628         void            (*handler)(struct ib_event_handler *, struct ib_event *);
629         struct list_head  list;
630 };
631 
632 #define INIT_IB_EVENT_HANDLER(_ptr, _device, _handler)          \
633         do {                                                    \
634                 (_ptr)->device  = _device;                      \
635                 (_ptr)->handler = _handler;                     \
636                 INIT_LIST_HEAD(&(_ptr)->list);                  \
637         } while (0)
638 
639 struct ib_global_route {
640         union ib_gid    dgid;
641         u32             flow_label;
642         u8              sgid_index;
643         u8              hop_limit;
644         u8              traffic_class;
645 };
646 
647 struct ib_grh {
648         __be32          version_tclass_flow;
649         __be16          paylen;
650         u8              next_hdr;
651         u8              hop_limit;
652         union ib_gid    sgid;
653         union ib_gid    dgid;
654 };
655 
656 union rdma_network_hdr {
657         struct ib_grh ibgrh;
658         struct {
659                 /* The IB spec states that if it's IPv4, the header
660                  * is located in the last 20 bytes of the header.
661                  */
662                 u8              reserved[20];
663                 struct iphdr    roce4grh;
664         };
665 };
666 
667 enum {
668         IB_MULTICAST_QPN = 0xffffff
669 };
670 
671 #define IB_LID_PERMISSIVE       cpu_to_be16(0xFFFF)
672 #define IB_MULTICAST_LID_BASE   cpu_to_be16(0xC000)
673 
674 enum ib_ah_flags {
675         IB_AH_GRH       = 1
676 };
677 
678 enum ib_rate {
679         IB_RATE_PORT_CURRENT = 0,
680         IB_RATE_2_5_GBPS = 2,
681         IB_RATE_5_GBPS   = 5,
682         IB_RATE_10_GBPS  = 3,
683         IB_RATE_20_GBPS  = 6,
684         IB_RATE_30_GBPS  = 4,
685         IB_RATE_40_GBPS  = 7,
686         IB_RATE_60_GBPS  = 8,
687         IB_RATE_80_GBPS  = 9,
688         IB_RATE_120_GBPS = 10,
689         IB_RATE_14_GBPS  = 11,
690         IB_RATE_56_GBPS  = 12,
691         IB_RATE_112_GBPS = 13,
692         IB_RATE_168_GBPS = 14,
693         IB_RATE_25_GBPS  = 15,
694         IB_RATE_100_GBPS = 16,
695         IB_RATE_200_GBPS = 17,
696         IB_RATE_300_GBPS = 18
697 };
698 
699 /**
700  * ib_rate_to_mult - Convert the IB rate enum to a multiple of the
701  * base rate of 2.5 Gbit/sec.  For example, IB_RATE_5_GBPS will be
702  * converted to 2, since 5 Gbit/sec is 2 * 2.5 Gbit/sec.
703  * @rate: rate to convert.
704  */
705 __attribute_const__ int ib_rate_to_mult(enum ib_rate rate);
706 
707 /**
708  * ib_rate_to_mbps - Convert the IB rate enum to Mbps.
709  * For example, IB_RATE_2_5_GBPS will be converted to 2500.
710  * @rate: rate to convert.
711  */
712 __attribute_const__ int ib_rate_to_mbps(enum ib_rate rate);
713 
714 
715 /**
716  * enum ib_mr_type - memory region type
717  * @IB_MR_TYPE_MEM_REG:       memory region that is used for
718  *                            normal registration
719  * @IB_MR_TYPE_SIGNATURE:     memory region that is used for
720  *                            signature operations (data-integrity
721  *                            capable regions)
722  * @IB_MR_TYPE_SG_GAPS:       memory region that is capable to
723  *                            register any arbitrary sg lists (without
724  *                            the normal mr constraints - see
725  *                            ib_map_mr_sg)
726  */
727 enum ib_mr_type {
728         IB_MR_TYPE_MEM_REG,
729         IB_MR_TYPE_SIGNATURE,
730         IB_MR_TYPE_SG_GAPS,
731 };
732 
733 /**
734  * Signature types
735  * IB_SIG_TYPE_NONE: Unprotected.
736  * IB_SIG_TYPE_T10_DIF: Type T10-DIF
737  */
738 enum ib_signature_type {
739         IB_SIG_TYPE_NONE,
740         IB_SIG_TYPE_T10_DIF,
741 };
742 
743 /**
744  * Signature T10-DIF block-guard types
745  * IB_T10DIF_CRC: Corresponds to T10-PI mandated CRC checksum rules.
746  * IB_T10DIF_CSUM: Corresponds to IP checksum rules.
747  */
748 enum ib_t10_dif_bg_type {
749         IB_T10DIF_CRC,
750         IB_T10DIF_CSUM
751 };
752 
753 /**
754  * struct ib_t10_dif_domain - Parameters specific for T10-DIF
755  *     domain.
756  * @bg_type: T10-DIF block guard type (CRC|CSUM)
757  * @pi_interval: protection information interval.
758  * @bg: seed of guard computation.
759  * @app_tag: application tag of guard block
760  * @ref_tag: initial guard block reference tag.
761  * @ref_remap: Indicate wethear the reftag increments each block
762  * @app_escape: Indicate to skip block check if apptag=0xffff
763  * @ref_escape: Indicate to skip block check if reftag=0xffffffff
764  * @apptag_check_mask: check bitmask of application tag.
765  */
766 struct ib_t10_dif_domain {
767         enum ib_t10_dif_bg_type bg_type;
768         u16                     pi_interval;
769         u16                     bg;
770         u16                     app_tag;
771         u32                     ref_tag;
772         bool                    ref_remap;
773         bool                    app_escape;
774         bool                    ref_escape;
775         u16                     apptag_check_mask;
776 };
777 
778 /**
779  * struct ib_sig_domain - Parameters for signature domain
780  * @sig_type: specific signauture type
781  * @sig: union of all signature domain attributes that may
782  *     be used to set domain layout.
783  */
784 struct ib_sig_domain {
785         enum ib_signature_type sig_type;
786         union {
787                 struct ib_t10_dif_domain dif;
788         } sig;
789 };
790 
791 /**
792  * struct ib_sig_attrs - Parameters for signature handover operation
793  * @check_mask: bitmask for signature byte check (8 bytes)
794  * @mem: memory domain layout desciptor.
795  * @wire: wire domain layout desciptor.
796  */
797 struct ib_sig_attrs {
798         u8                      check_mask;
799         struct ib_sig_domain    mem;
800         struct ib_sig_domain    wire;
801 };
802 
803 enum ib_sig_err_type {
804         IB_SIG_BAD_GUARD,
805         IB_SIG_BAD_REFTAG,
806         IB_SIG_BAD_APPTAG,
807 };
808 
809 /**
810  * struct ib_sig_err - signature error descriptor
811  */
812 struct ib_sig_err {
813         enum ib_sig_err_type    err_type;
814         u32                     expected;
815         u32                     actual;
816         u64                     sig_err_offset;
817         u32                     key;
818 };
819 
820 enum ib_mr_status_check {
821         IB_MR_CHECK_SIG_STATUS = 1,
822 };
823 
824 /**
825  * struct ib_mr_status - Memory region status container
826  *
827  * @fail_status: Bitmask of MR checks status. For each
828  *     failed check a corresponding status bit is set.
829  * @sig_err: Additional info for IB_MR_CEHCK_SIG_STATUS
830  *     failure.
831  */
832 struct ib_mr_status {
833         u32                 fail_status;
834         struct ib_sig_err   sig_err;
835 };
836 
837 /**
838  * mult_to_ib_rate - Convert a multiple of 2.5 Gbit/sec to an IB rate
839  * enum.
840  * @mult: multiple to convert.
841  */
842 __attribute_const__ enum ib_rate mult_to_ib_rate(int mult);
843 
844 enum rdma_ah_attr_type {
845         RDMA_AH_ATTR_TYPE_IB,
846         RDMA_AH_ATTR_TYPE_ROCE,
847         RDMA_AH_ATTR_TYPE_OPA,
848 };
849 
850 struct ib_ah_attr {
851         u16                     dlid;
852         u8                      src_path_bits;
853 };
854 
855 struct roce_ah_attr {
856         u8                      dmac[ETH_ALEN];
857 };
858 
859 struct opa_ah_attr {
860         u32                     dlid;
861         u8                      src_path_bits;
862 };
863 
864 struct rdma_ah_attr {
865         struct ib_global_route  grh;
866         u8                      sl;
867         u8                      static_rate;
868         u8                      port_num;
869         u8                      ah_flags;
870         enum rdma_ah_attr_type type;
871         union {
872                 struct ib_ah_attr ib;
873                 struct roce_ah_attr roce;
874                 struct opa_ah_attr opa;
875         };
876 };
877 
878 enum ib_wc_status {
879         IB_WC_SUCCESS,
880         IB_WC_LOC_LEN_ERR,
881         IB_WC_LOC_QP_OP_ERR,
882         IB_WC_LOC_EEC_OP_ERR,
883         IB_WC_LOC_PROT_ERR,
884         IB_WC_WR_FLUSH_ERR,
885         IB_WC_MW_BIND_ERR,
886         IB_WC_BAD_RESP_ERR,
887         IB_WC_LOC_ACCESS_ERR,
888         IB_WC_REM_INV_REQ_ERR,
889         IB_WC_REM_ACCESS_ERR,
890         IB_WC_REM_OP_ERR,
891         IB_WC_RETRY_EXC_ERR,
892         IB_WC_RNR_RETRY_EXC_ERR,
893         IB_WC_LOC_RDD_VIOL_ERR,
894         IB_WC_REM_INV_RD_REQ_ERR,
895         IB_WC_REM_ABORT_ERR,
896         IB_WC_INV_EECN_ERR,
897         IB_WC_INV_EEC_STATE_ERR,
898         IB_WC_FATAL_ERR,
899         IB_WC_RESP_TIMEOUT_ERR,
900         IB_WC_GENERAL_ERR
901 };
902 
903 const char *__attribute_const__ ib_wc_status_msg(enum ib_wc_status status);
904 
905 enum ib_wc_opcode {
906         IB_WC_SEND,
907         IB_WC_RDMA_WRITE,
908         IB_WC_RDMA_READ,
909         IB_WC_COMP_SWAP,
910         IB_WC_FETCH_ADD,
911         IB_WC_LSO,
912         IB_WC_LOCAL_INV,
913         IB_WC_REG_MR,
914         IB_WC_MASKED_COMP_SWAP,
915         IB_WC_MASKED_FETCH_ADD,
916 /*
917  * Set value of IB_WC_RECV so consumers can test if a completion is a
918  * receive by testing (opcode & IB_WC_RECV).
919  */
920         IB_WC_RECV                      = 1 << 7,
921         IB_WC_RECV_RDMA_WITH_IMM
922 };
923 
924 enum ib_wc_flags {
925         IB_WC_GRH               = 1,
926         IB_WC_WITH_IMM          = (1<<1),
927         IB_WC_WITH_INVALIDATE   = (1<<2),
928         IB_WC_IP_CSUM_OK        = (1<<3),
929         IB_WC_WITH_SMAC         = (1<<4),
930         IB_WC_WITH_VLAN         = (1<<5),
931         IB_WC_WITH_NETWORK_HDR_TYPE     = (1<<6),
932 };
933 
934 struct ib_wc {
935         union {
936                 u64             wr_id;
937                 struct ib_cqe   *wr_cqe;
938         };
939         enum ib_wc_status       status;
940         enum ib_wc_opcode       opcode;
941         u32                     vendor_err;
942         u32                     byte_len;
943         struct ib_qp           *qp;
944         union {
945                 __be32          imm_data;
946                 u32             invalidate_rkey;
947         } ex;
948         u32                     src_qp;
949         int                     wc_flags;
950         u16                     pkey_index;
951         u16                     slid;
952         u8                      sl;
953         u8                      dlid_path_bits;
954         u8                      port_num;       /* valid only for DR SMPs on switches */
955         u8                      smac[ETH_ALEN];
956         u16                     vlan_id;
957         u8                      network_hdr_type;
958 };
959 
960 enum ib_cq_notify_flags {
961         IB_CQ_SOLICITED                 = 1 << 0,
962         IB_CQ_NEXT_COMP                 = 1 << 1,
963         IB_CQ_SOLICITED_MASK            = IB_CQ_SOLICITED | IB_CQ_NEXT_COMP,
964         IB_CQ_REPORT_MISSED_EVENTS      = 1 << 2,
965 };
966 
967 enum ib_srq_type {
968         IB_SRQT_BASIC,
969         IB_SRQT_XRC
970 };
971 
972 enum ib_srq_attr_mask {
973         IB_SRQ_MAX_WR   = 1 << 0,
974         IB_SRQ_LIMIT    = 1 << 1,
975 };
976 
977 struct ib_srq_attr {
978         u32     max_wr;
979         u32     max_sge;
980         u32     srq_limit;
981 };
982 
983 struct ib_srq_init_attr {
984         void                  (*event_handler)(struct ib_event *, void *);
985         void                   *srq_context;
986         struct ib_srq_attr      attr;
987         enum ib_srq_type        srq_type;
988 
989         union {
990                 struct {
991                         struct ib_xrcd *xrcd;
992                         struct ib_cq   *cq;
993                 } xrc;
994         } ext;
995 };
996 
997 struct ib_qp_cap {
998         u32     max_send_wr;
999         u32     max_recv_wr;
1000         u32     max_send_sge;
1001         u32     max_recv_sge;
1002         u32     max_inline_data;
1003 
1004         /*
1005          * Maximum number of rdma_rw_ctx structures in flight at a time.
1006          * ib_create_qp() will calculate the right amount of neededed WRs
1007          * and MRs based on this.
1008          */
1009         u32     max_rdma_ctxs;
1010 };
1011 
1012 enum ib_sig_type {
1013         IB_SIGNAL_ALL_WR,
1014         IB_SIGNAL_REQ_WR
1015 };
1016 
1017 enum ib_qp_type {
1018         /*
1019          * IB_QPT_SMI and IB_QPT_GSI have to be the first two entries
1020          * here (and in that order) since the MAD layer uses them as
1021          * indices into a 2-entry table.
1022          */
1023         IB_QPT_SMI,
1024         IB_QPT_GSI,
1025 
1026         IB_QPT_RC,
1027         IB_QPT_UC,
1028         IB_QPT_UD,
1029         IB_QPT_RAW_IPV6,
1030         IB_QPT_RAW_ETHERTYPE,
1031         IB_QPT_RAW_PACKET = 8,
1032         IB_QPT_XRC_INI = 9,
1033         IB_QPT_XRC_TGT,
1034         IB_QPT_MAX,
1035         /* Reserve a range for qp types internal to the low level driver.
1036          * These qp types will not be visible at the IB core layer, so the
1037          * IB_QPT_MAX usages should not be affected in the core layer
1038          */
1039         IB_QPT_RESERVED1 = 0x1000,
1040         IB_QPT_RESERVED2,
1041         IB_QPT_RESERVED3,
1042         IB_QPT_RESERVED4,
1043         IB_QPT_RESERVED5,
1044         IB_QPT_RESERVED6,
1045         IB_QPT_RESERVED7,
1046         IB_QPT_RESERVED8,
1047         IB_QPT_RESERVED9,
1048         IB_QPT_RESERVED10,
1049 };
1050 
1051 enum ib_qp_create_flags {
1052         IB_QP_CREATE_IPOIB_UD_LSO               = 1 << 0,
1053         IB_QP_CREATE_BLOCK_MULTICAST_LOOPBACK   = 1 << 1,
1054         IB_QP_CREATE_CROSS_CHANNEL              = 1 << 2,
1055         IB_QP_CREATE_MANAGED_SEND               = 1 << 3,
1056         IB_QP_CREATE_MANAGED_RECV               = 1 << 4,
1057         IB_QP_CREATE_NETIF_QP                   = 1 << 5,
1058         IB_QP_CREATE_SIGNATURE_EN               = 1 << 6,
1059         /* FREE                                 = 1 << 7, */
1060         IB_QP_CREATE_SCATTER_FCS                = 1 << 8,
1061         IB_QP_CREATE_CVLAN_STRIPPING            = 1 << 9,
1062         /* reserve bits 26-31 for low level drivers' internal use */
1063         IB_QP_CREATE_RESERVED_START             = 1 << 26,
1064         IB_QP_CREATE_RESERVED_END               = 1 << 31,
1065 };
1066 
1067 /*
1068  * Note: users may not call ib_close_qp or ib_destroy_qp from the event_handler
1069  * callback to destroy the passed in QP.
1070  */
1071 
1072 struct ib_qp_init_attr {
1073         void                  (*event_handler)(struct ib_event *, void *);
1074         void                   *qp_context;
1075         struct ib_cq           *send_cq;
1076         struct ib_cq           *recv_cq;
1077         struct ib_srq          *srq;
1078         struct ib_xrcd         *xrcd;     /* XRC TGT QPs only */
1079         struct ib_qp_cap        cap;
1080         enum ib_sig_type        sq_sig_type;
1081         enum ib_qp_type         qp_type;
1082         enum ib_qp_create_flags create_flags;
1083 
1084         /*
1085          * Only needed for special QP types, or when using the RW API.
1086          */
1087         u8                      port_num;
1088         struct ib_rwq_ind_table *rwq_ind_tbl;
1089 };
1090 
1091 struct ib_qp_open_attr {
1092         void                  (*event_handler)(struct ib_event *, void *);
1093         void                   *qp_context;
1094         u32                     qp_num;
1095         enum ib_qp_type         qp_type;
1096 };
1097 
1098 enum ib_rnr_timeout {
1099         IB_RNR_TIMER_655_36 =  0,
1100         IB_RNR_TIMER_000_01 =  1,
1101         IB_RNR_TIMER_000_02 =  2,
1102         IB_RNR_TIMER_000_03 =  3,
1103         IB_RNR_TIMER_000_04 =  4,
1104         IB_RNR_TIMER_000_06 =  5,
1105         IB_RNR_TIMER_000_08 =  6,
1106         IB_RNR_TIMER_000_12 =  7,
1107         IB_RNR_TIMER_000_16 =  8,
1108         IB_RNR_TIMER_000_24 =  9,
1109         IB_RNR_TIMER_000_32 = 10,
1110         IB_RNR_TIMER_000_48 = 11,
1111         IB_RNR_TIMER_000_64 = 12,
1112         IB_RNR_TIMER_000_96 = 13,
1113         IB_RNR_TIMER_001_28 = 14,
1114         IB_RNR_TIMER_001_92 = 15,
1115         IB_RNR_TIMER_002_56 = 16,
1116         IB_RNR_TIMER_003_84 = 17,
1117         IB_RNR_TIMER_005_12 = 18,
1118         IB_RNR_TIMER_007_68 = 19,
1119         IB_RNR_TIMER_010_24 = 20,
1120         IB_RNR_TIMER_015_36 = 21,
1121         IB_RNR_TIMER_020_48 = 22,
1122         IB_RNR_TIMER_030_72 = 23,
1123         IB_RNR_TIMER_040_96 = 24,
1124         IB_RNR_TIMER_061_44 = 25,
1125         IB_RNR_TIMER_081_92 = 26,
1126         IB_RNR_TIMER_122_88 = 27,
1127         IB_RNR_TIMER_163_84 = 28,
1128         IB_RNR_TIMER_245_76 = 29,
1129         IB_RNR_TIMER_327_68 = 30,
1130         IB_RNR_TIMER_491_52 = 31
1131 };
1132 
1133 enum ib_qp_attr_mask {
1134         IB_QP_STATE                     = 1,
1135         IB_QP_CUR_STATE                 = (1<<1),
1136         IB_QP_EN_SQD_ASYNC_NOTIFY       = (1<<2),
1137         IB_QP_ACCESS_FLAGS              = (1<<3),
1138         IB_QP_PKEY_INDEX                = (1<<4),
1139         IB_QP_PORT                      = (1<<5),
1140         IB_QP_QKEY                      = (1<<6),
1141         IB_QP_AV                        = (1<<7),
1142         IB_QP_PATH_MTU                  = (1<<8),
1143         IB_QP_TIMEOUT                   = (1<<9),
1144         IB_QP_RETRY_CNT                 = (1<<10),
1145         IB_QP_RNR_RETRY                 = (1<<11),
1146         IB_QP_RQ_PSN                    = (1<<12),
1147         IB_QP_MAX_QP_RD_ATOMIC          = (1<<13),
1148         IB_QP_ALT_PATH                  = (1<<14),
1149         IB_QP_MIN_RNR_TIMER             = (1<<15),
1150         IB_QP_SQ_PSN                    = (1<<16),
1151         IB_QP_MAX_DEST_RD_ATOMIC        = (1<<17),
1152         IB_QP_PATH_MIG_STATE            = (1<<18),
1153         IB_QP_CAP                       = (1<<19),
1154         IB_QP_DEST_QPN                  = (1<<20),
1155         IB_QP_RESERVED1                 = (1<<21),
1156         IB_QP_RESERVED2                 = (1<<22),
1157         IB_QP_RESERVED3                 = (1<<23),
1158         IB_QP_RESERVED4                 = (1<<24),
1159         IB_QP_RATE_LIMIT                = (1<<25),
1160 };
1161 
1162 enum ib_qp_state {
1163         IB_QPS_RESET,
1164         IB_QPS_INIT,
1165         IB_QPS_RTR,
1166         IB_QPS_RTS,
1167         IB_QPS_SQD,
1168         IB_QPS_SQE,
1169         IB_QPS_ERR
1170 };
1171 
1172 enum ib_mig_state {
1173         IB_MIG_MIGRATED,
1174         IB_MIG_REARM,
1175         IB_MIG_ARMED
1176 };
1177 
1178 enum ib_mw_type {
1179         IB_MW_TYPE_1 = 1,
1180         IB_MW_TYPE_2 = 2
1181 };
1182 
1183 struct ib_qp_attr {
1184         enum ib_qp_state        qp_state;
1185         enum ib_qp_state        cur_qp_state;
1186         enum ib_mtu             path_mtu;
1187         enum ib_mig_state       path_mig_state;
1188         u32                     qkey;
1189         u32                     rq_psn;
1190         u32                     sq_psn;
1191         u32                     dest_qp_num;
1192         int                     qp_access_flags;
1193         struct ib_qp_cap        cap;
1194         struct rdma_ah_attr     ah_attr;
1195         struct rdma_ah_attr     alt_ah_attr;
1196         u16                     pkey_index;
1197         u16                     alt_pkey_index;
1198         u8                      en_sqd_async_notify;
1199         u8                      sq_draining;
1200         u8                      max_rd_atomic;
1201         u8                      max_dest_rd_atomic;
1202         u8                      min_rnr_timer;
1203         u8                      port_num;
1204         u8                      timeout;
1205         u8                      retry_cnt;
1206         u8                      rnr_retry;
1207         u8                      alt_port_num;
1208         u8                      alt_timeout;
1209         u32                     rate_limit;
1210 };
1211 
1212 enum ib_wr_opcode {
1213         IB_WR_RDMA_WRITE,
1214         IB_WR_RDMA_WRITE_WITH_IMM,
1215         IB_WR_SEND,
1216         IB_WR_SEND_WITH_IMM,
1217         IB_WR_RDMA_READ,
1218         IB_WR_ATOMIC_CMP_AND_SWP,
1219         IB_WR_ATOMIC_FETCH_AND_ADD,
1220         IB_WR_LSO,
1221         IB_WR_SEND_WITH_INV,
1222         IB_WR_RDMA_READ_WITH_INV,
1223         IB_WR_LOCAL_INV,
1224         IB_WR_REG_MR,
1225         IB_WR_MASKED_ATOMIC_CMP_AND_SWP,
1226         IB_WR_MASKED_ATOMIC_FETCH_AND_ADD,
1227         IB_WR_REG_SIG_MR,
1228         /* reserve values for low level drivers' internal use.
1229          * These values will not be used at all in the ib core layer.
1230          */
1231         IB_WR_RESERVED1 = 0xf0,
1232         IB_WR_RESERVED2,
1233         IB_WR_RESERVED3,
1234         IB_WR_RESERVED4,
1235         IB_WR_RESERVED5,
1236         IB_WR_RESERVED6,
1237         IB_WR_RESERVED7,
1238         IB_WR_RESERVED8,
1239         IB_WR_RESERVED9,
1240         IB_WR_RESERVED10,
1241 };
1242 
1243 enum ib_send_flags {
1244         IB_SEND_FENCE           = 1,
1245         IB_SEND_SIGNALED        = (1<<1),
1246         IB_SEND_SOLICITED       = (1<<2),
1247         IB_SEND_INLINE          = (1<<3),
1248         IB_SEND_IP_CSUM         = (1<<4),
1249 
1250         /* reserve bits 26-31 for low level drivers' internal use */
1251         IB_SEND_RESERVED_START  = (1 << 26),
1252         IB_SEND_RESERVED_END    = (1 << 31),
1253 };
1254 
1255 struct ib_sge {
1256         u64     addr;
1257         u32     length;
1258         u32     lkey;
1259 };
1260 
1261 struct ib_cqe {
1262         void (*done)(struct ib_cq *cq, struct ib_wc *wc);
1263 };
1264 
1265 struct ib_send_wr {
1266         struct ib_send_wr      *next;
1267         union {
1268                 u64             wr_id;
1269                 struct ib_cqe   *wr_cqe;
1270         };
1271         struct ib_sge          *sg_list;
1272         int                     num_sge;
1273         enum ib_wr_opcode       opcode;
1274         int                     send_flags;
1275         union {
1276                 __be32          imm_data;
1277                 u32             invalidate_rkey;
1278         } ex;
1279 };
1280 
1281 struct ib_rdma_wr {
1282         struct ib_send_wr       wr;
1283         u64                     remote_addr;
1284         u32                     rkey;
1285 };
1286 
1287 static inline struct ib_rdma_wr *rdma_wr(struct ib_send_wr *wr)
1288 {
1289         return container_of(wr, struct ib_rdma_wr, wr);
1290 }
1291 
1292 struct ib_atomic_wr {
1293         struct ib_send_wr       wr;
1294         u64                     remote_addr;
1295         u64                     compare_add;
1296         u64                     swap;
1297         u64                     compare_add_mask;
1298         u64                     swap_mask;
1299         u32                     rkey;
1300 };
1301 
1302 static inline struct ib_atomic_wr *atomic_wr(struct ib_send_wr *wr)
1303 {
1304         return container_of(wr, struct ib_atomic_wr, wr);
1305 }
1306 
1307 struct ib_ud_wr {
1308         struct ib_send_wr       wr;
1309         struct ib_ah            *ah;
1310         void                    *header;
1311         int                     hlen;
1312         int                     mss;
1313         u32                     remote_qpn;
1314         u32                     remote_qkey;
1315         u16                     pkey_index; /* valid for GSI only */
1316         u8                      port_num;   /* valid for DR SMPs on switch only */
1317 };
1318 
1319 static inline struct ib_ud_wr *ud_wr(struct ib_send_wr *wr)
1320 {
1321         return container_of(wr, struct ib_ud_wr, wr);
1322 }
1323 
1324 struct ib_reg_wr {
1325         struct ib_send_wr       wr;
1326         struct ib_mr            *mr;
1327         u32                     key;
1328         int                     access;
1329 };
1330 
1331 static inline struct ib_reg_wr *reg_wr(struct ib_send_wr *wr)
1332 {
1333         return container_of(wr, struct ib_reg_wr, wr);
1334 }
1335 
1336 struct ib_sig_handover_wr {
1337         struct ib_send_wr       wr;
1338         struct ib_sig_attrs    *sig_attrs;
1339         struct ib_mr           *sig_mr;
1340         int                     access_flags;
1341         struct ib_sge          *prot;
1342 };
1343 
1344 static inline struct ib_sig_handover_wr *sig_handover_wr(struct ib_send_wr *wr)
1345 {
1346         return container_of(wr, struct ib_sig_handover_wr, wr);
1347 }
1348 
1349 struct ib_recv_wr {
1350         struct ib_recv_wr      *next;
1351         union {
1352                 u64             wr_id;
1353                 struct ib_cqe   *wr_cqe;
1354         };
1355         struct ib_sge          *sg_list;
1356         int                     num_sge;
1357 };
1358 
1359 enum ib_access_flags {
1360         IB_ACCESS_LOCAL_WRITE   = 1,
1361         IB_ACCESS_REMOTE_WRITE  = (1<<1),
1362         IB_ACCESS_REMOTE_READ   = (1<<2),
1363         IB_ACCESS_REMOTE_ATOMIC = (1<<3),
1364         IB_ACCESS_MW_BIND       = (1<<4),
1365         IB_ZERO_BASED           = (1<<5),
1366         IB_ACCESS_ON_DEMAND     = (1<<6),
1367         IB_ACCESS_HUGETLB       = (1<<7),
1368 };
1369 
1370 /*
1371  * XXX: these are apparently used for ->rereg_user_mr, no idea why they
1372  * are hidden here instead of a uapi header!
1373  */
1374 enum ib_mr_rereg_flags {
1375         IB_MR_REREG_TRANS       = 1,
1376         IB_MR_REREG_PD          = (1<<1),
1377         IB_MR_REREG_ACCESS      = (1<<2),
1378         IB_MR_REREG_SUPPORTED   = ((IB_MR_REREG_ACCESS << 1) - 1)
1379 };
1380 
1381 struct ib_fmr_attr {
1382         int     max_pages;
1383         int     max_maps;
1384         u8      page_shift;
1385 };
1386 
1387 struct ib_umem;
1388 
1389 enum rdma_remove_reason {
1390         /* Userspace requested uobject deletion. Call could fail */
1391         RDMA_REMOVE_DESTROY,
1392         /* Context deletion. This call should delete the actual object itself */
1393         RDMA_REMOVE_CLOSE,
1394         /* Driver is being hot-unplugged. This call should delete the actual object itself */
1395         RDMA_REMOVE_DRIVER_REMOVE,
1396         /* Context is being cleaned-up, but commit was just completed */
1397         RDMA_REMOVE_DURING_CLEANUP,
1398 };
1399 
1400 struct ib_rdmacg_object {
1401 #ifdef CONFIG_CGROUP_RDMA
1402         struct rdma_cgroup      *cg;            /* owner rdma cgroup */
1403 #endif
1404 };
1405 
1406 struct ib_ucontext {
1407         struct ib_device       *device;
1408         struct ib_uverbs_file  *ufile;
1409         int                     closing;
1410 
1411         /* locking the uobjects_list */
1412         struct mutex            uobjects_lock;
1413         struct list_head        uobjects;
1414         /* protects cleanup process from other actions */
1415         struct rw_semaphore     cleanup_rwsem;
1416         enum rdma_remove_reason cleanup_reason;
1417 
1418         struct pid             *tgid;
1419 #ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
1420         struct rb_root      umem_tree;
1421         /*
1422          * Protects .umem_rbroot and tree, as well as odp_mrs_count and
1423          * mmu notifiers registration.
1424          */
1425         struct rw_semaphore     umem_rwsem;
1426         void (*invalidate_range)(struct ib_umem *umem,
1427                                  unsigned long start, unsigned long end);
1428 
1429         struct mmu_notifier     mn;
1430         atomic_t                notifier_count;
1431         /* A list of umems that don't have private mmu notifier counters yet. */
1432         struct list_head        no_private_counters;
1433         int                     odp_mrs_count;
1434 #endif
1435 
1436         struct ib_rdmacg_object cg_obj;
1437 };
1438 
1439 struct ib_uobject {
1440         u64                     user_handle;    /* handle given to us by userspace */
1441         struct ib_ucontext     *context;        /* associated user context */
1442         void                   *object;         /* containing object */
1443         struct list_head        list;           /* link to context's list */
1444         struct ib_rdmacg_object cg_obj;         /* rdmacg object */
1445         int                     id;             /* index into kernel idr */
1446         struct kref             ref;
1447         atomic_t                usecnt;         /* protects exclusive access */
1448         struct rcu_head         rcu;            /* kfree_rcu() overhead */
1449 
1450         const struct uverbs_obj_type *type;
1451 };
1452 
1453 struct ib_uobject_file {
1454         struct ib_uobject       uobj;
1455         /* ufile contains the lock between context release and file close */
1456         struct ib_uverbs_file   *ufile;
1457 };
1458 
1459 struct ib_udata {
1460         const void __user *inbuf;
1461         void __user *outbuf;
1462         size_t       inlen;
1463         size_t       outlen;
1464 };
1465 
1466 struct ib_pd {
1467         u32                     local_dma_lkey;
1468         u32                     flags;
1469         struct ib_device       *device;
1470         struct ib_uobject      *uobject;
1471         atomic_t                usecnt; /* count all resources */
1472 
1473         u32                     unsafe_global_rkey;
1474 
1475         /*
1476          * Implementation details of the RDMA core, don't use in drivers:
1477          */
1478         struct ib_mr           *__internal_mr;
1479 };
1480 
1481 struct ib_xrcd {
1482         struct ib_device       *device;
1483         atomic_t                usecnt; /* count all exposed resources */
1484         struct inode           *inode;
1485 
1486         struct mutex            tgt_qp_mutex;
1487         struct list_head        tgt_qp_list;
1488 };
1489 
1490 struct ib_ah {
1491         struct ib_device        *device;
1492         struct ib_pd            *pd;
1493         struct ib_uobject       *uobject;
1494         enum rdma_ah_attr_type  type;
1495 };
1496 
1497 typedef void (*ib_comp_handler)(struct ib_cq *cq, void *cq_context);
1498 
1499 enum ib_poll_context {
1500         IB_POLL_DIRECT,         /* caller context, no hw completions */
1501         IB_POLL_SOFTIRQ,        /* poll from softirq context */
1502         IB_POLL_WORKQUEUE,      /* poll from workqueue */
1503 };
1504 
1505 struct ib_cq {
1506         struct ib_device       *device;
1507         struct ib_uobject      *uobject;
1508         ib_comp_handler         comp_handler;
1509         void                  (*event_handler)(struct ib_event *, void *);
1510         void                   *cq_context;
1511         int                     cqe;
1512         atomic_t                usecnt; /* count number of work queues */
1513         enum ib_poll_context    poll_ctx;
1514         struct ib_wc            *wc;
1515         union {
1516                 struct irq_poll         iop;
1517                 struct work_struct      work;
1518         };
1519 };
1520 
1521 struct ib_srq {
1522         struct ib_device       *device;
1523         struct ib_pd           *pd;
1524         struct ib_uobject      *uobject;
1525         void                  (*event_handler)(struct ib_event *, void *);
1526         void                   *srq_context;
1527         enum ib_srq_type        srq_type;
1528         atomic_t                usecnt;
1529 
1530         union {
1531                 struct {
1532                         struct ib_xrcd *xrcd;
1533                         struct ib_cq   *cq;
1534                         u32             srq_num;
1535                 } xrc;
1536         } ext;
1537 };
1538 
1539 enum ib_raw_packet_caps {
1540         /* Strip cvlan from incoming packet and report it in the matching work
1541          * completion is supported.
1542          */
1543         IB_RAW_PACKET_CAP_CVLAN_STRIPPING       = (1 << 0),
1544         /* Scatter FCS field of an incoming packet to host memory is supported.
1545          */
1546         IB_RAW_PACKET_CAP_SCATTER_FCS           = (1 << 1),
1547         /* Checksum offloads are supported (for both send and receive). */
1548         IB_RAW_PACKET_CAP_IP_CSUM               = (1 << 2),
1549 };
1550 
1551 enum ib_wq_type {
1552         IB_WQT_RQ
1553 };
1554 
1555 enum ib_wq_state {
1556         IB_WQS_RESET,
1557         IB_WQS_RDY,
1558         IB_WQS_ERR
1559 };
1560 
1561 struct ib_wq {
1562         struct ib_device       *device;
1563         struct ib_uobject      *uobject;
1564         void                *wq_context;
1565         void                (*event_handler)(struct ib_event *, void *);
1566         struct ib_pd           *pd;
1567         struct ib_cq           *cq;
1568         u32             wq_num;
1569         enum ib_wq_state       state;
1570         enum ib_wq_type wq_type;
1571         atomic_t                usecnt;
1572 };
1573 
1574 enum ib_wq_flags {
1575         IB_WQ_FLAGS_CVLAN_STRIPPING     = 1 << 0,
1576         IB_WQ_FLAGS_SCATTER_FCS         = 1 << 1,
1577 };
1578 
1579 struct ib_wq_init_attr {
1580         void                   *wq_context;
1581         enum ib_wq_type wq_type;
1582         u32             max_wr;
1583         u32             max_sge;
1584         struct  ib_cq          *cq;
1585         void                (*event_handler)(struct ib_event *, void *);
1586         u32             create_flags; /* Use enum ib_wq_flags */
1587 };
1588 
1589 enum ib_wq_attr_mask {
1590         IB_WQ_STATE             = 1 << 0,
1591         IB_WQ_CUR_STATE         = 1 << 1,
1592         IB_WQ_FLAGS             = 1 << 2,
1593 };
1594 
1595 struct ib_wq_attr {
1596         enum    ib_wq_state     wq_state;
1597         enum    ib_wq_state     curr_wq_state;
1598         u32                     flags; /* Use enum ib_wq_flags */
1599         u32                     flags_mask; /* Use enum ib_wq_flags */
1600 };
1601 
1602 struct ib_rwq_ind_table {
1603         struct ib_device        *device;
1604         struct ib_uobject      *uobject;
1605         atomic_t                usecnt;
1606         u32             ind_tbl_num;
1607         u32             log_ind_tbl_size;
1608         struct ib_wq    **ind_tbl;
1609 };
1610 
1611 struct ib_rwq_ind_table_init_attr {
1612         u32             log_ind_tbl_size;
1613         /* Each entry is a pointer to Receive Work Queue */
1614         struct ib_wq    **ind_tbl;
1615 };
1616 
1617 enum port_pkey_state {
1618         IB_PORT_PKEY_NOT_VALID = 0,
1619         IB_PORT_PKEY_VALID = 1,
1620         IB_PORT_PKEY_LISTED = 2,
1621 };
1622 
1623 struct ib_qp_security;
1624 
1625 struct ib_port_pkey {
1626         enum port_pkey_state    state;
1627         u16                     pkey_index;
1628         u8                      port_num;
1629         struct list_head        qp_list;
1630         struct list_head        to_error_list;
1631         struct ib_qp_security  *sec;
1632 };
1633 
1634 struct ib_ports_pkeys {
1635         struct ib_port_pkey     main;
1636         struct ib_port_pkey     alt;
1637 };
1638 
1639 struct ib_qp_security {
1640         struct ib_qp           *qp;
1641         struct ib_device       *dev;
1642         /* Hold this mutex when changing port and pkey settings. */
1643         struct mutex            mutex;
1644         struct ib_ports_pkeys  *ports_pkeys;
1645         /* A list of all open shared QP handles.  Required to enforce security
1646          * properly for all users of a shared QP.
1647          */
1648         struct list_head        shared_qp_list;
1649         void                   *security;
1650         bool                    destroying;
1651         atomic_t                error_list_count;
1652         struct completion       error_complete;
1653         int                     error_comps_pending;
1654 };
1655 
1656 /*
1657  * @max_write_sge: Maximum SGE elements per RDMA WRITE request.
1658  * @max_read_sge:  Maximum SGE elements per RDMA READ request.
1659  */
1660 struct ib_qp {
1661         struct ib_device       *device;
1662         struct ib_pd           *pd;
1663         struct ib_cq           *send_cq;
1664         struct ib_cq           *recv_cq;
1665         spinlock_t              mr_lock;
1666         int                     mrs_used;
1667         struct list_head        rdma_mrs;
1668         struct list_head        sig_mrs;
1669         struct ib_srq          *srq;
1670         struct ib_xrcd         *xrcd; /* XRC TGT QPs only */
1671         struct list_head        xrcd_list;
1672 
1673         /* count times opened, mcast attaches, flow attaches */
1674         atomic_t                usecnt;
1675         struct list_head        open_list;
1676         struct ib_qp           *real_qp;
1677         struct ib_uobject      *uobject;
1678         void                  (*event_handler)(struct ib_event *, void *);
1679         void                   *qp_context;
1680         u32                     qp_num;
1681         u32                     max_write_sge;
1682         u32                     max_read_sge;
1683         enum ib_qp_type         qp_type;
1684         struct ib_rwq_ind_table *rwq_ind_tbl;
1685         struct ib_qp_security  *qp_sec;
1686         u8                      port;
1687 };
1688 
1689 struct ib_mr {
1690         struct ib_device  *device;
1691         struct ib_pd      *pd;
1692         u32                lkey;
1693         u32                rkey;
1694         u64                iova;
1695         u32                length;
1696         unsigned int       page_size;
1697         bool               need_inval;
1698         union {
1699                 struct ib_uobject       *uobject;       /* user */
1700                 struct list_head        qp_entry;       /* FR */
1701         };
1702 };
1703 
1704 struct ib_mw {
1705         struct ib_device        *device;
1706         struct ib_pd            *pd;
1707         struct ib_uobject       *uobject;
1708         u32                     rkey;
1709         enum ib_mw_type         type;
1710 };
1711 
1712 struct ib_fmr {
1713         struct ib_device        *device;
1714         struct ib_pd            *pd;
1715         struct list_head        list;
1716         u32                     lkey;
1717         u32                     rkey;
1718 };
1719 
1720 /* Supported steering options */
1721 enum ib_flow_attr_type {
1722         /* steering according to rule specifications */
1723         IB_FLOW_ATTR_NORMAL             = 0x0,
1724         /* default unicast and multicast rule -
1725          * receive all Eth traffic which isn't steered to any QP
1726          */
1727         IB_FLOW_ATTR_ALL_DEFAULT        = 0x1,
1728         /* default multicast rule -
1729          * receive all Eth multicast traffic which isn't steered to any QP
1730          */
1731         IB_FLOW_ATTR_MC_DEFAULT         = 0x2,
1732         /* sniffer rule - receive all port traffic */
1733         IB_FLOW_ATTR_SNIFFER            = 0x3
1734 };
1735 
1736 /* Supported steering header types */
1737 enum ib_flow_spec_type {
1738         /* L2 headers*/
1739         IB_FLOW_SPEC_ETH                = 0x20,
1740         IB_FLOW_SPEC_IB                 = 0x22,
1741         /* L3 header*/
1742         IB_FLOW_SPEC_IPV4               = 0x30,
1743         IB_FLOW_SPEC_IPV6               = 0x31,
1744         /* L4 headers*/
1745         IB_FLOW_SPEC_TCP                = 0x40,
1746         IB_FLOW_SPEC_UDP                = 0x41,
1747         IB_FLOW_SPEC_VXLAN_TUNNEL       = 0x50,
1748         IB_FLOW_SPEC_INNER              = 0x100,
1749         /* Actions */
1750         IB_FLOW_SPEC_ACTION_TAG         = 0x1000,
1751         IB_FLOW_SPEC_ACTION_DROP        = 0x1001,
1752 };
1753 #define IB_FLOW_SPEC_LAYER_MASK 0xF0
1754 #define IB_FLOW_SPEC_SUPPORT_LAYERS 8
1755 
1756 /* Flow steering rule priority is set according to it's domain.
1757  * Lower domain value means higher priority.
1758  */
1759 enum ib_flow_domain {
1760         IB_FLOW_DOMAIN_USER,
1761         IB_FLOW_DOMAIN_ETHTOOL,
1762         IB_FLOW_DOMAIN_RFS,
1763         IB_FLOW_DOMAIN_NIC,
1764         IB_FLOW_DOMAIN_NUM /* Must be last */
1765 };
1766 
1767 enum ib_flow_flags {
1768         IB_FLOW_ATTR_FLAGS_DONT_TRAP = 1UL << 1, /* Continue match, no steal */
1769         IB_FLOW_ATTR_FLAGS_RESERVED  = 1UL << 2  /* Must be last */
1770 };
1771 
1772 struct ib_flow_eth_filter {
1773         u8      dst_mac[6];
1774         u8      src_mac[6];
1775         __be16  ether_type;
1776         __be16  vlan_tag;
1777         /* Must be last */
1778         u8      real_sz[0];
1779 };
1780 
1781 struct ib_flow_spec_eth {
1782         u32                       type;
1783         u16                       size;
1784         struct ib_flow_eth_filter val;
1785         struct ib_flow_eth_filter mask;
1786 };
1787 
1788 struct ib_flow_ib_filter {
1789         __be16 dlid;
1790         __u8   sl;
1791         /* Must be last */
1792         u8      real_sz[0];
1793 };
1794 
1795 struct ib_flow_spec_ib {
1796         u32                      type;
1797         u16                      size;
1798         struct ib_flow_ib_filter val;
1799         struct ib_flow_ib_filter mask;
1800 };
1801 
1802 /* IPv4 header flags */
1803 enum ib_ipv4_flags {
1804         IB_IPV4_DONT_FRAG = 0x2, /* Don't enable packet fragmentation */
1805         IB_IPV4_MORE_FRAG = 0X4  /* For All fragmented packets except the
1806                                     last have this flag set */
1807 };
1808 
1809 struct ib_flow_ipv4_filter {
1810         __be32  src_ip;
1811         __be32  dst_ip;
1812         u8      proto;
1813         u8      tos;
1814         u8      ttl;
1815         u8      flags;
1816         /* Must be last */
1817         u8      real_sz[0];
1818 };
1819 
1820 struct ib_flow_spec_ipv4 {
1821         u32                        type;
1822         u16                        size;
1823         struct ib_flow_ipv4_filter val;
1824         struct ib_flow_ipv4_filter mask;
1825 };
1826 
1827 struct ib_flow_ipv6_filter {
1828         u8      src_ip[16];
1829         u8      dst_ip[16];
1830         __be32  flow_label;
1831         u8      next_hdr;
1832         u8      traffic_class;
1833         u8      hop_limit;
1834         /* Must be last */
1835         u8      real_sz[0];
1836 };
1837 
1838 struct ib_flow_spec_ipv6 {
1839         u32                        type;
1840         u16                        size;
1841         struct ib_flow_ipv6_filter val;
1842         struct ib_flow_ipv6_filter mask;
1843 };
1844 
1845 struct ib_flow_tcp_udp_filter {
1846         __be16  dst_port;
1847         __be16  src_port;
1848         /* Must be last */
1849         u8      real_sz[0];
1850 };
1851 
1852 struct ib_flow_spec_tcp_udp {
1853         u32                           type;
1854         u16                           size;
1855         struct ib_flow_tcp_udp_filter val;
1856         struct ib_flow_tcp_udp_filter mask;
1857 };
1858 
1859 struct ib_flow_tunnel_filter {
1860         __be32  tunnel_id;
1861         u8      real_sz[0];
1862 };
1863 
1864 /* ib_flow_spec_tunnel describes the Vxlan tunnel
1865  * the tunnel_id from val has the vni value
1866  */
1867 struct ib_flow_spec_tunnel {
1868         u32                           type;
1869         u16                           size;
1870         struct ib_flow_tunnel_filter  val;
1871         struct ib_flow_tunnel_filter  mask;
1872 };
1873 
1874 struct ib_flow_spec_action_tag {
1875         enum ib_flow_spec_type        type;
1876         u16                           size;
1877         u32                           tag_id;
1878 };
1879 
1880 struct ib_flow_spec_action_drop {
1881         enum ib_flow_spec_type        type;
1882         u16                           size;
1883 };
1884 
1885 union ib_flow_spec {
1886         struct {
1887                 u32                     type;
1888                 u16                     size;
1889         };
1890         struct ib_flow_spec_eth         eth;
1891         struct ib_flow_spec_ib          ib;
1892         struct ib_flow_spec_ipv4        ipv4;
1893         struct ib_flow_spec_tcp_udp     tcp_udp;
1894         struct ib_flow_spec_ipv6        ipv6;
1895         struct ib_flow_spec_tunnel      tunnel;
1896         struct ib_flow_spec_action_tag  flow_tag;
1897         struct ib_flow_spec_action_drop drop;
1898 };
1899 
1900 struct ib_flow_attr {
1901         enum ib_flow_attr_type type;
1902         u16          size;
1903         u16          priority;
1904         u32          flags;
1905         u8           num_of_specs;
1906         u8           port;
1907         /* Following are the optional layers according to user request
1908          * struct ib_flow_spec_xxx
1909          * struct ib_flow_spec_yyy
1910          */
1911 };
1912 
1913 struct ib_flow {
1914         struct ib_qp            *qp;
1915         struct ib_uobject       *uobject;
1916 };
1917 
1918 struct ib_mad_hdr;
1919 struct ib_grh;
1920 
1921 enum ib_process_mad_flags {
1922         IB_MAD_IGNORE_MKEY      = 1,
1923         IB_MAD_IGNORE_BKEY      = 2,
1924         IB_MAD_IGNORE_ALL       = IB_MAD_IGNORE_MKEY | IB_MAD_IGNORE_BKEY
1925 };
1926 
1927 enum ib_mad_result {
1928         IB_MAD_RESULT_FAILURE  = 0,      /* (!SUCCESS is the important flag) */
1929         IB_MAD_RESULT_SUCCESS  = 1 << 0, /* MAD was successfully processed   */
1930         IB_MAD_RESULT_REPLY    = 1 << 1, /* Reply packet needs to be sent    */
1931         IB_MAD_RESULT_CONSUMED = 1 << 2  /* Packet consumed: stop processing */
1932 };
1933 
1934 struct ib_port_cache {
1935         u64                   subnet_prefix;
1936         struct ib_pkey_cache  *pkey;
1937         struct ib_gid_table   *gid;
1938         u8                     lmc;
1939         enum ib_port_state     port_state;
1940 };
1941 
1942 struct ib_cache {
1943         rwlock_t                lock;
1944         struct ib_event_handler event_handler;
1945         struct ib_port_cache   *ports;
1946 };
1947 
1948 struct iw_cm_verbs;
1949 
1950 struct ib_port_immutable {
1951         int                           pkey_tbl_len;
1952         int                           gid_tbl_len;
1953         u32                           core_cap_flags;
1954         u32                           max_mad_size;
1955 };
1956 
1957 /* rdma netdev type - specifies protocol type */
1958 enum rdma_netdev_t {
1959         RDMA_NETDEV_OPA_VNIC,
1960         RDMA_NETDEV_IPOIB,
1961 };
1962 
1963 /**
1964  * struct rdma_netdev - rdma netdev
1965  * For cases where netstack interfacing is required.
1966  */
1967 struct rdma_netdev {
1968         void              *clnt_priv;
1969         struct ib_device  *hca;
1970         u8                 port_num;
1971 
1972         /* cleanup function must be specified */
1973         void (*free_rdma_netdev)(struct net_device *netdev);
1974 
1975         /* control functions */
1976         void (*set_id)(struct net_device *netdev, int id);
1977         /* send packet */
1978         int (*send)(struct net_device *dev, struct sk_buff *skb,
1979                     struct ib_ah *address, u32 dqpn);
1980         /* multicast */
1981         int (*attach_mcast)(struct net_device *dev, struct ib_device *hca,
1982                             union ib_gid *gid, u16 mlid,
1983                             int set_qkey, u32 qkey);
1984         int (*detach_mcast)(struct net_device *dev, struct ib_device *hca,
1985                             union ib_gid *gid, u16 mlid);
1986 };
1987 
1988 struct ib_port_pkey_list {
1989         /* Lock to hold while modifying the list. */
1990         spinlock_t                    list_lock;
1991         struct list_head              pkey_list;
1992 };
1993 
1994 struct ib_device {
1995         /* Do not access @dma_device directly from ULP nor from HW drivers. */
1996         struct device                *dma_device;
1997 
1998         char                          name[IB_DEVICE_NAME_MAX];
1999 
2000         struct list_head              event_handler_list;
2001         spinlock_t                    event_handler_lock;
2002 
2003         spinlock_t                    client_data_lock;
2004         struct list_head              core_list;
2005         /* Access to the client_data_list is protected by the client_data_lock
2006          * spinlock and the lists_rwsem read-write semaphore */
2007         struct list_head              client_data_list;
2008 
2009         struct ib_cache               cache;
2010         /**
2011          * port_immutable is indexed by port number
2012          */
2013         struct ib_port_immutable     *port_immutable;
2014 
2015         int                           num_comp_vectors;
2016 
2017         struct ib_port_pkey_list     *port_pkey_list;
2018 
2019         struct iw_cm_verbs           *iwcm;
2020 
2021         /**
2022          * alloc_hw_stats - Allocate a struct rdma_hw_stats and fill in the
2023          *   driver initialized data.  The struct is kfree()'ed by the sysfs
2024          *   core when the device is removed.  A lifespan of -1 in the return
2025          *   struct tells the core to set a default lifespan.
2026          */
2027         struct rdma_hw_stats      *(*alloc_hw_stats)(struct ib_device *device,
2028                                                      u8 port_num);
2029         /**
2030          * get_hw_stats - Fill in the counter value(s) in the stats struct.
2031          * @index - The index in the value array we wish to have updated, or
2032          *   num_counters if we want all stats updated
2033          * Return codes -
2034          *   < 0 - Error, no counters updated
2035          *   index - Updated the single counter pointed to by index
2036          *   num_counters - Updated all counters (will reset the timestamp
2037          *     and prevent further calls for lifespan milliseconds)
2038          * Drivers are allowed to update all counters in leiu of just the
2039          *   one given in index at their option
2040          */
2041         int                        (*get_hw_stats)(struct ib_device *device,
2042                                                    struct rdma_hw_stats *stats,
2043                                                    u8 port, int index);
2044         int                        (*query_device)(struct ib_device *device,
2045                                                    struct ib_device_attr *device_attr,
2046                                                    struct ib_udata *udata);
2047         int                        (*query_port)(struct ib_device *device,
2048                                                  u8 port_num,
2049                                                  struct ib_port_attr *port_attr);
2050         enum rdma_link_layer       (*get_link_layer)(struct ib_device *device,
2051                                                      u8 port_num);
2052         /* When calling get_netdev, the HW vendor's driver should return the
2053          * net device of device @device at port @port_num or NULL if such
2054          * a net device doesn't exist. The vendor driver should call dev_hold
2055          * on this net device. The HW vendor's device driver must guarantee
2056          * that this function returns NULL before the net device reaches
2057          * NETDEV_UNREGISTER_FINAL state.
2058          */
2059         struct net_device         *(*get_netdev)(struct ib_device *device,
2060                                                  u8 port_num);
2061         int                        (*query_gid)(struct ib_device *device,
2062                                                 u8 port_num, int index,
2063                                                 union ib_gid *gid);
2064         /* When calling add_gid, the HW vendor's driver should
2065          * add the gid of device @device at gid index @index of
2066          * port @port_num to be @gid. Meta-info of that gid (for example,
2067          * the network device related to this gid is available
2068          * at @attr. @context allows the HW vendor driver to store extra
2069          * information together with a GID entry. The HW vendor may allocate
2070          * memory to contain this information and store it in @context when a
2071          * new GID entry is written to. Params are consistent until the next
2072          * call of add_gid or delete_gid. The function should return 0 on
2073          * success or error otherwise. The function could be called
2074          * concurrently for different ports. This function is only called
2075          * when roce_gid_table is used.
2076          */
2077         int                        (*add_gid)(struct ib_device *device,
2078                                               u8 port_num,
2079                                               unsigned int index,
2080                                               const union ib_gid *gid,
2081                                               const struct ib_gid_attr *attr,
2082                                               void **context);
2083         /* When calling del_gid, the HW vendor's driver should delete the
2084          * gid of device @device at gid index @index of port @port_num.
2085          * Upon the deletion of a GID entry, the HW vendor must free any
2086          * allocated memory. The caller will clear @context afterwards.
2087          * This function is only called when roce_gid_table is used.
2088          */
2089         int                        (*del_gid)(struct ib_device *device,
2090                                               u8 port_num,
2091                                               unsigned int index,
2092                                               void **context);
2093         int                        (*query_pkey)(struct ib_device *device,
2094                                                  u8 port_num, u16 index, u16 *pkey);
2095         int                        (*modify_device)(struct ib_device *device,
2096                                                     int device_modify_mask,
2097                                                     struct ib_device_modify *device_modify);
2098         int                        (*modify_port)(struct ib_device *device,
2099                                                   u8 port_num, int port_modify_mask,
2100                                                   struct ib_port_modify *port_modify);
2101         struct ib_ucontext *       (*alloc_ucontext)(struct ib_device *device,
2102                                                      struct ib_udata *udata);
2103         int                        (*dealloc_ucontext)(struct ib_ucontext *context);
2104         int                        (*mmap)(struct ib_ucontext *context,
2105                                            struct vm_area_struct *vma);
2106         struct ib_pd *             (*alloc_pd)(struct ib_device *device,
2107                                                struct ib_ucontext *context,
2108                                                struct ib_udata *udata);
2109         int                        (*dealloc_pd)(struct ib_pd *pd);
2110         struct ib_ah *             (*create_ah)(struct ib_pd *pd,
2111                                                 struct rdma_ah_attr *ah_attr,
2112                                                 struct ib_udata *udata);
2113         int                        (*modify_ah)(struct ib_ah *ah,
2114                                                 struct rdma_ah_attr *ah_attr);
2115         int                        (*query_ah)(struct ib_ah *ah,
2116                                                struct rdma_ah_attr *ah_attr);
2117         int                        (*destroy_ah)(struct ib_ah *ah);
2118         struct ib_srq *            (*create_srq)(struct ib_pd *pd,
2119                                                  struct ib_srq_init_attr *srq_init_attr,
2120                                                  struct ib_udata *udata);
2121         int                        (*modify_srq)(struct ib_srq *srq,
2122                                                  struct ib_srq_attr *srq_attr,
2123                                                  enum ib_srq_attr_mask srq_attr_mask,
2124                                                  struct ib_udata *udata);
2125         int                        (*query_srq)(struct ib_srq *srq,
2126                                                 struct ib_srq_attr *srq_attr);
2127         int                        (*destroy_srq)(struct ib_srq *srq);
2128         int                        (*post_srq_recv)(struct ib_srq *srq,
2129                                                     struct ib_recv_wr *recv_wr,
2130                                                     struct ib_recv_wr **bad_recv_wr);
2131         struct ib_qp *             (*create_qp)(struct ib_pd *pd,
2132                                                 struct ib_qp_init_attr *qp_init_attr,
2133                                                 struct ib_udata *udata);
2134         int                        (*modify_qp)(struct ib_qp *qp,
2135                                                 struct ib_qp_attr *qp_attr,
2136                                                 int qp_attr_mask,
2137                                                 struct ib_udata *udata);
2138         int                        (*query_qp)(struct ib_qp *qp,
2139                                                struct ib_qp_attr *qp_attr,
2140                                                int qp_attr_mask,
2141                                                struct ib_qp_init_attr *qp_init_attr);
2142         int                        (*destroy_qp)(struct ib_qp *qp);
2143         int                        (*post_send)(struct ib_qp *qp,
2144                                                 struct ib_send_wr *send_wr,
2145                                                 struct ib_send_wr **bad_send_wr);
2146         int                        (*post_recv)(struct ib_qp *qp,
2147                                                 struct ib_recv_wr *recv_wr,
2148                                                 struct ib_recv_wr **bad_recv_wr);
2149         struct ib_cq *             (*create_cq)(struct ib_device *device,
2150                                                 const struct ib_cq_init_attr *attr,
2151                                                 struct ib_ucontext *context,
2152                                                 struct ib_udata *udata);
2153         int                        (*modify_cq)(struct ib_cq *cq, u16 cq_count,
2154                                                 u16 cq_period);
2155         int                        (*destroy_cq)(struct ib_cq *cq);
2156         int                        (*resize_cq)(struct ib_cq *cq, int cqe,
2157                                                 struct ib_udata *udata);
2158         int                        (*poll_cq)(struct ib_cq *cq, int num_entries,
2159                                               struct ib_wc *wc);
2160         int                        (*peek_cq)(struct ib_cq *cq, int wc_cnt);
2161         int                        (*req_notify_cq)(struct ib_cq *cq,
2162                                                     enum ib_cq_notify_flags flags);
2163         int                        (*req_ncomp_notif)(struct ib_cq *cq,
2164                                                       int wc_cnt);
2165         struct ib_mr *             (*get_dma_mr)(struct ib_pd *pd,
2166                                                  int mr_access_flags);
2167         struct ib_mr *             (*reg_user_mr)(struct ib_pd *pd,
2168                                                   u64 start, u64 length,
2169                                                   u64 virt_addr,
2170                                                   int mr_access_flags,
2171                                                   struct ib_udata *udata);
2172         int                        (*rereg_user_mr)(struct ib_mr *mr,
2173                                                     int flags,
2174                                                     u64 start, u64 length,
2175                                                     u64 virt_addr,
2176                                                     int mr_access_flags,
2177                                                     struct ib_pd *pd,
2178                                                     struct ib_udata *udata);
2179         int                        (*dereg_mr)(struct ib_mr *mr);
2180         struct ib_mr *             (*alloc_mr)(struct ib_pd *pd,
2181                                                enum ib_mr_type mr_type,
2182                                                u32 max_num_sg);
2183         int                        (*map_mr_sg)(struct ib_mr *mr,
2184                                                 struct scatterlist *sg,
2185                                                 int sg_nents,
2186                                                 unsigned int *sg_offset);
2187         struct ib_mw *             (*alloc_mw)(struct ib_pd *pd,
2188                                                enum ib_mw_type type,
2189                                                struct ib_udata *udata);
2190         int                        (*dealloc_mw)(struct ib_mw *mw);
2191         struct ib_fmr *            (*alloc_fmr)(struct ib_pd *pd,
2192                                                 int mr_access_flags,
2193                                                 struct ib_fmr_attr *fmr_attr);
2194         int                        (*map_phys_fmr)(struct ib_fmr *fmr,
2195                                                    u64 *page_list, int list_len,
2196                                                    u64 iova);
2197         int                        (*unmap_fmr)(struct list_head *fmr_list);
2198         int                        (*dealloc_fmr)(struct ib_fmr *fmr);
2199         int                        (*attach_mcast)(struct ib_qp *qp,
2200                                                    union ib_gid *gid,
2201                                                    u16 lid);
2202         int                        (*detach_mcast)(struct ib_qp *qp,
2203                                                    union ib_gid *gid,
2204                                                    u16 lid);
2205         int                        (*process_mad)(struct ib_device *device,
2206                                                   int process_mad_flags,
2207                                                   u8 port_num,
2208                                                   const struct ib_wc *in_wc,
2209                                                   const struct ib_grh *in_grh,
2210                                                   const struct ib_mad_hdr *in_mad,
2211                                                   size_t in_mad_size,
2212                                                   struct ib_mad_hdr *out_mad,
2213                                                   size_t *out_mad_size,
2214                                                   u16 *out_mad_pkey_index);
2215         struct ib_xrcd *           (*alloc_xrcd)(struct ib_device *device,
2216                                                  struct ib_ucontext *ucontext,
2217                                                  struct ib_udata *udata);
2218         int                        (*dealloc_xrcd)(struct ib_xrcd *xrcd);
2219         struct ib_flow *           (*create_flow)(struct ib_qp *qp,
2220                                                   struct ib_flow_attr
2221                                                   *flow_attr,
2222                                                   int domain);
2223         int                        (*destroy_flow)(struct ib_flow *flow_id);
2224         int                        (*check_mr_status)(struct ib_mr *mr, u32 check_mask,
2225                                                       struct ib_mr_status *mr_status);
2226         void                       (*disassociate_ucontext)(struct ib_ucontext *ibcontext);
2227         void                       (*drain_rq)(struct ib_qp *qp);
2228         void                       (*drain_sq)(struct ib_qp *qp);
2229         int                        (*set_vf_link_state)(struct ib_device *device, int vf, u8 port,
2230                                                         int state);
2231         int                        (*get_vf_config)(struct ib_device *device, int vf, u8 port,
2232                                                    struct ifla_vf_info *ivf);
2233         int                        (*get_vf_stats)(struct ib_device *device, int vf, u8 port,
2234                                                    struct ifla_vf_stats *stats);
2235         int                        (*set_vf_guid)(struct ib_device *device, int vf, u8 port, u64 guid,
2236                                                   int type);
2237         struct ib_wq *             (*create_wq)(struct ib_pd *pd,
2238                                                 struct ib_wq_init_attr *init_attr,
2239                                                 struct ib_udata *udata);
2240         int                        (*destroy_wq)(struct ib_wq *wq);
2241         int                        (*modify_wq)(struct ib_wq *wq,
2242                                                 struct ib_wq_attr *attr,
2243                                                 u32 wq_attr_mask,
2244                                                 struct ib_udata *udata);
2245         struct ib_rwq_ind_table *  (*create_rwq_ind_table)(struct ib_device *device,
2246                                                            struct ib_rwq_ind_table_init_attr *init_attr,
2247                                                            struct ib_udata *udata);
2248         int                        (*destroy_rwq_ind_table)(struct ib_rwq_ind_table *wq_ind_table);
2249         /**
2250          * rdma netdev operation
2251          *
2252          * Driver implementing alloc_rdma_netdev must return -EOPNOTSUPP if it
2253          * doesn't support the specified rdma netdev type.
2254          */
2255         struct net_device *(*alloc_rdma_netdev)(
2256                                         struct ib_device *device,
2257                                         u8 port_num,
2258                                         enum rdma_netdev_t type,
2259                                         const char *name,
2260                                         unsigned char name_assign_type,
2261                                         void (*setup)(struct net_device *));
2262 
2263         struct module               *owner;
2264         struct device                dev;
2265         struct kobject               *ports_parent;
2266         struct list_head             port_list;
2267 
2268         enum {
2269                 IB_DEV_UNINITIALIZED,
2270                 IB_DEV_REGISTERED,
2271                 IB_DEV_UNREGISTERED
2272         }                            reg_state;
2273 
2274         int                          uverbs_abi_ver;
2275         u64                          uverbs_cmd_mask;
2276         u64                          uverbs_ex_cmd_mask;
2277 
2278         char                         node_desc[IB_DEVICE_NODE_DESC_MAX];
2279         __be64                       node_guid;
2280         u32                          local_dma_lkey;
2281         u16                          is_switch:1;
2282         u8                           node_type;
2283         u8                           phys_port_cnt;
2284         struct ib_device_attr        attrs;
2285         struct attribute_group       *hw_stats_ag;
2286         struct rdma_hw_stats         *hw_stats;
2287 
2288 #ifdef CONFIG_CGROUP_RDMA
2289         struct rdmacg_device         cg_device;
2290 #endif
2291 
2292         /**
2293          * The following mandatory functions are used only at device
2294          * registration.  Keep functions such as these at the end of this
2295          * structure to avoid cache line misses when accessing struct ib_device
2296          * in fast paths.
2297          */
2298         int (*get_port_immutable)(struct ib_device *, u8, struct ib_port_immutable *);
2299         void (*get_dev_fw_str)(struct ib_device *, char *str, size_t str_len);
2300 };
2301 
2302 struct ib_client {
2303         char  *name;
2304         void (*add)   (struct ib_device *);
2305         void (*remove)(struct ib_device *, void *client_data);
2306 
2307         /* Returns the net_dev belonging to this ib_client and matching the
2308          * given parameters.
2309          * @dev:         An RDMA device that the net_dev use for communication.
2310          * @port:        A physical port number on the RDMA device.
2311          * @pkey:        P_Key that the net_dev uses if applicable.
2312          * @gid:         A GID that the net_dev uses to communicate.
2313          * @addr:        An IP address the net_dev is configured with.
2314          * @client_data: The device's client data set by ib_set_client_data().
2315          *
2316          * An ib_client that implements a net_dev on top of RDMA devices
2317          * (such as IP over IB) should implement this callback, allowing the
2318          * rdma_cm module to find the right net_dev for a given request.
2319          *
2320          * The caller is responsible for calling dev_put on the returned
2321          * netdev. */
2322         struct net_device *(*get_net_dev_by_params)(
2323                         struct ib_device *dev,
2324                         u8 port,
2325                         u16 pkey,
2326                         const union ib_gid *gid,
2327                         const struct sockaddr *addr,
2328                         void *client_data);
2329         struct list_head list;
2330 };
2331 
2332 struct ib_device *ib_alloc_device(size_t size);
2333 void ib_dealloc_device(struct ib_device *device);
2334 
2335 void ib_get_device_fw_str(struct ib_device *device, char *str, size_t str_len);
2336 
2337 int ib_register_device(struct ib_device *device,
2338                        int (*port_callback)(struct ib_device *,
2339                                             u8, struct kobject *));
2340 void ib_unregister_device(struct ib_device *device);
2341 
2342 int ib_register_client   (struct ib_client *client);
2343 void ib_unregister_client(struct ib_client *client);
2344 
2345 void *ib_get_client_data(struct ib_device *device, struct ib_client *client);
2346 void  ib_set_client_data(struct ib_device *device, struct ib_client *client,
2347                          void *data);
2348 
2349 static inline int ib_copy_from_udata(void *dest, struct ib_udata *udata, size_t len)
2350 {
2351         return copy_from_user(dest, udata->inbuf, len) ? -EFAULT : 0;
2352 }
2353 
2354 static inline int ib_copy_to_udata(struct ib_udata *udata, void *src, size_t len)
2355 {
2356         return copy_to_user(udata->outbuf, src, len) ? -EFAULT : 0;
2357 }
2358 
2359 static inline bool ib_is_udata_cleared(struct ib_udata *udata,
2360                                        size_t offset,
2361                                        size_t len)
2362 {
2363         const void __user *p = udata->inbuf + offset;
2364         bool ret;
2365         u8 *buf;
2366 
2367         if (len > USHRT_MAX)
2368                 return false;
2369 
2370         buf = memdup_user(p, len);
2371         if (IS_ERR(buf))
2372                 return false;
2373 
2374         ret = !memchr_inv(buf, 0, len);
2375         kfree(buf);
2376         return ret;
2377 }
2378 
2379 /**
2380  * ib_modify_qp_is_ok - Check that the supplied attribute mask
2381  * contains all required attributes and no attributes not allowed for
2382  * the given QP state transition.
2383  * @cur_state: Current QP state
2384  * @next_state: Next QP state
2385  * @type: QP type
2386  * @mask: Mask of supplied QP attributes
2387  * @ll : link layer of port
2388  *
2389  * This function is a helper function that a low-level driver's
2390  * modify_qp method can use to validate the consumer's input.  It
2391  * checks that cur_state and next_state are valid QP states, that a
2392  * transition from cur_state to next_state is allowed by the IB spec,
2393  * and that the attribute mask supplied is allowed for the transition.
2394  */
2395 int ib_modify_qp_is_ok(enum ib_qp_state cur_state, enum ib_qp_state next_state,
2396                        enum ib_qp_type type, enum ib_qp_attr_mask mask,
2397                        enum rdma_link_layer ll);
2398 
2399 int ib_register_event_handler  (struct ib_event_handler *event_handler);
2400 int ib_unregister_event_handler(struct ib_event_handler *event_handler);
2401 void ib_dispatch_event(struct ib_event *event);
2402 
2403 int ib_query_port(struct ib_device *device,
2404                   u8 port_num, struct ib_port_attr *port_attr);
2405 
2406 enum rdma_link_layer rdma_port_get_link_layer(struct ib_device *device,
2407                                                u8 port_num);
2408 
2409 /**
2410  * rdma_cap_ib_switch - Check if the device is IB switch
2411  * @device: Device to check
2412  *
2413  * Device driver is responsible for setting is_switch bit on
2414  * in ib_device structure at init time.
2415  *
2416  * Return: true if the device is IB switch.
2417  */
2418 static inline bool rdma_cap_ib_switch(const struct ib_device *device)
2419 {
2420         return device->is_switch;
2421 }
2422 
2423 /**
2424  * rdma_start_port - Return the first valid port number for the device
2425  * specified
2426  *
2427  * @device: Device to be checked
2428  *
2429  * Return start port number
2430  */
2431 static inline u8 rdma_start_port(const struct ib_device *device)
2432 {
2433         return rdma_cap_ib_switch(device) ? 0 : 1;
2434 }
2435 
2436 /**
2437  * rdma_end_port - Return the last valid port number for the device
2438  * specified
2439  *
2440  * @device: Device to be checked
2441  *
2442  * Return last port number
2443  */
2444 static inline u8 rdma_end_port(const struct ib_device *device)
2445 {
2446         return rdma_cap_ib_switch(device) ? 0 : device->phys_port_cnt;
2447 }
2448 
2449 static inline int rdma_is_port_valid(const struct ib_device *device,
2450                                      unsigned int port)
2451 {
2452         return (port >= rdma_start_port(device) &&
2453                 port <= rdma_end_port(device));
2454 }
2455 
2456 static inline bool rdma_protocol_ib(const struct ib_device *device, u8 port_num)
2457 {
2458         return device->port_immutable[port_num].core_cap_flags & RDMA_CORE_CAP_PROT_IB;
2459 }
2460 
2461 static inline bool rdma_protocol_roce(const struct ib_device *device, u8 port_num)
2462 {
2463         return device->port_immutable[port_num].core_cap_flags &
2464                 (RDMA_CORE_CAP_PROT_ROCE | RDMA_CORE_CAP_PROT_ROCE_UDP_ENCAP);
2465 }
2466 
2467 static inline bool rdma_protocol_roce_udp_encap(const struct ib_device *device, u8 port_num)
2468 {
2469         return device->port_immutable[port_num].core_cap_flags & RDMA_CORE_CAP_PROT_ROCE_UDP_ENCAP;
2470 }
2471 
2472 static inline bool rdma_protocol_roce_eth_encap(const struct ib_device *device, u8 port_num)
2473 {
2474         return device->port_immutable[port_num].core_cap_flags & RDMA_CORE_CAP_PROT_ROCE;
2475 }
2476 
2477 static inline bool rdma_protocol_iwarp(const struct ib_device *device, u8 port_num)
2478 {
2479         return device->port_immutable[port_num].core_cap_flags & RDMA_CORE_CAP_PROT_IWARP;
2480 }
2481 
2482 static inline bool rdma_ib_or_roce(const struct ib_device *device, u8 port_num)
2483 {
2484         return rdma_protocol_ib(device, port_num) ||
2485                 rdma_protocol_roce(device, port_num);
2486 }
2487 
2488 static inline bool rdma_protocol_raw_packet(const struct ib_device *device, u8 port_num)
2489 {
2490         return device->port_immutable[port_num].core_cap_flags & RDMA_CORE_CAP_PROT_RAW_PACKET;
2491 }
2492 
2493 static inline bool rdma_protocol_usnic(const struct ib_device *device, u8 port_num)
2494 {
2495         return device->port_immutable[port_num].core_cap_flags & RDMA_CORE_CAP_PROT_USNIC;
2496 }
2497 
2498 /**
2499  * rdma_cap_ib_mad - Check if the port of a device supports Infiniband
2500  * Management Datagrams.
2501  * @device: Device to check
2502  * @port_num: Port number to check
2503  *
2504  * Management Datagrams (MAD) are a required part of the InfiniBand
2505  * specification and are supported on all InfiniBand devices.  A slightly
2506  * extended version are also supported on OPA interfaces.
2507  *
2508  * Return: true if the port supports sending/receiving of MAD packets.
2509  */
2510 static inline bool rdma_cap_ib_mad(const struct ib_device *device, u8 port_num)
2511 {
2512         return device->port_immutable[port_num].core_cap_flags & RDMA_CORE_CAP_IB_MAD;
2513 }
2514 
2515 /**
2516  * rdma_cap_opa_mad - Check if the port of device provides support for OPA
2517  * Management Datagrams.
2518  * @device: Device to check
2519  * @port_num: Port number to check
2520  *
2521  * Intel OmniPath devices extend and/or replace the InfiniBand Management
2522  * datagrams with their own versions.  These OPA MADs share many but not all of
2523  * the characteristics of InfiniBand MADs.
2524  *
2525  * OPA MADs differ in the following ways:
2526  *
2527  *    1) MADs are variable size up to 2K
2528  *       IBTA defined MADs remain fixed at 256 bytes
2529  *    2) OPA SMPs must carry valid PKeys
2530  *    3) OPA SMP packets are a different format
2531  *
2532  * Return: true if the port supports OPA MAD packet formats.
2533  */
2534 static inline bool rdma_cap_opa_mad(struct ib_device *device, u8 port_num)
2535 {
2536         return (device->port_immutable[port_num].core_cap_flags & RDMA_CORE_CAP_OPA_MAD)
2537                 == RDMA_CORE_CAP_OPA_MAD;
2538 }
2539 
2540 /**
2541  * rdma_cap_ib_smi - Check if the port of a device provides an Infiniband
2542  * Subnet Management Agent (SMA) on the Subnet Management Interface (SMI).
2543  * @device: Device to check
2544  * @port_num: Port number to check
2545  *
2546  * Each InfiniBand node is required to provide a Subnet Management Agent
2547  * that the subnet manager can access.  Prior to the fabric being fully
2548  * configured by the subnet manager, the SMA is accessed via a well known
2549  * interface called the Subnet Management Interface (SMI).  This interface
2550  * uses directed route packets to communicate with the SM to get around the
2551  * chicken and egg problem of the SM needing to know what's on the fabric
2552  * in order to configure the fabric, and needing to configure the fabric in
2553  * order to send packets to the devices on the fabric.  These directed
2554  * route packets do not need the fabric fully configured in order to reach
2555  * their destination.  The SMI is the only method allowed to send
2556  * directed route packets on an InfiniBand fabric.
2557  *
2558  * Return: true if the port provides an SMI.
2559  */
2560 static inline bool rdma_cap_ib_smi(const struct ib_device *device, u8 port_num)
2561 {
2562         return device->port_immutable[port_num].core_cap_flags & RDMA_CORE_CAP_IB_SMI;
2563 }
2564 
2565 /**
2566  * rdma_cap_ib_cm - Check if the port of device has the capability Infiniband
2567  * Communication Manager.
2568  * @device: Device to check
2569  * @port_num: Port number to check
2570  *
2571  * The InfiniBand Communication Manager is one of many pre-defined General
2572  * Service Agents (GSA) that are accessed via the General Service
2573  * Interface (GSI).  It's role is to facilitate establishment of connections
2574  * between nodes as well as other management related tasks for established
2575  * connections.
2576  *
2577  * Return: true if the port supports an IB CM (this does not guarantee that
2578  * a CM is actually running however).
2579  */
2580 static inline bool rdma_cap_ib_cm(const struct ib_device *device, u8 port_num)
2581 {
2582         return device->port_immutable[port_num].core_cap_flags & RDMA_CORE_CAP_IB_CM;
2583 }
2584 
2585 /**
2586  * rdma_cap_iw_cm - Check if the port of device has the capability IWARP
2587  * Communication Manager.
2588  * @device: Device to check
2589  * @port_num: Port number to check
2590  *
2591  * Similar to above, but specific to iWARP connections which have a different
2592  * managment protocol than InfiniBand.
2593  *
2594  * Return: true if the port supports an iWARP CM (this does not guarantee that
2595  * a CM is actually running however).
2596  */
2597 static inline bool rdma_cap_iw_cm(const struct ib_device *device, u8 port_num)
2598 {
2599         return device->port_immutable[port_num].core_cap_flags & RDMA_CORE_CAP_IW_CM;
2600 }
2601 
2602 /**
2603  * rdma_cap_ib_sa - Check if the port of device has the capability Infiniband
2604  * Subnet Administration.
2605  * @device: Device to check
2606  * @port_num: Port number to check
2607  *
2608  * An InfiniBand Subnet Administration (SA) service is a pre-defined General
2609  * Service Agent (GSA) provided by the Subnet Manager (SM).  On InfiniBand
2610  * fabrics, devices should resolve routes to other hosts by contacting the
2611  * SA to query the proper route.
2612  *
2613  * Return: true if the port should act as a client to the fabric Subnet
2614  * Administration interface.  This does not imply that the SA service is
2615  * running locally.
2616  */
2617 static inline bool rdma_cap_ib_sa(const struct ib_device *device, u8 port_num)
2618 {
2619         return device->port_immutable[port_num].core_cap_flags & RDMA_CORE_CAP_IB_SA;
2620 }
2621 
2622 /**
2623  * rdma_cap_ib_mcast - Check if the port of device has the capability Infiniband
2624  * Multicast.
2625  * @device: Device to check
2626  * @port_num: Port number to check
2627  *
2628  * InfiniBand multicast registration is more complex than normal IPv4 or
2629  * IPv6 multicast registration.  Each Host Channel Adapter must register
2630  * with the Subnet Manager when it wishes to join a multicast group.  It
2631  * should do so only once regardless of how many queue pairs it subscribes
2632  * to this group.  And it should leave the group only after all queue pairs
2633  * attached to the group have been detached.
2634  *
2635  * Return: true if the port must undertake the additional adminstrative
2636  * overhead of registering/unregistering with the SM and tracking of the
2637  * total number of queue pairs attached to the multicast group.
2638  */
2639 static inline bool rdma_cap_ib_mcast(const struct ib_device *device, u8 port_num)
2640 {
2641         return rdma_cap_ib_sa(device, port_num);
2642 }
2643 
2644 /**
2645  * rdma_cap_af_ib - Check if the port of device has the capability
2646  * Native Infiniband Address.
2647  * @device: Device to check
2648  * @port_num: Port number to check
2649  *
2650  * InfiniBand addressing uses a port's GUID + Subnet Prefix to make a default
2651  * GID.  RoCE uses a different mechanism, but still generates a GID via
2652  * a prescribed mechanism and port specific data.
2653  *
2654  * Return: true if the port uses a GID address to identify devices on the
2655  * network.
2656  */
2657 static inline bool rdma_cap_af_ib(const struct ib_device *device, u8 port_num)
2658 {
2659         return device->port_immutable[port_num].core_cap_flags & RDMA_CORE_CAP_AF_IB;
2660 }
2661 
2662 /**
2663  * rdma_cap_eth_ah - Check if the port of device has the capability
2664  * Ethernet Address Handle.
2665  * @device: Device to check
2666  * @port_num: Port number to check
2667  *
2668  * RoCE is InfiniBand over Ethernet, and it uses a well defined technique
2669  * to fabricate GIDs over Ethernet/IP specific addresses native to the
2670  * port.  Normally, packet headers are generated by the sending host
2671  * adapter, but when sending connectionless datagrams, we must manually
2672  * inject the proper headers for the fabric we are communicating over.
2673  *
2674  * Return: true if we are running as a RoCE port and must force the
2675  * addition of a Global Route Header built from our Ethernet Address
2676  * Handle into our header list for connectionless packets.
2677  */
2678 static inline bool rdma_cap_eth_ah(const struct ib_device *device, u8 port_num)
2679 {
2680         return device->port_immutable[port_num].core_cap_flags & RDMA_CORE_CAP_ETH_AH;
2681 }
2682 
2683 /**
2684  * rdma_cap_opa_ah - Check if the port of device supports
2685  * OPA Address handles
2686  * @device: Device to check
2687  * @port_num: Port number to check
2688  *
2689  * Return: true if we are running on an OPA device which supports
2690  * the extended OPA addressing.
2691  */
2692 static inline bool rdma_cap_opa_ah(struct ib_device *device, u8 port_num)
2693 {
2694         return (device->port_immutable[port_num].core_cap_flags &
2695                 RDMA_CORE_CAP_OPA_AH) == RDMA_CORE_CAP_OPA_AH;
2696 }
2697 
2698 /**
2699  * rdma_max_mad_size - Return the max MAD size required by this RDMA Port.
2700  *
2701  * @device: Device
2702  * @port_num: Port number
2703  *
2704  * This MAD size includes the MAD headers and MAD payload.  No other headers
2705  * are included.
2706  *
2707  * Return the max MAD size required by the Port.  Will return 0 if the port
2708  * does not support MADs
2709  */
2710 static inline size_t rdma_max_mad_size(const struct ib_device *device, u8 port_num)
2711 {
2712         return device->port_immutable[port_num].max_mad_size;
2713 }
2714 
2715 /**
2716  * rdma_cap_roce_gid_table - Check if the port of device uses roce_gid_table
2717  * @device: Device to check
2718  * @port_num: Port number to check
2719  *
2720  * RoCE GID table mechanism manages the various GIDs for a device.
2721  *
2722  * NOTE: if allocating the port's GID table has failed, this call will still
2723  * return true, but any RoCE GID table API will fail.
2724  *
2725  * Return: true if the port uses RoCE GID table mechanism in order to manage
2726  * its GIDs.
2727  */
2728 static inline bool rdma_cap_roce_gid_table(const struct ib_device *device,
2729                                            u8 port_num)
2730 {
2731         return rdma_protocol_roce(device, port_num) &&
2732                 device->add_gid && device->del_gid;
2733 }
2734 
2735 /*
2736  * Check if the device supports READ W/ INVALIDATE.
2737  */
2738 static inline bool rdma_cap_read_inv(struct ib_device *dev, u32 port_num)
2739 {
2740         /*
2741          * iWarp drivers must support READ W/ INVALIDATE.  No other protocol
2742          * has support for it yet.
2743          */
2744         return rdma_protocol_iwarp(dev, port_num);
2745 }
2746 
2747 int ib_query_gid(struct ib_device *device,
2748                  u8 port_num, int index, union ib_gid *gid,
2749                  struct ib_gid_attr *attr);
2750 
2751 int ib_set_vf_link_state(struct ib_device *device, int vf, u8 port,
2752                          int state);
2753 int ib_get_vf_config(struct ib_device *device, int vf, u8 port,
2754                      struct ifla_vf_info *info);
2755 int ib_get_vf_stats(struct ib_device *device, int vf, u8 port,
2756                     struct ifla_vf_stats *stats);
2757 int ib_set_vf_guid(struct ib_device *device, int vf, u8 port, u64 guid,
2758                    int type);
2759 
2760 int ib_query_pkey(struct ib_device *device,
2761                   u8 port_num, u16 index, u16 *pkey);
2762 
2763 int ib_modify_device(struct ib_device *device,
2764                      int device_modify_mask,
2765                      struct ib_device_modify *device_modify);
2766 
2767 int ib_modify_port(struct ib_device *device,
2768                    u8 port_num, int port_modify_mask,
2769                    struct ib_port_modify *port_modify);
2770 
2771 int ib_find_gid(struct ib_device *device, union ib_gid *gid,
2772                 enum ib_gid_type gid_type, struct net_device *ndev,
2773                 u8 *port_num, u16 *index);
2774 
2775 int ib_find_pkey(struct ib_device *device,
2776                  u8 port_num, u16 pkey, u16 *index);
2777 
2778 enum ib_pd_flags {
2779         /*
2780          * Create a memory registration for all memory in the system and place
2781          * the rkey for it into pd->unsafe_global_rkey.  This can be used by
2782          * ULPs to avoid the overhead of dynamic MRs.
2783          *
2784          * This flag is generally considered unsafe and must only be used in
2785          * extremly trusted environments.  Every use of it will log a warning
2786          * in the kernel log.
2787          */
2788         IB_PD_UNSAFE_GLOBAL_RKEY        = 0x01,
2789 };
2790 
2791 struct ib_pd *__ib_alloc_pd(struct ib_device *device, unsigned int flags,
2792                 const char *caller);
2793 #define ib_alloc_pd(device, flags) \
2794         __ib_alloc_pd((device), (flags), __func__)
2795 void ib_dealloc_pd(struct ib_pd *pd);
2796 
2797 /**
2798  * rdma_create_ah - Creates an address handle for the given address vector.
2799  * @pd: The protection domain associated with the address handle.
2800  * @ah_attr: The attributes of the address vector.
2801  *
2802  * The address handle is used to reference a local or global destination
2803  * in all UD QP post sends.
2804  */
2805 struct ib_ah *rdma_create_ah(struct ib_pd *pd, struct rdma_ah_attr *ah_attr);
2806 
2807 /**
2808  * ib_get_gids_from_rdma_hdr - Get sgid and dgid from GRH or IPv4 header
2809  *   work completion.
2810  * @hdr: the L3 header to parse
2811  * @net_type: type of header to parse
2812  * @sgid: place to store source gid
2813  * @dgid: place to store destination gid
2814  */
2815 int ib_get_gids_from_rdma_hdr(const union rdma_network_hdr *hdr,
2816                               enum rdma_network_type net_type,
2817                               union ib_gid *sgid, union ib_gid *dgid);
2818 
2819 /**
2820  * ib_get_rdma_header_version - Get the header version
2821  * @hdr: the L3 header to parse
2822  */
2823 int ib_get_rdma_header_version(const union rdma_network_hdr *hdr);
2824 
2825 /**
2826  * ib_init_ah_from_wc - Initializes address handle attributes from a
2827  *   work completion.
2828  * @device: Device on which the received message arrived.
2829  * @port_num: Port on which the received message arrived.
2830  * @wc: Work completion associated with the received message.
2831  * @grh: References the received global route header.  This parameter is
2832  *   ignored unless the work completion indicates that the GRH is valid.
2833  * @ah_attr: Returned attributes that can be used when creating an address
2834  *   handle for replying to the message.
2835  */
2836 int ib_init_ah_from_wc(struct ib_device *device, u8 port_num,
2837                        const struct ib_wc *wc, const struct ib_grh *grh,
2838                        struct rdma_ah_attr *ah_attr);
2839 
2840 /**
2841  * ib_create_ah_from_wc - Creates an address handle associated with the
2842  *   sender of the specified work completion.
2843  * @pd: The protection domain associated with the address handle.
2844  * @wc: Work completion information associated with a received message.
2845  * @grh: References the received global route header.  This parameter is
2846  *   ignored unless the work completion indicates that the GRH is valid.
2847  * @port_num: The outbound port number to associate with the address.
2848  *
2849  * The address handle is used to reference a local or global destination
2850  * in all UD QP post sends.
2851  */
2852 struct ib_ah *ib_create_ah_from_wc(struct ib_pd *pd, const struct ib_wc *wc,
2853                                    const struct ib_grh *grh, u8 port_num);
2854 
2855 /**
2856  * rdma_modify_ah - Modifies the address vector associated with an address
2857  *   handle.
2858  * @ah: The address handle to modify.
2859  * @ah_attr: The new address vector attributes to associate with the
2860  *   address handle.
2861  */
2862 int rdma_modify_ah(struct ib_ah *ah, struct rdma_ah_attr *ah_attr);
2863 
2864 /**
2865  * rdma_query_ah - Queries the address vector associated with an address
2866  *   handle.
2867  * @ah: The address handle to query.
2868  * @ah_attr: The address vector attributes associated with the address
2869  *   handle.
2870  */
2871 int rdma_query_ah(struct ib_ah *ah, struct rdma_ah_attr *ah_attr);
2872 
2873 /**
2874  * rdma_destroy_ah - Destroys an address handle.
2875  * @ah: The address handle to destroy.
2876  */
2877 int rdma_destroy_ah(struct ib_ah *ah);
2878 
2879 /**
2880  * ib_create_srq - Creates a SRQ associated with the specified protection
2881  *   domain.
2882  * @pd: The protection domain associated with the SRQ.
2883  * @srq_init_attr: A list of initial attributes required to create the
2884  *   SRQ.  If SRQ creation succeeds, then the attributes are updated to
2885  *   the actual capabilities of the created SRQ.
2886  *
2887  * srq_attr->max_wr and srq_attr->max_sge are read the determine the
2888  * requested size of the SRQ, and set to the actual values allocated
2889  * on return.  If ib_create_srq() succeeds, then max_wr and max_sge
2890  * will always be at least as large as the requested values.
2891  */
2892 struct ib_srq *ib_create_srq(struct ib_pd *pd,
2893                              struct ib_srq_init_attr *srq_init_attr);
2894 
2895 /**
2896  * ib_modify_srq - Modifies the attributes for the specified SRQ.
2897  * @srq: The SRQ to modify.
2898  * @srq_attr: On input, specifies the SRQ attributes to modify.  On output,
2899  *   the current values of selected SRQ attributes are returned.
2900  * @srq_attr_mask: A bit-mask used to specify which attributes of the SRQ
2901  *   are being modified.
2902  *
2903  * The mask may contain IB_SRQ_MAX_WR to resize the SRQ and/or
2904  * IB_SRQ_LIMIT to set the SRQ's limit and request notification when
2905  * the number of receives queued drops below the limit.
2906  */
2907 int ib_modify_srq(struct ib_srq *srq,
2908                   struct ib_srq_attr *srq_attr,
2909                   enum ib_srq_attr_mask srq_attr_mask);
2910 
2911 /**
2912  * ib_query_srq - Returns the attribute list and current values for the
2913  *   specified SRQ.
2914  * @srq: The SRQ to query.
2915  * @srq_attr: The attributes of the specified SRQ.
2916  */
2917 int ib_query_srq(struct ib_srq *srq,
2918                  struct ib_srq_attr *srq_attr);
2919 
2920 /**
2921  * ib_destroy_srq - Destroys the specified SRQ.
2922  * @srq: The SRQ to destroy.
2923  */
2924 int ib_destroy_srq(struct ib_srq *srq);
2925 
2926 /**
2927  * ib_post_srq_recv - Posts a list of work requests to the specified SRQ.
2928  * @srq: The SRQ to post the work request on.
2929  * @recv_wr: A list of work requests to post on the receive queue.
2930  * @bad_recv_wr: On an immediate failure, this parameter will reference
2931  *   the work request that failed to be posted on the QP.
2932  */
2933 static inline int ib_post_srq_recv(struct ib_srq *srq,
2934                                    struct ib_recv_wr *recv_wr,
2935                                    struct ib_recv_wr **bad_recv_wr)
2936 {
2937         return srq->device->post_srq_recv(srq, recv_wr, bad_recv_wr);
2938 }
2939 
2940 /**
2941  * ib_create_qp - Creates a QP associated with the specified protection
2942  *   domain.
2943  * @pd: The protection domain associated with the QP.
2944  * @qp_init_attr: A list of initial attributes required to create the
2945  *   QP.  If QP creation succeeds, then the attributes are updated to
2946  *   the actual capabilities of the created QP.
2947  */
2948 struct ib_qp *ib_create_qp(struct ib_pd *pd,
2949                            struct ib_qp_init_attr *qp_init_attr);
2950 
2951 /**
2952  * ib_modify_qp_with_udata - Modifies the attributes for the specified QP.
2953  * @qp: The QP to modify.
2954  * @attr: On input, specifies the QP attributes to modify.  On output,
2955  *   the current values of selected QP attributes are returned.
2956  * @attr_mask: A bit-mask used to specify which attributes of the QP
2957  *   are being modified.
2958  * @udata: pointer to user's input output buffer information
2959  *   are being modified.
2960  * It returns 0 on success and returns appropriate error code on error.
2961  */
2962 int ib_modify_qp_with_udata(struct ib_qp *qp,
2963                             struct ib_qp_attr *attr,
2964                             int attr_mask,
2965                             struct ib_udata *udata);
2966 
2967 /**
2968  * ib_modify_qp - Modifies the attributes for the specified QP and then
2969  *   transitions the QP to the given state.
2970  * @qp: The QP to modify.
2971  * @qp_attr: On input, specifies the QP attributes to modify.  On output,
2972  *   the current values of selected QP attributes are returned.
2973  * @qp_attr_mask: A bit-mask used to specify which attributes of the QP
2974  *   are being modified.
2975  */
2976 int ib_modify_qp(struct ib_qp *qp,
2977                  struct ib_qp_attr *qp_attr,
2978                  int qp_attr_mask);
2979 
2980 /**
2981  * ib_query_qp - Returns the attribute list and current values for the
2982  *   specified QP.
2983  * @qp: The QP to query.
2984  * @qp_attr: The attributes of the specified QP.
2985  * @qp_attr_mask: A bit-mask used to select specific attributes to query.
2986  * @qp_init_attr: Additional attributes of the selected QP.
2987  *
2988  * The qp_attr_mask may be used to limit the query to gathering only the
2989  * selected attributes.
2990  */
2991 int ib_query_qp(struct ib_qp *qp,
2992                 struct ib_qp_attr *qp_attr,
2993                 int qp_attr_mask,
2994                 struct ib_qp_init_attr *qp_init_attr);
2995 
2996 /**
2997  * ib_destroy_qp - Destroys the specified QP.
2998  * @qp: The QP to destroy.
2999  */
3000 int ib_destroy_qp(struct ib_qp *qp);
3001 
3002 /**
3003  * ib_open_qp - Obtain a reference to an existing sharable QP.
3004  * @xrcd - XRC domain
3005  * @qp_open_attr: Attributes identifying the QP to open.
3006  *
3007  * Returns a reference to a sharable QP.
3008  */
3009 struct ib_qp *ib_open_qp(struct ib_xrcd *xrcd,
3010                          struct ib_qp_open_attr *qp_open_attr);
3011 
3012 /**
3013  * ib_close_qp - Release an external reference to a QP.
3014  * @qp: The QP handle to release
3015  *
3016  * The opened QP handle is released by the caller.  The underlying
3017  * shared QP is not destroyed until all internal references are released.
3018  */
3019 int ib_close_qp(struct ib_qp *qp);
3020 
3021 /**
3022  * ib_post_send - Posts a list of work requests to the send queue of
3023  *   the specified QP.
3024  * @qp: The QP to post the work request on.
3025  * @send_wr: A list of work requests to post on the send queue.
3026  * @bad_send_wr: On an immediate failure, this parameter will reference
3027  *   the work request that failed to be posted on the QP.
3028  *
3029  * While IBA Vol. 1 section 11.4.1.1 specifies that if an immediate
3030  * error is returned, the QP state shall not be affected,
3031  * ib_post_send() will return an immediate error after queueing any
3032  * earlier work requests in the list.
3033  */
3034 static inline int ib_post_send(struct ib_qp *qp,
3035                                struct ib_send_wr *send_wr,
3036                                struct ib_send_wr **bad_send_wr)
3037 {
3038         return qp->device->post_send(qp, send_wr, bad_send_wr);
3039 }
3040 
3041 /**
3042  * ib_post_recv - Posts a list of work requests to the receive queue of
3043  *   the specified QP.
3044  * @qp: The QP to post the work request on.
3045  * @recv_wr: A list of work requests to post on the receive queue.
3046  * @bad_recv_wr: On an immediate failure, this parameter will reference
3047  *   the work request that failed to be posted on the QP.
3048  */
3049 static inline int ib_post_recv(struct ib_qp *qp,
3050                                struct ib_recv_wr *recv_wr,
3051                                struct ib_recv_wr **bad_recv_wr)
3052 {
3053         return qp->device->post_recv(qp, recv_wr, bad_recv_wr);
3054 }
3055 
3056 struct ib_cq *ib_alloc_cq(struct ib_device *dev, void *private,
3057                 int nr_cqe, int comp_vector, enum ib_poll_context poll_ctx);
3058 void ib_free_cq(struct ib_cq *cq);
3059 int ib_process_cq_direct(struct ib_cq *cq, int budget);
3060 
3061 /**
3062  * ib_create_cq - Creates a CQ on the specified device.
3063  * @device: The device on which to create the CQ.
3064  * @comp_handler: A user-specified callback that is invoked when a
3065  *   completion event occurs on the CQ.
3066  * @event_handler: A user-specified callback that is invoked when an
3067  *   asynchronous event not associated with a completion occurs on the CQ.
3068  * @cq_context: Context associated with the CQ returned to the user via
3069  *   the associated completion and event handlers.
3070  * @cq_attr: The attributes the CQ should be created upon.
3071  *
3072  * Users can examine the cq structure to determine the actual CQ size.
3073  */
3074 struct ib_cq *ib_create_cq(struct ib_device *device,
3075                            ib_comp_handler comp_handler,
3076                            void (*event_handler)(struct ib_event *, void *),
3077                            void *cq_context,
3078                            const struct ib_cq_init_attr *cq_attr);
3079 
3080 /**
3081  * ib_resize_cq - Modifies the capacity of the CQ.
3082  * @cq: The CQ to resize.
3083  * @cqe: The minimum size of the CQ.
3084  *
3085  * Users can examine the cq structure to determine the actual CQ size.
3086  */
3087 int ib_resize_cq(struct ib_cq *cq, int cqe);
3088 
3089 /**
3090  * ib_modify_cq - Modifies moderation params of the CQ
3091  * @cq: The CQ to modify.
3092  * @cq_count: number of CQEs that will trigger an event
3093  * @cq_period: max period of time in usec before triggering an event
3094  *
3095  */
3096 int ib_modify_cq(struct ib_cq *cq, u16 cq_count, u16 cq_period);
3097 
3098 /**
3099  * ib_destroy_cq - Destroys the specified CQ.
3100  * @cq: The CQ to destroy.
3101  */
3102 int ib_destroy_cq(struct ib_cq *cq);
3103 
3104 /**
3105  * ib_poll_cq - poll a CQ for completion(s)
3106  * @cq:the CQ being polled
3107  * @num_entries:maximum number of completions to return
3108  * @wc:array of at least @num_entries &struct ib_wc where completions
3109  *   will be returned
3110  *
3111  * Poll a CQ for (possibly multiple) completions.  If the return value
3112  * is < 0, an error occurred.  If the return value is >= 0, it is the
3113  * number of completions returned.  If the return value is
3114  * non-negative and < num_entries, then the CQ was emptied.
3115  */
3116 static inline int ib_poll_cq(struct ib_cq *cq, int num_entries,
3117                              struct ib_wc *wc)
3118 {
3119         return cq->device->poll_cq(cq, num_entries, wc);
3120 }
3121 
3122 /**
3123  * ib_peek_cq - Returns the number of unreaped completions currently
3124  *   on the specified CQ.
3125  * @cq: The CQ to peek.
3126  * @wc_cnt: A minimum number of unreaped completions to check for.
3127  *
3128  * If the number of unreaped completions is greater than or equal to wc_cnt,
3129  * this function returns wc_cnt, otherwise, it returns the actual number of
3130  * unreaped completions.
3131  */
3132 int ib_peek_cq(struct ib_cq *cq, int wc_cnt);
3133 
3134 /**
3135  * ib_req_notify_cq - Request completion notification on a CQ.
3136  * @cq: The CQ to generate an event for.
3137  * @flags:
3138  *   Must contain exactly one of %IB_CQ_SOLICITED or %IB_CQ_NEXT_COMP
3139  *   to request an event on the next solicited event or next work
3140  *   completion at any type, respectively. %IB_CQ_REPORT_MISSED_EVENTS
3141  *   may also be |ed in to request a hint about missed events, as
3142  *   described below.
3143  *
3144  * Return Value:
3145  *    < 0 means an error occurred while requesting notification
3146  *   == 0 means notification was requested successfully, and if
3147  *        IB_CQ_REPORT_MISSED_EVENTS was passed in, then no events
3148  *        were missed and it is safe to wait for another event.  In
3149  *        this case is it guaranteed that any work completions added
3150  *        to the CQ since the last CQ poll will trigger a completion
3151  *        notification event.
3152  *    > 0 is only returned if IB_CQ_REPORT_MISSED_EVENTS was passed
3153  *        in.  It means that the consumer must poll the CQ again to
3154  *        make sure it is empty to avoid missing an event because of a
3155  *        race between requesting notification and an entry being
3156  *        added to the CQ.  This return value means it is possible
3157  *        (but not guaranteed) that a work completion has been added
3158  *        to the CQ since the last poll without triggering a
3159  *        completion notification event.
3160  */
3161 static inline int ib_req_notify_cq(struct ib_cq *cq,
3162                                    enum ib_cq_notify_flags flags)
3163 {
3164         return cq->device->req_notify_cq(cq, flags);
3165 }
3166 
3167 /**
3168  * ib_req_ncomp_notif - Request completion notification when there are
3169  *   at least the specified number of unreaped completions on the CQ.
3170  * @cq: The CQ to generate an event for.
3171  * @wc_cnt: The number of unreaped completions that should be on the
3172  *   CQ before an event is generated.
3173  */
3174 static inline int ib_req_ncomp_notif(struct ib_cq *cq, int wc_cnt)
3175 {
3176         return cq->device->req_ncomp_notif ?
3177                 cq->device->req_ncomp_notif(cq, wc_cnt) :
3178                 -ENOSYS;
3179 }
3180 
3181 /**
3182  * ib_dma_mapping_error - check a DMA addr for error
3183  * @dev: The device for which the dma_addr was created
3184  * @dma_addr: The DMA address to check
3185  */
3186 static inline int ib_dma_mapping_error(struct ib_device *dev, u64 dma_addr)
3187 {
3188         return dma_mapping_error(dev->dma_device, dma_addr);
3189 }
3190 
3191 /**
3192  * ib_dma_map_single - Map a kernel virtual address to DMA address
3193  * @dev: The device for which the dma_addr is to be created
3194  * @cpu_addr: The kernel virtual address
3195  * @size: The size of the region in bytes
3196  * @direction: The direction of the DMA
3197  */
3198 static inline u64 ib_dma_map_single(struct ib_device *dev,
3199                                     void *cpu_addr, size_t size,
3200                                     enum dma_data_direction direction)
3201 {
3202         return dma_map_single(dev->dma_device, cpu_addr, size, direction);
3203 }
3204 
3205 /**
3206  * ib_dma_unmap_single - Destroy a mapping created by ib_dma_map_single()
3207  * @dev: The device for which the DMA address was created
3208  * @addr: The DMA address
3209  * @size: The size of the region in bytes
3210  * @direction: The direction of the DMA
3211  */
3212 static inline void ib_dma_unmap_single(struct ib_device *dev,
3213                                        u64 addr, size_t size,
3214                                        enum dma_data_direction direction)
3215 {
3216         dma_unmap_single(dev->dma_device, addr, size, direction);
3217 }
3218 
3219 /**
3220  * ib_dma_map_page - Map a physical page to DMA address
3221  * @dev: The device for which the dma_addr is to be created
3222  * @page: The page to be mapped
3223  * @offset: The offset within the page
3224  * @size: The size of the region in bytes
3225  * @direction: The direction of the DMA
3226  */
3227 static inline u64 ib_dma_map_page(struct ib_device *dev,
3228                                   struct page *page,
3229                                   unsigned long offset,
3230                                   size_t size,
3231                                          enum dma_data_direction direction)
3232 {
3233         return dma_map_page(dev->dma_device, page, offset, size, direction);
3234 }
3235 
3236 /**
3237  * ib_dma_unmap_page - Destroy a mapping created by ib_dma_map_page()
3238  * @dev: The device for which the DMA address was created
3239  * @addr: The DMA address
3240  * @size: The size of the region in bytes
3241  * @direction: The direction of the DMA
3242  */
3243 static inline void ib_dma_unmap_page(struct ib_device *dev,
3244                                      u64 addr, size_t size,
3245                                      enum dma_data_direction direction)
3246 {
3247         dma_unmap_page(dev->dma_device, addr, size, direction);
3248 }
3249 
3250 /**
3251  * ib_dma_map_sg - Map a scatter/gather list to DMA addresses
3252  * @dev: The device for which the DMA addresses are to be created
3253  * @sg: The array of scatter/gather entries
3254  * @nents: The number of scatter/gather entries
3255  * @direction: The direction of the DMA
3256  */
3257 static inline int ib_dma_map_sg(struct ib_device *dev,
3258                                 struct scatterlist *sg, int nents,
3259                                 enum dma_data_direction direction)
3260 {
3261         return dma_map_sg(dev->dma_device, sg, nents, direction);
3262 }
3263 
3264 /**
3265  * ib_dma_unmap_sg - Unmap a scatter/gather list of DMA addresses
3266  * @dev: The device for which the DMA addresses were created
3267  * @sg: The array of scatter/gather entries
3268  * @nents: The number of scatter/gather entries
3269  * @direction: The direction of the DMA
3270  */
3271 static inline void ib_dma_unmap_sg(struct ib_device *dev,
3272                                    struct scatterlist *sg, int nents,
3273                                    enum dma_data_direction direction)
3274 {
3275         dma_unmap_sg(dev->dma_device, sg, nents, direction);
3276 }
3277 
3278 static inline int ib_dma_map_sg_attrs(struct ib_device *dev,
3279                                       struct scatterlist *sg, int nents,
3280                                       enum dma_data_direction direction,
3281                                       unsigned long dma_attrs)
3282 {
3283         return dma_map_sg_attrs(dev->dma_device, sg, nents, direction,
3284                                 dma_attrs);
3285 }
3286 
3287 static inline void ib_dma_unmap_sg_attrs(struct ib_device *dev,
3288                                          struct scatterlist *sg, int nents,
3289                                          enum dma_data_direction direction,
3290                                          unsigned long dma_attrs)
3291 {
3292         dma_unmap_sg_attrs(dev->dma_device, sg, nents, direction, dma_attrs);
3293 }
3294 /**
3295  * ib_sg_dma_address - Return the DMA address from a scatter/gather entry
3296  * @dev: The device for which the DMA addresses were created
3297  * @sg: The scatter/gather entry
3298  *
3299  * Note: this function is obsolete. To do: change all occurrences of
3300  * ib_sg_dma_address() into sg_dma_address().
3301  */
3302 static inline u64 ib_sg_dma_address(struct ib_device *dev,
3303                                     struct scatterlist *sg)
3304 {
3305         return sg_dma_address(sg);
3306 }
3307 
3308 /**
3309  * ib_sg_dma_len - Return the DMA length from a scatter/gather entry
3310  * @dev: The device for which the DMA addresses were created
3311  * @sg: The scatter/gather entry
3312  *
3313  * Note: this function is obsolete. To do: change all occurrences of
3314  * ib_sg_dma_len() into sg_dma_len().
3315  */
3316 static inline unsigned int ib_sg_dma_len(struct ib_device *dev,
3317                                          struct scatterlist *sg)
3318 {
3319         return sg_dma_len(sg);
3320 }
3321 
3322 /**
3323  * ib_dma_sync_single_for_cpu - Prepare DMA region to be accessed by CPU
3324  * @dev: The device for which the DMA address was created
3325  * @addr: The DMA address
3326  * @size: The size of the region in bytes
3327  * @dir: The direction of the DMA
3328  */
3329 static inline void ib_dma_sync_single_for_cpu(struct ib_device *dev,
3330                                               u64 addr,
3331                                               size_t size,
3332                                               enum dma_data_direction dir)
3333 {
3334         dma_sync_single_for_cpu(dev->dma_device, addr, size, dir);
3335 }
3336 
3337 /**
3338  * ib_dma_sync_single_for_device - Prepare DMA region to be accessed by device
3339  * @dev: The device for which the DMA address was created
3340  * @addr: The DMA address
3341  * @size: The size of the region in bytes
3342  * @dir: The direction of the DMA
3343  */
3344 static inline void ib_dma_sync_single_for_device(struct ib_device *dev,
3345                                                  u64 addr,
3346                                                  size_t size,
3347                                                  enum dma_data_direction dir)
3348 {
3349         dma_sync_single_for_device(dev->dma_device, addr, size, dir);
3350 }
3351 
3352 /**
3353  * ib_dma_alloc_coherent - Allocate memory and map it for DMA
3354  * @dev: The device for which the DMA address is requested
3355  * @size: The size of the region to allocate in bytes
3356  * @dma_handle: A pointer for returning the DMA address of the region
3357  * @flag: memory allocator flags
3358  */
3359 static inline void *ib_dma_alloc_coherent(struct ib_device *dev,
3360                                            size_t size,
3361                                            dma_addr_t *dma_handle,
3362                                            gfp_t flag)
3363 {
3364         return dma_alloc_coherent(dev->dma_device, size, dma_handle, flag);
3365 }
3366 
3367 /**
3368  * ib_dma_free_coherent - Free memory allocated by ib_dma_alloc_coherent()
3369  * @dev: The device for which the DMA addresses were allocated
3370  * @size: The size of the region
3371  * @cpu_addr: the address returned by ib_dma_alloc_coherent()
3372  * @dma_handle: the DMA address returned by ib_dma_alloc_coherent()
3373  */
3374 static inline void ib_dma_free_coherent(struct ib_device *dev,
3375                                         size_t size, void *cpu_addr,
3376                                         dma_addr_t dma_handle)
3377 {
3378         dma_free_coherent(dev->dma_device, size, cpu_addr, dma_handle);
3379 }
3380 
3381 /**
3382  * ib_dereg_mr - Deregisters a memory region and removes it from the
3383  *   HCA translation table.
3384  * @mr: The memory region to deregister.
3385  *
3386  * This function can fail, if the memory region has memory windows bound to it.
3387  */
3388 int ib_dereg_mr(struct ib_mr *mr);
3389 
3390 struct ib_mr *ib_alloc_mr(struct ib_pd *pd,
3391                           enum ib_mr_type mr_type,
3392                           u32 max_num_sg);
3393 
3394 /**
3395  * ib_update_fast_reg_key - updates the key portion of the fast_reg MR
3396  *   R_Key and L_Key.
3397  * @mr - struct ib_mr pointer to be updated.
3398  * @newkey - new key to be used.
3399  */
3400 static inline void ib_update_fast_reg_key(struct ib_mr *mr, u8 newkey)
3401 {
3402         mr->lkey = (mr->lkey & 0xffffff00) | newkey;
3403         mr->rkey = (mr->rkey & 0xffffff00) | newkey;
3404 }
3405 
3406 /**
3407  * ib_inc_rkey - increments the key portion of the given rkey. Can be used
3408  * for calculating a new rkey for type 2 memory windows.
3409  * @rkey - the rkey to increment.
3410  */
3411 static inline u32 ib_inc_rkey(u32 rkey)
3412 {
3413         const u32 mask = 0x000000ff;
3414         return ((rkey + 1) & mask) | (rkey & ~mask);
3415 }
3416 
3417 /**
3418  * ib_alloc_fmr - Allocates a unmapped fast memory region.
3419  * @pd: The protection domain associated with the unmapped region.
3420  * @mr_access_flags: Specifies the memory access rights.
3421  * @fmr_attr: Attributes of the unmapped region.
3422  *
3423  * A fast memory region must be mapped before it can be used as part of
3424  * a work request.
3425  */
3426 struct ib_fmr *ib_alloc_fmr(struct ib_pd *pd,
3427                             int mr_access_flags,
3428                             struct ib_fmr_attr *fmr_attr);
3429 
3430 /**
3431  * ib_map_phys_fmr - Maps a list of physical pages to a fast memory region.
3432  * @fmr: The fast memory region to associate with the pages.
3433  * @page_list: An array of physical pages to map to the fast memory region.
3434  * @list_len: The number of pages in page_list.
3435  * @iova: The I/O virtual address to use with the mapped region.
3436  */
3437 static inline int ib_map_phys_fmr(struct ib_fmr *fmr,
3438                                   u64 *page_list, int list_len,
3439                                   u64 iova)
3440 {
3441         return fmr->device->map_phys_fmr(fmr, page_list, list_len, iova);
3442 }
3443 
3444 /**
3445  * ib_unmap_fmr - Removes the mapping from a list of fast memory regions.
3446  * @fmr_list: A linked list of fast memory regions to unmap.
3447  */
3448 int ib_unmap_fmr(struct list_head *fmr_list);
3449 
3450 /**
3451  * ib_dealloc_fmr - Deallocates a fast memory region.
3452  * @fmr: The fast memory region to deallocate.
3453  */
3454 int ib_dealloc_fmr(struct ib_fmr *fmr);
3455 
3456 /**
3457  * ib_attach_mcast - Attaches the specified QP to a multicast group.
3458  * @qp: QP to attach to the multicast group.  The QP must be type
3459  *   IB_QPT_UD.
3460  * @gid: Multicast group GID.
3461  * @lid: Multicast group LID in host byte order.
3462  *
3463  * In order to send and receive multicast packets, subnet
3464  * administration must have created the multicast group and configured
3465  * the fabric appropriately.  The port associated with the specified
3466  * QP must also be a member of the multicast group.
3467  */
3468 int ib_attach_mcast(struct ib_qp *qp, union ib_gid *gid, u16 lid);
3469 
3470 /**
3471  * ib_detach_mcast - Detaches the specified QP from a multicast group.
3472  * @qp: QP to detach from the multicast group.
3473  * @gid: Multicast group GID.
3474  * @lid: Multicast group LID in host byte order.
3475  */
3476 int ib_detach_mcast(struct ib_qp *qp, union ib_gid *gid, u16 lid);
3477 
3478 /**
3479  * ib_alloc_xrcd - Allocates an XRC domain.
3480  * @device: The device on which to allocate the XRC domain.
3481  */
3482 struct ib_xrcd *ib_alloc_xrcd(struct ib_device *device);
3483 
3484 /**
3485  * ib_dealloc_xrcd - Deallocates an XRC domain.
3486  * @xrcd: The XRC domain to deallocate.
3487  */
3488 int ib_dealloc_xrcd(struct ib_xrcd *xrcd);
3489 
3490 struct ib_flow *ib_create_flow(struct ib_qp *qp,
3491                                struct ib_flow_attr *flow_attr, int domain);
3492 int ib_destroy_flow(struct ib_flow *flow_id);
3493 
3494 static inline int ib_check_mr_access(int flags)
3495 {
3496         /*
3497          * Local write permission is required if remote write or
3498          * remote atomic permission is also requested.
3499          */
3500         if (flags & (IB_ACCESS_REMOTE_ATOMIC | IB_ACCESS_REMOTE_WRITE) &&
3501             !(flags & IB_ACCESS_LOCAL_WRITE))
3502                 return -EINVAL;
3503 
3504         return 0;
3505 }
3506 
3507 /**
3508  * ib_check_mr_status: lightweight check of MR status.
3509  *     This routine may provide status checks on a selected
3510  *     ib_mr. first use is for signature status check.
3511  *
3512  * @mr: A memory region.
3513  * @check_mask: Bitmask of which checks to perform from
3514  *     ib_mr_status_check enumeration.
3515  * @mr_status: The container of relevant status checks.
3516  *     failed checks will be indicated in the status bitmask
3517  *     and the relevant info shall be in the error item.
3518  */
3519 int ib_check_mr_status(struct ib_mr *mr, u32 check_mask,
3520                        struct ib_mr_status *mr_status);
3521 
3522 struct net_device *ib_get_net_dev_by_params(struct ib_device *dev, u8 port,
3523                                             u16 pkey, const union ib_gid *gid,
3524                                             const struct sockaddr *addr);
3525 struct ib_wq *ib_create_wq(struct ib_pd *pd,
3526                            struct ib_wq_init_attr *init_attr);
3527 int ib_destroy_wq(struct ib_wq *wq);
3528 int ib_modify_wq(struct ib_wq *wq, struct ib_wq_attr *attr,
3529                  u32 wq_attr_mask);
3530 struct ib_rwq_ind_table *ib_create_rwq_ind_table(struct ib_device *device,
3531                                                  struct ib_rwq_ind_table_init_attr*
3532                                                  wq_ind_table_init_attr);
3533 int ib_destroy_rwq_ind_table(struct ib_rwq_ind_table *wq_ind_table);
3534 
3535 int ib_map_mr_sg(struct ib_mr *mr, struct scatterlist *sg, int sg_nents,
3536                  unsigned int *sg_offset, unsigned int page_size);
3537 
3538 static inline int
3539 ib_map_mr_sg_zbva(struct ib_mr *mr, struct scatterlist *sg, int sg_nents,
3540                   unsigned int *sg_offset, unsigned int page_size)
3541 {
3542         int n;
3543 
3544         n = ib_map_mr_sg(mr, sg, sg_nents, sg_offset, page_size);
3545         mr->iova = 0;
3546 
3547         return n;
3548 }
3549 
3550 int ib_sg_to_pages(struct ib_mr *mr, struct scatterlist *sgl, int sg_nents,
3551                 unsigned int *sg_offset, int (*set_page)(struct ib_mr *, u64));
3552 
3553 void ib_drain_rq(struct ib_qp *qp);
3554 void ib_drain_sq(struct ib_qp *qp);
3555 void ib_drain_qp(struct ib_qp *qp);
3556 
3557 int ib_resolve_eth_dmac(struct ib_device *device,
3558                         struct rdma_ah_attr *ah_attr);
3559 
3560 static inline u8 *rdma_ah_retrieve_dmac(struct rdma_ah_attr *attr)
3561 {
3562         if (attr->type == RDMA_AH_ATTR_TYPE_ROCE)
3563                 return attr->roce.dmac;
3564         return NULL;
3565 }
3566 
3567 static inline void rdma_ah_set_dlid(struct rdma_ah_attr *attr, u32 dlid)
3568 {
3569         if (attr->type == RDMA_AH_ATTR_TYPE_IB)
3570                 attr->ib.dlid = (u16)dlid;
3571         else if (attr->type == RDMA_AH_ATTR_TYPE_OPA)
3572                 attr->opa.dlid = dlid;
3573 }
3574 
3575 static inline u32 rdma_ah_get_dlid(const struct rdma_ah_attr *attr)
3576 {
3577         if (attr->type == RDMA_AH_ATTR_TYPE_IB)
3578                 return attr->ib.dlid;
3579         else if (attr->type == RDMA_AH_ATTR_TYPE_OPA)
3580                 return attr->opa.dlid;
3581         return 0;
3582 }
3583 
3584 static inline void rdma_ah_set_sl(struct rdma_ah_attr *attr, u8 sl)
3585 {
3586         attr->sl = sl;
3587 }
3588 
3589 static inline u8 rdma_ah_get_sl(const struct rdma_ah_attr *attr)
3590 {
3591         return attr->sl;
3592 }
3593 
3594 static inline void rdma_ah_set_path_bits(struct rdma_ah_attr *attr,
3595                                          u8 src_path_bits)
3596 {
3597         if (attr->type == RDMA_AH_ATTR_TYPE_IB)
3598                 attr->ib.src_path_bits = src_path_bits;
3599         else if (attr->type == RDMA_AH_ATTR_TYPE_OPA)
3600                 attr->opa.src_path_bits = src_path_bits;
3601 }
3602 
3603 static inline u8 rdma_ah_get_path_bits(const struct rdma_ah_attr *attr)
3604 {
3605         if (attr->type == RDMA_AH_ATTR_TYPE_IB)
3606                 return attr->ib.src_path_bits;
3607         else if (attr->type == RDMA_AH_ATTR_TYPE_OPA)
3608                 return attr->opa.src_path_bits;
3609         return 0;
3610 }
3611 
3612 static inline void rdma_ah_set_port_num(struct rdma_ah_attr *attr, u8 port_num)
3613 {
3614         attr->port_num = port_num;
3615 }
3616 
3617 static inline u8 rdma_ah_get_port_num(const struct rdma_ah_attr *attr)
3618 {
3619         return attr->port_num;
3620 }
3621 
3622 static inline void rdma_ah_set_static_rate(struct rdma_ah_attr *attr,
3623                                            u8 static_rate)
3624 {
3625         attr->static_rate = static_rate;
3626 }
3627 
3628 static inline u8 rdma_ah_get_static_rate(const struct rdma_ah_attr *attr)
3629 {
3630         return attr->static_rate;
3631 }
3632 
3633 static inline void rdma_ah_set_ah_flags(struct rdma_ah_attr *attr,
3634                                         enum ib_ah_flags flag)
3635 {
3636         attr->ah_flags = flag;
3637 }
3638 
3639 static inline enum ib_ah_flags
3640                 rdma_ah_get_ah_flags(const struct rdma_ah_attr *attr)
3641 {
3642         return attr->ah_flags;
3643 }
3644 
3645 static inline const struct ib_global_route
3646                 *rdma_ah_read_grh(const struct rdma_ah_attr *attr)
3647 {
3648         return &attr->grh;
3649 }
3650 
3651 /*To retrieve and modify the grh */
3652 static inline struct ib_global_route
3653                 *rdma_ah_retrieve_grh(struct rdma_ah_attr *attr)
3654 {
3655         return &attr->grh;
3656 }
3657 
3658 static inline void rdma_ah_set_dgid_raw(struct rdma_ah_attr *attr, void *dgid)
3659 {
3660         struct ib_global_route *grh = rdma_ah_retrieve_grh(attr);
3661 
3662         memcpy(grh->dgid.raw, dgid, sizeof(grh->dgid));
3663 }
3664 
3665 static inline void rdma_ah_set_subnet_prefix(struct rdma_ah_attr *attr,
3666                                              __be64 prefix)
3667 {
3668         struct ib_global_route *grh = rdma_ah_retrieve_grh(attr);
3669 
3670         grh->dgid.global.subnet_prefix = prefix;
3671 }
3672 
3673 static inline void rdma_ah_set_interface_id(struct rdma_ah_attr *attr,
3674                                             __be64 if_id)
3675 {
3676         struct ib_global_route *grh = rdma_ah_retrieve_grh(attr);
3677 
3678         grh->dgid.global.interface_id = if_id;
3679 }
3680 
3681 static inline void rdma_ah_set_grh(struct rdma_ah_attr *attr,
3682                                    union ib_gid *dgid, u32 flow_label,
3683                                    u8 sgid_index, u8 hop_limit,
3684                                    u8 traffic_class)
3685 {
3686         struct ib_global_route *grh = rdma_ah_retrieve_grh(attr);
3687 
3688         attr->ah_flags = IB_AH_GRH;
3689         if (dgid)
3690                 grh->dgid = *dgid;
3691         grh->flow_label = flow_label;
3692         grh->sgid_index = sgid_index;
3693         grh->hop_limit = hop_limit;
3694         grh->traffic_class = traffic_class;
3695 }
3696 
3697 /*Get AH type */
3698 static inline enum rdma_ah_attr_type rdma_ah_find_type(struct ib_device *dev,
3699                                                        u32 port_num)
3700 {
3701         if ((rdma_protocol_roce(dev, port_num)) ||
3702             (rdma_protocol_iwarp(dev, port_num)))
3703                 return RDMA_AH_ATTR_TYPE_ROCE;
3704         else if ((rdma_protocol_ib(dev, port_num)) &&
3705                  (rdma_cap_opa_ah(dev, port_num)))
3706                 return RDMA_AH_ATTR_TYPE_OPA;
3707         else
3708                 return RDMA_AH_ATTR_TYPE_IB;
3709 }
3710 #endif /* IB_VERBS_H */
3711 

~ [ source navigation ] ~ [ diff markup ] ~ [ identifier search ] ~

kernel.org | git.kernel.org | LWN.net | Project Home | Wiki (Japanese) | Wiki (English) | SVN repository | Mail admin

Linux® is a registered trademark of Linus Torvalds in the United States and other countries.
TOMOYO® is a registered trademark of NTT DATA CORPORATION.

osdn.jp