Crate rdmaxcel_sys

Source

Modules§

ibv_port_state
ibv_qp_state
ibv_qp_type
ibv_wc_opcode
ibv_wc_status
ibv_wr_opcode

Structs§

CUctx_st
CUmemAccessDesc_st
Memory access descriptor
CUmemAllocationProp_st
Specifies the allocation properties for an allocation.
CUmemAllocationProp_st__bindgen_ty_1
CUmemLocation_st
Specifies a memory location.
_IO_FILE
_IO_codecvt
_IO_marker
_IO_wide_data
__IncompleteArrayField
__pthread_cond_s
__pthread_cond_s__bindgen_ty_1__bindgen_ty_1
__pthread_cond_s__bindgen_ty_2__bindgen_ty_1
__pthread_internal_list
__pthread_mutex_s
_compat_ibv_port_attr
_ibv_device_ops
completion_cache
completion_node
cqe_poll_params_t
ib_uverbs_flow_action_esp
ib_uverbs_flow_action_esp_encap
ibv_access_flags
ibv_ah
ibv_ah_attr
ibv_alloc_dm_attr
ibv_async_event
ibv_comp_channel
ibv_context
ibv_context_ops
ibv_counter_attach_attr
ibv_counters
ibv_counters_init_attr
ibv_cq
ibv_cq_ex
ibv_cq_init_attr_ex
ibv_cq_moderation_caps
ibv_data_buf
ibv_device
ibv_device_attr
ibv_device_attr_ex
ibv_dm
ibv_ece
ibv_flow
ibv_flow_action
ibv_flow_action_esp_attr
ibv_flow_attr
ibv_flow_esp_filter
ibv_flow_eth_filter
ibv_flow_gre_filter
ibv_flow_ipv4_ext_filter
ibv_flow_ipv4_filter
ibv_flow_ipv6_filter
ibv_flow_mpls_filter
ibv_flow_spec
ibv_flow_spec__bindgen_ty_1__bindgen_ty_1
ibv_flow_spec_action_drop
ibv_flow_spec_action_handle
ibv_flow_spec_action_tag
ibv_flow_spec_counter_action
ibv_flow_spec_esp
ibv_flow_spec_eth
ibv_flow_spec_gre
ibv_flow_spec_ipv4
ibv_flow_spec_ipv6
ibv_flow_spec_ipv4_ext
ibv_flow_spec_mpls
ibv_flow_spec_tcp_udp
ibv_flow_spec_tunnel
ibv_flow_tcp_udp_filter
ibv_flow_tunnel_filter
ibv_gid__bindgen_ty_1
ibv_gid_entry
ibv_global_route
ibv_grh
ibv_moderate_cq
ibv_modify_cq_attr
ibv_mr
ibv_mw
ibv_mw_bind
ibv_mw_bind_info
ibv_odp_caps
ibv_odp_caps__bindgen_ty_1
ibv_ops_wr
ibv_ops_wr__bindgen_ty_1
ibv_ops_wr__bindgen_ty_1__bindgen_ty_1
ibv_packet_pacing_caps
ibv_parent_domain_init_attr
ibv_pci_atomic_caps
ibv_pd
ibv_poll_cq_attr
ibv_port_attr
ibv_port_cap_flags
ibv_qp
ibv_qp_attr
ibv_qp_attr_mask
ibv_qp_cap
ibv_qp_ex
ibv_qp_init_attr
ibv_qp_init_attr_ex
ibv_qp_open_attr
ibv_qp_rate_limit_attr
ibv_query_device_ex_input
ibv_recv_wr
ibv_rss_caps
ibv_rvh
ibv_rwq_ind_table
ibv_rwq_ind_table_init_attr
ibv_rx_hash_conf
ibv_send_flags
ibv_send_wr
ibv_send_wr__bindgen_ty_2__bindgen_ty_1
ibv_send_wr__bindgen_ty_2__bindgen_ty_2
ibv_send_wr__bindgen_ty_2__bindgen_ty_3
ibv_send_wr__bindgen_ty_3__bindgen_ty_1
ibv_send_wr__bindgen_ty_4__bindgen_ty_1
ibv_send_wr__bindgen_ty_4__bindgen_ty_2
ibv_sge
ibv_srq
ibv_srq_attr
ibv_srq_init_attr
ibv_srq_init_attr_ex
ibv_td
ibv_td_init_attr
ibv_tm_cap
ibv_tm_caps
ibv_tmh
ibv_tso_caps
ibv_values_ex
ibv_wc
ibv_wc_flags
ibv_wc_tm_info
ibv_wq
ibv_wq_attr
ibv_wq_init_attr
ibv_xrcd
ibv_xrcd_init_attr
mlx5_ib_uapi_devx_async_cmd_hdr
mlx5_ib_uapi_devx_async_event_hdr
mlx5_wqe_atomic_seg
mlx5_wqe_av
mlx5_wqe_av__bindgen_ty_1__bindgen_ty_1
mlx5_wqe_ctrl_seg
mlx5_wqe_data_seg
mlx5_wqe_datagram_seg
mlx5_wqe_eth_seg
mlx5_wqe_inl_data_seg
mlx5_wqe_mkey_context_seg
mlx5_wqe_raddr_seg
mlx5_wqe_srq_next_seg
mlx5_wqe_tm_seg
mlx5_wqe_umr_ctrl_seg
mlx5_wqe_umr_klm_seg
mlx5_wqe_umr_repeat_block_seg
mlx5_wqe_umr_repeat_ent_seg
mlx5dv_ah
mlx5dv_alloc_dm_attr
mlx5dv_clock_info
mlx5dv_context
mlx5dv_context_attr
mlx5dv_cq
mlx5dv_cq_init_attr
mlx5dv_cqe_comp_caps
mlx5dv_crypto_attr
mlx5dv_crypto_caps
mlx5dv_crypto_login_attr
mlx5dv_crypto_login_attr_ex
mlx5dv_crypto_login_obj
mlx5dv_crypto_login_query_attr
mlx5dv_ctx_allocators
mlx5dv_dc_init_attr
mlx5dv_dci_streams
mlx5dv_dci_streams_caps
mlx5dv_dek
mlx5dv_dek_attr
mlx5dv_dek_init_attr
mlx5dv_devx_cmd_comp
mlx5dv_devx_eq
mlx5dv_devx_event_channel
mlx5dv_devx_msi_vector
mlx5dv_devx_obj
mlx5dv_devx_uar
mlx5dv_devx_umem
mlx5dv_devx_umem_in
mlx5dv_dm
mlx5dv_dr_action
mlx5dv_dr_action_dest_attr
mlx5dv_dr_action_dest_reformat
mlx5dv_dr_domain
mlx5dv_dr_flow_meter_attr
mlx5dv_dr_flow_sampler_attr
mlx5dv_dr_matcher
mlx5dv_dr_matcher_layout
mlx5dv_dr_rule
mlx5dv_dr_table
mlx5dv_flow_action_attr
mlx5dv_flow_action_esp
mlx5dv_flow_match_parameters
mlx5dv_flow_matcher
mlx5dv_flow_matcher_attr
mlx5dv_mkey
mlx5dv_mkey_conf_attr
mlx5dv_mkey_err
mlx5dv_mkey_init_attr
mlx5dv_mr_interleaved
mlx5dv_obj
mlx5dv_obj__bindgen_ty_1
mlx5dv_obj__bindgen_ty_2
mlx5dv_obj__bindgen_ty_3
mlx5dv_obj__bindgen_ty_4
mlx5dv_obj__bindgen_ty_5
mlx5dv_obj__bindgen_ty_6
mlx5dv_obj__bindgen_ty_7
mlx5dv_pd
mlx5dv_pp
mlx5dv_qp
mlx5dv_qp__bindgen_ty_1
mlx5dv_qp__bindgen_ty_2
mlx5dv_qp__bindgen_ty_3
mlx5dv_qp_ex
mlx5dv_qp_init_attr
mlx5dv_rwq
mlx5dv_sched_attr
mlx5dv_sched_leaf
mlx5dv_sched_node
mlx5dv_sig_block_attr
mlx5dv_sig_block_domain
mlx5dv_sig_caps
mlx5dv_sig_crc
mlx5dv_sig_err
mlx5dv_sig_t10dif
mlx5dv_srq
mlx5dv_steering_anchor
mlx5dv_steering_anchor_attr
mlx5dv_striding_rq_caps
mlx5dv_striding_rq_init_attr
mlx5dv_sw_parsing_caps
mlx5dv_var
mlx5dv_vfio_context_attr
mlx5dv_wq_init_attr
poll_context
rdma_segment_info_t
rdmaxcel_qp
std_atomic
timespec
wqe_params_t

Constants§

CQE_POLL_ERROR
CQE_POLL_FALSE
CQE_POLL_TRUE
CUDA_ERROR_ALREADY_ACQUIRED
This indicates that a resource has already been acquired.
CUDA_ERROR_ALREADY_MAPPED
This indicates that the resource is already mapped.
CUDA_ERROR_ARRAY_IS_MAPPED
This indicates that the specified array is currently mapped and thus cannot be destroyed.
CUDA_ERROR_ASSERT
A device-side assert triggered during kernel execution. The context cannot be used anymore, and must be destroyed. All existing device memory allocations from this context are invalid and must be reconstructed if the program is to continue using CUDA.
CUDA_ERROR_CAPTURED_EVENT
This error indicates that the operation is not permitted on an event which was last recorded in a capturing stream.
CUDA_ERROR_CDP_NOT_SUPPORTED
This error indicates that the module is using CUDA Dynamic Parallelism, but the current configuration, like MPS, does not support it.
CUDA_ERROR_CDP_VERSION_MISMATCH
This error indicates that a module contains an unsupported interaction between different versions of CUDA Dynamic Parallelism.
CUDA_ERROR_COMPAT_NOT_SUPPORTED_ON_DEVICE
This error indicates that the system was upgraded to run with forward compatibility but the visible hardware detected by CUDA does not support this configuration. Refer to the compatibility documentation for the supported hardware matrix or ensure that only supported hardware is visible during initialization via the CUDA_VISIBLE_DEVICES environment variable.
CUDA_ERROR_CONTEXT_ALREADY_CURRENT
This indicated that the context being supplied as a parameter to the API call was already the active context. \deprecated This error return is deprecated as of CUDA 3.2. It is no longer an error to attempt to push the active context via ::cuCtxPushCurrent().
CUDA_ERROR_CONTEXT_ALREADY_IN_USE
This indicates that the ::CUcontext passed to the API call can only be bound to a single CPU thread at a time but is already bound to a CPU thread.
CUDA_ERROR_CONTEXT_IS_DESTROYED
This error indicates that the context current to the calling thread has been destroyed using ::cuCtxDestroy, or is a primary context which has not yet been initialized.
CUDA_ERROR_COOPERATIVE_LAUNCH_TOO_LARGE
This error indicates that the number of blocks launched per grid for a kernel that was launched via either ::cuLaunchCooperativeKernel or ::cuLaunchCooperativeKernelMultiDevice exceeds the maximum number of blocks as allowed by ::cuOccupancyMaxActiveBlocksPerMultiprocessor or ::cuOccupancyMaxActiveBlocksPerMultiprocessorWithFlags times the number of multiprocessors as specified by the device attribute ::CU_DEVICE_ATTRIBUTE_MULTIPROCESSOR_COUNT.
CUDA_ERROR_DEINITIALIZED
This indicates that the CUDA driver is in the process of shutting down.
CUDA_ERROR_DEVICE_NOT_LICENSED
This error indicates that the Grid license is not applied.
CUDA_ERROR_DEVICE_UNAVAILABLE
This indicates that requested CUDA device is unavailable at the current time. Devices are often unavailable due to use of ::CU_COMPUTEMODE_EXCLUSIVE_PROCESS or ::CU_COMPUTEMODE_PROHIBITED.
CUDA_ERROR_ECC_UNCORRECTABLE
This indicates that an uncorrectable ECC error was detected during execution.
CUDA_ERROR_EXTERNAL_DEVICE
This indicates that an async error has occurred in a device outside of CUDA. If CUDA was waiting for an external device’s signal before consuming shared data, the external device signaled an error indicating that the data is not valid for consumption. This leaves the process in an inconsistent state and any further CUDA work will return the same error. To continue using CUDA, the process must be terminated and relaunched.
CUDA_ERROR_FILE_NOT_FOUND
This indicates that the file specified was not found.
CUDA_ERROR_FUNCTION_NOT_LOADED
Indicates a function handle is not loaded when calling an API that requires a loaded function.
CUDA_ERROR_GRAPH_EXEC_UPDATE_FAILURE
This error indicates that the graph update was not performed because it included changes which violated constraints specific to instantiated graph update.
CUDA_ERROR_HARDWARE_STACK_ERROR
While executing a kernel, the device encountered a stack error. This can be due to stack corruption or exceeding the stack size limit. This leaves the process in an inconsistent state and any further CUDA work will return the same error. To continue using CUDA, the process must be terminated and relaunched.
CUDA_ERROR_HOST_MEMORY_ALREADY_REGISTERED
This error indicates that the memory range passed to ::cuMemHostRegister() has already been registered.
CUDA_ERROR_HOST_MEMORY_NOT_REGISTERED
This error indicates that the pointer passed to ::cuMemHostUnregister() does not correspond to any currently registered memory region.
CUDA_ERROR_ILLEGAL_ADDRESS
While executing a kernel, the device encountered a load or store instruction on an invalid memory address. This leaves the process in an inconsistent state and any further CUDA work will return the same error. To continue using CUDA, the process must be terminated and relaunched.
CUDA_ERROR_ILLEGAL_INSTRUCTION
While executing a kernel, the device encountered an illegal instruction. This leaves the process in an inconsistent state and any further CUDA work will return the same error. To continue using CUDA, the process must be terminated and relaunched.
CUDA_ERROR_ILLEGAL_STATE
This indicates that a resource required by the API call is not in a valid state to perform the requested operation.
CUDA_ERROR_INVALID_ADDRESS_SPACE
While executing a kernel, the device encountered an instruction which can only operate on memory locations in certain address spaces (global, shared, or local), but was supplied a memory address not belonging to an allowed address space. This leaves the process in an inconsistent state and any further CUDA work will return the same error. To continue using CUDA, the process must be terminated and relaunched.
CUDA_ERROR_INVALID_CLUSTER_SIZE
Indicates a kernel launch error due to cluster misconfiguration.
CUDA_ERROR_INVALID_CONTEXT
This most frequently indicates that there is no context bound to the current thread. This can also be returned if the context passed to an API call is not a valid handle (such as a context that has had ::cuCtxDestroy() invoked on it). This can also be returned if a user mixes different API versions (i.e. 3010 context with 3020 API calls). See ::cuCtxGetApiVersion() for more details. This can also be returned if the green context passed to an API call was not converted to a ::CUcontext using ::cuCtxFromGreenCtx API.
CUDA_ERROR_INVALID_DEVICE
This indicates that the device ordinal supplied by the user does not correspond to a valid CUDA device or that the action requested is invalid for the specified device.
CUDA_ERROR_INVALID_GRAPHICS_CONTEXT
This indicates an error with OpenGL or DirectX context.
CUDA_ERROR_INVALID_HANDLE
This indicates that a resource handle passed to the API call was not valid. Resource handles are opaque types like ::CUstream and ::CUevent.
CUDA_ERROR_INVALID_IMAGE
This indicates that the device kernel image is invalid. This can also indicate an invalid CUDA module.
CUDA_ERROR_INVALID_PC
While executing a kernel, the device program counter wrapped its address space. This leaves the process in an inconsistent state and any further CUDA work will return the same error. To continue using CUDA, the process must be terminated and relaunched.
CUDA_ERROR_INVALID_PTX
This indicates that a PTX JIT compilation failed.
CUDA_ERROR_INVALID_RESOURCE_CONFIGURATION
This error indicates one or more resources are insufficient or non-applicable for the operation.
CUDA_ERROR_INVALID_RESOURCE_TYPE
This error indicates one or more resources passed in are not valid resource types for the operation.
CUDA_ERROR_INVALID_SOURCE
This indicates that the device kernel source is invalid. This includes compilation/linker errors encountered in device code or user error.
CUDA_ERROR_INVALID_VALUE
This indicates that one or more of the parameters passed to the API call is not within an acceptable range of values.
CUDA_ERROR_JIT_COMPILATION_DISABLED
This indicates that the PTX JIT compilation was disabled.
CUDA_ERROR_JIT_COMPILER_NOT_FOUND
This indicates that the PTX JIT compiler library was not found.
CUDA_ERROR_LAUNCH_FAILED
An exception occurred on the device while executing a kernel. Common causes include dereferencing an invalid device pointer and accessing out of bounds shared memory. Less common cases can be system specific - more information about these cases can be found in the system specific user guide. This leaves the process in an inconsistent state and any further CUDA work will return the same error. To continue using CUDA, the process must be terminated and relaunched.
CUDA_ERROR_LAUNCH_INCOMPATIBLE_TEXTURING
This error indicates a kernel launch that uses an incompatible texturing mode.
CUDA_ERROR_LAUNCH_OUT_OF_RESOURCES
This indicates that a launch did not occur because it did not have appropriate resources. This error usually indicates that the user has attempted to pass too many arguments to the device kernel, or the kernel launch specifies too many threads for the kernel’s register count. Passing arguments of the wrong size (i.e. a 64-bit pointer when a 32-bit int is expected) is equivalent to passing too many arguments and can also result in this error.
CUDA_ERROR_LAUNCH_TIMEOUT
This indicates that the device kernel took too long to execute. This can only occur if timeouts are enabled - see the device attribute ::CU_DEVICE_ATTRIBUTE_KERNEL_EXEC_TIMEOUT for more information. This leaves the process in an inconsistent state and any further CUDA work will return the same error. To continue using CUDA, the process must be terminated and relaunched.
CUDA_ERROR_LOSSY_QUERY
This indicates an attempt was made to introspect an object in a way that would discard semantically important information. This is either due to the object using functionality newer than the API version used to introspect it or omission of optional return arguments.
CUDA_ERROR_MAP_FAILED
This indicates that a map or register operation has failed.
CUDA_ERROR_MISALIGNED_ADDRESS
While executing a kernel, the device encountered a load or store instruction on a memory address which is not aligned. This leaves the process in an inconsistent state and any further CUDA work will return the same error. To continue using CUDA, the process must be terminated and relaunched.
CUDA_ERROR_MPS_CLIENT_TERMINATED
This error indicates that the MPS client has been terminated by the server. To continue using CUDA, the process must be terminated and relaunched.
CUDA_ERROR_MPS_CONNECTION_FAILED
This error indicates that the MPS client failed to connect to the MPS control daemon or the MPS server.
CUDA_ERROR_MPS_MAX_CLIENTS_REACHED
This error indicates that the hardware resources required to create MPS client have been exhausted.
CUDA_ERROR_MPS_MAX_CONNECTIONS_REACHED
This error indicates that the hardware resources required to support device connections have been exhausted.
CUDA_ERROR_MPS_RPC_FAILURE
This error indicates that the remote procedural call between the MPS server and the MPS client failed.
CUDA_ERROR_MPS_SERVER_NOT_READY
This error indicates that the MPS server is not ready to accept new MPS client requests. This error can be returned when the MPS server is in the process of recovering from a fatal failure.
CUDA_ERROR_NOT_FOUND
This indicates that a named symbol was not found. Examples of symbols are global/constant variable names, driver function names, texture names, and surface names.
CUDA_ERROR_NOT_INITIALIZED
This indicates that the CUDA driver has not been initialized with ::cuInit() or that initialization has failed.
CUDA_ERROR_NOT_MAPPED
This indicates that a resource is not mapped.
CUDA_ERROR_NOT_MAPPED_AS_ARRAY
This indicates that a mapped resource is not available for access as an array.
CUDA_ERROR_NOT_MAPPED_AS_POINTER
This indicates that a mapped resource is not available for access as a pointer.
CUDA_ERROR_NOT_PERMITTED
This error indicates that the attempted operation is not permitted.
CUDA_ERROR_NOT_READY
This indicates that asynchronous operations issued previously have not completed yet. This result is not actually an error, but must be indicated differently than ::CUDA_SUCCESS (which indicates completion). Calls that may return this value include ::cuEventQuery() and ::cuStreamQuery().
CUDA_ERROR_NOT_SUPPORTED
This error indicates that the attempted operation is not supported on the current system or device.
CUDA_ERROR_NO_BINARY_FOR_GPU
This indicates that there is no kernel image available that is suitable for the device. This can occur when a user specifies code generation options for a particular CUDA source file that do not include the corresponding device configuration.
CUDA_ERROR_NO_DEVICE
This indicates that no CUDA-capable devices were detected by the installed CUDA driver.
CUDA_ERROR_NVLINK_UNCORRECTABLE
This indicates that an uncorrectable NVLink error was detected during the execution.
CUDA_ERROR_OPERATING_SYSTEM
This indicates that an OS call failed.
CUDA_ERROR_OUT_OF_MEMORY
The API call failed because it was unable to allocate enough memory or other resources to perform the requested operation.
CUDA_ERROR_PEER_ACCESS_ALREADY_ENABLED
This error indicates that a call to ::cuCtxEnablePeerAccess() is trying to re-enable peer access to a context which has already had peer access to it enabled.
CUDA_ERROR_PEER_ACCESS_NOT_ENABLED
This error indicates that ::cuCtxDisablePeerAccess() is trying to disable peer access which has not been enabled yet via ::cuCtxEnablePeerAccess().
CUDA_ERROR_PEER_ACCESS_UNSUPPORTED
This indicates that peer access is not supported across the given devices.
CUDA_ERROR_PRIMARY_CONTEXT_ACTIVE
This error indicates that the primary context for the specified device has already been initialized.
CUDA_ERROR_PROFILER_ALREADY_STARTED
\deprecated This error return is deprecated as of CUDA 5.0. It is no longer an error to call cuProfilerStart() when profiling is already enabled.
CUDA_ERROR_PROFILER_ALREADY_STOPPED
\deprecated This error return is deprecated as of CUDA 5.0. It is no longer an error to call cuProfilerStop() when profiling is already disabled.
CUDA_ERROR_PROFILER_DISABLED
This indicates profiler is not initialized for this run. This can happen when the application is running with external profiling tools like visual profiler.
CUDA_ERROR_PROFILER_NOT_INITIALIZED
\deprecated This error return is deprecated as of CUDA 5.0. It is no longer an error to attempt to enable/disable the profiling via ::cuProfilerStart or ::cuProfilerStop without initialization.
CUDA_ERROR_SHARED_OBJECT_INIT_FAILED
This indicates that initialization of a shared object failed.
CUDA_ERROR_SHARED_OBJECT_SYMBOL_NOT_FOUND
This indicates that a link to a shared object failed to resolve.
CUDA_ERROR_STREAM_CAPTURE_IMPLICIT
This error indicates a disallowed implicit dependency on a current capture sequence from cudaStreamLegacy.
CUDA_ERROR_STREAM_CAPTURE_INVALIDATED
This error indicates that the current capture sequence on the stream has been invalidated due to a previous error.
CUDA_ERROR_STREAM_CAPTURE_ISOLATION
This error indicates that a dependency would have been created which crosses the capture sequence boundary. Only implicit in-stream ordering dependencies are allowed to cross the boundary.
CUDA_ERROR_STREAM_CAPTURE_MERGE
This error indicates that the operation would have resulted in a merge of two independent capture sequences.
CUDA_ERROR_STREAM_CAPTURE_UNJOINED
This error indicates that the capture sequence contains a fork that was not joined to the primary stream.
CUDA_ERROR_STREAM_CAPTURE_UNMATCHED
This error indicates that the capture was not initiated in this stream.
CUDA_ERROR_STREAM_CAPTURE_UNSUPPORTED
This error indicates that the operation is not permitted when the stream is capturing.
CUDA_ERROR_STREAM_CAPTURE_WRONG_THREAD
A stream capture sequence not initiated with the ::CU_STREAM_CAPTURE_MODE_RELAXED argument to ::cuStreamBeginCapture was passed to ::cuStreamEndCapture in a different thread.
CUDA_ERROR_STUB_LIBRARY
This indicates that the CUDA driver that the application has loaded is a stub library. Applications that run with the stub rather than a real driver loaded will result in CUDA API returning this error.
CUDA_ERROR_SYSTEM_DRIVER_MISMATCH
This error indicates that there is a mismatch between the versions of the display driver and the CUDA driver. Refer to the compatibility documentation for supported versions.
CUDA_ERROR_SYSTEM_NOT_READY
This error indicates that the system is not yet ready to start any CUDA work. To continue using CUDA, verify the system configuration is in a valid state and all required driver daemons are actively running. More information about this error can be found in the system specific user guide.
CUDA_ERROR_TIMEOUT
This error indicates that the timeout specified for the wait operation has lapsed.
CUDA_ERROR_TOO_MANY_PEERS
This error indicates that the hardware resources required to enable peer access have been exhausted for one or more of the devices passed to ::cuCtxEnablePeerAccess().
CUDA_ERROR_UNKNOWN
This indicates that an unknown internal error has occurred.
CUDA_ERROR_UNMAP_FAILED
This indicates that an unmap or unregister operation has failed.
CUDA_ERROR_UNSUPPORTED_DEVSIDE_SYNC
This indicates that the code to be compiled by the PTX JIT contains unsupported call to cudaDeviceSynchronize.
CUDA_ERROR_UNSUPPORTED_EXEC_AFFINITY
This indicates that the ::CUexecAffinityType passed to the API call is not supported by the active device.
CUDA_ERROR_UNSUPPORTED_LIMIT
This indicates that the ::CUlimit passed to the API call is not supported by the active device.
CUDA_ERROR_UNSUPPORTED_PTX_VERSION
This indicates that the provided PTX was compiled with an unsupported toolchain.
CUDA_SUCCESS
The API call returned with no errors. In the case of query calls, this also means that the operation being queried is complete (see ::cuEventQuery() and ::cuStreamQuery()).
CU_DEVICE_ATTRIBUTE_ASYNC_ENGINE_COUNT
< Number of asynchronous engines
CU_DEVICE_ATTRIBUTE_CAN_FLUSH_REMOTE_WRITES
< The ::CU_STREAM_WAIT_VALUE_FLUSH flag and the ::CU_STREAM_MEM_OP_FLUSH_REMOTE_WRITES MemOp are supported on the device. See \ref CUDA_MEMOP for additional details.
CU_DEVICE_ATTRIBUTE_CAN_MAP_HOST_MEMORY
< Device can map host memory into CUDA address space
CU_DEVICE_ATTRIBUTE_CAN_TEX2D_GATHER
< Deprecated, do not use.
CU_DEVICE_ATTRIBUTE_CAN_USE_64_BIT_STREAM_MEM_OPS
< 64-bit operations are supported in ::cuStreamBatchMemOp and related MemOp APIs.
CU_DEVICE_ATTRIBUTE_CAN_USE_64_BIT_STREAM_MEM_OPS_V1
< Deprecated, along with v1 MemOps API, 64-bit operations are supported in ::cuStreamBatchMemOp and related APIs.
CU_DEVICE_ATTRIBUTE_CAN_USE_HOST_POINTER_FOR_REGISTERED_MEM
< Device can access host registered memory at the same virtual address as the CPU
CU_DEVICE_ATTRIBUTE_CAN_USE_STREAM_MEM_OPS_V1
< Deprecated, along with v1 MemOps API, ::cuStreamBatchMemOp and related APIs are supported.
CU_DEVICE_ATTRIBUTE_CAN_USE_STREAM_WAIT_VALUE_NOR
< ::CU_STREAM_WAIT_VALUE_NOR is supported by MemOp APIs.
CU_DEVICE_ATTRIBUTE_CAN_USE_STREAM_WAIT_VALUE_NOR_V1
< Deprecated, along with v1 MemOps API, ::CU_STREAM_WAIT_VALUE_NOR is supported.
CU_DEVICE_ATTRIBUTE_CLOCK_RATE
< Typical clock frequency in kilohertz
CU_DEVICE_ATTRIBUTE_CLUSTER_LAUNCH
< Indicates device supports cluster launch
CU_DEVICE_ATTRIBUTE_COMPUTE_CAPABILITY_MAJOR
< Major compute capability version number
CU_DEVICE_ATTRIBUTE_COMPUTE_CAPABILITY_MINOR
< Minor compute capability version number
CU_DEVICE_ATTRIBUTE_COMPUTE_MODE
< Compute mode (See ::CUcomputemode for details)
CU_DEVICE_ATTRIBUTE_COMPUTE_PREEMPTION_SUPPORTED
< Device supports compute preemption.
CU_DEVICE_ATTRIBUTE_CONCURRENT_KERNELS
< Device can possibly execute multiple kernels concurrently
CU_DEVICE_ATTRIBUTE_CONCURRENT_MANAGED_ACCESS
< Device can coherently access managed memory concurrently with the CPU
CU_DEVICE_ATTRIBUTE_COOPERATIVE_LAUNCH
< Device supports launching cooperative kernels via ::cuLaunchCooperativeKernel
CU_DEVICE_ATTRIBUTE_COOPERATIVE_MULTI_DEVICE_LAUNCH
< Deprecated, ::cuLaunchCooperativeKernelMultiDevice is deprecated.
CU_DEVICE_ATTRIBUTE_D3D12_CIG_SUPPORTED
< Device supports CIG with D3D12.
CU_DEVICE_ATTRIBUTE_DEFERRED_MAPPING_CUDA_ARRAY_SUPPORTED
< Device supports deferred mapping CUDA arrays and CUDA mipmapped arrays
CU_DEVICE_ATTRIBUTE_DIRECT_MANAGED_MEM_ACCESS_FROM_HOST
< The host can directly access managed memory on the device without migration.
CU_DEVICE_ATTRIBUTE_DMA_BUF_SUPPORTED
< Device supports buffer sharing with dma_buf mechanism.
CU_DEVICE_ATTRIBUTE_ECC_ENABLED
< Device has ECC support enabled
CU_DEVICE_ATTRIBUTE_GENERIC_COMPRESSION_SUPPORTED
< Device supports compression of memory
CU_DEVICE_ATTRIBUTE_GLOBAL_L1_CACHE_SUPPORTED
< Device supports caching globals in L1
CU_DEVICE_ATTRIBUTE_GLOBAL_MEMORY_BUS_WIDTH
< Global memory bus width in bits
CU_DEVICE_ATTRIBUTE_GPU_DIRECT_RDMA_FLUSH_WRITES_OPTIONS
< The returned attribute shall be interpreted as a bitmask, where the individual bits are described by the ::CUflushGPUDirectRDMAWritesOptions enum
CU_DEVICE_ATTRIBUTE_GPU_DIRECT_RDMA_SUPPORTED
< Device supports GPUDirect RDMA APIs, like nvidia_p2p_get_pages (see https://docs.nvidia.com/cuda/gpudirect-rdma for more information)
CU_DEVICE_ATTRIBUTE_GPU_DIRECT_RDMA_WITH_CUDA_VMM_SUPPORTED
< Device supports specifying the GPUDirect RDMA flag with ::cuMemCreate
CU_DEVICE_ATTRIBUTE_GPU_DIRECT_RDMA_WRITES_ORDERING
< GPUDirect RDMA writes to the device do not need to be flushed for consumers within the scope indicated by the returned attribute. See ::CUGPUDirectRDMAWritesOrdering for the numerical values returned here.
CU_DEVICE_ATTRIBUTE_GPU_OVERLAP
< Device can possibly copy memory and execute a kernel concurrently. Deprecated. Use instead CU_DEVICE_ATTRIBUTE_ASYNC_ENGINE_COUNT.
CU_DEVICE_ATTRIBUTE_HANDLE_TYPE_FABRIC_SUPPORTED
< Device supports exporting memory to a fabric handle with cuMemExportToShareableHandle() or requested with cuMemCreate()
CU_DEVICE_ATTRIBUTE_HANDLE_TYPE_POSIX_FILE_DESCRIPTOR_SUPPORTED
< Device supports exporting memory to a posix file descriptor with ::cuMemExportToShareableHandle, if requested via ::cuMemCreate
CU_DEVICE_ATTRIBUTE_HANDLE_TYPE_WIN32_HANDLE_SUPPORTED
< Device supports exporting memory to a Win32 NT handle with ::cuMemExportToShareableHandle, if requested via ::cuMemCreate
CU_DEVICE_ATTRIBUTE_HANDLE_TYPE_WIN32_KMT_HANDLE_SUPPORTED
< Device supports exporting memory to a Win32 KMT handle with ::cuMemExportToShareableHandle, if requested via ::cuMemCreate
CU_DEVICE_ATTRIBUTE_HOST_NATIVE_ATOMIC_SUPPORTED
< Link between the device and the host supports native atomic operations (this is a placeholder attribute, and is not supported on any current hardware)
CU_DEVICE_ATTRIBUTE_HOST_NUMA_ID
< NUMA ID of the host node closest to the device. Returns -1 when system does not support NUMA.
CU_DEVICE_ATTRIBUTE_HOST_REGISTER_SUPPORTED
< Device supports host memory registration via ::cudaHostRegister.
CU_DEVICE_ATTRIBUTE_INTEGRATED
< Device is integrated with host memory
CU_DEVICE_ATTRIBUTE_IPC_EVENT_SUPPORTED
< Device supports IPC Events.
CU_DEVICE_ATTRIBUTE_KERNEL_EXEC_TIMEOUT
< Specifies whether there is a run time limit on kernels
CU_DEVICE_ATTRIBUTE_L2_CACHE_SIZE
< Size of L2 cache in bytes
CU_DEVICE_ATTRIBUTE_LOCAL_L1_CACHE_SUPPORTED
< Device supports caching locals in L1
CU_DEVICE_ATTRIBUTE_MANAGED_MEMORY
< Device can allocate managed memory on this system
CU_DEVICE_ATTRIBUTE_MAX
CU_DEVICE_ATTRIBUTE_MAXIMUM_SURFACE1D_LAYERED_LAYERS
< Maximum layers in a 1D layered surface
CU_DEVICE_ATTRIBUTE_MAXIMUM_SURFACE1D_LAYERED_WIDTH
< Maximum 1D layered surface width
CU_DEVICE_ATTRIBUTE_MAXIMUM_SURFACE1D_WIDTH
< Maximum 1D surface width
CU_DEVICE_ATTRIBUTE_MAXIMUM_SURFACE2D_HEIGHT
< Maximum 2D surface height
CU_DEVICE_ATTRIBUTE_MAXIMUM_SURFACE2D_LAYERED_HEIGHT
< Maximum 2D layered surface height
CU_DEVICE_ATTRIBUTE_MAXIMUM_SURFACE2D_LAYERED_LAYERS
< Maximum layers in a 2D layered surface
CU_DEVICE_ATTRIBUTE_MAXIMUM_SURFACE2D_LAYERED_WIDTH
< Maximum 2D layered surface width
CU_DEVICE_ATTRIBUTE_MAXIMUM_SURFACE2D_WIDTH
< Maximum 2D surface width
CU_DEVICE_ATTRIBUTE_MAXIMUM_SURFACE3D_DEPTH
< Maximum 3D surface depth
CU_DEVICE_ATTRIBUTE_MAXIMUM_SURFACE3D_HEIGHT
< Maximum 3D surface height
CU_DEVICE_ATTRIBUTE_MAXIMUM_SURFACE3D_WIDTH
< Maximum 3D surface width
CU_DEVICE_ATTRIBUTE_MAXIMUM_SURFACECUBEMAP_LAYERED_LAYERS
< Maximum layers in a cubemap layered surface
CU_DEVICE_ATTRIBUTE_MAXIMUM_SURFACECUBEMAP_LAYERED_WIDTH
< Maximum cubemap layered surface width
CU_DEVICE_ATTRIBUTE_MAXIMUM_SURFACECUBEMAP_WIDTH
< Maximum cubemap surface width
CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE1D_LAYERED_LAYERS
< Maximum layers in a 1D layered texture
CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE1D_LAYERED_WIDTH
< Maximum 1D layered texture width
CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE1D_LINEAR_WIDTH
< Deprecated, do not use. Use cudaDeviceGetTexture1DLinearMaxWidth() or cuDeviceGetTexture1DLinearMaxWidth() instead.
CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE1D_MIPMAPPED_WIDTH
< Maximum mipmapped 1D texture width
CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE1D_WIDTH
< Maximum 1D texture width
CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_ARRAY_HEIGHT
< Deprecated, use CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_LAYERED_HEIGHT
CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_ARRAY_NUMSLICES
< Deprecated, use CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_LAYERED_LAYERS
CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_ARRAY_WIDTH
< Deprecated, use CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_LAYERED_WIDTH
CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_GATHER_HEIGHT
< Maximum 2D texture height if CUDA_ARRAY3D_TEXTURE_GATHER is set
CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_GATHER_WIDTH
< Maximum 2D texture width if CUDA_ARRAY3D_TEXTURE_GATHER is set
CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_HEIGHT
< Maximum 2D texture height
CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_LAYERED_HEIGHT
< Maximum 2D layered texture height
CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_LAYERED_LAYERS
< Maximum layers in a 2D layered texture
CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_LAYERED_WIDTH
< Maximum 2D layered texture width
CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_LINEAR_HEIGHT
< Maximum 2D linear texture height
CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_LINEAR_PITCH
< Maximum 2D linear texture pitch in bytes
CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_LINEAR_WIDTH
< Maximum 2D linear texture width
CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_MIPMAPPED_HEIGHT
< Maximum mipmapped 2D texture height
CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_MIPMAPPED_WIDTH
< Maximum mipmapped 2D texture width
CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_WIDTH
< Maximum 2D texture width
CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE3D_DEPTH
< Maximum 3D texture depth
CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE3D_DEPTH_ALTERNATE
< Alternate maximum 3D texture depth
CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE3D_HEIGHT
< Maximum 3D texture height
CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE3D_HEIGHT_ALTERNATE
< Alternate maximum 3D texture height
CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE3D_WIDTH
< Maximum 3D texture width
CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE3D_WIDTH_ALTERNATE
< Alternate maximum 3D texture width
CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURECUBEMAP_LAYERED_LAYERS
< Maximum layers in a cubemap layered texture
CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURECUBEMAP_LAYERED_WIDTH
< Maximum cubemap layered texture width/height
CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURECUBEMAP_WIDTH
< Maximum cubemap texture width/height
CU_DEVICE_ATTRIBUTE_MAX_ACCESS_POLICY_WINDOW_SIZE
< Maximum value of CUaccessPolicyWindow::num_bytes.
CU_DEVICE_ATTRIBUTE_MAX_BLOCKS_PER_MULTIPROCESSOR
< Maximum number of blocks per multiprocessor
CU_DEVICE_ATTRIBUTE_MAX_BLOCK_DIM_X
< Maximum block dimension X
CU_DEVICE_ATTRIBUTE_MAX_BLOCK_DIM_Y
< Maximum block dimension Y
CU_DEVICE_ATTRIBUTE_MAX_BLOCK_DIM_Z
< Maximum block dimension Z
CU_DEVICE_ATTRIBUTE_MAX_GRID_DIM_X
< Maximum grid dimension X
CU_DEVICE_ATTRIBUTE_MAX_GRID_DIM_Y
< Maximum grid dimension Y
CU_DEVICE_ATTRIBUTE_MAX_GRID_DIM_Z
< Maximum grid dimension Z
CU_DEVICE_ATTRIBUTE_MAX_PERSISTING_L2_CACHE_SIZE
< Maximum L2 persisting lines capacity setting in bytes.
CU_DEVICE_ATTRIBUTE_MAX_PITCH
< Maximum pitch in bytes allowed by memory copies
CU_DEVICE_ATTRIBUTE_MAX_REGISTERS_PER_BLOCK
< Maximum number of 32-bit registers available per block
CU_DEVICE_ATTRIBUTE_MAX_REGISTERS_PER_MULTIPROCESSOR
< Maximum number of 32-bit registers available per multiprocessor
CU_DEVICE_ATTRIBUTE_MAX_SHARED_MEMORY_PER_BLOCK
< Maximum shared memory available per block in bytes
CU_DEVICE_ATTRIBUTE_MAX_SHARED_MEMORY_PER_BLOCK_OPTIN
< Maximum optin shared memory per block
CU_DEVICE_ATTRIBUTE_MAX_SHARED_MEMORY_PER_MULTIPROCESSOR
< Maximum shared memory available per multiprocessor in bytes
CU_DEVICE_ATTRIBUTE_MAX_THREADS_PER_BLOCK
< Maximum number of threads per block
CU_DEVICE_ATTRIBUTE_MAX_THREADS_PER_MULTIPROCESSOR
< Maximum resident threads per multiprocessor
CU_DEVICE_ATTRIBUTE_MEMORY_CLOCK_RATE
< Peak memory clock frequency in kilohertz
CU_DEVICE_ATTRIBUTE_MEMORY_POOLS_SUPPORTED
< Device supports using the ::cuMemAllocAsync and ::cuMemPool family of APIs
CU_DEVICE_ATTRIBUTE_MEMPOOL_SUPPORTED_HANDLE_TYPES
< Handle types supported with mempool based IPC
CU_DEVICE_ATTRIBUTE_MEM_SYNC_DOMAIN_COUNT
< Number of memory domains the device supports.
CU_DEVICE_ATTRIBUTE_MPS_ENABLED
< Indicates if contexts created on this device will be shared via MPS
CU_DEVICE_ATTRIBUTE_MULTICAST_SUPPORTED
< Device supports switch multicast and reduction operations.
CU_DEVICE_ATTRIBUTE_MULTIPROCESSOR_COUNT
< Number of multiprocessors on device
CU_DEVICE_ATTRIBUTE_MULTI_GPU_BOARD
< Device is on a multi-GPU board
CU_DEVICE_ATTRIBUTE_MULTI_GPU_BOARD_GROUP_ID
< Unique id for a group of devices on the same multi-GPU board
CU_DEVICE_ATTRIBUTE_NUMA_CONFIG
< NUMA configuration of a device: value is of type ::CUdeviceNumaConfig enum
CU_DEVICE_ATTRIBUTE_NUMA_ID
< NUMA node ID of the GPU memory
CU_DEVICE_ATTRIBUTE_PAGEABLE_MEMORY_ACCESS
< Device supports coherently accessing pageable memory without calling cudaHostRegister on it
CU_DEVICE_ATTRIBUTE_PAGEABLE_MEMORY_ACCESS_USES_HOST_PAGE_TABLES
< Device accesses pageable memory via the host’s page tables.
CU_DEVICE_ATTRIBUTE_PCI_BUS_ID
< PCI bus ID of the device
CU_DEVICE_ATTRIBUTE_PCI_DEVICE_ID
< PCI device ID of the device
CU_DEVICE_ATTRIBUTE_PCI_DOMAIN_ID
< PCI domain ID of the device
CU_DEVICE_ATTRIBUTE_READ_ONLY_HOST_REGISTER_SUPPORTED
< Device supports using the ::cuMemHostRegister flag ::CU_MEMHOSTREGISTER_READ_ONLY to register memory that must be mapped as read-only to the GPU
CU_DEVICE_ATTRIBUTE_REGISTERS_PER_BLOCK
< Deprecated, use CU_DEVICE_ATTRIBUTE_MAX_REGISTERS_PER_BLOCK
CU_DEVICE_ATTRIBUTE_RESERVED_SHARED_MEMORY_PER_BLOCK
< Shared memory reserved by CUDA driver per block in bytes
CU_DEVICE_ATTRIBUTE_SHARED_MEMORY_PER_BLOCK
< Deprecated, use CU_DEVICE_ATTRIBUTE_MAX_SHARED_MEMORY_PER_BLOCK
CU_DEVICE_ATTRIBUTE_SINGLE_TO_DOUBLE_PRECISION_PERF_RATIO
< Ratio of single precision performance (in floating-point operations per second) to double precision performance
CU_DEVICE_ATTRIBUTE_SPARSE_CUDA_ARRAY_SUPPORTED
< Device supports sparse CUDA arrays and sparse CUDA mipmapped arrays
CU_DEVICE_ATTRIBUTE_STREAM_PRIORITIES_SUPPORTED
< Device supports stream priorities
CU_DEVICE_ATTRIBUTE_SURFACE_ALIGNMENT
< Alignment requirement for surfaces
CU_DEVICE_ATTRIBUTE_TCC_DRIVER
< Device is using TCC driver model
CU_DEVICE_ATTRIBUTE_TENSOR_MAP_ACCESS_SUPPORTED
< Device supports accessing memory using Tensor Map.
CU_DEVICE_ATTRIBUTE_TEXTURE_ALIGNMENT
< Alignment requirement for textures
CU_DEVICE_ATTRIBUTE_TEXTURE_PITCH_ALIGNMENT
< Pitch alignment requirement for textures
CU_DEVICE_ATTRIBUTE_TIMELINE_SEMAPHORE_INTEROP_SUPPORTED
< External timeline semaphore interop is supported on the device
CU_DEVICE_ATTRIBUTE_TOTAL_CONSTANT_MEMORY
< Memory available on device for constant variables in a CUDA C kernel in bytes
CU_DEVICE_ATTRIBUTE_UNIFIED_ADDRESSING
< Device shares a unified address space with the host
CU_DEVICE_ATTRIBUTE_UNIFIED_FUNCTION_POINTERS
< Device supports unified function pointers.
CU_DEVICE_ATTRIBUTE_VIRTUAL_ADDRESS_MANAGEMENT_SUPPORTED
< Deprecated, Use CU_DEVICE_ATTRIBUTE_VIRTUAL_MEMORY_MANAGEMENT_SUPPORTED
CU_DEVICE_ATTRIBUTE_VIRTUAL_MEMORY_MANAGEMENT_SUPPORTED
< Device supports virtual memory management APIs like ::cuMemAddressReserve, ::cuMemCreate, ::cuMemMap and related APIs
CU_DEVICE_ATTRIBUTE_WARP_SIZE
< Warp size in threads
CU_MEM_ACCESS_FLAGS_PROT_MAX
CU_MEM_ACCESS_FLAGS_PROT_NONE
< Default, make the address range not accessible
CU_MEM_ACCESS_FLAGS_PROT_READ
< Make the address range read accessible
CU_MEM_ACCESS_FLAGS_PROT_READWRITE
< Make the address range read-write accessible
CU_MEM_ALLOCATION_TYPE_INVALID
CU_MEM_ALLOCATION_TYPE_MAX
This allocation type is ‘pinned’, i.e. cannot migrate from its current location while the application is actively using it
CU_MEM_ALLOCATION_TYPE_PINNED
This allocation type is ‘pinned’, i.e. cannot migrate from its current location while the application is actively using it
CU_MEM_ALLOC_GRANULARITY_MINIMUM
< Minimum required granularity for allocation
CU_MEM_ALLOC_GRANULARITY_RECOMMENDED
< Recommended granularity for allocation for best performance
CU_MEM_HANDLE_TYPE_FABRIC
< Allows a fabric handle to be used for exporting. (CUmemFabricHandle)
CU_MEM_HANDLE_TYPE_MAX
CU_MEM_HANDLE_TYPE_NONE
< Does not allow any export mechanism.
CU_MEM_HANDLE_TYPE_POSIX_FILE_DESCRIPTOR
< Allows a file descriptor to be used for exporting. Permitted only on POSIX systems. (int)
CU_MEM_HANDLE_TYPE_WIN32
< Allows a Win32 NT handle to be used for exporting. (HANDLE)
CU_MEM_HANDLE_TYPE_WIN32_KMT
< Allows a Win32 KMT handle to be used for exporting. (D3DKMT_HANDLE)
CU_MEM_LOCATION_TYPE_DEVICE
< Location is a device location, thus id is a device ordinal
CU_MEM_LOCATION_TYPE_HOST
< Location is host, id is ignored
CU_MEM_LOCATION_TYPE_HOST_NUMA
< Location is a host NUMA node, thus id is a host NUMA node id
CU_MEM_LOCATION_TYPE_HOST_NUMA_CURRENT
< Location is a host NUMA node of the current thread, id is ignored
CU_MEM_LOCATION_TYPE_INVALID
CU_MEM_LOCATION_TYPE_MAX
CU_MEM_RANGE_HANDLE_TYPE_DMA_BUF_FD
CU_MEM_RANGE_HANDLE_TYPE_MAX
CU_POINTER_ATTRIBUTE_ACCESS_FLAGS
< Returns the access flags the device associated with the current context has on the corresponding memory referenced by the pointer given
CU_POINTER_ATTRIBUTE_ALLOWED_HANDLE_TYPES
< Bitmask of allowed ::CUmemAllocationHandleType for this allocation
CU_POINTER_ATTRIBUTE_BUFFER_ID
< A process-wide unique ID for an allocated memory region
CU_POINTER_ATTRIBUTE_CONTEXT
< The ::CUcontext on which a pointer was allocated or registered
CU_POINTER_ATTRIBUTE_DEVICE_ORDINAL
< A device ordinal of a device on which a pointer was allocated or registered
CU_POINTER_ATTRIBUTE_DEVICE_POINTER
< The address at which a pointer’s memory may be accessed on the device
CU_POINTER_ATTRIBUTE_HOST_POINTER
< The address at which a pointer’s memory may be accessed on the host
CU_POINTER_ATTRIBUTE_IS_GPU_DIRECT_RDMA_CAPABLE
< 1 if the memory this pointer is referencing can be used with the GPUDirect RDMA API
CU_POINTER_ATTRIBUTE_IS_LEGACY_CUDA_IPC_CAPABLE
< 1 if this pointer maps to an allocation that is suitable for ::cudaIpcGetMemHandle, 0 otherwise
CU_POINTER_ATTRIBUTE_IS_MANAGED
< Indicates if the pointer points to managed memory
CU_POINTER_ATTRIBUTE_MAPPED
< 1 if this pointer is in a valid address range that is mapped to a backing allocation, 0 otherwise
CU_POINTER_ATTRIBUTE_MAPPING_BASE_ADDR
< The start address of the mapping that the pointer belongs to
CU_POINTER_ATTRIBUTE_MAPPING_SIZE
< Size of the actual underlying mapping that the pointer belongs to
CU_POINTER_ATTRIBUTE_MEMORY_BLOCK_ID
< A process-wide unique id corresponding to the physical allocation the pointer belongs to
CU_POINTER_ATTRIBUTE_MEMORY_TYPE
< The ::CUmemorytype describing the physical location of a pointer
CU_POINTER_ATTRIBUTE_MEMPOOL_HANDLE
< Returns the mempool handle for the allocation if it was allocated from a mempool. Otherwise returns NULL.
CU_POINTER_ATTRIBUTE_P2P_TOKENS
< A pair of tokens for use with the nv-p2p.h Linux kernel interface
CU_POINTER_ATTRIBUTE_RANGE_SIZE
< Size of the address range for this requested pointer
CU_POINTER_ATTRIBUTE_RANGE_START_ADDR
< Starting address for this requested pointer
CU_POINTER_ATTRIBUTE_SYNC_MEMOPS
< Synchronize every synchronous memory operation initiated on this region
IBV_ACCESS_OPTIONAL_FIRST
IBV_ATOMIC_GLOB
IBV_ATOMIC_HCA
IBV_ATOMIC_NONE
IBV_COUNTER_BYTES
IBV_COUNTER_PACKETS
IBV_CQ_ATTR_MODERATE
IBV_CQ_ATTR_RESERVED
IBV_CQ_INIT_ATTR_MASK_FLAGS
IBV_CQ_INIT_ATTR_MASK_PD
IBV_CREATE_CQ_ATTR_IGNORE_OVERRUN
IBV_CREATE_CQ_ATTR_SINGLE_THREADED
IBV_CREATE_CQ_SUP_WC_FLAGS
IBV_CREATE_IND_TABLE_RESERVED
IBV_DEVICE_AUTO_PATH_MIG
IBV_DEVICE_BAD_PKEY_CNTR
IBV_DEVICE_BAD_QKEY_CNTR
IBV_DEVICE_CHANGE_PHY_PORT
IBV_DEVICE_CURR_QP_STATE_MOD
IBV_DEVICE_INIT_TYPE
IBV_DEVICE_MANAGED_FLOW_STEERING
IBV_DEVICE_MEM_MGT_EXTENSIONS
IBV_DEVICE_MEM_WINDOW
IBV_DEVICE_MEM_WINDOW_TYPE_2A
IBV_DEVICE_MEM_WINDOW_TYPE_2B
IBV_DEVICE_N_NOTIFY_CQ
IBV_DEVICE_PCI_WRITE_END_PADDING
IBV_DEVICE_PORT_ACTIVE_EVENT
IBV_DEVICE_RAW_IP_CSUM
IBV_DEVICE_RAW_MULTI
IBV_DEVICE_RAW_SCATTER_FCS
IBV_DEVICE_RC_IP_CSUM
IBV_DEVICE_RC_RNR_NAK_GEN
IBV_DEVICE_RESIZE_MAX_WR
IBV_DEVICE_SHUTDOWN_PORT
IBV_DEVICE_SRQ_RESIZE
IBV_DEVICE_SYS_IMAGE_GUID
IBV_DEVICE_UD_AV_PORT_ENFORCE
IBV_DEVICE_UD_IP_CSUM
IBV_DEVICE_XRC
IBV_DM_MASK_HANDLE
IBV_EVENT_CLIENT_REREGISTER
IBV_EVENT_COMM_EST
IBV_EVENT_CQ_ERR
IBV_EVENT_DEVICE_FATAL
IBV_EVENT_GID_CHANGE
IBV_EVENT_LID_CHANGE
IBV_EVENT_PATH_MIG
IBV_EVENT_PATH_MIG_ERR
IBV_EVENT_PKEY_CHANGE
IBV_EVENT_PORT_ACTIVE
IBV_EVENT_PORT_ERR
IBV_EVENT_QP_ACCESS_ERR
IBV_EVENT_QP_FATAL
IBV_EVENT_QP_LAST_WQE_REACHED
IBV_EVENT_QP_REQ_ERR
IBV_EVENT_SM_CHANGE
IBV_EVENT_SQ_DRAINED
IBV_EVENT_SRQ_ERR
IBV_EVENT_SRQ_LIMIT_REACHED
IBV_EVENT_WQ_FATAL
IBV_FLOW_ACTION_ESP_MASK_ESN
IBV_FLOW_ATTR_ALL_DEFAULT
IBV_FLOW_ATTR_FLAGS_DONT_TRAP
IBV_FLOW_ATTR_FLAGS_EGRESS
IBV_FLOW_ATTR_MC_DEFAULT
IBV_FLOW_ATTR_NORMAL
IBV_FLOW_ATTR_SNIFFER
IBV_FLOW_SPEC_ACTION_COUNT
IBV_FLOW_SPEC_ACTION_DROP
IBV_FLOW_SPEC_ACTION_HANDLE
IBV_FLOW_SPEC_ACTION_TAG
IBV_FLOW_SPEC_ESP
IBV_FLOW_SPEC_ETH
IBV_FLOW_SPEC_GRE
IBV_FLOW_SPEC_INNER
IBV_FLOW_SPEC_IPV4
IBV_FLOW_SPEC_IPV6
IBV_FLOW_SPEC_IPV4_EXT
IBV_FLOW_SPEC_MPLS
IBV_FLOW_SPEC_TCP
IBV_FLOW_SPEC_UDP
IBV_FLOW_SPEC_VXLAN_TUNNEL
IBV_FLUSH_GLOBAL
IBV_FLUSH_MR
IBV_FLUSH_PERSISTENT
IBV_FLUSH_RANGE
IBV_FORK_DISABLED
IBV_FORK_ENABLED
IBV_FORK_UNNEEDED
IBV_GID_TYPE_IB
IBV_GID_TYPE_ROCE_V1
IBV_GID_TYPE_ROCE_V2
IBV_LINK_LAYER_ETHERNET
IBV_LINK_LAYER_INFINIBAND
IBV_LINK_LAYER_UNSPECIFIED
IBV_MIG_ARMED
IBV_MIG_MIGRATED
IBV_MIG_REARM
IBV_MTU_256
IBV_MTU_512
IBV_MTU_1024
IBV_MTU_2048
IBV_MTU_4096
IBV_MW_TYPE_1
IBV_MW_TYPE_2
IBV_NODE_CA
IBV_NODE_RNIC
IBV_NODE_ROUTER
IBV_NODE_SWITCH
IBV_NODE_UNKNOWN
IBV_NODE_UNSPECIFIED
IBV_NODE_USNIC
IBV_NODE_USNIC_UDP
IBV_ODP_SUPPORT
IBV_ODP_SUPPORT_ATOMIC
IBV_ODP_SUPPORT_IMPLICIT
IBV_ODP_SUPPORT_READ
IBV_ODP_SUPPORT_RECV
IBV_ODP_SUPPORT_SEND
IBV_ODP_SUPPORT_SRQ_RECV
IBV_ODP_SUPPORT_WRITE
IBV_OPS_SIGNALED
IBV_OPS_TM_SYNC
IBV_PARENT_DOMAIN_INIT_ATTR_ALLOCATORS
IBV_PARENT_DOMAIN_INIT_ATTR_PD_CONTEXT
IBV_PCI_ATOMIC_OPERATION_4_BYTE_SIZE_SUP
IBV_PCI_ATOMIC_OPERATION_8_BYTE_SIZE_SUP
IBV_PCI_ATOMIC_OPERATION_16_BYTE_SIZE_SUP
IBV_PORT_INFO_EXT_SUP
IBV_PORT_LINK_SPEED_HDR_SUP
IBV_PORT_LINK_SPEED_NDR_SUP
IBV_PORT_LINK_WIDTH_2X_SUP
IBV_PORT_SET_NODE_DESC_SUP
IBV_PORT_SWITCH_PORT_STATE_TABLE_SUP
IBV_PORT_VIRT_SUP
IBV_QP_CREATE_BLOCK_SELF_MCAST_LB
IBV_QP_CREATE_CVLAN_STRIPPING
IBV_QP_CREATE_PCI_WRITE_END_PADDING
IBV_QP_CREATE_SCATTER_FCS
IBV_QP_CREATE_SOURCE_QPN
IBV_QP_EX_WITH_ATOMIC_CMP_AND_SWP
IBV_QP_EX_WITH_ATOMIC_FETCH_AND_ADD
IBV_QP_EX_WITH_ATOMIC_WRITE
IBV_QP_EX_WITH_BIND_MW
IBV_QP_EX_WITH_FLUSH
IBV_QP_EX_WITH_LOCAL_INV
IBV_QP_EX_WITH_RDMA_READ
IBV_QP_EX_WITH_RDMA_WRITE
IBV_QP_EX_WITH_RDMA_WRITE_WITH_IMM
IBV_QP_EX_WITH_SEND
IBV_QP_EX_WITH_SEND_WITH_IMM
IBV_QP_EX_WITH_SEND_WITH_INV
IBV_QP_EX_WITH_TSO
IBV_QP_INIT_ATTR_CREATE_FLAGS
IBV_QP_INIT_ATTR_IND_TABLE
IBV_QP_INIT_ATTR_MAX_TSO_HEADER
IBV_QP_INIT_ATTR_PD
IBV_QP_INIT_ATTR_RX_HASH
IBV_QP_INIT_ATTR_SEND_OPS_FLAGS
IBV_QP_INIT_ATTR_XRCD
IBV_QP_OPEN_ATTR_CONTEXT
IBV_QP_OPEN_ATTR_NUM
IBV_QP_OPEN_ATTR_RESERVED
IBV_QP_OPEN_ATTR_TYPE
IBV_QP_OPEN_ATTR_XRCD
IBV_QUERY_QP_DATA_IN_ORDER_ALIGNED_128_BYTES
IBV_QUERY_QP_DATA_IN_ORDER_RETURN_CAPS
IBV_QUERY_QP_DATA_IN_ORDER_WHOLE_MSG
IBV_RATE_2_5_GBPS
IBV_RATE_5_GBPS
IBV_RATE_10_GBPS
IBV_RATE_14_GBPS
IBV_RATE_20_GBPS
IBV_RATE_25_GBPS
IBV_RATE_28_GBPS
IBV_RATE_30_GBPS
IBV_RATE_40_GBPS
IBV_RATE_50_GBPS
IBV_RATE_56_GBPS
IBV_RATE_60_GBPS
IBV_RATE_80_GBPS
IBV_RATE_100_GBPS
IBV_RATE_112_GBPS
IBV_RATE_120_GBPS
IBV_RATE_168_GBPS
IBV_RATE_200_GBPS
IBV_RATE_300_GBPS
IBV_RATE_400_GBPS
IBV_RATE_600_GBPS
IBV_RATE_800_GBPS
IBV_RATE_1200_GBPS
IBV_RATE_MAX
IBV_RAW_PACKET_CAP_CVLAN_STRIPPING
IBV_RAW_PACKET_CAP_DELAY_DROP
IBV_RAW_PACKET_CAP_IP_CSUM
IBV_RAW_PACKET_CAP_SCATTER_FCS
IBV_READ_COUNTERS_ATTR_PREFER_CACHED
IBV_REREG_MR_CHANGE_ACCESS
IBV_REREG_MR_CHANGE_PD
IBV_REREG_MR_CHANGE_TRANSLATION
IBV_REREG_MR_ERR_CMD
IBV_REREG_MR_ERR_CMD_AND_DO_FORK_NEW
IBV_REREG_MR_ERR_DONT_FORK_NEW
IBV_REREG_MR_ERR_DO_FORK_OLD
IBV_REREG_MR_ERR_INPUT
IBV_REREG_MR_FLAGS_SUPPORTED
IBV_RX_HASH_DST_IPV4
IBV_RX_HASH_DST_IPV6
IBV_RX_HASH_DST_PORT_TCP
IBV_RX_HASH_DST_PORT_UDP
IBV_RX_HASH_FUNC_TOEPLITZ
IBV_RX_HASH_INNER
IBV_RX_HASH_IPSEC_SPI
IBV_RX_HASH_SRC_IPV4
IBV_RX_HASH_SRC_IPV6
IBV_RX_HASH_SRC_PORT_TCP
IBV_RX_HASH_SRC_PORT_UDP
IBV_SRQT_BASIC
IBV_SRQT_TM
IBV_SRQT_XRC
IBV_SRQ_INIT_ATTR_CQ
IBV_SRQ_INIT_ATTR_PD
IBV_SRQ_INIT_ATTR_RESERVED
IBV_SRQ_INIT_ATTR_TM
IBV_SRQ_INIT_ATTR_TYPE
IBV_SRQ_INIT_ATTR_XRCD
IBV_SRQ_LIMIT
IBV_SRQ_MAX_WR
IBV_SYSFS_NAME_MAX
IBV_SYSFS_PATH_MAX
IBV_TMH_EAGER
IBV_TMH_FIN
IBV_TMH_NO_TAG
IBV_TMH_RNDV
IBV_TM_CAP_RC
IBV_TRANSPORT_IB
IBV_TRANSPORT_IWARP
IBV_TRANSPORT_UNKNOWN
IBV_TRANSPORT_UNSPECIFIED
IBV_TRANSPORT_USNIC
IBV_TRANSPORT_USNIC_UDP
IBV_VALUES_MASK_RAW_CLOCK
IBV_VALUES_MASK_RESERVED
IBV_WC_EX_WITH_BYTE_LEN
IBV_WC_EX_WITH_COMPLETION_TIMESTAMP
IBV_WC_EX_WITH_COMPLETION_TIMESTAMP_WALLCLOCK
IBV_WC_EX_WITH_CVLAN
IBV_WC_EX_WITH_DLID_PATH_BITS
IBV_WC_EX_WITH_FLOW_TAG
IBV_WC_EX_WITH_IMM
IBV_WC_EX_WITH_QP_NUM
IBV_WC_EX_WITH_SL
IBV_WC_EX_WITH_SLID
IBV_WC_EX_WITH_SRC_QP
IBV_WC_EX_WITH_TM_INFO
IBV_WC_IP_CSUM_OK_SHIFT
IBV_WC_STANDARD_FLAGS
IBV_WQS_ERR
IBV_WQS_RDY
IBV_WQS_RESET
IBV_WQS_UNKNOWN
IBV_WQT_RQ
IBV_WQ_ATTR_CURR_STATE
IBV_WQ_ATTR_FLAGS
IBV_WQ_ATTR_RESERVED
IBV_WQ_ATTR_STATE
IBV_WQ_FLAGS_CVLAN_STRIPPING
IBV_WQ_FLAGS_DELAY_DROP
IBV_WQ_FLAGS_PCI_WRITE_END_PADDING
IBV_WQ_FLAGS_RESERVED
IBV_WQ_FLAGS_SCATTER_FCS
IBV_WQ_INIT_ATTR_FLAGS
IBV_WQ_INIT_ATTR_RESERVED
IBV_WR_TAG_ADD
IBV_WR_TAG_DEL
IBV_WR_TAG_SYNC
IBV_XRCD_INIT_ATTR_FD
IBV_XRCD_INIT_ATTR_OFLAGS
IBV_XRCD_INIT_ATTR_RESERVED
IB_UVERBS_FLOW_ACTION_ESP_KEYMAT_AES_GCM
IB_UVERBS_FLOW_ACTION_ESP_REPLAY_BMP
IB_UVERBS_FLOW_ACTION_ESP_REPLAY_NONE
MLX5DV_BLOCK_SIZE_512
MLX5DV_BLOCK_SIZE_520
MLX5DV_BLOCK_SIZE_4048
MLX5DV_BLOCK_SIZE_4096
MLX5DV_BLOCK_SIZE_4160
MLX5DV_BLOCK_SIZE_CAP_512
MLX5DV_BLOCK_SIZE_CAP_520
MLX5DV_BLOCK_SIZE_CAP_4048
MLX5DV_BLOCK_SIZE_CAP_4096
MLX5DV_BLOCK_SIZE_CAP_4160
MLX5DV_CONTEXT_FLAGS_CQE_128B_COMP
MLX5DV_CONTEXT_FLAGS_CQE_128B_PAD
MLX5DV_CONTEXT_FLAGS_CQE_V1
MLX5DV_CONTEXT_FLAGS_DEVX
MLX5DV_CONTEXT_FLAGS_ENHANCED_MPW
MLX5DV_CONTEXT_FLAGS_MPW_ALLOWED
MLX5DV_CONTEXT_FLAGS_OBSOLETE
MLX5DV_CONTEXT_FLAGS_PACKET_BASED_CREDIT_MODE
MLX5DV_CONTEXT_FLAGS_REAL_TIME_TS
MLX5DV_CONTEXT_MASK_CLOCK_INFO_UPDATE
MLX5DV_CONTEXT_MASK_CQE_COMPRESION
MLX5DV_CONTEXT_MASK_CRYPTO_OFFLOAD
MLX5DV_CONTEXT_MASK_DCI_STREAMS
MLX5DV_CONTEXT_MASK_DC_ODP_CAPS
MLX5DV_CONTEXT_MASK_DYN_BFREGS
MLX5DV_CONTEXT_MASK_FLOW_ACTION_FLAGS
MLX5DV_CONTEXT_MASK_HCA_CORE_CLOCK
MLX5DV_CONTEXT_MASK_MAX_DC_RD_ATOM
MLX5DV_CONTEXT_MASK_NUM_LAG_PORTS
MLX5DV_CONTEXT_MASK_SIGNATURE_OFFLOAD
MLX5DV_CONTEXT_MASK_STRIDING_RQ
MLX5DV_CONTEXT_MASK_SWP
MLX5DV_CONTEXT_MASK_TUNNEL_OFFLOADS
MLX5DV_CONTEXT_MASK_WR_MEMCPY_LENGTH
MLX5DV_CQE_RES_FORMAT_CSUM
MLX5DV_CQE_RES_FORMAT_CSUM_STRIDX
MLX5DV_CQE_RES_FORMAT_HASH
MLX5DV_CQ_INIT_ATTR_FLAGS_CQE_PAD
MLX5DV_CQ_INIT_ATTR_FLAGS_RESERVED
MLX5DV_CQ_INIT_ATTR_MASK_COMPRESSED_CQE
MLX5DV_CQ_INIT_ATTR_MASK_CQE_SIZE
MLX5DV_CQ_INIT_ATTR_MASK_FLAGS
MLX5DV_CRYPTO_CAPS_CRYPTO
MLX5DV_CRYPTO_CAPS_WRAPPED_CRYPTO_GOING_TO_COMMISSIONING
MLX5DV_CRYPTO_CAPS_WRAPPED_CRYPTO_OPERATIONAL
MLX5DV_CRYPTO_ENGINES_CAP_AES_XTS
MLX5DV_CRYPTO_ENGINES_CAP_AES_XTS_MULTI_BLOCK
MLX5DV_CRYPTO_ENGINES_CAP_AES_XTS_SINGLE_BLOCK
MLX5DV_CRYPTO_KEY_PURPOSE_AES_XTS
MLX5DV_CRYPTO_KEY_SIZE_128
MLX5DV_CRYPTO_KEY_SIZE_256
MLX5DV_CRYPTO_LOGIN_STATE_INVALID
MLX5DV_CRYPTO_LOGIN_STATE_NO_LOGIN
MLX5DV_CRYPTO_LOGIN_STATE_VALID
MLX5DV_CRYPTO_STANDARD_AES_XTS
MLX5DV_CRYPTO_WRAPPED_IMPORT_METHOD_CAP_AES_XTS
MLX5DV_CTX_ATTR_BUF_ALLOCATORS
MLX5DV_DCTYPE_DCI
MLX5DV_DCTYPE_DCT
MLX5DV_DEK_INIT_ATTR_CRYPTO_LOGIN
MLX5DV_DEK_STATE_ERROR
MLX5DV_DEK_STATE_READY
MLX5DV_DM_MASK_REMOTE_VA
MLX5DV_DR_ACTION_DEST
MLX5DV_DR_ACTION_DEST_REFORMAT
MLX5DV_DR_ACTION_FLAGS_ASO_CT_DIRECTION_INITIATOR
MLX5DV_DR_ACTION_FLAGS_ASO_CT_DIRECTION_RESPONDER
MLX5DV_DR_ACTION_FLAGS_ASO_FIRST_HIT_SET
MLX5DV_DR_ACTION_FLAGS_ASO_FLOW_METER_GREEN
MLX5DV_DR_ACTION_FLAGS_ASO_FLOW_METER_RED
MLX5DV_DR_ACTION_FLAGS_ASO_FLOW_METER_UNDEFINED
MLX5DV_DR_ACTION_FLAGS_ASO_FLOW_METER_YELLOW
MLX5DV_DR_ACTION_FLAGS_ROOT_LEVEL
MLX5DV_DR_DOMAIN_SYNC_FLAGS_HW
MLX5DV_DR_DOMAIN_SYNC_FLAGS_MEM
MLX5DV_DR_DOMAIN_SYNC_FLAGS_SW
MLX5DV_DR_DOMAIN_TYPE_FDB
MLX5DV_DR_DOMAIN_TYPE_NIC_RX
MLX5DV_DR_DOMAIN_TYPE_NIC_TX
MLX5DV_DR_MATCHER_LAYOUT_NUM_RULE
MLX5DV_DR_MATCHER_LAYOUT_RESIZABLE
MLX5DV_FLOW_ACTION_COUNTERS_DEVX
MLX5DV_FLOW_ACTION_DEFAULT_MISS
MLX5DV_FLOW_ACTION_DEST_DEVX
MLX5DV_FLOW_ACTION_DEST_IBV_QP
MLX5DV_FLOW_ACTION_DROP
MLX5DV_FLOW_ACTION_ESP_MASK_FLAGS
MLX5DV_FLOW_ACTION_FLAGS_ESP_AES_GCM
MLX5DV_FLOW_ACTION_FLAGS_ESP_AES_GCM_FULL_OFFLOAD
MLX5DV_FLOW_ACTION_FLAGS_ESP_AES_GCM_REQ_METADATA
MLX5DV_FLOW_ACTION_FLAGS_ESP_AES_GCM_SPI_STEERING
MLX5DV_FLOW_ACTION_FLAGS_ESP_AES_GCM_TX_IV_IS_ESN
MLX5DV_FLOW_ACTION_IBV_COUNTER
MLX5DV_FLOW_ACTION_IBV_FLOW_ACTION
MLX5DV_FLOW_ACTION_TAG
MLX5DV_FLOW_MATCHER_MASK_FT_TYPE
MLX5DV_MKEY_CONF_FLAG_RESET_SIG_ATTR
MLX5DV_MKEY_INIT_ATTR_FLAGS_BLOCK_SIGNATURE
MLX5DV_MKEY_INIT_ATTR_FLAGS_CRYPTO
MLX5DV_MKEY_INIT_ATTR_FLAGS_INDIRECT
MLX5DV_MKEY_INIT_ATTR_FLAGS_REMOTE_INVALIDATE
MLX5DV_MKEY_INIT_ATTR_FLAGS_UPDATE_TAG
MLX5DV_MKEY_NO_ERR
MLX5DV_MKEY_SIG_BLOCK_BAD_APPTAG
MLX5DV_MKEY_SIG_BLOCK_BAD_GUARD
MLX5DV_MKEY_SIG_BLOCK_BAD_REFTAG
MLX5DV_OBJ_AH
MLX5DV_OBJ_CQ
MLX5DV_OBJ_DM
MLX5DV_OBJ_PD
MLX5DV_OBJ_QP
MLX5DV_OBJ_RWQ
MLX5DV_OBJ_SRQ
MLX5DV_QP_CREATE_ALLOW_SCATTER_TO_CQE
MLX5DV_QP_CREATE_DISABLE_SCATTER_TO_CQE
MLX5DV_QP_CREATE_PACKET_BASED_CREDIT_MODE
MLX5DV_QP_CREATE_SIG_PIPELINING
MLX5DV_QP_CREATE_TIR_ALLOW_SELF_LOOPBACK_MC
MLX5DV_QP_CREATE_TIR_ALLOW_SELF_LOOPBACK_UC
MLX5DV_QP_CREATE_TUNNEL_OFFLOADS
MLX5DV_QP_EX_WITH_MEMCPY
MLX5DV_QP_EX_WITH_MKEY_CONFIGURE
MLX5DV_QP_EX_WITH_MR_INTERLEAVED
MLX5DV_QP_EX_WITH_MR_LIST
MLX5DV_QP_EX_WITH_RAW_WQE
MLX5DV_QP_INIT_ATTR_MASK_DC
MLX5DV_QP_INIT_ATTR_MASK_DCI_STREAMS
MLX5DV_QP_INIT_ATTR_MASK_QP_CREATE_FLAGS
MLX5DV_QP_INIT_ATTR_MASK_SEND_OPS_FLAGS
MLX5DV_QP_MASK_RAW_QP_HANDLES
MLX5DV_QP_MASK_RAW_QP_TIR_ADDR
MLX5DV_QP_MASK_UAR_MMAP_OFFSET
MLX5DV_RAW_PACKET_CAP_TUNNELED_OFFLOAD_CW_MPLS_OVER_GRE
MLX5DV_RAW_PACKET_CAP_TUNNELED_OFFLOAD_CW_MPLS_OVER_UDP
MLX5DV_RAW_PACKET_CAP_TUNNELED_OFFLOAD_GENEVE
MLX5DV_RAW_PACKET_CAP_TUNNELED_OFFLOAD_GRE
MLX5DV_RAW_PACKET_CAP_TUNNELED_OFFLOAD_VXLAN
MLX5DV_SCHED_ELEM_ATTR_FLAGS_BW_SHARE
MLX5DV_SCHED_ELEM_ATTR_FLAGS_MAX_AVG_BW
MLX5DV_SIGNATURE_CRYPTO_ORDER_SIGNATURE_AFTER_CRYPTO_ON_TX
MLX5DV_SIGNATURE_CRYPTO_ORDER_SIGNATURE_BEFORE_CRYPTO_ON_TX
MLX5DV_SIG_BLOCK_ATTR_FLAG_COPY_MASK
MLX5DV_SIG_CRC_TYPE_CAP_CRC32
MLX5DV_SIG_CRC_TYPE_CAP_CRC32C
MLX5DV_SIG_CRC_TYPE_CAP_CRC64_XP10
MLX5DV_SIG_CRC_TYPE_CRC32
MLX5DV_SIG_CRC_TYPE_CRC32C
MLX5DV_SIG_CRC_TYPE_CRC64_XP10
MLX5DV_SIG_MASK_CRC32
MLX5DV_SIG_MASK_CRC32C
MLX5DV_SIG_MASK_CRC64_XP10
MLX5DV_SIG_MASK_T10DIF_APPTAG
MLX5DV_SIG_MASK_T10DIF_GUARD
MLX5DV_SIG_MASK_T10DIF_REFTAG
MLX5DV_SIG_PROT_CAP_CRC
MLX5DV_SIG_PROT_CAP_T10DIF
MLX5DV_SIG_T10DIF_BG_CAP_CRC
MLX5DV_SIG_T10DIF_BG_CAP_CSUM
MLX5DV_SIG_T10DIF_CRC
MLX5DV_SIG_T10DIF_CSUM
MLX5DV_SIG_T10DIF_FLAG_APP_ESCAPE
MLX5DV_SIG_T10DIF_FLAG_APP_REF_ESCAPE
MLX5DV_SIG_T10DIF_FLAG_REF_REMAP
MLX5DV_SIG_TYPE_CRC
MLX5DV_SIG_TYPE_T10DIF
MLX5DV_SRQ_MASK_SRQN
MLX5DV_SW_PARSING
MLX5DV_SW_PARSING_CSUM
MLX5DV_SW_PARSING_LSO
MLX5DV_UMEM_MASK_DMABUF
MLX5DV_VFIO_CTX_FLAGS_INIT_LINK_DOWN
MLX5DV_WC_MEMCPY
MLX5DV_WC_RAW_WQE
MLX5DV_WC_UMR
MLX5DV_WQ_INIT_ATTR_MASK_STRIDING_RQ
MLX5_CQE_INVALID
MLX5_CQE_L2_OK
MLX5_CQE_L3_HDR_TYPE_IPV4
MLX5_CQE_L3_HDR_TYPE_IPV6
MLX5_CQE_L3_HDR_TYPE_NONE
MLX5_CQE_L3_OK
MLX5_CQE_L4_OK
MLX5_CQE_NO_PACKET
MLX5_CQE_OWNER_MASK
MLX5_CQE_REQ
MLX5_CQE_REQ_ERR
MLX5_CQE_RESIZE_CQ
MLX5_CQE_RESP_ERR
MLX5_CQE_RESP_SEND
MLX5_CQE_RESP_SEND_IMM
MLX5_CQE_RESP_SEND_INV
MLX5_CQE_RESP_WR_IMM
MLX5_CQE_SIG_ERR
MLX5_CQE_SYNDROME_BAD_RESP_ERR
MLX5_CQE_SYNDROME_LOCAL_ACCESS_ERR
MLX5_CQE_SYNDROME_LOCAL_LENGTH_ERR
MLX5_CQE_SYNDROME_LOCAL_PROT_ERR
MLX5_CQE_SYNDROME_LOCAL_QP_OP_ERR
MLX5_CQE_SYNDROME_MW_BIND_ERR
MLX5_CQE_SYNDROME_REMOTE_ABORTED_ERR
MLX5_CQE_SYNDROME_REMOTE_ACCESS_ERR
MLX5_CQE_SYNDROME_REMOTE_INVAL_REQ_ERR
MLX5_CQE_SYNDROME_REMOTE_OP_ERR
MLX5_CQE_SYNDROME_RNR_RETRY_EXC_ERR
MLX5_CQE_SYNDROME_TRANSPORT_RETRY_EXC_ERR
MLX5_CQE_SYNDROME_WR_FLUSH_ERR
MLX5_CQE_VENDOR_SYNDROME_ODP_PFAULT
MLX5_CQ_DB_REQ_NOT
MLX5_CQ_DB_REQ_NOT_SOL
MLX5_CQ_DOORBELL
MLX5_ETH_WQE_L3_CSUM
MLX5_ETH_WQE_L4_CSUM
MLX5_EXTENDED_UD_AV
MLX5_IB_UAPI_DEVX_CR_EV_CH_FLAGS_OMIT_DATA
MLX5_IB_UAPI_DM_TYPE_HEADER_MODIFY_PATTERN_SW_ICM
MLX5_IB_UAPI_DM_TYPE_HEADER_MODIFY_SW_ICM
MLX5_IB_UAPI_DM_TYPE_MEMIC
MLX5_IB_UAPI_DM_TYPE_STEERING_SW_ICM
MLX5_IB_UAPI_FLOW_ACTION_PACKET_REFORMAT_TYPE_L2_TO_L2_TUNNEL
MLX5_IB_UAPI_FLOW_ACTION_PACKET_REFORMAT_TYPE_L2_TO_L3_TUNNEL
MLX5_IB_UAPI_FLOW_ACTION_PACKET_REFORMAT_TYPE_L2_TUNNEL_TO_L2
MLX5_IB_UAPI_FLOW_ACTION_PACKET_REFORMAT_TYPE_L3_TUNNEL_TO_L2
MLX5_IB_UAPI_FLOW_TABLE_TYPE_FDB
MLX5_IB_UAPI_FLOW_TABLE_TYPE_NIC_RX
MLX5_IB_UAPI_FLOW_TABLE_TYPE_NIC_TX
MLX5_IB_UAPI_FLOW_TABLE_TYPE_RDMA_RX
MLX5_IB_UAPI_FLOW_TABLE_TYPE_RDMA_TX
MLX5_INLINE_SCATTER_32
MLX5_INLINE_SCATTER_64
MLX5_INLINE_SEG
MLX5_INVALID_LKEY
MLX5_MMAP_GET_NC_PAGES_CMD
MLX5_MMAP_GET_REGULAR_PAGES_CMD
MLX5_OPCODE_ATOMIC_CS
MLX5_OPCODE_ATOMIC_FA
MLX5_OPCODE_ATOMIC_MASKED_CS
MLX5_OPCODE_ATOMIC_MASKED_FA
MLX5_OPCODE_CONFIG_CMD
MLX5_OPCODE_FLOW_TBL_ACCESS
MLX5_OPCODE_FMR
MLX5_OPCODE_LOCAL_INVAL
MLX5_OPCODE_MMO
MLX5_OPCODE_NOP
MLX5_OPCODE_RDMA_READ
MLX5_OPCODE_RDMA_WRITE
MLX5_OPCODE_RDMA_WRITE_IMM
MLX5_OPCODE_SEND
MLX5_OPCODE_SEND_IMM
MLX5_OPCODE_SEND_INVAL
MLX5_OPCODE_SET_PSV
MLX5_OPCODE_TAG_MATCHING
MLX5_OPCODE_TSO
MLX5_OPCODE_UMR
MLX5_RCV_DBR
MLX5_SEND_WQE_BB
MLX5_SEND_WQE_SHIFT
MLX5_SND_DBR
MLX5_TMC_SUCCESS
MLX5_WQE_CTRL_CQ_UPDATE
MLX5_WQE_CTRL_FENCE
MLX5_WQE_CTRL_INITIATOR_SMALL_FENCE
MLX5_WQE_CTRL_SOLICITED
MLX5_WQE_MKEY_CONTEXT_ACCESS_FLAGS_ATOMIC
MLX5_WQE_MKEY_CONTEXT_ACCESS_FLAGS_LOCAL_READ
MLX5_WQE_MKEY_CONTEXT_ACCESS_FLAGS_LOCAL_WRITE
MLX5_WQE_MKEY_CONTEXT_ACCESS_FLAGS_REMOTE_READ
MLX5_WQE_MKEY_CONTEXT_ACCESS_FLAGS_REMOTE_WRITE
MLX5_WQE_MKEY_CONTEXT_FREE
MLX5_WQE_UMR_CTRL_FLAG_CHECK_FREE
MLX5_WQE_UMR_CTRL_FLAG_CHECK_QPN
MLX5_WQE_UMR_CTRL_FLAG_INLINE
MLX5_WQE_UMR_CTRL_FLAG_TRNSLATION_OFFSET
MLX5_WQE_UMR_CTRL_MKEY_MASK_ACCESS_ATOMIC
MLX5_WQE_UMR_CTRL_MKEY_MASK_ACCESS_LOCAL_WRITE
MLX5_WQE_UMR_CTRL_MKEY_MASK_ACCESS_REMOTE_READ
MLX5_WQE_UMR_CTRL_MKEY_MASK_ACCESS_REMOTE_WRITE
MLX5_WQE_UMR_CTRL_MKEY_MASK_BSF_ENABLE
MLX5_WQE_UMR_CTRL_MKEY_MASK_FREE
MLX5_WQE_UMR_CTRL_MKEY_MASK_LEN
MLX5_WQE_UMR_CTRL_MKEY_MASK_MKEY
MLX5_WQE_UMR_CTRL_MKEY_MASK_QPN
MLX5_WQE_UMR_CTRL_MKEY_MASK_SIG_ERR
MLX5_WQE_UMR_CTRL_MKEY_MASK_START_ADDR
RDMA_QP_TYPE_MLX5DV
RDMA_QP_TYPE_STANDARD
cudaErrorAddressOfConstant
This indicated that the user has taken the address of a constant variable, which was forbidden up until the CUDA 3.1 release. \deprecated This error return is deprecated as of CUDA 3.1. Variables in constant memory may now have their address taken by the runtime via ::cudaGetSymbolAddress().
cudaErrorAlreadyAcquired
This indicates that a resource has already been acquired.
cudaErrorAlreadyMapped
This indicates that the resource is already mapped.
cudaErrorApiFailureBase
Any unhandled CUDA driver error is added to this value and returned via the runtime. Production releases of CUDA should not return such errors. \deprecated This error return is deprecated as of CUDA 4.1.
cudaErrorArrayIsMapped
This indicates that the specified array is currently mapped and thus cannot be destroyed.
cudaErrorAssert
An assert triggered in device code during kernel execution. The device cannot be used again. All existing allocations are invalid. To continue using CUDA, the process must be terminated and relaunched.
cudaErrorCallRequiresNewerDriver
This indicates that the API call requires a newer CUDA driver than the one currently installed. Users should install an updated NVIDIA CUDA driver to allow the API call to succeed.
cudaErrorCapturedEvent
The operation is not permitted on an event which was last recorded in a capturing stream.
cudaErrorCdpNotSupported
This error indicates that the program is using CUDA Dynamic Parallelism, but the current configuration, like MPS, does not support it.
cudaErrorCdpVersionMismatch
This error indicates that the program contains an unsupported interaction between different versions of CUDA Dynamic Parallelism.
cudaErrorCompatNotSupportedOnDevice
This error indicates that the system was upgraded to run with forward compatibility but the visible hardware detected by CUDA does not support this configuration. Refer to the compatibility documentation for the supported hardware matrix or ensure that only supported hardware is visible during initialization via the CUDA_VISIBLE_DEVICES environment variable.
cudaErrorContextIsDestroyed
This error indicates that the context current to the calling thread has been destroyed using ::cuCtxDestroy, or is a primary context which has not yet been initialized.
cudaErrorCooperativeLaunchTooLarge
This error indicates that the number of blocks launched per grid for a kernel that was launched via either ::cudaLaunchCooperativeKernel or ::cudaLaunchCooperativeKernelMultiDevice exceeds the maximum number of blocks as allowed by ::cudaOccupancyMaxActiveBlocksPerMultiprocessor or ::cudaOccupancyMaxActiveBlocksPerMultiprocessorWithFlags times the number of multiprocessors as specified by the device attribute ::cudaDevAttrMultiProcessorCount.
cudaErrorCudartUnloading
This indicates that a CUDA Runtime API call cannot be executed because it is being called during process shut down, at a point in time after CUDA driver has been unloaded.
cudaErrorDeviceAlreadyInUse
This indicates that a call tried to access an exclusive-thread device that is already in use by a different thread.
cudaErrorDeviceNotLicensed
This indicates that the device doesn’t have a valid Grid License.
cudaErrorDeviceUninitialized
This most frequently indicates that there is no context bound to the current thread. This can also be returned if the context passed to an API call is not a valid handle (such as a context that has had ::cuCtxDestroy() invoked on it). This can also be returned if a user mixes different API versions (i.e. 3010 context with 3020 API calls). See ::cuCtxGetApiVersion() for more details.
cudaErrorDevicesUnavailable
This indicates that all CUDA devices are busy or unavailable at the current time. Devices are often busy/unavailable due to use of ::cudaComputeModeProhibited, ::cudaComputeModeExclusiveProcess, or when long running CUDA kernels have filled up the GPU and are blocking new work from starting. They can also be unavailable due to memory constraints on a device that already has active CUDA work being performed.
cudaErrorDuplicateSurfaceName
This indicates that multiple surfaces (across separate CUDA source files in the application) share the same string name.
cudaErrorDuplicateTextureName
This indicates that multiple textures (across separate CUDA source files in the application) share the same string name.
cudaErrorDuplicateVariableName
This indicates that multiple global or constant variables (across separate CUDA source files in the application) share the same string name.
cudaErrorECCUncorrectable
This indicates that an uncorrectable ECC error was detected during execution.
cudaErrorExternalDevice
This indicates that an async error has occurred in a device outside of CUDA. If CUDA was waiting for an external device’s signal before consuming shared data, the external device signaled an error indicating that the data is not valid for consumption. This leaves the process in an inconsistent state and any further CUDA work will return the same error. To continue using CUDA, the process must be terminated and relaunched.
cudaErrorFileNotFound
This indicates that the file specified was not found.
cudaErrorFunctionNotLoaded
Indicates a function handle is not loaded when calling an API that requires a loaded function.
cudaErrorGraphExecUpdateFailure
This error indicates that the graph update was not performed because it included changes which violated constraints specific to instantiated graph update.
cudaErrorHardwareStackError
Device encountered an error in the call stack during kernel execution, possibly due to stack corruption or exceeding the stack size limit. This leaves the process in an inconsistent state and any further CUDA work will return the same error. To continue using CUDA, the process must be terminated and relaunched.
cudaErrorHostMemoryAlreadyRegistered
This error indicates that the memory range passed to ::cudaHostRegister() has already been registered.
cudaErrorHostMemoryNotRegistered
This error indicates that the pointer passed to ::cudaHostUnregister() does not correspond to any currently registered memory region.
cudaErrorIllegalAddress
The device encountered a load or store instruction on an invalid memory address. This leaves the process in an inconsistent state and any further CUDA work will return the same error. To continue using CUDA, the process must be terminated and relaunched.
cudaErrorIllegalInstruction
The device encountered an illegal instruction during kernel execution. This leaves the process in an inconsistent state and any further CUDA work will return the same error. To continue using CUDA, the process must be terminated and relaunched.
cudaErrorIllegalState
This indicates that a resource required by the API call is not in a valid state to perform the requested operation.
cudaErrorIncompatibleDriverContext
This indicates that the current context is not compatible with the CUDA Runtime. This can only occur if you are using CUDA Runtime/Driver interoperability and have created an existing Driver context using the driver API. The Driver context may be incompatible either because the Driver context was created using an older version of the API, because the Runtime API call expects a primary driver context and the Driver context is not primary, or because the Driver context has been destroyed. Please see \ref CUDART_DRIVER “Interactions with the CUDA Driver API” for more information.
cudaErrorInitializationError
The API call failed because the CUDA driver and runtime could not be initialized.
cudaErrorInsufficientDriver
This indicates that the installed NVIDIA CUDA driver is older than the CUDA runtime library. This is not a supported configuration. Users should install an updated NVIDIA display driver to allow the application to run.
cudaErrorInvalidAddressSpace
While executing a kernel, the device encountered an instruction which can only operate on memory locations in certain address spaces (global, shared, or local), but was supplied a memory address not belonging to an allowed address space. This leaves the process in an inconsistent state and any further CUDA work will return the same error. To continue using CUDA, the process must be terminated and relaunched.
cudaErrorInvalidChannelDescriptor
This indicates that the channel descriptor passed to the API call is not valid. This occurs if the format is not one of the formats specified by ::cudaChannelFormatKind, or if one of the dimensions is invalid.
cudaErrorInvalidClusterSize
This indicates that a kernel launch error has occurred due to cluster misconfiguration.
cudaErrorInvalidConfiguration
This indicates that a kernel launch is requesting resources that can never be satisfied by the current device. Requesting more shared memory per block than the device supports will trigger this error, as will requesting too many threads or blocks. See ::cudaDeviceProp for more device limitations.
cudaErrorInvalidDevice
This indicates that the device ordinal supplied by the user does not correspond to a valid CUDA device or that the action requested is invalid for the specified device.
cudaErrorInvalidDeviceFunction
The requested device function does not exist or is not compiled for the proper device architecture.
cudaErrorInvalidDevicePointer
This indicates that at least one device pointer passed to the API call is not a valid device pointer. \deprecated This error return is deprecated as of CUDA 10.1.
cudaErrorInvalidFilterSetting
This indicates that a non-float texture was being accessed with linear filtering. This is not supported by CUDA.
cudaErrorInvalidGraphicsContext
This indicates an error with the OpenGL or DirectX context.
cudaErrorInvalidHostPointer
This indicates that at least one host pointer passed to the API call is not a valid host pointer. \deprecated This error return is deprecated as of CUDA 10.1.
cudaErrorInvalidKernelImage
This indicates that the device kernel image is invalid.
cudaErrorInvalidMemcpyDirection
This indicates that the direction of the memcpy passed to the API call is not one of the types specified by ::cudaMemcpyKind.
cudaErrorInvalidNormSetting
This indicates that an attempt was made to read a non-float texture as a normalized float. This is not supported by CUDA.
cudaErrorInvalidPc
The device encountered an invalid program counter. This leaves the process in an inconsistent state and any further CUDA work will return the same error. To continue using CUDA, the process must be terminated and relaunched.
cudaErrorInvalidPitchValue
This indicates that one or more of the pitch-related parameters passed to the API call is not within the acceptable range for pitch.
cudaErrorInvalidPtx
A PTX compilation failed. The runtime may fall back to compiling PTX if an application does not contain a suitable binary for the current device.
cudaErrorInvalidResourceConfiguration
This error indicates one or more resources are insufficient or non-applicable for the operation.
cudaErrorInvalidResourceHandle
This indicates that a resource handle passed to the API call was not valid. Resource handles are opaque types like ::cudaStream_t and ::cudaEvent_t.
cudaErrorInvalidResourceType
This error indicates one or more resources passed in are not valid resource types for the operation.
cudaErrorInvalidSource
This indicates that the device kernel source is invalid.
cudaErrorInvalidSurface
This indicates that the surface passed to the API call is not a valid surface.
cudaErrorInvalidSymbol
This indicates that the symbol name/identifier passed to the API call is not a valid name or identifier.
cudaErrorInvalidTexture
This indicates that the texture passed to the API call is not a valid texture.
cudaErrorInvalidTextureBinding
This indicates that the texture binding is not valid. This occurs if you call ::cudaGetTextureAlignmentOffset() with an unbound texture.
cudaErrorInvalidValue
This indicates that one or more of the parameters passed to the API call is not within an acceptable range of values.
cudaErrorJitCompilationDisabled
This indicates that the JIT compilation was disabled. The JIT compilation compiles PTX. The runtime may fall back to compiling PTX if an application does not contain a suitable binary for the current device.
cudaErrorJitCompilerNotFound
This indicates that the PTX JIT compiler library was not found. The JIT Compiler library is used for PTX compilation. The runtime may fall back to compiling PTX if an application does not contain a suitable binary for the current device.
cudaErrorLaunchFailure
An exception occurred on the device while executing a kernel. Common causes include dereferencing an invalid device pointer and accessing out of bounds shared memory. Less common cases can be system specific - more information about these cases can be found in the system specific user guide. This leaves the process in an inconsistent state and any further CUDA work will return the same error. To continue using CUDA, the process must be terminated and relaunched.
cudaErrorLaunchFileScopedSurf
This error indicates that a grid launch did not occur because the kernel uses file-scoped surfaces which are unsupported by the device runtime. Kernels launched via the device runtime only support surfaces created with the Surface Object APIs.
cudaErrorLaunchFileScopedTex
This error indicates that a grid launch did not occur because the kernel uses file-scoped textures which are unsupported by the device runtime. Kernels launched via the device runtime only support textures created with the Texture Object APIs.
cudaErrorLaunchIncompatibleTexturing
This error indicates a kernel launch that uses an incompatible texturing mode.
cudaErrorLaunchMaxDepthExceeded
This error indicates that a device runtime grid launch did not occur because the depth of the child grid would exceed the maximum supported number of nested grid launches.
cudaErrorLaunchOutOfResources
This indicates that a launch did not occur because it did not have appropriate resources. Although this error is similar to ::cudaErrorInvalidConfiguration, this error usually indicates that the user has attempted to pass too many arguments to the device kernel, or the kernel launch specifies too many threads for the kernel’s register count.
cudaErrorLaunchPendingCountExceeded
This error indicates that a device runtime grid launch failed because the launch would exceed the limit ::cudaLimitDevRuntimePendingLaunchCount. For this launch to proceed successfully, ::cudaDeviceSetLimit must be called to set the ::cudaLimitDevRuntimePendingLaunchCount to be higher than the upper bound of outstanding launches that can be issued to the device runtime. Keep in mind that raising the limit of pending device runtime launches will require the runtime to reserve device memory that cannot be used for user allocations.
cudaErrorLaunchTimeout
This indicates that the device kernel took too long to execute. This can only occur if timeouts are enabled - see the device property \ref ::cudaDeviceProp::kernelExecTimeoutEnabled “kernelExecTimeoutEnabled” for more information. This leaves the process in an inconsistent state and any further CUDA work will return the same error. To continue using CUDA, the process must be terminated and relaunched.
cudaErrorLossyQuery
This indicates an attempt was made to introspect an object in a way that would discard semantically important information. This is either due to the object using functionality newer than the API version used to introspect it or omission of optional return arguments.
cudaErrorMapBufferObjectFailed
This indicates that the buffer object could not be mapped.
cudaErrorMemoryAllocation
The API call failed because it was unable to allocate enough memory or other resources to perform the requested operation.
cudaErrorMemoryValueTooLarge
This indicated that an emulated device pointer exceeded the 32-bit address range. \deprecated This error return is deprecated as of CUDA 3.1. Device emulation mode was removed with the CUDA 3.1 release.
cudaErrorMisalignedAddress
The device encountered a load or store instruction on a memory address which is not aligned. This leaves the process in an inconsistent state and any further CUDA work will return the same error. To continue using CUDA, the process must be terminated and relaunched.
cudaErrorMissingConfiguration
The device function being invoked (usually via ::cudaLaunchKernel()) was not previously configured via the ::cudaConfigureCall() function.
cudaErrorMixedDeviceExecution
Mixing of device and device emulation code was not allowed. \deprecated This error return is deprecated as of CUDA 3.1. Device emulation mode was removed with the CUDA 3.1 release.
cudaErrorMpsClientTerminated
This error indicates that the MPS client has been terminated by the server. To continue using CUDA, the process must be terminated and relaunched.
cudaErrorMpsConnectionFailed
This error indicates that the MPS client failed to connect to the MPS control daemon or the MPS server.
cudaErrorMpsMaxClientsReached
This error indicates that the hardware resources required to create MPS client have been exhausted.
cudaErrorMpsMaxConnectionsReached
This error indicates that the hardware resources required to support device connections have been exhausted.
cudaErrorMpsRpcFailure
This error indicates that the remote procedure call between the MPS server and the MPS client failed.
cudaErrorMpsServerNotReady
This error indicates that the MPS server is not ready to accept new MPS client requests. This error can be returned when the MPS server is in the process of recovering from a fatal failure.
cudaErrorNoDevice
This indicates that no CUDA-capable devices were detected by the installed CUDA driver.
cudaErrorNoKernelImageForDevice
This indicates that there is no kernel image available that is suitable for the device. This can occur when a user specifies code generation options for a particular CUDA source file that do not include the corresponding device configuration.
cudaErrorNotMapped
This indicates that a resource is not mapped.
cudaErrorNotMappedAsArray
This indicates that a mapped resource is not available for access as an array.
cudaErrorNotMappedAsPointer
This indicates that a mapped resource is not available for access as a pointer.
cudaErrorNotPermitted
This error indicates the attempted operation is not permitted.
cudaErrorNotReady
This indicates that asynchronous operations issued previously have not completed yet. This result is not actually an error, but must be indicated differently than ::cudaSuccess (which indicates completion). Calls that may return this value include ::cudaEventQuery() and ::cudaStreamQuery().
cudaErrorNotSupported
This error indicates the attempted operation is not supported on the current system or device.
cudaErrorNotYetImplemented
This indicates that the API call is not yet implemented. Production releases of CUDA will never return this error. \deprecated This error return is deprecated as of CUDA 4.1.
cudaErrorNvlinkUncorrectable
This indicates that an uncorrectable NVLink error was detected during the execution.
cudaErrorOperatingSystem
This error indicates that an OS call failed.
cudaErrorPeerAccessAlreadyEnabled
This error indicates that a call to ::cudaDeviceEnablePeerAccess() is trying to re-enable peer addressing from a context which has already had peer addressing enabled.
cudaErrorPeerAccessNotEnabled
This error indicates that ::cudaDeviceDisablePeerAccess() is trying to disable peer addressing which has not been enabled yet via ::cudaDeviceEnablePeerAccess().
cudaErrorPeerAccessUnsupported
This error indicates that P2P access is not supported across the given devices.
cudaErrorPriorLaunchFailure
This indicated that a previous kernel launch failed. This was previously used for device emulation of kernel launches. \deprecated This error return is deprecated as of CUDA 3.1. Device emulation mode was removed with the CUDA 3.1 release.
cudaErrorProfilerAlreadyStarted
\deprecated This error return is deprecated as of CUDA 5.0. It is no longer an error to call cudaProfilerStart() when profiling is already enabled.
cudaErrorProfilerAlreadyStopped
\deprecated This error return is deprecated as of CUDA 5.0. It is no longer an error to call cudaProfilerStop() when profiling is already disabled.
cudaErrorProfilerDisabled
This indicates profiler is not initialized for this run. This can happen when the application is running with external profiling tools like visual profiler.
cudaErrorProfilerNotInitialized
\deprecated This error return is deprecated as of CUDA 5.0. It is no longer an error to attempt to enable/disable the profiling via ::cudaProfilerStart or ::cudaProfilerStop without initialization.
cudaErrorSetOnActiveProcess
This indicates that the user has called ::cudaSetValidDevices(), ::cudaSetDeviceFlags(), ::cudaD3D9SetDirect3DDevice(), ::cudaD3D10SetDirect3DDevice, ::cudaD3D11SetDirect3DDevice(), or ::cudaVDPAUSetVDPAUDevice() after initializing the CUDA runtime by calling non-device management operations (allocating memory and launching kernels are examples of non-device management operations). This error can also be returned if using runtime/driver interoperability and there is an existing ::CUcontext active on the host thread.
cudaErrorSharedObjectInitFailed
This indicates that initialization of a shared object failed.
cudaErrorSharedObjectSymbolNotFound
This indicates that a link to a shared object failed to resolve.
cudaErrorSoftwareValidityNotEstablished
By default, the CUDA runtime may perform a minimal set of self-tests, as well as CUDA driver tests, to establish the validity of both. Introduced in CUDA 11.2, this error return indicates that at least one of these tests has failed and the validity of either the runtime or the driver could not be established.
cudaErrorStartupFailure
This indicates an internal startup failure in the CUDA runtime.
cudaErrorStreamCaptureImplicit
The operation would have resulted in a disallowed implicit dependency on a current capture sequence from cudaStreamLegacy.
cudaErrorStreamCaptureInvalidated
The current capture sequence on the stream has been invalidated due to a previous error.
cudaErrorStreamCaptureIsolation
A dependency would have been created which crosses the capture sequence boundary. Only implicit in-stream ordering dependencies are allowed to cross the boundary.
cudaErrorStreamCaptureMerge
The operation would have resulted in a merge of two independent capture sequences.
cudaErrorStreamCaptureUnjoined
The capture sequence contains a fork that was not joined to the primary stream.
cudaErrorStreamCaptureUnmatched
The capture was not initiated in this stream.
cudaErrorStreamCaptureUnsupported
The operation is not permitted when the stream is capturing.
cudaErrorStreamCaptureWrongThread
A stream capture sequence not initiated with the ::cudaStreamCaptureModeRelaxed argument to ::cudaStreamBeginCapture was passed to ::cudaStreamEndCapture in a different thread.
cudaErrorStubLibrary
This indicates that the CUDA driver that the application has loaded is a stub library. Applications that run with the stub rather than a real driver loaded will result in CUDA API returning this error.
cudaErrorSymbolNotFound
This indicates that a named symbol was not found. Examples of symbols are global/constant variable names, driver function names, texture names, and surface names.
cudaErrorSyncDepthExceeded
This error indicates that a call to ::cudaDeviceSynchronize made from the device runtime failed because the call was made at grid depth greater than either the default (2 levels of grids) or user specified device limit ::cudaLimitDevRuntimeSyncDepth. To be able to synchronize on launched grids at a greater depth successfully, the maximum nested depth at which ::cudaDeviceSynchronize will be called must be specified with the ::cudaLimitDevRuntimeSyncDepth limit to the ::cudaDeviceSetLimit api before the host-side launch of a kernel using the device runtime. Keep in mind that additional levels of sync depth require the runtime to reserve large amounts of device memory that cannot be used for user allocations. Note that ::cudaDeviceSynchronize made from device runtime is only supported on devices of compute capability < 9.0.
cudaErrorSynchronizationError
This indicated that a synchronization operation had failed. This was previously used for some device emulation functions. \deprecated This error return is deprecated as of CUDA 3.1. Device emulation mode was removed with the CUDA 3.1 release.
cudaErrorSystemDriverMismatch
This error indicates that there is a mismatch between the versions of the display driver and the CUDA driver. Refer to the compatibility documentation for supported versions.
cudaErrorSystemNotReady
This error indicates that the system is not yet ready to start any CUDA work. To continue using CUDA, verify the system configuration is in a valid state and all required driver daemons are actively running. More information about this error can be found in the system specific user guide.
cudaErrorTextureFetchFailed
This indicated that a texture fetch was not able to be performed. This was previously used for device emulation of texture operations. \deprecated This error return is deprecated as of CUDA 3.1. Device emulation mode was removed with the CUDA 3.1 release.
cudaErrorTextureNotBound
This indicated that a texture was not bound for access. This was previously used for device emulation of texture operations. \deprecated This error return is deprecated as of CUDA 3.1. Device emulation mode was removed with the CUDA 3.1 release.
cudaErrorTimeout
This indicates that the wait operation has timed out.
cudaErrorTooManyPeers
This error indicates that the hardware resources required to enable peer access have been exhausted for one or more of the devices passed to ::cudaEnablePeerAccess().
cudaErrorUnknown
This indicates that an unknown internal error has occurred.
cudaErrorUnmapBufferObjectFailed
This indicates that the buffer object could not be unmapped.
cudaErrorUnsupportedDevSideSync
This indicates that the code to be compiled by the PTX JIT contains an unsupported call to cudaDeviceSynchronize.
cudaErrorUnsupportedExecAffinity
This indicates that the provided execution affinity is not supported by the device.
cudaErrorUnsupportedLimit
This indicates that the ::cudaLimit passed to the API call is not supported by the active device.
cudaErrorUnsupportedPtxVersion
This indicates that the provided PTX was compiled with an unsupported toolchain. The most common reason for this, is the PTX was generated by a compiler newer than what is supported by the CUDA driver and PTX JIT compiler.
cudaSuccess
The API call returned with no errors. In the case of query calls, this also means that the operation being queried is complete (see ::cudaEventQuery() and ::cudaStreamQuery()).

Functions§

completion_cache_add
completion_cache_destroy
completion_cache_find
completion_cache_init
cqe_poll
create_mlx5dv_cq
create_mlx5dv_qp
create_mlx5dv_recv_cq
create_mlx5dv_send_cq
create_qp
db_ring
deregister_segments
get_cuda_pci_address_from_ptr
ibv_ack_async_event
ibv_ack_cq_events
ibv_alloc_pd
ibv_attach_mcast
ibv_close_device
ibv_create_ah
ibv_create_ah_from_wc
ibv_create_comp_channel
ibv_create_cq
ibv_create_qp
ibv_create_srq
ibv_dealloc_pd
ibv_dereg_mr
ibv_destroy_ah
ibv_destroy_comp_channel
ibv_destroy_cq
ibv_destroy_qp
ibv_destroy_srq
ibv_detach_mcast
ibv_event_type_str
ibv_fork_init
ibv_free_device_list
ibv_get_async_event
ibv_get_cq_event
ibv_get_device_guid
ibv_get_device_index
ibv_get_device_list
ibv_get_device_name
ibv_get_pkey_index
ibv_import_device
ibv_import_dm
ibv_import_mr
ibv_import_pd
ibv_init_ah_from_wc
ibv_is_fork_initialized
ibv_modify_qp
ibv_modify_srq
ibv_node_type_str
ibv_open_device
ibv_port_state_str
ibv_qp_to_qp_ex
ibv_query_device
ibv_query_ece
ibv_query_gid
ibv_query_pkey
ibv_query_port
ibv_query_qp
ibv_query_qp_data_in_order
ibv_query_srq
ibv_rate_to_mbps
ibv_rate_to_mult
ibv_reg_dmabuf_mr
ibv_reg_mr
ibv_reg_mr_iova
ibv_reg_mr_iova2
ibv_rereg_mr
ibv_resize_cq
ibv_resolve_eth_l2_from_gid
ibv_set_ece
ibv_unimport_dm
ibv_unimport_mr
ibv_unimport_pd
ibv_wc_status_str
launch_cqe_poll
launch_db_ring
launch_recv_wqe
launch_send_wqe
mlx5dv_alloc_dm
mlx5dv_alloc_var
mlx5dv_create_cq
mlx5dv_create_flow
mlx5dv_create_flow_action_esp
mlx5dv_create_flow_action_modify_header
mlx5dv_create_flow_action_packet_reformat
mlx5dv_create_flow_matcher
mlx5dv_create_mkey
mlx5dv_create_qp
mlx5dv_create_steering_anchor
mlx5dv_create_wq
mlx5dv_crypto_login
mlx5dv_crypto_login_create
mlx5dv_crypto_login_destroy
mlx5dv_crypto_login_query
mlx5dv_crypto_login_query_state
mlx5dv_crypto_logout
mlx5dv_dci_stream_id_reset
mlx5dv_dek_create
mlx5dv_dek_destroy
mlx5dv_dek_query
mlx5dv_destroy_flow_matcher
mlx5dv_destroy_mkey
mlx5dv_destroy_steering_anchor
mlx5dv_devx_alloc_msi_vector
mlx5dv_devx_alloc_uar
mlx5dv_devx_cq_modify
mlx5dv_devx_cq_query
mlx5dv_devx_create_cmd_comp
mlx5dv_devx_create_eq
mlx5dv_devx_create_event_channel
mlx5dv_devx_destroy_cmd_comp
mlx5dv_devx_destroy_eq
mlx5dv_devx_destroy_event_channel
mlx5dv_devx_free_msi_vector
mlx5dv_devx_free_uar
mlx5dv_devx_general_cmd
mlx5dv_devx_get_async_cmd_comp
mlx5dv_devx_get_event
mlx5dv_devx_ind_tbl_modify
mlx5dv_devx_ind_tbl_query
mlx5dv_devx_obj_create
mlx5dv_devx_obj_destroy
mlx5dv_devx_obj_modify
mlx5dv_devx_obj_query
mlx5dv_devx_obj_query_async
mlx5dv_devx_qp_modify
mlx5dv_devx_qp_query
mlx5dv_devx_query_eqn
mlx5dv_devx_srq_modify
mlx5dv_devx_srq_query
mlx5dv_devx_subscribe_devx_event
mlx5dv_devx_subscribe_devx_event_fd
mlx5dv_devx_umem_dereg
mlx5dv_devx_umem_reg
mlx5dv_devx_umem_reg_ex
mlx5dv_devx_wq_modify
mlx5dv_devx_wq_query
mlx5dv_dm_map_op_addr
mlx5dv_dr_action_create_aso
mlx5dv_dr_action_create_default_miss
mlx5dv_dr_action_create_dest_array
mlx5dv_dr_action_create_dest_devx_tir
mlx5dv_dr_action_create_dest_ib_port
mlx5dv_dr_action_create_dest_ibv_qp
mlx5dv_dr_action_create_dest_root_table
mlx5dv_dr_action_create_dest_table
mlx5dv_dr_action_create_dest_vport
mlx5dv_dr_action_create_drop
mlx5dv_dr_action_create_flow_counter
mlx5dv_dr_action_create_flow_meter
mlx5dv_dr_action_create_flow_sampler
mlx5dv_dr_action_create_modify_header
mlx5dv_dr_action_create_packet_reformat
mlx5dv_dr_action_create_pop_vlan
mlx5dv_dr_action_create_push_vlan
mlx5dv_dr_action_create_tag
mlx5dv_dr_action_destroy
mlx5dv_dr_action_modify_aso
mlx5dv_dr_action_modify_flow_meter
mlx5dv_dr_aso_other_domain_link
mlx5dv_dr_aso_other_domain_unlink
mlx5dv_dr_domain_allow_duplicate_rules
mlx5dv_dr_domain_create
mlx5dv_dr_domain_destroy
mlx5dv_dr_domain_set_reclaim_device_memory
mlx5dv_dr_domain_sync
mlx5dv_dr_matcher_create
mlx5dv_dr_matcher_destroy
mlx5dv_dr_matcher_set_layout
mlx5dv_dr_rule_create
mlx5dv_dr_rule_destroy
mlx5dv_dr_table_create
mlx5dv_dr_table_destroy
mlx5dv_dump_dr_domain
mlx5dv_dump_dr_matcher
mlx5dv_dump_dr_rule
mlx5dv_dump_dr_table
mlx5dv_free_var
mlx5dv_get_clock_info
mlx5dv_get_vfio_device_list
mlx5dv_init_obj
mlx5dv_is_supported
mlx5dv_map_ah_to_qp
mlx5dv_modify_qp_lag_port
mlx5dv_modify_qp_sched_elem
mlx5dv_modify_qp_udp_sport
mlx5dv_open_device
mlx5dv_pp_alloc
mlx5dv_pp_free
mlx5dv_qp_cancel_posted_send_wrs
mlx5dv_qp_ex_from_ibv_qp_ex
mlx5dv_query_device
mlx5dv_query_qp_lag_port
mlx5dv_reserved_qpn_alloc
mlx5dv_reserved_qpn_dealloc
mlx5dv_sched_leaf_create
mlx5dv_sched_leaf_destroy
mlx5dv_sched_leaf_modify
mlx5dv_sched_node_create
mlx5dv_sched_node_destroy
mlx5dv_sched_node_modify
mlx5dv_set_context_attr
mlx5dv_vfio_get_events_fd
mlx5dv_vfio_process_events
poll_cq_with_cache
pt_cuda_allocator_compatibility
rdma_get_active_segment_count
rdma_get_all_segment_info
rdmaxcel_cuCtxCreate_v2
rdmaxcel_cuCtxSetCurrent
rdmaxcel_cuDeviceGet
rdmaxcel_cuDeviceGetAttribute
rdmaxcel_cuDeviceGetCount
rdmaxcel_cuGetErrorString
rdmaxcel_cuInit
rdmaxcel_cuMemAddressFree
rdmaxcel_cuMemAddressReserve
rdmaxcel_cuMemCreate
rdmaxcel_cuMemGetAllocationGranularity
rdmaxcel_cuMemGetHandleForAddressRange
rdmaxcel_cuMemMap
rdmaxcel_cuMemRelease
rdmaxcel_cuMemSetAccess
rdmaxcel_cuMemUnmap
rdmaxcel_cuMemcpyDtoH_v2
rdmaxcel_cuMemcpyHtoD_v2
rdmaxcel_cuMemsetD8_v2
rdmaxcel_cuPointerGetAttribute
rdmaxcel_error_string
rdmaxcel_print_device_info
Debug: Print comprehensive device attributes
rdmaxcel_qp_create
rdmaxcel_qp_destroy
rdmaxcel_qp_fetch_add_recv_cq_idx
rdmaxcel_qp_fetch_add_recv_db_idx
rdmaxcel_qp_fetch_add_recv_wqe_idx
rdmaxcel_qp_fetch_add_send_cq_idx
rdmaxcel_qp_fetch_add_send_db_idx
rdmaxcel_qp_fetch_add_send_wqe_idx
rdmaxcel_qp_get_ibv_qp
rdmaxcel_qp_get_recv_cache
rdmaxcel_qp_get_send_cache
rdmaxcel_qp_load_recv_cq_idx
rdmaxcel_qp_load_recv_wqe_idx
rdmaxcel_qp_load_rts_timestamp
rdmaxcel_qp_load_send_cq_idx
rdmaxcel_qp_load_send_db_idx
rdmaxcel_qp_load_send_wqe_idx
rdmaxcel_qp_store_rts_timestamp
rdmaxcel_qp_store_send_db_idx
recv_wqe
register_cuda_memory
register_segments
send_wqe

Type Aliases§

CUcontext
CUdevice
CUdevice_attribute
Device properties
CUdevice_attribute_enum
Device properties
CUdevice_v1
CUdeviceptr
CUdeviceptr_v2
CUmemAccessDesc
Memory access descriptor
CUmemAccessDesc_v1
Memory access descriptor
CUmemAccess_flags
Specifies the memory protection flags for mapping.
CUmemAccess_flags_enum
Specifies the memory protection flags for mapping.
CUmemAllocationGranularity_flags
Flag for requesting different optimal and required granularities for an allocation.
CUmemAllocationGranularity_flags_enum
Flag for requesting different optimal and required granularities for an allocation.
CUmemAllocationHandleType
Flags for specifying particular handle types
CUmemAllocationHandleType_enum
Flags for specifying particular handle types
CUmemAllocationProp
Specifies the allocation properties for an allocation.
CUmemAllocationProp_v1
Specifies the allocation properties for an allocation.
CUmemAllocationType
Defines the allocation types available
CUmemAllocationType_enum
Defines the allocation types available
CUmemGenericAllocationHandle
CUmemGenericAllocationHandle_v1
CUmemLocation
Specifies a memory location.
CUmemLocationType
Specifies the type of location
CUmemLocationType_enum
Specifies the type of location
CUmemLocation_v1
Specifies a memory location.
CUmemRangeHandleType
Specifies the handle type for address range
CUmemRangeHandleType_enum
Specifies the handle type for address range
CUpointer_attribute
Pointer information
CUpointer_attribute_enum
Pointer information
CUresult
Error codes
FILE
_IO_lock_t
__be16
__be32
__be64
__off64_t
__off_t
__pthread_list_t
__syscall_slong_t
__time_t
__u8
__u16
__u32
__u64
_bindgen_ty_44
_bindgen_ty_45
_bindgen_ty_46
_bindgen_ty_47
_bindgen_ty_48
_bindgen_ty_49
_bindgen_ty_50
_bindgen_ty_51
_bindgen_ty_52
_bindgen_ty_53
_bindgen_ty_54
_bindgen_ty_55
_bindgen_ty_56
_bindgen_ty_57
_bindgen_ty_58
_bindgen_ty_59
_bindgen_ty_60
_bindgen_ty_61
_bindgen_ty_62
_bindgen_ty_63
_bindgen_ty_64
_bindgen_ty_65
_bindgen_ty_66
_bindgen_ty_67
_bindgen_ty_68
_bindgen_ty_69
_bindgen_ty_70
completion_cache_t
completion_node_t
cqe_poll_result_t
cudaError
CUDA error types
cudaError_enum
Error codes
cudaError_t
CUDA error types
ib_uverbs_flow_action_esp_keymat
ib_uverbs_flow_action_esp_replay
ibv_atomic_cap
ibv_counter_description
ibv_cq_attr_mask
ibv_cq_init_attr_mask
ibv_create_cq_attr_flags
ibv_create_cq_wc_flags
ibv_device_cap_flags
ibv_dm_mask
ibv_event_type
ibv_flow_action_esp_mask
ibv_flow_attr_type
ibv_flow_flags
ibv_flow_spec_type
ibv_fork_status
ibv_gid_type
ibv_ind_table_init_attr_mask
ibv_mig_state
ibv_mtu
ibv_mw_type
ibv_node_type
ibv_odp_general_caps
ibv_odp_transport_cap_bits
ibv_ops_flags
ibv_ops_wr_opcode
ibv_parent_domain_init_attr_mask
ibv_pci_atomic_op_size
ibv_placement_type
ibv_port_cap_flags2
ibv_qp_create_flags
ibv_qp_create_send_ops_flags
ibv_qp_init_attr_mask
ibv_qp_open_attr_mask
ibv_query_qp_data_in_order_caps
ibv_query_qp_data_in_order_flags
ibv_rate
ibv_raw_packet_caps
ibv_read_counters_flags
ibv_rereg_mr_err_code
ibv_rereg_mr_flags
ibv_rx_hash_fields
ibv_rx_hash_function_flags
ibv_selectivity_level
ibv_srq_attr_mask
ibv_srq_init_attr_mask
ibv_srq_type
ibv_tm_cap_flags
ibv_tmh_op
ibv_transport_type
ibv_values_mask
ibv_wq_attr_mask
ibv_wq_flags
ibv_wq_init_attr_mask
ibv_wq_state
ibv_wq_type
ibv_xrcd_init_attr_mask
mlx5_ib_uapi_devx_create_event_channel_flags
mlx5_ib_uapi_dm_type
mlx5_ib_uapi_flow_action_packet_reformat_type
mlx5_ib_uapi_flow_table_type
mlx5dv_block_size
mlx5dv_block_size_caps
mlx5dv_context_attr_flags
mlx5dv_context_comp_mask
mlx5dv_context_flags
mlx5dv_cq_init_attr_flags
mlx5dv_cq_init_attr_mask
mlx5dv_cqe_comp_res_format
mlx5dv_crypto_caps_flags
mlx5dv_crypto_engines_caps
mlx5dv_crypto_key_purpose
mlx5dv_crypto_key_size
mlx5dv_crypto_login_state
mlx5dv_crypto_standard
mlx5dv_crypto_wrapped_import_method_caps
mlx5dv_dc_type
mlx5dv_dek_init_attr_mask
mlx5dv_dek_state
mlx5dv_devx_umem_in_mask
mlx5dv_dm_comp_mask
mlx5dv_dr_action_aso_ct_flags
mlx5dv_dr_action_aso_first_hit_flags
mlx5dv_dr_action_aso_flow_meter_flags
mlx5dv_dr_action_dest_type
mlx5dv_dr_action_flags
mlx5dv_dr_domain_sync_flags
mlx5dv_dr_domain_type
mlx5dv_dr_matcher_layout_flags
mlx5dv_flow_action_cap_flags
mlx5dv_flow_action_esp_mask
mlx5dv_flow_action_type
mlx5dv_flow_matcher_attr_mask
mlx5dv_mkey_conf_flags
mlx5dv_mkey_err_type
mlx5dv_mkey_init_attr_flags
mlx5dv_obj_type
mlx5dv_qp_comp_mask
mlx5dv_qp_create_flags
mlx5dv_qp_create_send_ops_flags
mlx5dv_qp_init_attr_mask
mlx5dv_sched_elem_attr_flags
mlx5dv_set_ctx_attr_type
mlx5dv_sig_block_attr_flags
mlx5dv_sig_crc_type
mlx5dv_sig_crc_type_caps
mlx5dv_sig_mask
mlx5dv_sig_prot_caps
mlx5dv_sig_t10dif_bg_caps
mlx5dv_sig_t10dif_bg_type
mlx5dv_sig_t10dif_flags
mlx5dv_sig_type
mlx5dv_signature_crypto_order
mlx5dv_srq_comp_mask
mlx5dv_sw_parsing_offloads
mlx5dv_tunnel_offloads
mlx5dv_vfio_context_attr_flags
mlx5dv_wc_opcode
mlx5dv_wq_init_attr_mask
off_t
poll_context_t
rdma_qp_type_t
rdmaxcel_qp_t
std_atomic_value_type

Unions§

__pthread_cond_s__bindgen_ty_1
__pthread_cond_s__bindgen_ty_2
ib_uverbs_flow_action_esp_encap__bindgen_ty_1
ib_uverbs_flow_action_esp_encap__bindgen_ty_2
ibv_async_event__bindgen_ty_1
ibv_flow_spec__bindgen_ty_1
ibv_gid
ibv_send_wr__bindgen_ty_1
ibv_send_wr__bindgen_ty_2
ibv_send_wr__bindgen_ty_3
ibv_send_wr__bindgen_ty_4
ibv_wc__bindgen_ty_1
mlx5_wqe_av__bindgen_ty_1
mlx5_wqe_umr_ctrl_seg__bindgen_ty_1
mlx5_wqe_umr_inline_seg
mlx5dv_dc_init_attr__bindgen_ty_1
mlx5dv_dr_action_dest_attr__bindgen_ty_1
mlx5dv_flow_action_attr__bindgen_ty_1
mlx5dv_mkey_err__bindgen_ty_1
mlx5dv_sig_block_domain__bindgen_ty_1
pthread_cond_t
pthread_mutex_t