Struct cudaDeviceProp

#[repr(C)]
pub struct cudaDeviceProp {
    pub name: [c_char; 256],
    pub uuid: cudaUUID_t,
    pub luid: [c_char; 8],
    pub luidDeviceNodeMask: c_uint,
    pub totalGlobalMem: usize,
    pub sharedMemPerBlock: usize,
    pub regsPerBlock: c_int,
    pub warpSize: c_int,
    pub memPitch: usize,
    pub maxThreadsPerBlock: c_int,
    pub maxThreadsDim: [c_int; 3],
    pub maxGridSize: [c_int; 3],
    pub clockRate: c_int,
    pub totalConstMem: usize,
    pub major: c_int,
    pub minor: c_int,
    pub textureAlignment: usize,
    pub texturePitchAlignment: usize,
    pub deviceOverlap: c_int,
    pub multiProcessorCount: c_int,
    pub kernelExecTimeoutEnabled: c_int,
    pub integrated: c_int,
    pub canMapHostMemory: c_int,
    pub computeMode: c_int,
    pub maxTexture1D: c_int,
    pub maxTexture1DMipmap: c_int,
    pub maxTexture1DLinear: c_int,
    pub maxTexture2D: [c_int; 2],
    pub maxTexture2DMipmap: [c_int; 2],
    pub maxTexture2DLinear: [c_int; 3],
    pub maxTexture2DGather: [c_int; 2],
    pub maxTexture3D: [c_int; 3],
    pub maxTexture3DAlt: [c_int; 3],
    pub maxTextureCubemap: c_int,
    pub maxTexture1DLayered: [c_int; 2],
    pub maxTexture2DLayered: [c_int; 3],
    pub maxTextureCubemapLayered: [c_int; 2],
    pub maxSurface1D: c_int,
    pub maxSurface2D: [c_int; 2],
    pub maxSurface3D: [c_int; 3],
    pub maxSurface1DLayered: [c_int; 2],
    pub maxSurface2DLayered: [c_int; 3],
    pub maxSurfaceCubemap: c_int,
    pub maxSurfaceCubemapLayered: [c_int; 2],
    pub surfaceAlignment: usize,
    pub concurrentKernels: c_int,
    pub ECCEnabled: c_int,
    pub pciBusID: c_int,
    pub pciDeviceID: c_int,
    pub pciDomainID: c_int,
    pub tccDriver: c_int,
    pub asyncEngineCount: c_int,
    pub unifiedAddressing: c_int,
    pub memoryClockRate: c_int,
    pub memoryBusWidth: c_int,
    pub l2CacheSize: c_int,
    pub persistingL2CacheMaxSize: c_int,
    pub maxThreadsPerMultiProcessor: c_int,
    pub streamPrioritiesSupported: c_int,
    pub globalL1CacheSupported: c_int,
    pub localL1CacheSupported: c_int,
    pub sharedMemPerMultiprocessor: usize,
    pub regsPerMultiprocessor: c_int,
    pub managedMemory: c_int,
    pub isMultiGpuBoard: c_int,
    pub multiGpuBoardGroupID: c_int,
    pub hostNativeAtomicSupported: c_int,
    pub singleToDoublePrecisionPerfRatio: c_int,
    pub pageableMemoryAccess: c_int,
    pub concurrentManagedAccess: c_int,
    pub computePreemptionSupported: c_int,
    pub canUseHostPointerForRegisteredMem: c_int,
    pub cooperativeLaunch: c_int,
    pub cooperativeMultiDeviceLaunch: c_int,
    pub sharedMemPerBlockOptin: usize,
    pub pageableMemoryAccessUsesHostPageTables: c_int,
    pub directManagedMemAccessFromHost: c_int,
    pub maxBlocksPerMultiProcessor: c_int,
    pub accessPolicyMaxWindowSize: c_int,
    pub reservedSharedMemPerBlock: usize,
    pub hostRegisterSupported: c_int,
    pub sparseCudaArraySupported: c_int,
    pub hostRegisterReadOnlySupported: c_int,
    pub timelineSemaphoreInteropSupported: c_int,
    pub memoryPoolsSupported: c_int,
    pub gpuDirectRDMASupported: c_int,
    pub gpuDirectRDMAFlushWritesOptions: c_uint,
    pub gpuDirectRDMAWritesOrdering: c_int,
    pub memoryPoolSupportedHandleTypes: c_uint,
    pub deferredMappingCudaArraySupported: c_int,
    pub ipcEventSupported: c_int,
    pub clusterLaunch: c_int,
    pub unifiedFunctionPointers: c_int,
    pub reserved2: [c_int; 2],
    pub reserved1: [c_int; 1],
    pub reserved: [c_int; 60],
}

CUDA device properties
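
This struct is normally filled in by the CUDA runtime rather than constructed by hand. Below is a minimal sketch of querying it for one device, assuming cudaDeviceProp is in scope from this crate and that a cudaGetDeviceProperties binding with the usual runtime signature is available; the extern declaration is for illustration only, and in practice you would use the binding your crate already exports.

use std::ffi::CStr;
use std::mem::MaybeUninit;
use std::os::raw::c_int;

// Illustrative declaration of the CUDA runtime entry point; the return type
// is simplified to c_int (cudaSuccess == 0).
extern "C" {
    fn cudaGetDeviceProperties(prop: *mut cudaDeviceProp, device: c_int) -> c_int;
}

fn print_device_summary(device: c_int) {
    unsafe {
        let mut prop = MaybeUninit::<cudaDeviceProp>::uninit();
        // Ask the runtime to fill in the properties for `device`.
        let status = cudaGetDeviceProperties(prop.as_mut_ptr(), device);
        assert_eq!(status, 0, "cudaGetDeviceProperties failed: {status}");
        let prop = prop.assume_init();

        // `name` is a NUL-terminated ASCII string in a fixed 256-byte buffer.
        let name = CStr::from_ptr(prop.name.as_ptr()).to_string_lossy();
        println!(
            "{name}: compute capability {}.{}, {} SMs, {} MiB global memory",
            prop.major,
            prop.minor,
            prop.multiProcessorCount,
            prop.totalGlobalMem / (1024 * 1024),
        );
    }
}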

Fields§

§name: [c_char; 256]

< ASCII string identifying device

§uuid: cudaUUID_t

< 16-byte unique identifier

§luid: [c_char; 8]

< 8-byte locally unique identifier. Value is undefined on TCC and non-Windows platforms

§luidDeviceNodeMask: c_uint

< LUID device node mask. Value is undefined on TCC and non-Windows platforms

§totalGlobalMem: usize

< Global memory available on device in bytes

§sharedMemPerBlock: usize

< Shared memory available per block in bytes

§regsPerBlock: c_int

< 32-bit registers available per block

§warpSize: c_int

< Warp size in threads

§memPitch: usize

< Maximum pitch in bytes allowed by memory copies

§maxThreadsPerBlock: c_int

< Maximum number of threads per block

§maxThreadsDim: [c_int; 3]

< Maximum size of each dimension of a block

§maxGridSize: [c_int; 3]

< Maximum size of each dimension of a grid
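
The three limits above (maxThreadsPerBlock, maxThreadsDim, maxGridSize) bound every kernel launch configuration. A small validation sketch against a populated cudaDeviceProp; the helper name is ours, not a crate API:

use std::os::raw::c_int;

// Illustrative helper: true when a proposed launch shape fits within the
// device limits reported in `prop`.
fn launch_config_fits(prop: &cudaDeviceProp, block: [c_int; 3], grid: [c_int; 3]) -> bool {
    let threads_per_block: i64 = block.iter().map(|&d| i64::from(d)).product();
    threads_per_block >= 1
        && threads_per_block <= i64::from(prop.maxThreadsPerBlock)
        && block.iter().zip(&prop.maxThreadsDim).all(|(&b, &m)| b >= 1 && b <= m)
        && grid.iter().zip(&prop.maxGridSize).all(|(&g, &m)| g >= 1 && g <= m)
}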

§clockRate: c_int

< Deprecated; clock frequency in kilohertz

§totalConstMem: usize

< Constant memory available on device in bytes

§major: c_int

< Major compute capability

§minor: c_int

< Minor compute capability

§textureAlignment: usize

< Alignment requirement for textures

§texturePitchAlignment: usize

< Pitch alignment requirement for texture references bound to pitched memory

§deviceOverlap: c_int

< Device can concurrently copy memory and execute a kernel. Deprecated; use asyncEngineCount instead.

§multiProcessorCount: c_int

< Number of multiprocessors on device

§kernelExecTimeoutEnabled: c_int

< Deprecated; specifies whether there is a run-time limit on kernels

§integrated: c_int

< Device is integrated as opposed to discrete

§canMapHostMemory: c_int

< Device can map host memory with cudaHostAlloc/cudaHostGetDevicePointer

§computeMode: c_int

< Deprecated; compute mode (see ::cudaComputeMode)

§maxTexture1D: c_int

< Maximum 1D texture size

§maxTexture1DMipmap: c_int

< Maximum 1D mipmapped texture size

§maxTexture1DLinear: c_int

< Deprecated; do not use. Use cudaDeviceGetTexture1DLinearMaxWidth() or cuDeviceGetTexture1DLinearMaxWidth() instead.

§maxTexture2D: [c_int; 2]

< Maximum 2D texture dimensions

§maxTexture2DMipmap: [c_int; 2]

< Maximum 2D mipmapped texture dimensions

§maxTexture2DLinear: [c_int; 3]

< Maximum dimensions (width, height, pitch) for 2D textures bound to pitched memory

§maxTexture2DGather: [c_int; 2]

< Maximum 2D texture dimensions if texture gather operations have to be performed

§maxTexture3D: [c_int; 3]

< Maximum 3D texture dimensions

§maxTexture3DAlt: [c_int; 3]

< Maximum alternate 3D texture dimensions

§maxTextureCubemap: c_int

< Maximum Cubemap texture dimensions

§maxTexture1DLayered: [c_int; 2]

< Maximum 1D layered texture dimensions

§maxTexture2DLayered: [c_int; 3]

< Maximum 2D layered texture dimensions

§maxTextureCubemapLayered: [c_int; 2]

< Maximum Cubemap layered texture dimensions

§maxSurface1D: c_int

< Maximum 1D surface size

§maxSurface2D: [c_int; 2]

< Maximum 2D surface dimensions

§maxSurface3D: [c_int; 3]

< Maximum 3D surface dimensions

§maxSurface1DLayered: [c_int; 2]

< Maximum 1D layered surface dimensions

§maxSurface2DLayered: [c_int; 3]

< Maximum 2D layered surface dimensions

§maxSurfaceCubemap: c_int

< Maximum Cubemap surface dimensions

§maxSurfaceCubemapLayered: [c_int; 2]

< Maximum Cubemap layered surface dimensions

§surfaceAlignment: usize

< Alignment requirements for surfaces

§concurrentKernels: c_int

< Device can possibly execute multiple kernels concurrently

§ECCEnabled: c_int

< Device has ECC support enabled

§pciBusID: c_int

< PCI bus ID of the device

§pciDeviceID: c_int

< PCI device ID of the device

§pciDomainID: c_int

< PCI domain ID of the device
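
Together, pciDomainID, pciBusID, and pciDeviceID identify the device's PCI location. An illustrative formatter (our own helper, approximating the string that cudaDeviceGetPCIBusId or nvidia-smi would report, with the PCI function fixed to 0):

// Illustrative only: "domain:bus:device.function" in hexadecimal.
fn pci_location(prop: &cudaDeviceProp) -> String {
    format!(
        "{:04x}:{:02x}:{:02x}.0",
        prop.pciDomainID, prop.pciBusID, prop.pciDeviceID
    )
}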

§tccDriver: c_int

< 1 if the device is a Tesla device using the TCC driver, 0 otherwise

§asyncEngineCount: c_int

< Number of asynchronous engines

§unifiedAddressing: c_int

< Device shares a unified address space with the host

§memoryClockRate: c_int

< Deprecated; peak memory clock frequency in kilohertz

§memoryBusWidth: c_int

< Global memory bus width in bits
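
memoryClockRate and memoryBusWidth are commonly combined to estimate theoretical peak memory bandwidth. The sketch below assumes a double-data-rate interface (the factor of 2), which is our assumption rather than something the struct reports, and note that memoryClockRate is deprecated in recent CUDA releases:

// Rough theoretical peak bandwidth in GB/s:
// 2 transfers per clock (DDR assumption) * clock in Hz * bus width in bytes.
fn peak_memory_bandwidth_gb_s(prop: &cudaDeviceProp) -> f64 {
    let clock_hz = prop.memoryClockRate as f64 * 1_000.0; // field is in kHz
    let bus_bytes = prop.memoryBusWidth as f64 / 8.0;     // field is in bits
    2.0 * clock_hz * bus_bytes / 1e9
}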

§l2CacheSize: c_int

< Size of L2 cache in bytes

§persistingL2CacheMaxSize: c_int

< Device’s maximum L2 persisting lines capacity setting in bytes

§maxThreadsPerMultiProcessor: c_int

< Maximum resident threads per multiprocessor

§streamPrioritiesSupported: c_int

< Device supports stream priorities

§globalL1CacheSupported: c_int

< Device supports caching globals in L1

§localL1CacheSupported: c_int

< Device supports caching locals in L1

§sharedMemPerMultiprocessor: usize

< Shared memory available per multiprocessor in bytes

§regsPerMultiprocessor: c_int

< 32-bit registers available per multiprocessor

§managedMemory: c_int

< Device supports allocating managed memory on this system

§isMultiGpuBoard: c_int

< Device is on a multi-GPU board

§multiGpuBoardGroupID: c_int

< Unique identifier for a group of devices on the same multi-GPU board

§hostNativeAtomicSupported: c_int

< Link between the device and the host supports native atomic operations

§singleToDoublePrecisionPerfRatio: c_int

< Deprecated; ratio of single-precision performance (in floating-point operations per second) to double-precision performance

§pageableMemoryAccess: c_int

< Device supports coherently accessing pageable memory without calling cudaHostRegister on it

§concurrentManagedAccess: c_int

< Device can coherently access managed memory concurrently with the CPU

§computePreemptionSupported: c_int

< Device supports Compute Preemption

§canUseHostPointerForRegisteredMem: c_int

< Device can access host registered memory at the same virtual address as the CPU

§cooperativeLaunch: c_int

< Device supports launching cooperative kernels via ::cudaLaunchCooperativeKernel

§cooperativeMultiDeviceLaunch: c_int

< Deprecated, because cudaLaunchCooperativeKernelMultiDevice is itself deprecated.

§sharedMemPerBlockOptin: usize

< Per-device maximum shared memory per block usable by special opt-in

§pageableMemoryAccessUsesHostPageTables: c_int

< Device accesses pageable memory via the host’s page tables

§directManagedMemAccessFromHost: c_int

< Host can directly access managed memory on the device without migration.

§maxBlocksPerMultiProcessor: c_int

< Maximum number of resident blocks per multiprocessor
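
As an illustration of how the per-multiprocessor limits interact, the sketch below (our own helper, not a crate API) estimates how many blocks of a given size can be resident on one multiprocessor when only the thread-count and block-count limits are considered; registers and shared memory, which also constrain occupancy, are ignored:

// Simplified residency estimate using maxThreadsPerMultiProcessor and
// maxBlocksPerMultiProcessor only.
fn max_resident_blocks(prop: &cudaDeviceProp, threads_per_block: i32) -> i32 {
    assert!(threads_per_block > 0);
    let by_threads = prop.maxThreadsPerMultiProcessor / threads_per_block;
    by_threads.min(prop.maxBlocksPerMultiProcessor)
}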

§accessPolicyMaxWindowSize: c_int

< The maximum value of ::cudaAccessPolicyWindow::num_bytes.

§reservedSharedMemPerBlock: usize

< Shared memory reserved by CUDA driver per block in bytes

§hostRegisterSupported: c_int

< Device supports host memory registration via ::cudaHostRegister.

§sparseCudaArraySupported: c_int

< 1 if the device supports sparse CUDA arrays and sparse CUDA mipmapped arrays, 0 otherwise

§hostRegisterReadOnlySupported: c_int

< Device supports using the ::cudaHostRegister flag cudaHostRegisterReadOnly to register memory that must be mapped as read-only to the GPU

§timelineSemaphoreInteropSupported: c_int

< External timeline semaphore interop is supported on the device

§memoryPoolsSupported: c_int

< 1 if the device supports using the cudaMallocAsync and cudaMemPool family of APIs, 0 otherwise

§gpuDirectRDMASupported: c_int

< 1 if the device supports GPUDirect RDMA APIs, 0 otherwise

§gpuDirectRDMAFlushWritesOptions: c_uint

< Bitmask to be interpreted according to the ::cudaFlushGPUDirectRDMAWritesOptions enum

§gpuDirectRDMAWritesOrdering: c_int

< See the ::cudaGPUDirectRDMAWritesOrdering enum for numerical values

§memoryPoolSupportedHandleTypes: c_uint

< Bitmask of handle types supported with mempool-based IPC

§deferredMappingCudaArraySupported: c_int

< 1 if the device supports deferred mapping CUDA arrays and CUDA mipmapped arrays

§ipcEventSupported: c_int

< Device supports IPC Events.

§clusterLaunch: c_int

< Indicates device supports cluster launch

§unifiedFunctionPointers: c_int

< Indicates device supports unified pointers

§reserved2: [c_int; 2]

§reserved1: [c_int; 1]

< Reserved for future use

§reserved: [c_int; 60]

< Reserved for future use

Trait Implementations§

impl Clone for cudaDeviceProp

fn clone(&self) -> cudaDeviceProp

Returns a duplicate of the value.

fn clone_from(&mut self, source: &Self)

Performs copy-assignment from source.

impl Debug for cudaDeviceProp

fn fmt(&self, f: &mut Formatter<'_>) -> Result

Formats the value using the given formatter.

impl Copy for cudaDeviceProp

Auto Trait Implementations§

Blanket Implementations§

impl<T> Any for T
where T: 'static + ?Sized,

fn type_id(&self) -> TypeId

Gets the TypeId of self.

impl<T> Borrow<T> for T
where T: ?Sized,

fn borrow(&self) -> &T

Immutably borrows from an owned value.

impl<T> BorrowMut<T> for T
where T: ?Sized,

fn borrow_mut(&mut self) -> &mut T

Mutably borrows from an owned value.

impl<T> CloneToUninit for T
where T: Clone,

unsafe fn clone_to_uninit(&self, dest: *mut u8)

This is a nightly-only experimental API. (clone_to_uninit)
Performs copy-assignment from self to dest.

impl<T> From<T> for T

fn from(t: T) -> T

Returns the argument unchanged.

impl<T, U> Into<U> for T
where U: From<T>,

fn into(self) -> U

Calls U::from(self).

That is, this conversion is whatever the implementation of From<T> for U chooses to do.

impl<T> ToOwned for T
where T: Clone,

type Owned = T

The resulting type after obtaining ownership.

fn to_owned(&self) -> T

Creates owned data from borrowed data, usually by cloning.

fn clone_into(&self, target: &mut T)

Uses borrowed data to replace owned data, usually by cloning.

impl<T, U> TryFrom<U> for T
where U: Into<T>,

type Error = Infallible

The type returned in the event of a conversion error.

fn try_from(value: U) -> Result<T, <T as TryFrom<U>>::Error>

Performs the conversion.

impl<T, U> TryInto<U> for T
where U: TryFrom<T>,

type Error = <U as TryFrom<T>>::Error

The type returned in the event of a conversion error.

fn try_into(self) -> Result<U, <U as TryFrom<T>>::Error>

Performs the conversion.