
In a shared-memory programming model, any global variable is visible to every thread.

In CUDA, constant memory is declared in a similar way to global variables in shared-memory systems, which worries me a little.

Consider the following code:

__constant__ int array[1024];

void hostFunction(int DeviceID, cudaStream_t streamIdx)
{
    cudaSetDevice(DeviceID);
    someKernel<<<100,1024,0, streamIdx>>>(...);
    // someKernel uses the data stored in array[] on the current device
}

So: is the content of array[] local to each CUDA context/device, such that we can safely update each device's "private" array[] without worrying about changing the values of array[] on the other devices?

BTW: I searched the site and found some related questions, but I could not find a clear answer in any of them.


1 Answer


Is the content of array[] local to each CUDA context/device, such that we can safely update each device's "private" array[] without worrying about changing the values of array[] on the other devices?

Yes, the single line of code

__constant__ int array[1024];

creates a separate allocation on each device that your program accesses, so each device has its own copy.

You can then individually load the __constant__ memory on each device using, for example:

cudaSetDevice(0);
cudaMemcpyToSymbol(array, my_device_0_constant_data, 1024*sizeof(int));

and repeat the above for each device you wish to use.
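Applied to the array[1024] from the question, the loading loop might look like this. This is only a minimal sketch: num_devices and the per-device host buffer host_data are hypothetical names assumed to be supplied by the caller, not part of the original question.

__constant__ int array[1024];

// Hypothetical helper: load a different 1024-int block into each device's copy of array[].
void loadConstantOnAllDevices(int num_devices, const int host_data[][1024])
{
    for (int dev = 0; dev < num_devices; dev++) {
        cudaSetDevice(dev);                                           // select the device whose copy we want to fill
        cudaMemcpyToSymbol(array, host_data[dev], 1024*sizeof(int));  // writes only this device's array[]
    }
}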

Similar statements can be made about __device__ variables.
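For instance, a __device__ array is declared and loaded per device in exactly the same way. Again just a sketch: dev_array and my_device_data are hypothetical names used for illustration.

__device__ int dev_array[1024];

// After cudaSetDevice(dev), this call updates only that device's copy of dev_array.
cudaMemcpyToSymbol(dev_array, my_device_data, 1024*sizeof(int));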

Here is a fully worked example:

$ cat t223.cu
#include <stdio.h>

#define cudaCheckErrors(msg) \
    do { \
        cudaError_t __err = cudaGetLastError(); \
        if (__err != cudaSuccess) { \
            fprintf(stderr, "Fatal error: %s (%s at %s:%d)\n", \
                msg, cudaGetErrorString(__err), \
                __FILE__, __LINE__); \
            fprintf(stderr, "*** FAILED - ABORTING\n"); \
            exit(1); \
        } \
    } while (0)

// One instance of each of these symbols exists per device.
__constant__ int my_const_data;

__device__ int my_dev_data;

// Prints the values of the two symbols as seen from the device the kernel runs on.
__global__ void my_kernel(int my_dev){

  printf("device %d constant data is: %d\n", my_dev, my_const_data);
  printf("device %d __device__ data is: %d\n", my_dev, my_dev_data);
}

int main(){

  int num_dev = 0;
  cudaGetDeviceCount(&num_dev);
  cudaCheckErrors("get device count fail");
  if (num_dev == 0) {printf("no cuda devices found!\n"); return 1;}
  // Load a different value into each device's copy of the two symbols.
  for (int i = 0; i < num_dev; i++){
    int cdata = i;
    int ddata = 10*i;
    cudaSetDevice(i);
    cudaMemcpyToSymbol(my_const_data, &cdata, sizeof(int));
    cudaMemcpyToSymbol(my_dev_data, &ddata, sizeof(int));
    cudaCheckErrors("memcpy to symbol fail");
  }
  // Launch a kernel on each device and print what that device sees.
  for (int i = 0; i < num_dev; i++){
    cudaSetDevice(i);
    my_kernel<<<1,1>>>(i);
    cudaDeviceSynchronize();
  }
  cudaCheckErrors("kernel fail");
  return 0;
}

$ nvcc -arch=sm_20 -o t223 t223.cu
$ ./t223
device 0 constant data is: 0
device 0 __device__ data is: 0
device 1 constant data is: 1
device 1 __device__ data is: 10
device 2 constant data is: 2
device 2 __device__ data is: 20
device 3 constant data is: 3
device 3 __device__ data is: 30
$