2
votes

CUDA 5, device capabilities 3.5, VS 2012, 64bit Win 2012 Server.

There is no shared memory access between threads, every thread is standalone.

I am using pinned memory with zero-copy. From the host, I can only read the values the device has written into the pinned memory after I issue a cudaDeviceSynchronize on the host.

I want to be able to:

  1. Flush into the pinned memory as soon as the device has updated it.
  2. Not block the device thread (maybe by copying asynchronously)

I tried calling __threadfence_system and __threadfence after each device write, but that didn't flush.

Below is a full sample CUDA code that demonstrates my question:

#include <conio.h>
#include <cstdio>
#include "cuda.h"
#include "cuda_runtime.h"
#include "device_launch_parameters.h"

// Writes tid+1 into the mapped (host-visible) buffer, fences, and then burns
// time so the host can try to observe the value while the kernel is resident.
__global__ void Kernel(volatile float* hResult) 
{
    const int tid = blockDim.x * blockIdx.x + threadIdx.x;

    printf("Kernel %u: Before Writing in Kernel\n", tid);
    hResult[tid] = tid + 1;
    // Fence the store out toward system (host-visible) memory.
    __threadfence_system();
    // expecting that the data is getting flushed to host here!
    printf("Kernel %u: After Writing in Kernel\n", tid);
    // Busy-wait to keep the kernel alive for a while.
    // NOTE(review): an empty loop with no observable effect like this one may
    // be removed entirely by the compiler — confirm with the generated SASS.
    for (int spin = 0; spin < 100000000; spin++);
}

// Host driver: allocates mapped pinned memory, launches the kernel, then
// spin-waits on the host until every element has been written by the device.
//
// Fixes vs. the original:
//  - `void main()` is ill-formed C++; main must return int.
//  - cudaHostAlloc and the kernel launch are now error-checked.
//  - the poll counter is size_t, avoiding a signed/unsigned comparison.
int main()
{
    size_t blocks = 2;
    volatile float* hResult;
    cudaError_t err = cudaHostAlloc((void**)&hResult, blocks * sizeof(float),
                                    cudaHostAllocMapped);
    if (err != cudaSuccess)
    {
        printf("cudaHostAlloc failed: %s\n", cudaGetErrorString(err));
        return 1;
    }
    // NOTE(review): the host pointer is passed straight to the kernel; this
    // relies on UVA (64-bit process, CC >= 2.0). On non-UVA configurations,
    // obtain the device alias with cudaHostGetDevicePointer first.
    Kernel<<<1, blocks>>>(hResult);
    err = cudaGetLastError();   // surfaces launch-configuration errors
    if (err != cudaSuccess)
    {
        printf("kernel launch failed: %s\n", cudaGetErrorString(err));
        return 1;
    }
    size_t filledElementsCounter = 0;
    // naive polling loop that could be implemented on another host thread
    while (filledElementsCounter < blocks)
    {
        // Blocks until the value changes. This scans sequentially even though
        // device threads finish in no particular order (fine for this sample).
        while (hResult[filledElementsCounter] == 0);
        printf("%f\n", hResult[filledElementsCounter]);
        filledElementsCounter++;
    }
    cudaFreeHost((void *)hResult);
    system("pause");
    return 0;
}

Currently this sample will wait indefinitely as nothing is being read from the device unless I issue cudaDeviceSynchronize. The sample below works, but it is NOT what I want as it defeats the purpose of async copying:

// Synchronous variant: blocks the host with cudaDeviceSynchronize before
// reading the mapped buffer (works, but defeats the asynchronous goal).
//
// Fixes vs. the original:
//  - `void main()` is ill-formed C++; main must return int.
//  - bare `throw;` outside a catch block calls std::terminate; report the
//    error and return a failure code instead.
//  - the loop index is size_t, avoiding a signed/unsigned comparison.
int main()
{
    size_t blocks = 2;
    volatile float* hResult;
    cudaHostAlloc((void**)&hResult, blocks*sizeof(float), cudaHostAllocMapped);
    Kernel<<<1,blocks>>>(hResult);
    cudaError_t error = cudaDeviceSynchronize();
    if (error != cudaSuccess)
    {
        printf("cudaDeviceSynchronize failed: %s\n", cudaGetErrorString(error));
        return 1;
    }
    for (size_t i = 0; i < blocks; i++)
    {
        printf("%f\n", hResult[i]);
    }
    cudaFreeHost((void *)hResult);
    system("pause");
    return 0;
}
3
Did you solve this problem? Did you try using dynamic parallelism to write data to host CPU memory? Use cudaMemcpyAsync(uva_host_ptr, device_ptr, size); in the kernel function, as shown in the following link: on-demand.gputechconf.com/gtc/2012/presentations/…Alex

3 Answers

4
votes

I played with your code on a Centos 6.2 with CUDA 5.5 and a Tesla M2090 and can conclude this:

The fact that it does not work on your system must be a driver issue, and I suggest that you get the TCC drivers.

I attached my code that runs fine and does what you want. The values appear on the host side before the kernel ends. As you can see I added some compute code to prevent the for loop to be removed due to compiler optimizations. I added a stream and a callback that get executed after all work in the stream is finished. The program outputs 1 2 and for a long time does nothing until stream finished... is printed to the console.

 #include <iostream>
 #include "cuda.h"
 #include "cuda_runtime.h"
 #include "device_launch_parameters.h"

 #define SEC_CUDA_CALL(val)           checkCall  ( (val), #val, __FILE__, __LINE__ )

 bool checkCall(cudaError_t result, char const* const func,  const char *const file, int const line)
 {
    if (result != cudaSuccess)
    {
            std::cout << "CUDA (runtime api) error: " << func << " failed! " << cudaGetErrorString(result) << " (" << result << ") " << file << ":" << line << std::endl;
    }
    return result != cudaSuccess;
}

// Adapter that routes a C-style CUDA stream callback into a C++ object:
// `dispatch` matches the signature cudaStreamAddCallback expects and forwards
// to the instance passed through userData.
class Callback
{
public:
    // Stream-callback entry point; userData must point at a Callback instance.
    static void CUDART_CB dispatch(cudaStream_t stream, cudaError_t status, void *userData);

private:
    // Instance-side handler invoked by dispatch().
    void call();
};

// Trampoline: recover the Callback instance from userData and invoke it.
void CUDART_CB Callback::dispatch(cudaStream_t stream, cudaError_t status, void *userData)
{
    Callback* self = static_cast<Callback*>(userData);
    self->call();
}

// Announce on stdout that all prior work in the stream has completed.
void Callback::call()
{
    const char* message = "stream finished...";
    std::cout << message << std::endl;
}



// Writes tid+1 into the mapped host buffer, fences it toward the host, and
// then spins on busy work for a long time so the kernel stays resident.
__global__ void Kernel(volatile float* hResult)
{
    const int tid = blockDim.x * blockIdx.x + threadIdx.x;

    hResult[tid] = tid + 1;
    // Make the store visible to the host before the kernel continues.
    __threadfence_system();

    // Compute loop that reads the volatile buffer, so the compiler cannot
    // eliminate the delay as dead code.
    float acc = 0;
    for (int iter = 0; iter < 100000000; iter++)
    {
        acc = sin(cos(log(hResult[0] * hResult[1]))) + acc;
        acc = sqrt(acc);
    }
}

// Launches the kernel on a non-blocking stream, registers a completion
// callback, and spin-reads the mapped buffer from the host while the kernel
// is still running.
//
// Fixes vs. the original:
//  - `NULL` was passed as the shared-memory launch argument, which is a
//    size_t byte count, not a pointer: pass 0.
//  - cudaGetLastError() is checked after the launch (kernel launches do not
//    return errors directly).
//  - the poll counter is size_t, avoiding a signed/unsigned comparison.
//  - the stream is synchronized before destruction, since the pending
//    callback references the stack object `obj`.
int main(int argc, char* argv[])
{
    size_t blocks = 2;
    volatile float* hResult;
    SEC_CUDA_CALL(cudaHostAlloc((void**)&hResult,blocks*sizeof(float),cudaHostAllocMapped));

    cudaStream_t stream;
    SEC_CUDA_CALL(cudaStreamCreateWithFlags(&stream, cudaStreamNonBlocking));
    Callback obj;
    Kernel<<<1, blocks, 0, stream>>>(hResult);
    SEC_CUDA_CALL(cudaGetLastError());   // surfaces launch-configuration errors
    SEC_CUDA_CALL(cudaStreamAddCallback(stream, Callback::dispatch, &obj, 0));

    size_t filledElementsCounter = 0;

    // Poll each slot until the device's write becomes visible.
    while (filledElementsCounter < blocks)
    {
        while(hResult[filledElementsCounter] == 0);
        std::cout << hResult[filledElementsCounter] << std::endl;
        filledElementsCounter++;
    }

    // Wait for the kernel and the callback to finish before tearing down the
    // stream and freeing the buffer the kernel writes to.
    SEC_CUDA_CALL(cudaStreamSynchronize(stream));
    SEC_CUDA_CALL(cudaStreamDestroy(stream));
    SEC_CUDA_CALL(cudaFreeHost((void *)hResult));
    return 0;
}

No call returned an error and cuda-memcheck didn't find any problems. This works as intended. You should really try the TCC driver.

2
votes

You cannot pass the host pointer directly to the kernel. If you allocate host memory using cudaHostAlloc with cudaHostAllocMapped flag, then first you have to retrieve the device pointer of the mapped host memory before you can use it in the kernel. Use cudaHostGetDevicePointer to get the device pointer of mapped host memory.

float* hResult, *dResult;
// NOTE(review): mapped host memory may require a prior
// cudaSetDeviceFlags(cudaDeviceMapHost) on some configurations — confirm
// for the target platform.
cudaHostAlloc((void**)&hResult, blocks*sizeof(float), cudaHostAllocMapped);
// cudaHostGetDevicePointer takes three arguments; the trailing `flags`
// parameter must currently be 0. The original two-argument call does not
// match the runtime API signature.
cudaHostGetDevicePointer(&dResult, hResult, 0);
Kernel<<<1,blocks>>>(dResult);
2
votes

Calling __threadfence_system() will ensure that the write is visible to the system before proceeding, but your CPU will be caching the hResult variable and hence you're just spinning on the old value in an infinite loop. Try marking hResult as volatile.