I'm experimenting with mixing CUDA and C++ and ran into a compilation error. The project is laid out like this:
main.cpp: defines main(), which calls gpu_main() and add_test()
|
|--> add_func.cu: defines gpu_main() and the kernel __global__ void add(); add() calls add_test()
|
|--> basic_add.cu: defines __host__ __device__ int add_test(int a, int b)
I compile and link the code like this:
nvcc basic_add.cu -c
nvcc -rdc=true add_func.cu -c
g++ main.cpp -c
g++ -o main main.o basic_add.o add_func.o -lcudart -L/usr/local/cuda/lib64
The second step fails with these errors:
add_func.cu(14): error: calling a host function("add_test") from a global function("add") is not allowed
add_func.cu(14): error: identifier "add_test" is undefined in device code
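From what I've read about separate compilation, my guess is that every .cu file involved needs relocatable device code, plus an explicit device-link step before the final g++ link. This is only a sketch of what I think the build should look like, based on my reading of the nvcc docs (-dc, -dlink, and -lcudadevrt are the flags/library as I understand them, and gpu_link.o is just my name for the device-link object):
nvcc -dc basic_add.cu                              # -dc is equivalent to -rdc=true -c
nvcc -dc add_func.cu
nvcc -dlink basic_add.o add_func.o -o gpu_link.o   # device-link step
g++ main.cpp -c
g++ -o main main.o basic_add.o add_func.o gpu_link.o -L/usr/local/cuda/lib64 -lcudart -lcudadevrt
But even a wrong link line shouldn't produce a compile error in step 2, so I assume something else is wrong as well.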
Does anyone have an idea how to fix this? Or is it simply not allowed to call a __host__ __device__ function that is defined in a different file? Thanks.
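Another guess: the declaration of add_test() in basic_add.h carries no CUDA qualifiers, so when nvcc compiles add_func.cu, the device compiler presumably only sees a plain host declaration. Would annotating the declaration in the header be the right direction? A sketch of what I mean (guarded with __CUDACC__, which I believe nvcc defines, so the header still works when g++ compiles main.cpp):
#ifndef BASIC_ADD_H_
#define BASIC_ADD_H_

#ifdef __CUDACC__
__host__ __device__   /* qualifiers visible only when nvcc compiles the includer */
#endif
int add_test( int a, int b );

#endif
I haven't been able to verify this, hence the question.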
The full code is as follows (just for reference):
- basic_add.h:
#ifndef BASIC_ADD_H_
#define BASIC_ADD_H_
int add_test( int a, int b );
#endif
- basic_add.cu:
__host__ __device__ int add_test(int a, int b)
{
    return a + b;
}
- add_func.h:
#ifndef ADD_FUNC_H_
#define ADD_FUNC_H_
#include <iostream>
#include <math.h>
#include "basic_add.h"
int gpu_main(void);
#endif
- add_func.cu:
#include <cstdio>       // for printf() in device code
#include "add_func.h"

// Kernel function to add the elements of two arrays
__global__
void add(int n, float *x, float *y)
{
    int index = blockIdx.x * blockDim.x + threadIdx.x;
    int stride = blockDim.x * gridDim.x;
    printf("gridDim %d, blockDim %d, blockIdx %d, threadIdx %d\n",
           gridDim.x, blockDim.x, blockIdx.x, threadIdx.x);
    for (int i = index; i < n; i += stride)
    {
        y[i] = add_test(x[i], y[i]);    // this is the call the compiler rejects
        printf("blockIdx %d, threadIdx %d, %d\n", blockIdx.x, threadIdx.x, i);
        break;                          // each thread handles at most one element
    }
}
int gpu_main(void)
{
    int N = 1 << 10;
    float *x, *y;

    // Allocate Unified Memory – accessible from CPU or GPU
    cudaMallocManaged(&x, N * sizeof(float));
    cudaMallocManaged(&y, N * sizeof(float));

    // Initialize x and y arrays on the host
    for (int i = 0; i < N; i++) {
        x[i] = 1.0f;
        y[i] = 2.0f;
    }

    // Run the kernel on the N (1K) elements on the GPU
    int blockSize = 256;
    int numBlocks = (N + blockSize - 1) / blockSize;
    add<<<numBlocks, blockSize>>>(N, x, y);

    // Wait for GPU to finish before accessing the data on the host
    cudaDeviceSynchronize();

    // Check for errors (all values should be 3.0f)
    float maxError = 0.0f;
    for (int i = 0; i < N; i++)
        maxError = fmax(maxError, fabs(y[i] - 3.0f));
    std::cout << "Max error: " << maxError << std::endl;

    // Free memory
    cudaFree(x);
    cudaFree(y);

    return 0;
}
- main.cpp:
#include <iostream>
#include <math.h>
#include "add_func.h"
#include "basic_add.h"
int main(void)
{
    gpu_main();
    int a = add_test(1, 2);
    std::cout << a << std::endl;
    return 0;
}