# Check CUDA installation
nvcc --version

# Set CUDA path explicitly
cmake .. -DCUDA_TOOLKIT_ROOT_DIR=/usr/local/cuda-12.2
# Or add to PATH
export PATH=/usr/local/cuda/bin:$PATH
export LD_LIBRARY_PATH=/usr/local/cuda/lib64:$LD_LIBRARY_PATH
CMake version too old
Error: CMake 3.18 or higher is required
Solutions:
# Using pip
pip install --upgrade cmake
# Using snap (Ubuntu)
sudo snap install cmake --classic

# Build from source
curl -L https://cmake.org/files/v3.28/cmake-3.28.0.tar.gz | tar xz
cd cmake-3.28.0 && ./bootstrap && make && sudo make install
C++17 not supported
Error: error: 'auto' in lambda parameter not supported
Solutions:
# Check compiler version
gcc --version    # Should be 9+
clang --version  # Should be 10+

# Specify compiler
cmake .. -DCMAKE_CXX_COMPILER=g++-11
# Or use environment variable
CC=gcc-11 CXX=g++-11 cmake ..
CUDA architecture mismatch
Error: No kernel image is available for execution on the device
Solutions:
# Check your GPU compute capability
nvidia-smi --query-gpu=compute_cap --format=csv
# Build for your specific architecture
cmake .. -DCUDA_ARCH="80"  # For SM 8.0 (A100)
cmake .. -DCUDA_ARCH="86"  # For SM 8.6 (RTX 3090)
cmake .. -DCUDA_ARCH="89"  # For SM 8.9 (RTX 4090)

# Or use native detection
cmake .. -DCUDA_ARCH="native"
Runtime Issues
CUDA out of memory
Error: CUDA out of memory or cudaErrorMemoryAllocation