Generating a CUDA project with CMake

The advantage of generating a CUDA project with CMake is that it is cross-platform and spares you most of the manual configuration.

1. The CMakeLists.txt is as follows:

# CMakeLists.txt for G4CU project
# required cmake version
cmake_minimum_required(VERSION 2.8)
project(test_cuda_project)
# packages
find_package(CUDA)

if(CUDA_FOUND)
	include_directories(${CUDA_INCLUDE_DIRS})
	link_directories($ENV{CUDA_PATH}/lib/x64)
else()
	message(STATUS "CUDA not found!")
endif()
# nvcc flags
#set(CUDA_NVCC_FLAGS -gencode arch=compute_20,code=sm_20;-G;-g)
#set(CUDA_NVCC_FLAGS -gencode arch=compute_52,code=sm_52;-G;-g)
# optional: build a library instead of an executable
#add_library(gpu SHARED ${CURRENT_HEADERS} ${CURRENT_SOURCES})

# test.cpp contains CUDA syntax, so tell FindCUDA to compile it with nvcc
set_source_files_properties(test.cpp PROPERTIES CUDA_SOURCE_PROPERTY_FORMAT OBJ)
file(GLOB_RECURSE CURRENT_HEADERS  *.h *.hpp *.cuh)
file(GLOB CURRENT_SOURCES  *.cpp *.cu *.cc)
source_group("Include" FILES ${CURRENT_HEADERS})
source_group("Source" FILES ${CURRENT_SOURCES})

CUDA_ADD_EXECUTABLE(test ${CURRENT_HEADERS} ${CURRENT_SOURCES})
target_link_libraries(test cuda.lib cudart_static.lib)
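As an aside, newer CMake versions (3.8 and later) treat CUDA as a first-class language, which removes the need for the FindCUDA module. A minimal sketch of that approach, assuming the same target name and a renamed test.cu source (not part of the original project):

# Sketch only: requires CMake >= 3.8 and native CUDA language support
# instead of the FindCUDA macros used above.
cmake_minimum_required(VERSION 3.8)
project(test_cuda_project LANGUAGES CXX CUDA)

# With native support, files containing CUDA syntax are simply named *.cu;
# no CUDA_SOURCE_PROPERTY_FORMAT property is needed.
add_executable(test test.cu)

# cudart is linked automatically for CUDA targets; extra toolkit libraries
# would still be added with target_link_libraries().
set_target_properties(test PROPERTIES CUDA_SEPARABLE_COMPILATION ON)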


2. nvcc is the CUDA compiler; read up on it yourself if you are interested.

nvcc -x cu test.cpp -o main

The following line is critical: it tells the compiler that this file contains CUDA syntax; without it, compilation will fail.

set_source_files_properties(test.cpp PROPERTIES CUDA_SOURCE_PROPERTY_FORMAT OBJ)

link_directories($ENV{CUDA_PATH}/lib/x64)
$ENV{CUDA_PATH} is an environment variable; it is normally set automatically when CUDA is installed.
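If CUDA_PATH is not set (for example on Linux, where the installer does not create it), link_directories silently points at a bad path. A small sketch of a guard, assuming find_package(CUDA) has already run and populated CUDA_TOOLKIT_ROOT_DIR (the lib/x64 vs. lib64 layout is an assumption about the install):

# Sketch: fall back to the toolkit root detected by FindCUDA when the
# CUDA_PATH environment variable is absent.
if(DEFINED ENV{CUDA_PATH})
	link_directories($ENV{CUDA_PATH}/lib/x64)
else()
	link_directories(${CUDA_TOOLKIT_ROOT_DIR}/lib64)
endif()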

3. Linking libraries

target_link_libraries(test cuda.lib cudart_static.lib)

This adds the link libraries for the project. Usually cudart_static.lib alone is enough; if some CUDA APIs need additional libraries, add them here manually and re-run cmake.
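For example, if the code were to call cuBLAS or cuRAND, the corresponding toolkit libraries would be added as well. A sketch using the variables that the FindCUDA module defines (these extra libraries are an illustration, not part of the original project):

# Sketch: extra CUDA toolkit libraries, only needed if the code uses them.
# CUDA_CUBLAS_LIBRARIES and CUDA_curand_LIBRARY are set by find_package(CUDA).
target_link_libraries(test
	cudart_static.lib
	${CUDA_CUBLAS_LIBRARIES}
	${CUDA_curand_LIBRARY})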

4. Generating the project

Under VS2015:

cmake -G "Visual Studio 14 2015 Win64" .

I have not tried it on Linux, but presumably:

cmake .
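In either case, an out-of-source build keeps the generated files away from the sources; a typical sequence (the build directory name is just a convention, not from the original post) would be:

mkdir build
cd build
cmake -G "Visual Studio 14 2015 Win64" ..
cmake --build . --config Release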

5. Test code

#include <iostream>
#include <thread>
#include <chrono>
#include <cuda_runtime_api.h>

bool InitCUDA()
{
	int count;

	cudaGetDeviceCount(&count);     // number of CUDA devices

	if (count == 0)
	{
		std::cout << "There is no device.\n";
		return false;
	}

	for (int i = 0; i < count; i++)
	{
		cudaDeviceProp prop;        // CUDA device properties

		if (cudaGetDeviceProperties(&prop, i) == cudaSuccess)
		{
			std::cout << "device name: " << prop.name << "\n";
			std::cout << "compute capability: " << prop.major << "." << prop.minor << "\n";
			std::cout << "clock rate (kHz): " << prop.clockRate << "\n";
			std::cout << "multiprocessor count: " << prop.multiProcessorCount << "\n";
			std::cout << "concurrent kernels supported: " << prop.concurrentKernels << "\n";
		}
	}

	cudaSetDevice(0);               // select the first device; the loop index would be out of range here

	return true;
}

int main()
{
	if (!InitCUDA())
	{
		return 0;
	}
	std::this_thread::sleep_for(std::chrono::seconds(10)); // keep the console window open
	std::cout << "cuda !\n";
	return 0;
}
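The test above only calls the runtime API, which would also compile as plain C++. To confirm that test.cpp is really being compiled as CUDA code (which is what the CUDA_SOURCE_PROPERTY_FORMAT line is for), a minimal kernel launch can be appended to the same file; this snippet is a sketch, and the names add_one and RunKernelCheck are not part of the original test:

// Sketch: a trivial kernel; if this compiles, nvcc really processed the file.
__global__ void add_one(int* x)
{
	*x += 1;
}

void RunKernelCheck()
{
	int* d = nullptr;
	int h = 41;
	cudaMalloc(&d, sizeof(int));
	cudaMemcpy(d, &h, sizeof(int), cudaMemcpyHostToDevice);
	add_one<<<1, 1>>>(d);           // CUDA launch syntax only compiles under nvcc
	cudaMemcpy(&h, d, sizeof(int), cudaMemcpyDeviceToHost);
	cudaFree(d);
	std::cout << "kernel result: " << h << "\n";  // expected: 42
}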