Commit d21fd5e8 authored by Ogier Maitre

1.09rc2

parent 02abd036
#include <math.h>
#include <stdlib.h>
#include <stdio.h>

#include "include/CCuda.h"

CCuda::CCuda(unsigned parentSize, unsigned offSize, unsigned individualImplSize){
    this->sizeOfIndividualImpl = individualImplSize;
    // Host-side staging buffer sized for the larger of the parent and offspring populations.
    this->cudaBuffer = (void*)malloc(this->sizeOfIndividualImpl*( (parentSize>offSize) ? parentSize : offSize));
}

CCuda::~CCuda(){
    // Release the staging buffer allocated in the constructor.
    free(this->cudaBuffer);
}
/*bool repartition(struct my_struct_gpu* gpu_infos){
    //There is an implied minimum number of threads for each block
    if(gpu_infos->num_Warp > gpu_infos->num_thread_max){
        printf("You need to authorize at least %d threads on each block!\n",gpu_infos->num_Warp);
        exit(1);
    }

    gpu_infos->dimGrid = gpu_infos->num_MP;
    gpu_infos->dimBlock = gpu_infos->num_Warp;

    //While the whole population cannot yet be placed on the card
    while(gpu_infos->dimBlock * gpu_infos->dimGrid < gpu_infos->sh_pop_size) {
        //Each iteration adds one warp's worth of threads to dimBlock
        if( (gpu_infos->dimBlock += gpu_infos->num_Warp) > gpu_infos->num_thread_max ) {
            //If dimBlock exceeds the maximum number of threads per block, add the number of MPs to dimGrid and reset dimBlock to the warp size
            gpu_infos->dimGrid += gpu_infos->num_MP;
            gpu_infos->dimBlock = gpu_infos->num_Warp;
        }
    }

    //Check that there is enough room for the whole population and that every constraint is respected
    if( (gpu_infos->dimBlock*gpu_infos->dimGrid >= gpu_infos->sh_pop_size) && (gpu_infos->dimBlock <= gpu_infos->num_thread_max))
        return true;
    else
        return false;
}
*/
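/*
 * Illustrative sketch, not part of the original commit: how the my_struct_gpu
 * fields read by repartition() above could be filled from the CUDA device
 * properties before calling it. The device index 0 and the popSize variable
 * are assumptions.
 *
 *   struct my_struct_gpu infos;
 *   CUDA_SAFE_CALL( cudaGetDeviceProperties(&infos.gpuProp, 0) );
 *   infos.num_MP         = infos.gpuProp.multiProcessorCount;
 *   infos.num_thread_max = infos.gpuProp.maxThreadsPerBlock;
 *   infos.num_Warp       = infos.gpuProp.warpSize;
 *   infos.sh_pop_size    = popSize;   // individuals evaluated on this device
 *   if( !repartition(&infos) ){
 *     printf("Population of %d individuals does not fit on the device\n", popSize);
 *     exit(1);
 *   }
 */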
/*
 * CCuda.h
 *
 *  Created on: 23 June 2009
 *      Author: maitre
 */
#ifndef CCUDA_H_
#define CCUDA_H_
#include <iostream>
// printf() and exit() are used by the CUDA_SAFE_CALL macro below.
#include <stdio.h>
#include <stdlib.h>
#include <semaphore.h>

#include <cuda.h>
#include <cuda_runtime_api.h>
// Abort with a readable message if a CUDA runtime call fails.
#define CUDA_SAFE_CALL(f) \
    { \
        cudaError_t err; \
        err = f; \
        if( err != cudaSuccess ){ \
            printf("Error : %s\n",cudaGetErrorString(err)); \
            exit(-1); \
        } \
    }
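/*
 * Illustrative usage, not from the original file: CUDA_SAFE_CALL wraps any
 * CUDA runtime call that returns a cudaError_t. The d_fitness buffer and the
 * popSize variable below are hypothetical.
 *
 *   float* d_fitness;
 *   CUDA_SAFE_CALL( cudaMalloc((void**)&d_fitness, popSize*sizeof(float)) );
 *   CUDA_SAFE_CALL( cudaMemset(d_fitness, 0, popSize*sizeof(float)) );
 */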
// Empty placeholder for GPU-specific options.
struct gpuOptions{};

// Per-GPU population slice and kernel launch geometry.
struct my_struct_gpu{
    int indiv_start;      // index of the first individual handled by this GPU
    int sh_pop_size;      // number of individuals evaluated on this GPU
    int num_MP;           // number of multiprocessors on the device
    int num_thread_max;   // maximum number of threads per block
    int num_Warp;         // warp size of the device
    int dimGrid;          // computed grid dimension
    int dimBlock;         // computed block dimension
    cudaDeviceProp gpuProp;
};
// Arguments and synchronisation handles for one GPU worker thread.
struct gpuArg{
    int gpuId;            // CUDA device handled by this thread
    int threadId;         // index of the worker thread
    sem_t sem_in;         // signalled when work is ready for the thread
    sem_t sem_out;        // signalled when the thread has finished
    void* d_population;   // device buffer holding the individuals
    float* d_fitness;     // device buffer receiving the fitness values
};
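/*
 * Illustrative sketch, an assumption about how sem_in/sem_out are intended to
 * be used (not taken from the original sources): the master posts sem_in when
 * a batch is ready, the GPU worker thread evaluates it and posts sem_out.
 *
 *   // master side                 // worker side
 *   sem_post(&arg->sem_in);        sem_wait(&arg->sem_in);
 *   ...                            // ... evaluate the population on the GPU ...
 *   sem_wait(&arg->sem_out);       sem_post(&arg->sem_out);
 */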
class CCuda {
public:
    void* cudaBuffer;                 // host-side staging buffer for individuals
    unsigned sizeOfIndividualImpl;    // size in bytes of one serialised individual
    struct gpuOptions initOpts;

public:
    CCuda(unsigned parentSize, unsigned offSize, unsigned individualImplSize);
    ~CCuda();
};
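/*
 * Minimal usage sketch (assumption, not from the original sources): the CCuda
 * object sizes its host-side staging buffer for the larger of the parent and
 * offspring populations. MyIndividualImpl and the population size names are
 * hypothetical.
 *
 *   CCuda* cuda = new CCuda(parentPopSize, offspringPopSize, sizeof(MyIndividualImpl));
 *   // ... serialise individuals into cuda->cudaBuffer before copying them to the GPU ...
 *   delete cuda;
 */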
// Computes dimGrid and dimBlock so that the whole population fits on the device.
bool repartition(struct my_struct_gpu* gpu_infos);
#endif /* CCUDA_H_ */