Commit 372a7263 authored by Ogier Maitre

Genetic programming on CPU

parent b8da7bbe
......@@ -25,6 +25,7 @@ Centre de Math
#define STD_FLAVOR_SO 0
#define STD_FLAVOR_MO 1
#define STD_FLAVOR_GP 2
#define CUDA_FLAVOR_SO 0
#define CUDA_FLAVOR_MO 1
#define CUDA_FLAVOR_GP 2
......
......@@ -698,7 +698,7 @@ exponent ([Ee][+-]?[0-9]+)
if( bCOPY_GP_EVAL_GPU ){
fprintf(fpOutputFile,"k_results[index] =");
}
else fprintf(fpOutputFile,"%s",yytext);
else fprintf(fpOutputFile,"return fitness=");
}
......@@ -1218,10 +1218,9 @@ exponent ([Ee][+-]?[0-9]+)
<TEMPLATE_ANALYSIS>"\\RED_FINAL_PRM" {fprintf(fpOutputFile,"%f",fRED_FINAL_PRM);}
<TEMPLATE_ANALYSIS>"\\POP_SIZE" {fprintf(fpOutputFile,"%d",nPOP_SIZE);}
<TEMPLATE_ANALYSIS>"\\OFF_SIZE" {fprintf(fpOutputFile,"%d",nOFF_SIZE);}
<TEMPLATE_ANALYSIS>"\\ELITE_SIZE" {
fprintf(fpOutputFile,"%d",nELITE);
////DEBUG_PRT_PRT("elitism is %d, elite size is %d",bELITISM, nELITE);
}
<TEMPLATE_ANALYSIS>"\\ELITE_SIZE" { fprintf(fpOutputFile,"%d",nELITE); }
<TEMPLATE_ANALYSIS>"\\FC_SIZE" { fprintf(fpOutputFile,"%d",iNO_FITNESS_CASES); }
<TEMPLATE_ANALYSIS>"\\RED_PAR" {
if( TARGET==CUDA || TARGET==STD){
......@@ -1308,10 +1307,7 @@ if(OPERATING_SYSTEM=WINDOWS)
if( TARGET==CUDA )
strcat(sFileName,"Individual.cu");
else if( TARGET==STD )
if( TARGET_FLAVOR==CUDA_FLAVOR_GP )
strcat(sFileName,"Individual.cu");
else
strcat(sFileName,"Individual.cpp");
strcat(sFileName,"Individual.cpp");
fpOutputFile=fopen(sFileName,"w");
if (bVERBOSE) printf("Creating %s...\n",sFileName);
}
......@@ -2495,6 +2491,8 @@ int CEASEALexer::create(CEASEAParser* pParser, CSymbolTable* pSymTable)
strcat(sTemp,"CMAES.tpl");
else if (TARGET_FLAVOR == MEMETIC )
strcat(sTemp,"STD_MEM.tpl");
else if (TARGET_FLAVOR == STD_FLAVOR_GP )
strcat(sTemp,"GP.tpl");
else
strcat(sTemp,"STD_MO.tpl");
if (!(yyin = fpTemplateFile = fopen(sTemp, "r"))){
......
......@@ -766,6 +766,10 @@ int main(int argc, char *argv[]){
else if (!mystricmp(sTemp,"memetic")) {
TARGET_FLAVOR = MEMETIC;
}
else if( !mystricmp(sTemp,"gp")){
TARGET = STD;
TARGET_FLAVOR = STD_FLAVOR_GP;
}
else if (!mystricmp(sTemp,"v")) bVERBOSE=true;
else if (!mystricmp(sTemp,"tl")){
......
......@@ -121,9 +121,9 @@ clean:
# rm -f EaseaParse.cpp EaseaParse.h EaseaLex.cpp EaseaLex.h
#EaseaParse.cpp: EaseaParse.y
# wine ~/.wine/drive_c/Program\ Files/Parser\ Generator/BIN/ayacc.exe $< -Tcpp -d
EaseaParse.cpp: EaseaParse.y
wine ~/.wine/drive_c/Program\ Files/Parser\ Generator/BIN/ayacc.exe $< -Tcpp -d
#EaseaLex.cpp: EaseaLex.l
# wine ~/.wine/drive_c/Program\ Files/Parser\ Generator/BIN/ALex.exe $< -Tcpp -i
EaseaLex.cpp: EaseaLex.l
wine ~/.wine/drive_c/Program\ Files/Parser\ Generator/BIN/ALex.exe $< -Tcpp -i
#endif
/*_________________________________________________________
/*_________________________________________________________
This is a standard GP implementation on EASEA,
aimed for regression.
......@@ -12,7 +10,7 @@
__________________________________________________________*/
\User declarations :
#define PI (3.141592653589793)
\end
\User functions:
......@@ -29,17 +27,17 @@ __________________________________________________________*/
int generateData(float*** inputs, float** outputs){
int i=0;
(*inputs) = new float*[NO_FITNESS_CASES];
(*outputs) = new float[NO_FITNESS_CASES];
(*inputs) = new float*[gNO_FITNESS_CASES];
(*outputs) = new float[gNO_FITNESS_CASES];
for( i=0 ; i<NO_FITNESS_CASES ; i++ ){
for( i=0 ; i<gNO_FITNESS_CASES ; i++ ){
(*inputs)[i]=new float[VAR_LEN];
float x = random(-10.,+10.);
(*inputs)[i][0] = x;
(*outputs)[i] = POLY(x);
}
return NO_FITNESS_CASES;
return gNO_FITNESS_CASES;
}
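/* Hedged usage sketch (not part of this commit): once gNO_FITNESS_CASES has
   been set from --fcSize, generateData() is expected to be driven roughly as
   follows; the cleanup loop is an assumption.
     float** inputs = NULL; float* outputs = NULL;
     int n = generateData(&inputs, &outputs);       // n == gNO_FITNESS_CASES
     // ... evaluate individuals on the n (x, POLY(x)) fitness cases ...
     for( int i=0 ; i<n ; i++ ) delete[] inputs[i];
     delete[] inputs; delete[] outputs;
*/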
......@@ -136,7 +134,7 @@ ERROR = powf(expected_value-EVOLVED_VALUE,2);
\end
\GenomeClass::evaluator accumulator :
return sqrtf(ERROR/NO_FITNESS_CASES);
return sqrtf(ERROR/gNO_FITNESS_CASES);
\end
\User Makefile options:
......@@ -177,5 +175,5 @@ LDFLAGS+=
nb of GPUs : 1
size of prog buffer : 20000000
nb of fitness cases : 128
nb of fitness cases : 4096
\end
......@@ -204,7 +204,74 @@ GPNode* RAMPED_H_H(unsigned INIT_TREE_DEPTH_MIN, unsigned INIT_TREE_DEPTH_MAX, u
return construction_method( VAR_LEN+1, OPCODE_SIZE , 1, currentDepth ,full, opArity, OP_ERC);
}
/**
* Return the parent of the nth node in a tree rooted at root.
*
* @arg root: the root of the tree.
* @arg N: the node number to return.
* @arg childId: id of the child corresponding to the selected node.
* @arg tree_depth_max: the maximum possible depth in trees.
* @arg max_arity: the size of the arity array.
*
* @return: a pointer to the parent of the nth node.
*/
GPNode* pickNthNode(GPNode* root, int N, int* childId, unsigned tree_depth_max, unsigned max_arity){
GPNode** stack = new GPNode*[tree_depth_max*max_arity];
GPNode** parentStack = new GPNode*[tree_depth_max*max_arity];
int stackPointer = 0;
parentStack[stackPointer] = NULL;
stack[stackPointer++] = root;
for( int i=0 ; i<N ; i++ ){
GPNode* currentNode = stack[stackPointer-1];
//cout << currentNode << endl;
stackPointer--;
for( int j=opArity[(int)currentNode->opCode] ; j>0 ; j--){
parentStack[stackPointer] = currentNode;
stack[stackPointer++] = currentNode->children[j-1];
}
}
if( stackPointer )
stackPointer--;
for( int i=0 ; i<(int)opArity[(int)parentStack[stackPointer]->opCode] ; i++ ){
if( parentStack[stackPointer]->children[i]==stack[stackPointer] ){
(*childId)=i;
break;
}
}
GPNode* ret = parentStack[stackPointer];
delete[] stack;
delete[] parentStack;
return ret;
}
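/* Hedged usage sketch (assumption, mirroring the call sites in
   simpleCrossOver further below): pickNthNode() returns the parent of the
   Nth visited node and stores the child slot in childId, so callers can
   splice a subtree with a single pointer assignment. Callers guard against
   N==0 because the root has no parent.
     int childId = 0;
     GPNode* parent = pickNthNode(root, N, &childId, TREE_DEPTH_MAX, MAX_ARITY);
     GPNode* selected = parent->children[childId];  // the selected node itself
     parent->children[childId] = graft;             // e.g. graft another subtree
*/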
/**
* Flatten a tree inside a buffer using RPN notation.
*
* @arg root: the root of the tree to flatten.
* @arg buf: the buffer where to flatten the tree.
* @arg index: the filling counter of the buffer. It is modified by the function in order to
* reflect the new size after flattening the current individual.
* @arg max_prog_size: the size of the buffer.
* @arg op_erc_id: the id of the ERC opcode.
*
* @return: 1 on success, 0 if the flattened program would overflow the buffer.
*/
int flattening_tree_rpn( GPNode* root, float* buf, int* index,int max_prog_size, int op_erc_id){
int i;
for( i=0 ; i<opArity[(int)root->opCode] ; i++ ){
flattening_tree_rpn(root->children[i],buf,index,max_prog_size,op_erc_id);
}
if( (*index)+2>max_prog_size )return 0;
buf[(*index)++] = root->opCode;
if( root->opCode == op_erc_id ) buf[(*index)++] = root->erc_value;
return 1;
}
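/* Hedged driver sketch (assumption, following evale_pop_chunk below): each
   individual is flattened back to back into a single program buffer and
   terminated with OP_RETURN; indexes[] records where each program starts so
   the evaluator can locate it.
     int index = 0;
     for( int i=0 ; i<popSize ; i++ ){
       indexes[i] = index;
       flattening_tree_rpn( ((IndividualImpl*)population[i])->root, progs,
                            &index, MAX_PROGS_SIZE, OP_ERC );
       progs[index++] = OP_RETURN;   // sentinel read by the evaluator
     }
*/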
......@@ -155,6 +155,7 @@ void parseArguments(const char* parametersFileName, int ac, char** av,
("printFinalPopulation",po::value<int>(),"Prints the final population (default : 0)")
("savePopulation",po::value<int>(),"Saves population at the end (default : 0)")
("startFromFile",po::value<int>(),"Loads the population from a .pop file (default : 0")
("fcSize",po::value<int>(),"Number of learning point for genetic programming (default : 0")
("u1",po::value<string>(),"User defined parameter 1")
("u2",po::value<string>(),"User defined parameter 2")
("u3",po::value<int>(),"User defined parameter 3")
......
......@@ -82,5 +82,6 @@ GPNode* RAMPED_H_H(unsigned iINIT_TREE_DEPTH_MIN, unsigned iINIT_TREE_DEPTH_MAX,
void flattenDatas2D( float** inputs, int length, int width, float** flat_inputs);
GPNode* construction_method( const int constLen, const int totalLen , const int currentDepth, const int maxDepth, const bool full, const unsigned* opArity, const int OP_ERC);
GPNode* pickNthNode(GPNode* root, int N, int* childId, unsigned tree_depth_max,unsigned max_arity);
int flattening_tree_rpn( GPNode* root, float* buf, int* index,int max_prog_size, int op_erc_id);
#endif // __C_GPNODE__
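/* Hedged note (assumption): flattenDatas2D() is expected to copy the
   length x width fitness-case matrix into one contiguous buffer, since
   EASEAInit transfers exactly gNO_FITNESS_CASES*VAR_LEN floats to the GPU.
   A row-major sketch; the real layout may be transposed for coalesced reads.
     void flattenDatas2D( float** inputs, int length, int width, float** flat_inputs){
       (*flat_inputs) = new float[length*width];
       for( int i=0 ; i<length ; i++ )
         for( int j=0 ; j<width ; j++ )
           (*flat_inputs)[i*width+j] = inputs[i][j];
     }
*/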
......@@ -87,6 +87,8 @@ int main(int argc, char** argv){
#include <iostream>
#include <sstream>
unsigned gNO_FITNESS_CASES=0;
unsigned aborded_crossover;
float* input_k;
float* output_k;
......@@ -167,23 +169,6 @@ __device__ float eval_tree_gpu(unsigned fc_id, const float * k_progs, const floa
return stack[0];
}
int flattening_tree_rpn( GPNode* root, float* buf, int* index){
int i;
for( i=0 ; i<opArity[(int)root->opCode] ; i++ ){
flattening_tree_rpn(root->children[i],buf,index);
}
if( (*index)+2>MAX_PROGS_SIZE )return 0;
buf[(*index)++] = root->opCode;
if( root->opCode == OP_ERC ) buf[(*index)++] = root->erc_value;
return 1;
}
/**
Send input and output data on the GPU memory.
Allocate
......@@ -238,6 +223,7 @@ EvaluatePostFixIndividuals_128(const float * k_progs,
const int tid = threadIdx.x; //0 to NUM_THREADS-1
const int bid = blockIdx.x; // 0 to NUM_BLOCKS-1
int gNO_FITNESS_CASES = trainingSetSize;
int index; // index of the prog processed by the block
......@@ -306,40 +292,6 @@ EvaluatePostFixIndividuals_128(const float * k_progs,
// here results and hits have been stored in their respective array: we can leave
}
GPNode* pickNthNode(GPNode* root, int N, int* childId){
GPNode* stack[TREE_DEPTH_MAX*MAX_ARITY];
GPNode* parentStack[TREE_DEPTH_MAX*MAX_ARITY];
int stackPointer = 0;
parentStack[stackPointer] = NULL;
stack[stackPointer++] = root;
for( int i=0 ; i<N ; i++ ){
GPNode* currentNode = stack[stackPointer-1];
//cout << currentNode << endl;
stackPointer--;
for( int j=opArity[currentNode->opCode] ; j>0 ; j--){
parentStack[stackPointer] = currentNode;
stack[stackPointer++] = currentNode->children[j-1];
}
}
//assert(stackPointer>0);
if( stackPointer )
stackPointer--;
//cout << "f : \n\t n :" << stack[stackPointer ] << "\n\t p :" << parentStack[stackPointer] << " cId : " << \
//(*childId) << endl;
for( int i=0 ; i<opArity[parentStack[stackPointer]->opCode] ; i++ ){
if( parentStack[stackPointer]->children[i]==stack[stackPointer] ){
(*childId)=i;
break;
}
}
return parentStack[stackPointer];
}
......@@ -488,8 +440,8 @@ void simpleCrossOver(IndividualImpl& p1, IndividualImpl& p2, IndividualImpl& c){
if( Np1!=0 ) stockParentNode = pickNthNode(c.root, MIN(Np1,nbNodeP1) ,&stockPointChildId);
if( Np2!=0 ) graftParentNode = pickNthNode(p2.root, MIN(Np2,nbNodeP1) ,&graftPointChildId);
if( Np1!=0 ) stockParentNode = pickNthNode(c.root, MIN(Np1,nbNodeP1) ,&stockPointChildId,TREE_DEPTH_MAX,MAX_ARITY);
if( Np2!=0 ) graftParentNode = pickNthNode(p2.root, MIN(Np2,nbNodeP1) ,&graftPointChildId,TREE_DEPTH_MAX,MAX_ARITY);
// is the stock and the graft an authorized type of node (leaf or inner-node)
if( Np1 && !stockCouldBeTerminal && opArity[stockParentNode->children[stockPointChildId]->opCode]==0 ) goto choose_node;
......@@ -537,7 +489,7 @@ float IndividualImpl::evaluate(){
float sum = 0;
\INSERT_GENOME_EVAL_HDR
for( int i=0 ; i<NO_FITNESS_CASES ; i++ ){
for( int i=0 ; i<gNO_FITNESS_CASES ; i++ ){
float EVOLVED_VALUE = recEval(this->root,inputs[i]);
\INSERT_GENOME_EVAL_BDY
sum += ERROR;
......@@ -584,7 +536,7 @@ void evale_pop_chunk(CIndividual** population, int popSize){
int index = 0;
for( int i=0 ; i<popSize ; i++ ){
indexes[i] = index;
flattening_tree_rpn( ((IndividualImpl*)population[i])->root, progs, &index);
flattening_tree_rpn( ((IndividualImpl*)population[i])->root, progs, &index,MAX_PROGS_SIZE,OP_ERC);
progs[index++] = OP_RETURN;
}
......@@ -595,7 +547,7 @@ void evale_pop_chunk(CIndividual** population, int popSize){
cudaStreamCreate(&st);
// Here we will do the real GPU evaluation
EvaluatePostFixIndividuals_128<<<popSize,128,0,st>>>( progs_k, index, popSize, input_k, output_k, NO_FITNESS_CASES, results_k, hits_k, indexes_k);
EvaluatePostFixIndividuals_128<<<popSize,128,0,st>>>( progs_k, index, popSize, input_k, output_k, gNO_FITNESS_CASES, results_k, hits_k, indexes_k);
CUDA_SAFE_CALL(cudaStreamSynchronize(st));
......@@ -631,11 +583,11 @@ void EASEAInit(int argc, char** argv){
// load data from csv file.
cout<<"Before everything else function called "<<endl;
cout << "number of point in fitness cases set : " << NO_FITNESS_CASES << endl;
cout << "number of point in fitness cases set : " << gNO_FITNESS_CASES << endl;
float* inputs_f = NULL;
flattenDatas2D(inputs,NO_FITNESS_CASES,VAR_LEN,&inputs_f);
flattenDatas2D(inputs,gNO_FITNESS_CASES,VAR_LEN,&inputs_f);
indexes = new int[maxPopSize];
hits = new int[maxPopSize];
......@@ -644,7 +596,7 @@ void EASEAInit(int argc, char** argv){
INSTEAD_EVAL_STEP=true;
initialDataToGPU(inputs_f, NO_FITNESS_CASES*VAR_LEN, outputs, NO_FITNESS_CASES);
initialDataToGPU(inputs_f, gNO_FITNESS_CASES*VAR_LEN, outputs, gNO_FITNESS_CASES);
}
......@@ -846,6 +798,8 @@ void ParametersImpl::setDefaultParameters(int argc, char** argv){
this->remoteIslandModel = setVariable("remoteIslandModel",\REMOTE_ISLAND_MODEL);
this->ipFile = (char*)setVariable("ipFile","\IP_FILE").c_str();
this->migrationProbability = setVariable("migrationProbability",(float)\MIGRATION_PROBABILITY);
gNO_FITNESS_CASES = setVariable("fcSize",\FC_SIZE);
}
CEvolutionaryAlgorithm* ParametersImpl::newEvolutionaryAlgorithm(){
......@@ -900,6 +854,8 @@ EvolutionaryAlgorithmImpl::~EvolutionaryAlgorithmImpl(){
#ifndef PROBLEM_DEP_H
#define PROBLEM_DEP_H
extern unsigned gNO_FITNESS_CASES;
\INSERT_GP_PARAMETERS
//#include "CRandomGenerator.h"
......@@ -1147,6 +1103,7 @@ easeaclean:
#
#***************************************
# --seed=0 # -S : Random number seed. It is possible to give a specific seed.
--fcSize=\FC_SIZE
###### Evolution Engine ######
--popSize=\POP_SIZE # -P : Population Size
......