evonet.cpp
1 /********************************************************************************
2  * FARSA Experiments Library *
3  * Copyright (C) 2007-2012 *
4  * Stefano Nolfi <stefano.nolfi@istc.cnr.it> *
5  * Onofrio Gigliotta <onofrio.gigliotta@istc.cnr.it> *
6  * Gianluca Massera <emmegian@yahoo.it> *
7  * Tomassino Ferrauto <tomassino.ferrauto@istc.cnr.it> *
8  * *
9  * This program is free software; you can redistribute it and/or modify *
10  * it under the terms of the GNU General Public License as published by *
11  * the Free Software Foundation; either version 2 of the License, or *
12  * (at your option) any later version. *
13  * *
14  * This program is distributed in the hope that it will be useful, *
15  * but WITHOUT ANY WARRANTY; without even the implied warranty of *
16  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
17  * GNU General Public License for more details. *
18  * *
19  * You should have received a copy of the GNU General Public License *
20  * along with this program; if not, write to the Free Software *
21  * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA *
22  ********************************************************************************/
23 
24 #include "evonet.h"
25 #include "logger.h"
26 #include "configurationhelper.h"
27 #include "evonetui.h"
28 #include <QFileInfo>
29 #include <cstring>
30 #include <limits>
31 
32 #include <Eigen/Dense>
33 
34 #ifndef FARSA_MAC
35  #include <malloc.h> // DEBUG: to be substituted with new/malloc()
36 #endif
37 
38 // This is needed because the isnan and isinf functions are not present on Windows
39 #ifdef WIN32
40  #include <float.h>
41  #define isnan(x) _isnan(x)
42  #define isinf(x) (!_finite(x))
43 #else
44  #define isnan(x) std::isnan(x)
45  #define isinf(x) std::isinf(x)
46 #endif
47 
48 // All the stuff below is to avoid warnings on Windows about the use of unsafe
49 // functions. This should only be a temporary workaround; the real solution is to stop
50 // using C string and file functions...
51 #if defined(_MSC_VER)
52  #pragma warning(push)
53  #pragma warning(disable:4996)
54 #endif
55 
56 namespace farsa {
57 
58 const float Evonet::DEFAULT_VALUE = -99.0f;
59 
60 Evonet::Evonet()
61 {
62  wrange = 5.0; // weight range
63  grange = 5.0; // gain range
64  brange = 5.0; // bias range
65  neuronlesions = 0;
66  freep = new float[1000];
67  phep = NULL;
68  muts = NULL;
69  geneMaxValue = 255;
70  pheloaded = false;
71  selectedp = (float **) malloc(100 * sizeof(float *));
72  for (int i = 0; i < MAXN; i++) {
73  neuronlesion[i]=false;
74  neuronlesionVal[i]=0.0;
75  }
76  net_nblocks = 0;
77 
78  nextStoredActivation = 0;
79  firstStoredActivation = 0;
80  updatescounter = 0;
81 
82  training = false;
83 }
84 
85 void Evonet::configure(ConfigurationParameters& params, QString prefix) {
86  int nSensors = ConfigurationHelper::getInt( params, prefix+"nSensors", 0 );
87  int nHiddens = ConfigurationHelper::getInt( params, prefix+"nHiddens", 0 );
88  int nMotors = ConfigurationHelper::getInt( params, prefix+"nMotors", 0 );
89  QString netFile = ConfigurationHelper::getString( params, prefix+"netFile", "" );
90  // --- some parameters are in conflict
91  if ( netFile != "" && (nSensors+nHiddens+nMotors)>0 ) {
92  Logger::error( "Evonet - The information inside netFile will override any other Evonet parameter" );
93  }
94  wrange = ConfigurationHelper::getDouble(params, prefix + "weightRange", 5.0); // the range of synaptic weights
95  grange = ConfigurationHelper::getDouble(params, prefix + "gainRange", 5.0); // the range of gains
96  brange = ConfigurationHelper::getDouble(params, prefix + "biasRange", 5.0); // the range of biases
97 
98  if ( netFile.isEmpty() ) {
99  // generate a neural network from parameters
100  ninputs = nSensors;
101  nhiddens = nHiddens;
102  noutputs = nMotors;
103  nneurons = ninputs + nhiddens + noutputs;
104  if (this->nneurons > MAXN) {
105  ConfigurationHelper::throwUserConfigError(prefix + "(nSensors + nHiddens + nMotors)", QString::number(nneurons), "Too many neurons: increase MAXN to support more than " + QString::number(MAXN) + " neurons");
106  }
107  int inputNeuronType = 0;
108  QString str = ConfigurationHelper::getString( params, prefix + "inputNeuronType", "no_delta" );
109  if ( str == QString("no_delta") ) {
110  inputNeuronType = 0;
111  } else if ( str == QString("with_delta") ) {
112  inputNeuronType = 1;
113  } else {
114  ConfigurationHelper::throwUserConfigError(prefix + "inputNeuronType", str, "Wrong value (use \"no_delta\" or \"with_delta\")");
115  }
116  int hiddenNeuronType = 0;
117  str = ConfigurationHelper::getString( params, prefix + "hiddenNeuronType", "logistic" );
118  if ( str == QString("logistic") ) {
119  hiddenNeuronType = 0;
120  } else if ( str == QString("logistic+delta") ) {
121  hiddenNeuronType = 1;
122  } else if ( str == QString("binary") ) {
123  hiddenNeuronType = 2;
124  } else if ( str == QString("logistic_0.2") ) {
125  hiddenNeuronType = 3;
126  } else {
127  ConfigurationHelper::throwUserConfigError(prefix + "hiddenNeuronType", str, "Wrong value (use \"logistic\", \"logistic+delta\", \"binary\" or \"logistic_0.2\")");
128  }
129  int outputNeuronType = 0;
130  str = ConfigurationHelper::getString( params, prefix + "outputNeuronType", "no_delta" );
131  if ( str == QString("no_delta") ) {
132  outputNeuronType = 0;
133  } else if ( str == QString("with_delta") ) {
134  outputNeuronType = 1;
135  } else {
136  ConfigurationHelper::throwUserConfigError(prefix + "outputNeuronType", str, "Wrong value (use \"no_delta\" or \"with_delta\")");
137  }
138  bool recurrentHiddens = ConfigurationHelper::getBool( params, prefix + "recurrentHiddens", false );
139  bool inputOutputConnections = ConfigurationHelper::getBool( params, prefix + "inputOutputConnections", false );
140  bool recurrentOutputs = ConfigurationHelper::getBool( params, prefix + "recurrentOutputs", false );
141  bool biasOnHidden = ConfigurationHelper::getBool( params, prefix + "biasOnHiddenNeurons", false );
142  bool biasOnOutput = ConfigurationHelper::getBool( params, prefix + "biasOnOutputNeurons", false );
143  create_net_block( inputNeuronType, hiddenNeuronType, outputNeuronType, recurrentHiddens, inputOutputConnections, recurrentOutputs, biasOnHidden, biasOnOutput );
144  } else {
145  // load the neural network from file; if the file doesn't exist, throw an exception
146  if (load_net_blocks(netFile.toAscii().data(), 0) == 0) {
147  ConfigurationHelper::throwUserConfigError(prefix + "netFile", netFile, "Could not open the specified network configuration file");
148  }
149  }
150 
151  computeParameters();
152  // --- reallocate data on the basis of number of parameters
153  delete[] freep;
154  freep=new float[nparameters+1000]; // we allocate more space to handle network variations introduced by the user
155  for(int r=0;r<nparameters;r++)
156  freep[r]=0.0f;
157 
158  delete[] phep;
159  phep=new float[nparameters+1000]; // we allocate more space to handle network variations introduced by the user
160  for(int r=0;r<nparameters;r++)
161  phep[r]=DEFAULT_VALUE; // the default value corresponds to "don't care"
162 
163  delete[] muts;
164  muts=new float[nparameters+1000]; // we allocate more space to handle network variations introduced by the user
165  for(int r=0;r<nparameters;r++)
166  muts[r]=DEFAULT_VALUE; // the default value corresponds to "don't care"
167 
168  if ( !netFile.isEmpty() ) {
169  // Try to Load the file filename.phe if present in the current directory
170 
171 /* char cCurrentPath[300];
172 
173  if (!getcwd(cCurrentPath, sizeof(cCurrentPath)))
174  {
175  printf("error\n");
176  }
177  else {
178  cCurrentPath[sizeof(cCurrentPath) - 1] = '\0';
179 
180  printf ("The current working directory is %s\n", cCurrentPath);
181  }
182 */
183  QFileInfo fileNet( netFile );
184  QString filePhe = fileNet.baseName() + ".phe";
185  load_net_blocks(filePhe.toAscii().data(), 1);
186  }
187 
188  //resetting net
189  resetNet();
190 
191  printBlocks();
192 
193  // we create the labels of the hidden neurons
194  for(int i = 0; i < nhiddens; i++) {
195  sprintf(neuronl[ninputs+i], "h%d", i);
196  neuronrange[ninputs+i][0] = 0.0;
197  neuronrange[ninputs+i][1] = 1.0;
198  neurondcolor[ninputs+i] = QColor(125,125,125);
199  }
200 }
201 
202 void Evonet::save(ConfigurationParameters& params, QString prefix) {
203  params.startObjectParameters( prefix, "Evonet", this );
204  if ( netFile.isEmpty() ) {
205  params.createParameter( prefix, "nSensors", QString::number(ninputs) );
206  params.createParameter( prefix, "nHiddens", QString::number(nhiddens) );
207  params.createParameter( prefix, "nMotors", QString::number(noutputs) );
208  } else {
209  params.createParameter( prefix, "netFile", netFile );
210  }
211  params.createParameter( prefix, "weightRange", QString::number(wrange) );
212  params.createParameter( prefix, "gainRange", QString::number(grange) );
213  params.createParameter( prefix, "biasRange", QString::number(brange) );
214 }
215 
216 void Evonet::describe( QString type ) {
217  Descriptor d = addTypeDescription( type, "Neural Network imported from Evorobot" );
218  d.describeInt( "nSensors" ).limits( 1, MAXN ).help( "The number of sensor neurons" );
219  d.describeInt( "nHiddens" ).limits( 1, MAXN ).help( "The number of hidden neurons" );
220  d.describeInt( "nMotors" ).limits( 1, MAXN ).help( "The number of motor neurons" );
221  d.describeString( "netFile" ).help( "The .net file that defines the architecture to load. WARNING: when this parameter is specified, all other parameters are ignored" );
222  d.describeReal( "weightRange" ).def(5.0f).limits(1,+Infinity).help( "The synaptic weights of the neural network can only assume values in [-weightRange, +weightRange]" );
223  d.describeReal( "gainRange" ).def(5.0f).limits(0,+Infinity).help( "The gain of a neuron can only assume values in [0, +gainRange]" );
224  d.describeReal( "biasRange" ).def(5.0f).limits(0,+Infinity).help( "The bias of a neuron can only assume values in [-biasRange, +biasRange]" );
225  d.describeEnum( "inputNeuronType" ).def("no_delta").values( QStringList() << "no_delta" << "with_delta" ).help( "The type of input neurons when the network is auto generated");
226  d.describeEnum( "hiddenNeuronType" ).def("logistic").values( QStringList() << "logistic" << "logistic+delta" << "binary" << "logistic_0.2" ).help( "The type of hidden neurons when the network is auto generated");
227  d.describeEnum( "outputNeuronType" ).def("no_delta").values( QStringList() << "no_delta" << "with_delta" ).help( "The type of output neurons when the network is auto generated");
228  d.describeBool( "recurrentHiddens" ).def(false).help( "when true, generates a network with recurrent hidden neurons");
229  d.describeBool( "inputOutputConnections" ).def(false).help( "when true, generates a network with input-output connections in addition to input-hidden-output connections");
230  d.describeBool( "recurrentOutputs" ).def(false).help( "when true, generates a network with recurrent output neurons");
231  d.describeBool( "biasOnHiddenNeurons" ).def(true).help( "when true, generates a network whose hidden neurons have a bias");
232  d.describeBool( "biasOnOutputNeurons" ).def(true).help( "when true, generates a network whose output neurons have a bias");
233 }
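// For reference, a minimal parameter group driving the auto-generated architecture
// could look like the sketch below (parameter names are the ones described above;
// the exact group/file syntax depends on how ConfigurationParameters is used):
//
//   nSensors = 4
//   nHiddens = 3
//   nMotors = 2
//   hiddenNeuronType = logistic
//   recurrentHiddens = false
//   biasOnHiddenNeurons = true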
234 
235 ParameterSettableUI* Evonet::getUIManager() {
236  return new EvonetUI( this );
237 }
238 
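// A note on the net_block table used below (layout inferred from how the blocks are
// read and written throughout this file): each row holds six integers,
//   [0] block type: 0 = connection block, 1 = update block, 2 = gain block, 3 = modulated gain block
//   [1] first receiving/updated neuron, [2] number of receiving/updated neurons
//   [3] first sending neuron, [4] number of sending neurons (connection blocks)
//   [5] flag marking the block as subject to training (see prepareForTraining())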
239 void Evonet::create_net_block( int inputNeuronType, int hiddenNeuronType, int outputNeuronType, bool recurrentHiddens, bool inputOutputConnections, bool recurrentOutputs, bool biasOnHidden, bool biasOnOutput )
240 {
241  int n;
242  int i;
243  int startx;
244  int dx;
245 
246  // setting the neuron types
247  for(i = 0; i < this->ninputs; i++) {
248  this->neurontype[i]= inputNeuronType;
249  neuronbias[i] = 0;
250  }
251  for(i = this->ninputs; i < (this->nneurons - this->noutputs); i++) {
252  this->neurontype[i]= hiddenNeuronType;
253  neuronbias[i] = (biasOnHidden) ? 1 : 0;
254  }
255  for(i = (this->nneurons - this->noutputs); i < this->nneurons; i++) {
256  this->neurontype[i]= outputNeuronType;
257  neuronbias[i] = (biasOnOutput) ? 1 : 0;
258  }
259 
260  // gain
261  for(i=0; i < this->nneurons; i++) {
262  this->neurongain[i]= 0;
263  }
264 
265  this->net_nblocks = 0;
266  // input update block
267  this->net_block[this->net_nblocks][0] = 1;
268  this->net_block[this->net_nblocks][1] = 0;
269  this->net_block[this->net_nblocks][2] = this->ninputs;
270  this->net_block[this->net_nblocks][3] = 0;
271  this->net_block[this->net_nblocks][4] = 0;
272  this->net_block[this->net_nblocks][5] = 0;
273  this->net_nblocks++;
274 
275  // input-hidden connections
276  if (this->nhiddens > 0) {
277  this->net_block[this->net_nblocks][0] = 0;
278  this->net_block[this->net_nblocks][1] = this->ninputs;
279  this->net_block[this->net_nblocks][2] = this->nhiddens;
280  this->net_block[this->net_nblocks][3] = 0;
281  this->net_block[this->net_nblocks][4] = this->ninputs;
282  this->net_block[this->net_nblocks][5] = 0;
283  this->net_nblocks++;
284  }
285 
286  // hidden-hidden connections
287  if (recurrentHiddens) {
288  this->net_block[this->net_nblocks][0] = 0;
289  this->net_block[this->net_nblocks][1] = this->ninputs;
290  this->net_block[this->net_nblocks][2] = this->nhiddens;
291  this->net_block[this->net_nblocks][3] = this->ninputs;
292  this->net_block[this->net_nblocks][4] = this->nhiddens;
293  this->net_block[this->net_nblocks][5] = 0;
294  this->net_nblocks++;
295  }
296 
297  // hidden update block
298  if (this->nhiddens > 0) {
299  this->net_block[this->net_nblocks][0] = 1;
300  this->net_block[this->net_nblocks][1] = this->ninputs;
301  this->net_block[this->net_nblocks][2] = this->nhiddens;
302  this->net_block[this->net_nblocks][3] = 0;
303  this->net_block[this->net_nblocks][4] = 0;
304  this->net_block[this->net_nblocks][5] = 0;
305  this->net_nblocks++;
306  }
307 
308  // input-output connections
309  if (this->nhiddens == 0 || inputOutputConnections) {
310  this->net_block[this->net_nblocks][0] = 0;
311  this->net_block[this->net_nblocks][1] = this->ninputs + this->nhiddens;
312  this->net_block[this->net_nblocks][2] = this->noutputs;
313  this->net_block[this->net_nblocks][3] = 0;
314  this->net_block[this->net_nblocks][4] = this->ninputs;
315  this->net_block[this->net_nblocks][5] = 0;
316  this->net_nblocks++;
317  }
318 
319  // hidden-output connections
320  if (this->nhiddens > 0) {
321  this->net_block[net_nblocks][0] = 0;
322  this->net_block[net_nblocks][1] = this->ninputs + this->nhiddens;
323  this->net_block[net_nblocks][2] = this->noutputs;
324  this->net_block[net_nblocks][3] = this->ninputs;
325  this->net_block[net_nblocks][4] = this->nhiddens;
326  this->net_block[this->net_nblocks][5] = 0;
327  this->net_nblocks++;
328  }
329 
330  // output-output connections
331  if (recurrentOutputs) {
332  this->net_block[this->net_nblocks][0] = 0;
333  this->net_block[this->net_nblocks][1] = this->ninputs + this->nhiddens;
334  this->net_block[this->net_nblocks][2] = this->noutputs;
335  this->net_block[this->net_nblocks][3] = this->ninputs + this->nhiddens;
336  this->net_block[this->net_nblocks][4] = this->noutputs;
337  this->net_block[this->net_nblocks][5] = 0;
338  this->net_nblocks++;
339  }
340 
341  // output update block
342  this->net_block[this->net_nblocks][0] = 1;
343  this->net_block[this->net_nblocks][1] = this->ninputs + this->nhiddens;
344  this->net_block[this->net_nblocks][2] = this->noutputs;
345  this->net_block[this->net_nblocks][3] = 0;
346  this->net_block[this->net_nblocks][4] = 0;
347  this->net_block[this->net_nblocks][5] = 0;
348  this->net_nblocks++;
349 
350  // cartesian xy coordinate for sensory neurons for display (y=400)
351  n = 0;
352  dx = 30;//25
353  if (this->ninputs > this->noutputs) {
354  startx = 50;
355  } else {
356  startx = ((this->noutputs - this->ninputs) / 2) * dx + 50;
357  }
358  for(i = 0; i < this->ninputs; i++, n++) {
359  this->neuronxy[n][0] = (i * dx) + startx;
360  this->neuronxy[n][1] = 400;
361  }
362 
363  // cartesian xy coordinate for internal neurons for display (y=225)
364  startx = this->ninputs * dx;
365  for(i=0; i < (this->nneurons - (this->ninputs + this->noutputs)); i++, n++) {
366  this->neuronxy[n][0] = startx + (i * dx);
367  this->neuronxy[n][1] = 225;
368  }
369 
370  // cartesian xy coordinate for motor neurons for display (y=50)
371  if (this->ninputs > this->noutputs) {
372  startx = ((this->ninputs - this->noutputs) / 2) * dx + 50;
373  } else {
374  startx = 50;
375  }
376  for(i=0; i < this->noutputs; i++, n++) {
377  this->neuronxy[n][0] = startx + (i * dx);
378  this->neuronxy[n][1] = 50;
379  }
380 
381  // set neurons whose activation should be displayed
382  for(i=0; i < this->nneurons; i++) {
383  this->neurondisplay[i] = 1;
384  }
385 
386  // calculate the height and width necessary to display all created neurons (drawnymax, drawnxmax)
387  drawnymax = 400 + 30;
388  for(i = 0, drawnxmax = 0; i < nneurons; i++) {
389  if (neuronxy[i][0] > drawnxmax) {
390  drawnxmax = neuronxy[i][0];
391  }
392  }
393  drawnxmax += 60;
394 
395  // compute the number of parameters
396  computeParameters();
397 
398  //i = this->ninputs;
399  //if (this->ninputs > i)
400  // i = this->noutputs;
401  //if ((this->nneurons - this->noutputs) > i)
402  // i = (this->nneurons - this->noutputs);
403  //drawnxmax = (i * dx) + dx + 30;
404 }
405 
406 int Evonet::load_net_blocks(const char *filename, int mode)
407 {
408 
409  FILE *fp;
410  int b;
411  int n;
412  int i;
413  float *ph;
414  float *mu;
415  float *p;
416  int np;
417  const int bufferSize = 128;
418  char cbuffer[bufferSize];
419 
420  if ((fp = fopen(filename,"r")) != NULL)
421  {
422  fscanf(fp,"ARCHITECTURE\n");
423  fscanf(fp,"nneurons %d\n", &nneurons);
424  fscanf(fp,"nsensors %d\n", &ninputs);
425  fscanf(fp,"nmotors %d\n", &noutputs);
426  if (nneurons > MAXN)
427  Logger::error( "Evonet - increase MAXN to support more than "+QString::number(MAXN)+" neurons" );
428  nhiddens = nneurons - (ninputs + noutputs);
429  fscanf(fp,"nblocks %d\n", &net_nblocks);
430  for (b=0; b < net_nblocks; b++)
431  {
432  fscanf(fp,"%d %d %d %d %d %d", &net_block[b][0],&net_block[b][1],&net_block[b][2],&net_block[b][3],&net_block[b][4], &net_block[b][5]);
433  if (net_block[b][0] == 0)
434  fscanf(fp," // connections block\n");
435  if (net_block[b][0] == 1)
436  fscanf(fp," // block to be updated\n");
437  if (net_block[b][0] == 2)
438  fscanf(fp," // gain block\n");
439  if (net_block[b][0] == 3)
440  fscanf(fp," // modulated gain block\n");
441  }
442 
443  fscanf(fp,"neurons bias, delta, gain, xy position, display\n");
444  drawnxmax = 0;
445  drawnymax = 0;
446  for(n=0; n < nneurons; n++)
447  {
448  fscanf(fp,"%d %d %d %d %d %d\n", &neuronbias[n], &neurontype[n], &neurongain[n], &neuronxy[n][0], &neuronxy[n][1], &neurondisplay[n]);
449  if(drawnxmax < neuronxy[n][0])
450  drawnxmax = neuronxy[n][0];
451  if(drawnymax < neuronxy[n][1])
452  drawnymax = neuronxy[n][1];
453  }
454  drawnxmax += 30;
455  drawnymax += 30;
456 
457  if (mode == 1)
458  {
459  fscanf(fp,"FREE PARAMETERS %d\n", &np);
460  if (nparameters != np) {
461  Logger::error(QString("ERROR: the network defines %1 free parameters while %2 contains %3 parameters").arg(nparameters).arg(filename).arg(np));
462  }
463  i = 0;
464  ph = phep;
465  mu = muts;
466  p = freep;
467 
468  while (fgets(cbuffer,bufferSize,fp) != NULL && i < np)
469  {
470  //read values from line
471  QString line = cbuffer;
472  QStringList lineContent = line.split(QRegExp("\\s+"), QString::SkipEmptyParts);
473 
474  bool floatOnSecondPlace = false;
475  lineContent[1].toFloat(&floatOnSecondPlace);
476 
477  if(lineContent.contains("*") || floatOnSecondPlace)
478  readNewPheLine(lineContent, ph, mu);
479  else
480  readOldPheLine(lineContent, ph, mu);
481 
482  *p = *ph;
483 
484  i++;
485  mu++;
486  ph++;
487  p++;
488  }
489  pheloaded = true;
490  }
491  fclose(fp);
492 
493  Logger::info( "Evonet - loaded file " + QString(filename) );
494  return(1);
495  }
496  else
497  {
498  Logger::warning( "Evonet - File " + QString(filename) + " not found" );
499  return(0);
500  }
501 }
502 
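/*
 * For reference, the .net/.phe files read by load_net_blocks() (and written by
 * save_net_blocks()) look roughly like the sketch below, reconstructed from the
 * fscanf/fprintf calls; the values are only examples:
 *
 *   ARCHITECTURE
 *   nneurons 9
 *   nsensors 4
 *   nmotors 2
 *   nblocks 3
 *   0 4 3 0 4 0 // connections block
 *   ...
 *   neurons bias, delta, gain, xy position, display
 *   1 0 0 50 400 1
 *   ...
 *   FREE PARAMETERS 23
 *   <value> <mutation rate> <label>      (a "*" means "don't care"/random)
 *   END
 */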
503 void Evonet::readOldPheLine(QStringList line, float* par, float* mut)
504 {
505  *par = line[0].toFloat();
506 
507  if(*par != DEFAULT_VALUE) { //no mutations
508  *mut = 0;
509  }
510 }
511 
512 void Evonet::readNewPheLine(QStringList line, float* par, float* mut)
513 {
514  if(line[0] == "*") {
515  *par = DEFAULT_VALUE; //start at random
516  } else {
517  //error handling
518 
519 /* char *tmp = line[0].toAscii().data();
520  printf("read : %s\n",line[0].toAscii().data());
521  printf("tmp : %s\n",tmp);
522 
523 /* for (int i = 0; i<line[0].length(); i++) {
524  if (tmp[i]==',') {
525  tmp[i] = '.';
526 
527  }
528  }
529 */
530 // *par = strtof(tmp, NULL);
531 
532 
533 
534 // sscanf(tmp, "%f", par);
535  *par = line[0].toFloat();
536  }
537 
538 
539  if(line[1] == "*") {
540  *mut = DEFAULT_VALUE;
541  } else {
542  *mut = line[1].toFloat();
543  }
544 }
545 
546 /*
547  * Saves the architecture and also the parameters (when mode == 1)
548  */
549 void Evonet::save_net_blocks(const char *filename, int mode)
550 {
551  FILE *fp;
552  int b;
553  int n;
554  int i;
555  int t;
556 
557  char* default_string = "*\t\t";
558  char **p = new char*[freeParameters()];
559  char **mu = new char*[freeParameters()];
560  for(int h=0; h<freeParameters(); h++) {
561  mu[h] = new char[50];
562  p[h] = new char[50];
563 
564  if(muts[h] == DEFAULT_VALUE) {
565  mu[h] = default_string;
566  } else {
567  sprintf(mu[h], "%f", muts[h]);
568  }
569 
570  if(freep[h] == DEFAULT_VALUE) {
571  p[h] = default_string;
572  } else {
573  sprintf(p[h], "%f", freep[h]);
574  }
575  }
576 
577  if ((fp = fopen(filename,"w")) != NULL) {
578  fprintf(fp,"ARCHITECTURE\n");
579  fprintf(fp,"nneurons %d\n", nneurons);
580  fprintf(fp,"nsensors %d\n", ninputs);
581  fprintf(fp,"nmotors %d\n", noutputs);
582  fprintf(fp,"nblocks %d\n", net_nblocks);
583  for (b = 0; b < net_nblocks; b++) {
584  fprintf(fp,"%d %d %d %d %d %d", net_block[b][0],net_block[b][1],net_block[b][2],net_block[b][3],net_block[b][4],net_block[b][5]);
585  if (net_block[b][0] == 0) {
586  fprintf(fp," // connections block\n");
587  } else if (net_block[b][0] == 1) {
588  fprintf(fp," // block to be updated\n");
589  } else if (net_block[b][0] == 2) {
590  fprintf(fp," // gain block\n");
591  } else if (net_block[b][0] == 3) {
592  fprintf(fp," // modulated gain block\n");
593  }
594  }
595  fprintf(fp,"neurons bias, delta, gain, xy position, display\n");
596  for(n = 0; n < nneurons; n++) {
597  fprintf(fp,"%d %d %d %d %d %d\n", neuronbias[n], neurontype[n], neurongain[n], neuronxy[n][0], neuronxy[n][1], neurondisplay[n]);
598  }
599 
600  computeParameters();
601  if (mode == 1) {
602  fprintf(fp,"FREE PARAMETERS %d\n", nparameters);
603  for(i = 0; i < nneurons; i++) {
604  if (neurongain[i] == 1) {
605  fprintf(fp,"%s \t %s \tgain %s\n",*p, *mu, neuronl[i]);
606  p++;
607  mu++;
608  }
609  }
610  for(i=0; i<nneurons; i++) {
611  if (neuronbias[i] == 1) {
612  fprintf(fp,"%s \t %s \tbias %s\n",*p, *mu, neuronl[i]);
613  p++;
614  mu++;
615  }
616  }
617  for (b=0; b < net_nblocks; b++) {
618  if (net_block[b][0] == 0) {
619  for(t=net_block[b][1]; t < net_block[b][1] + net_block[b][2];t++) {
620  for(i=net_block[b][3]; i < net_block[b][3] + net_block[b][4];i++) {
621  fprintf(fp,"%s \t %s \tweight %s from %s\n",*p, *mu, neuronl[t], neuronl[i]);
622  p++;
623  mu++;
624  }
625  }
626  } else if (net_block[b][0] == 1) {
627  for(t=net_block[b][1]; t < (net_block[b][1] + net_block[b][2]); t++) {
628  if (neurontype[t] == 1) {
629  float timeC = 0;
630  if(*p != default_string) {
631  timeC = atof(*p);
632  timeC = fabs(timeC)/wrange; //(timeC + wrange)/(wrange*2);
633  }
634 
635  fprintf(fp,"%s \t %s \ttimeconstant %s (%f)\n", *p, *mu, neuronl[t], timeC);
636  p++;
637  mu++;
638  }
639  }
640  }
641  }
642  }
643  fprintf(fp,"END\n");
644 
645  Logger::info( "Evonet - controller saved on file " + QString(filename) );
646  fclose(fp);
647  } else {
648  Logger::error( "Evonet - unable to create the file " + QString(filename) );
649  }
650 }
651 
652 /*
653  * standard logistic
654  */
655 float Evonet::logistic(float f)
656 {
657  return((float) (1.0 / (1.0 + exp(0.0 - f))));
658 }
659 
660  float Evonet::tansig(float f) {
661 
662  return 2.0/(1.0+exp(-2.0*f))-1.0;
663  }
664 
665 /*
666  * compute the number of free parameters
667  */
668 void Evonet::computeParameters()
669 {
670  int i;
671  int t;
672  int b;
673  int updated[MAXN];
674  int ng;
675  int nwarnings;
676 
677  ng = 0;
678  for(i=0;i < nneurons;i++) {
679  updated[i] = 0;
680  }
681  // gain
682  for(i=0;i < nneurons;i++) {
683  if (neurongain[i] == 1) {
684  ng++;
685  }
686  }
687  // biases
688  for(i=0;i < nneurons;i++) {
689  if (neuronbias[i] == 1) {
690  ng++;
691  }
692  }
693  // timeconstants
694  for(i=0;i < nneurons;i++) {
695  if (neurontype[i] == 1) {
696  ng++;
697  }
698  }
699  // blocks
700  for (b=0; b < net_nblocks; b++) {
701  // connection block
702  if (net_block[b][0] == 0) {
703  for(t=net_block[b][1]; t < net_block[b][1] + net_block[b][2];t++) {
704  for(i=net_block[b][3]; i < net_block[b][3] + net_block[b][4];i++) {
705  ng++;
706  }
707  }
708  }
 // update block: mark the neurons updated by this block (used by the warnings below)
 if (net_block[b][0] == 1) {
 for(t=net_block[b][1]; t < net_block[b][1] + net_block[b][2];t++) {
 updated[t] += 1;
 }
 }
709  }
710 
711  nwarnings = 0;
712  for(i=0;i < nneurons;i++) {
713  if (updated[i] < 1 && nwarnings == 0) {
714  Logger::warning( "Evonet - neuron " + QString::number(i) + " will never be activated according to the current architecture" );
715  nwarnings++;
716  }
717  if (updated[i] > 1 && nwarnings == 0) {
718  Logger::warning( "Evonet - neuron " + QString::number(i) + " will be activated more than once according to the current architecture" );
719  nwarnings++;
720  }
721  }
722  nparameters=ng; // number of parameters
723 }
724 
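// Note on the layout of freep: updateNet() consumes the free parameters in a fixed
// order, namely the gains of neurons with neurongain == 1, then the biases of neurons
// with neuronbias == 1, then, block by block, the connection weights of each
// connection block and the time constants of delta neurons in each update block.
// computeParameters() computes the same total.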
725 void Evonet::updateNet()
726 {
727  int i;
728  int t;
729  int b;
730  float *p;
731  float delta;
732  float netinput[MAXN];
733  float gain[MAXN];
734 
735  p = freep;
736  //nl = neuronlesion;
737 
738  // gain
739  for(i=0;i < nneurons;i++) {
740  if (neurongain[i] == 1) {
741  gain[i] = (float) (fabs((double) *p) / wrange) * grange;
742  p++;
743  } else {
744  gain[i] = 1.0f;
745  }
746  }
747  // biases
748 
749 /* printf("weights: ");
750  printWeights();
751  printf("\n");
752 */
753  for(i=0;i < nneurons;i++) {
754  if (neuronbias[i] == 1) {
755 // printf("netinput[%d]= [%ld] %f/%f*%f = %f*%f = %f",i,p-freep, *p,wrange,brange,((double)*p/wrange),brange,((double)*p/wrange)*brange);
756  netinput[i] = ((double)*p/wrange)*brange;
757  p++;
758  } else {
759  netinput[i] = 0.0f;
760  }
761 // printf("\tnetinput[%d]: %f -- wrange: %f\n",i,netinput[i],wrange);
762  }
763 
764 
765  // blocks
766  for (b=0; b < net_nblocks; b++) {
767  // connection block
768  if (net_block[b][0] == 0) {
769  for(t=net_block[b][1]; t < net_block[b][1] + net_block[b][2];t++) {
770  for(i=net_block[b][3]; i < net_block[b][3] + net_block[b][4];i++) {
771  netinput[t] += act[i] * gain[i] * *p;
772 // printf("netinput[%d] += act[%d] * gain[%d] * %f += %f * %f * %f += %f = %f\n",t,i,i,*p,act[i],gain[i], *p,act[i] * gain[i] * *p, netinput[t] );
773  p++;
774  }
775  }
776  }
777  // gain block (gain of neuron a-b set equal to gain of neuron a)
778  if (net_block[b][0] == 2) {
779  for(t=net_block[b][1]; t < net_block[b][1] + net_block[b][2];t++) {
780  gain[t] = gain[net_block[b][1]];
781  }
782  }
783  // gain block (gain of neuron a-b set equal to act[c])
784  if (net_block[b][0] == 3) {
785  for(t=net_block[b][1]; t < net_block[b][1] + net_block[b][2];t++) {
786  gain[t] = act[net_block[b][3]];
787  }
788  }
789  // update block
790  if (net_block[b][0] == 1) {
791  for(t=net_block[b][1]; t < (net_block[b][1] + net_block[b][2]); t++) {
792  if (t < ninputs) {
793  switch(neurontype[t]) {
794  case 0: // simple relay units
795  act[t] = input[t];
796  break;
797  case 1: // delta neurons
798  delta = (float) (fabs((double) *p) / wrange);
799  p++;
800  act[t] = (act[t] * delta) + (input[t] * (1.0f - delta));
801  break;
802  }
803  if(neuronlesions > 0 && neuronlesion[t]) {
804  act[t]= (float)neuronlesionVal[t];
805  }
806  } else {
807  switch(neurontype[t]) {
808  case 0: // simple logistic
809  default:
810  act[t] = logistic(netinput[t]);
811  delta = 0.0;
812  break;
813  case 1: // delta neurons
814  delta = (float) (fabs((double) *p) / wrange);
815  p++;
816  act[t] = (act[t] * delta) + (logistic(netinput[t]) * (1.0f - delta));
817  break;
818  case 2: // binary neurons
819  if (netinput[t] >= 0.0) {
820  act[t] = 1.0;
821  } else {
822  act[t] = 0.0;
823  }
824  break;
825  case 3: // logistic2 neurons
826  act[t] = logistic(netinput[t]*0.2f);
827  delta = 0.0;
828  break;
829  }
830  if(neuronlesions > 0 && neuronlesion[t]) {
831  act[t]= (float)neuronlesionVal[t];
832  }
833  }
834  }
835  }
836  }
837 
838  // Storing the current activations
839  memcpy(storedActivations[nextStoredActivation], act, nneurons * sizeof(float));
840  nextStoredActivation = (nextStoredActivation + 1) % MAXSTOREDACTIVATIONS;
841  if (firstStoredActivation == nextStoredActivation) {
842  // We have filled the circular buffer, discarding the oldest activation
843  firstStoredActivation = (firstStoredActivation + 1) % MAXSTOREDACTIVATIONS;
844  }
845 
846  // increment the counter
847  updatescounter++;
848 
849  emit evonetUpdated();
850 }
851 
852 int Evonet::setInput(int inp, float value)
853 {
854  if (inp>=ninputs || inp<0) {
855  return -1; // exceeding sensor number
856  }
857  input[inp]=value;
858  return 0;
859 }
860 
861 float Evonet::getOutput(int out)
862 {
863  if(out>=noutputs) {
864  return -1; // exceeding output number
865  }
866  return act[ninputs+nhiddens+out];
867 }
868 
869 float Evonet::getInput(int in)
870 {
871  return this->input[in];
872 }
873 
874 float Evonet::getNeuron(int in)
875 {
876  return act[in];
877 }
878 
879 void Evonet::resetNet()
880 {
881  int i;
882  for (i = 0; i < MAXN; i++) {
883  act[i]=0.0;
884  netinput[i]=0.0;
885  input[i]=0.0;
886  }
887  updatescounter = 0;
888 }
889 
890 void Evonet::injectHidden(int nh, float val)
891 {
892  if(nh<nhiddens) {
893  act[this->ninputs+nh] = val;
894  }
895 }
896 
897 float Evonet::getHidden(int h)
898 {
899  if(h<nhiddens && h>=0) {
900  return act[this->ninputs+h];
901  } else {
902  return -999;
903  }
904 }
905 
906 int Evonet::freeParameters()
907 {
908  return this->nparameters;
909 }
910 
911 float Evonet::getFreeParameter(int i)
912 {
913  return freep[i];
914 }
915 
916 bool Evonet::pheFileLoaded()
917 {
918  return pheloaded;
919 }
920 
921 /*
922  * Copy parameters from genotype
923  */
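// Each integer gene in [0, geneMaxValue] is mapped linearly onto the weight range:
// a gene of 0 becomes +wrange, a gene of geneMaxValue becomes -wrange.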
924 void Evonet::setParameters(const int *dt)
925 {
926  int i;
927  float *p;
928 
929  p = freep;
930  for (i=0; i<freeParameters(); i++, p++) {
931  *p = wrange - ((float)dt[i]/geneMaxValue)*wrange*2;
932  }
933  emit evonetUpdated();
934 }
935 
936 void Evonet::getMutations(float* GAmut)
937 {
938  //copy mutation vector
939  for(int i=0; i<freeParameters(); i++) {
940  GAmut[i] = muts[i];
941  }
942 }
943 
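// Inverse of the mapping used in setParameters(): phenotype values in [-wrange, +wrange]
// are mapped back onto integer genes in [0, geneMaxValue]; entries equal to DEFAULT_VALUE
// are propagated as-is.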
944 void Evonet::copyPheParameters(int* pheGene)
945 {
946  for(int i=0; i<freeParameters(); i++)
947  {
948  if(phep[i] == DEFAULT_VALUE) {
949  pheGene[i] = DEFAULT_VALUE;
950  } else {
951  pheGene[i] = (int)((wrange - phep[i])*geneMaxValue/(2*wrange));
952  }
953  }
954 }
955 
956 void Evonet::printIO()
957 {
958  QString output;
959 
960  output = "In: ";
961  for (int in = 0; in < this->ninputs; in++) {
962  output += QString("%1 ").arg(this->input[in], 0, 'f', 10);
963  }
964  output += "Hid: ";
965  for (int hi = this->ninputs; hi < (this->nneurons - this->noutputs); hi++) {
966  output += QString("%1 ").arg(this->act[hi], 0, 'f', 10);
967  }
968  output += "Out: ";
969  for (int out = 0; out < this->noutputs; out++) {
970  output += QString("%1 ").arg(this->act[this->ninputs+this->nhiddens+out], 0, 'f', 10);
971  }
972 
973  Logger::info(output);
974 
975 }
976 
977 int Evonet::getParamBias(int nbias)
978 {
979  int pb=-999; // if it remains -999, nbias is out of range
980  if (nbias<nparambias && nbias>-1) {
981  pb=(int) freep[nparambias+nbias];
982  }
983  return pb;
984 }
985 
986 float Evonet::getWrange()
987 {
988  return wrange;
989 }
990 
991 
992 void Evonet::printBlocks()
993 {
994  Logger::info("Evonet - ninputs " + QString::number(this->ninputs));
995  Logger::info("Evonet - nhiddens " + QString::number(this->nhiddens));
996  Logger::info("Evonet - noutputs " + QString::number(this->noutputs));
997  Logger::info("Evonet - nneurons " + QString::number(this->nneurons));
998 
999  for(int i=0;i<this->net_nblocks;i++) {
1000  Logger::info( QString( "Evonet Block - %1 | %2 - %3 -> %4 - %5 | %6" )
1001  .arg(net_block[i][0])
1002  .arg(net_block[i][1])
1003  .arg(net_block[i][2])
1004  .arg(net_block[i][3])
1005  .arg(net_block[i][4])
1006  .arg(net_block[i][5]));
1007  }
1008 }
1009 
1010 int Evonet::getNoInputs()
1011 {
1012  return ninputs;
1013 }
1014 
1015 int Evonet::getNoHiddens()
1016 {
1017  return nhiddens;
1018 }
1019 
1020 int Evonet::getNoOutputs()
1021 {
1022  return noutputs;
1023 }
1024 
1025 int Evonet::getNoNeurons()
1026 {
1027  return nneurons;
1028 }
1029 
1030 void Evonet::setRanges(double weight, double bias, double gain)
1031 {
1032  wrange=weight;
1033  brange=bias;
1034  grange=gain;
1035 }
1036 
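// storedActivations is used as a circular buffer holding the last MAXSTOREDACTIVATIONS
// activation vectors saved by updateNet(); this returns the oldest one still buffered
// (or NULL when the buffer is empty) and advances the read index.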
1037 float* Evonet::getOldestStoredActivation()
1038 {
1039  if (firstStoredActivation == nextStoredActivation) {
1040  return NULL;
1041  }
1042 
1043  const int ret = firstStoredActivation;
1044  firstStoredActivation = (firstStoredActivation + 1) % MAXSTOREDACTIVATIONS;
1045  return storedActivations[ret];
1046 }
1047 
1048 int Evonet::updateCounts() {
1049  return updatescounter;
1050 }
1051 
1052  void Evonet::printWeights() {
1053  for (int i = 0; i<freeParameters(); i++) {
1054  printf("%f\n",freep[i]);
1055  }
1056  }
1057 
1058  void Evonet::initWeightsInRange(float min, float max) {
1059 
1060  float range = max-min;
1061 
1062  for (int i = 0; i<freeParameters(); i++) {
1063  freep[i] = (((float) rand())/RAND_MAX)*range +min;
1064 // freep[i] = 0.2;
1065  }
1066 
1067 /*
1068  freep[0] = 0.26;
1069  freep[1] = 0.27;
1070  freep[2] = 0.28;
1071  freep[3] = -0.23;
1072  freep[4] = 0.2;
1073  freep[5] = 0.21;
1074  freep[6] = 0.22;
1075  freep[7] = 0.23;
1076  freep[8] = 0.24;
1077  freep[9] = 0.25;
1078  freep[10] = -0.2;
1079  freep[11] = -0.21;
1080  freep[12] = -0.22;
1081 */
1082  }
1083 
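 // Nguyen-Widrow style initialization (a sketch of the idea): hidden weights are first
 // drawn uniformly in [min, max] and then rescaled by k = beta/norm with
 // beta = 0.7 * nhiddens^(1/ninputs), so that the hidden units start with well-spread
 // active regions. Note that here norm is computed over all hidden units together,
 // rather than per hidden unit as in the textbook formulation.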
1084  void Evonet::initWeightsNguyenWidrow(float min, float max) {
1085 
1086  initWeightsInRange(min, max);
1087 
1088  double beta = 0.7 * pow(nhiddens, 1.0/ninputs);
1089  double norm = 0;
1090 
1091  double tmp;
1092 
1093  for (int i =0; i<nhiddens; i++) {
1094  for (int j = 0; j<ninputs; j++) {
1095  tmp = getWeight(i+ninputs, j);
1096  norm += tmp*tmp;
1097  }
1098 
1099  if (neuronbias[i+ninputs]) {
1100 
1101  int ptr = 0;
1102  for (int j = 0; j<i+ninputs; j++) {
1103  if(neuronbias[j])
1104  ptr++;
1105  }
1106 
1107  norm += freep[ptr]*freep[ptr];
1108  }
1109 
1110 
1111  }
1112 
1113  norm = sqrt(norm);
1114 
1115  double k = beta/norm;
1116 
1117  for (int i =0; i<nhiddens; i++) {
1118  for (int j = 0; j<ninputs; j++) {
1119  setWeight(i+ninputs, j, getWeight(i+ninputs, j)*k);
1120  }
1121 
1122  if (neuronbias[i+ninputs]) {
1123 
1124  int ptr = 0;
1125  for (int j = 0; j<i+ninputs; j++) {
1126  if(neuronbias[j])
1127  ptr++;
1128  }
1129 
1130  freep[ptr]*=k;
1131  }
1132  }
1133 
1134 
1135  }
1136 
1137 
1138  void Evonet::hardwire() {
1139 
1140  //for (int i = 0; i<freeParameters(); i++) {
1141  // freep[i] = 0;
1142  //}
1143 
1144  int ptr = 0;
1145 
1146  for (int i = 0; i<nneurons; i++) {
1147  if (neuronbias[i]) {
1148  if (i>16 && i<23) {
1149  freep[ptr++]=0;
1150  }
1151  else {
1152 // freep[ptr++]=;
1153  ptr++;
1154  }
1155  }
1156  }
1157 
1158 
1159  for (int b=0; b<net_nblocks; b++) {
1160  for (int i=0; i<net_block[b][2]*net_block[b][4]; i++) {
1161  if (net_block[b][0] == 0 && net_block[b][5]==1){
1162 // freep[ptr++]=-1;
1163  ptr++;
1164  }
1165  else if (net_block[b][0] == 0 && net_block[b][5]==0){
1166  freep[ptr++]=0;
1167  }
1168  }
1169  }
1170  // Used to set the weights of the reflex
1171 
1172  int p = 0;
1173  for (int i = 0; i<nneurons; i++)
1174  p += neuronbias[i];
1175 
1176 
1177  freep[p++] = 0;
1178  freep[p++] = 15;
1179  freep[p++] = -15;
1180  freep[p] = 0;
1181 
1182 
1183 
1184  }
1185 
1186 //#define BPDEBUG
1187 
1188 #ifdef BPDEBUG
1189 #define bpdebug(x,...) printf(x,##__VA_ARGS__)
1190 #define debug(x,...) printf(x,##__VA_ARGS__)
1191 #else
1192 #define bpdebug(x,...)
1193 #define debug(x,...)
1194 #endif
1195 
1196 #define inRange(x,y,z) ( (x) >= (y) && (x) < (y)+(z) )
1197 
1198  int Evonet::isHidden(int neuron){
1199  return neuron >= ninputs && neuron < ninputs+nhiddens;
1200  }
1201 
1202 /* float Evonet::backPropStep(QVector<float> tInput, double rate) {
1203 
1204  float globalError = 0;
1205 
1206  float delta[MAXN];
1207  float error[MAXN];
1208 
1209  int b;
1210  int nbiases = 0;
1211 
1212  for (int i = 0; i<nneurons; i++, nbiases += neuronbias[i]);
1213 
1214  bpdebug("%s net: %p, net_blocks: %p\n",__PRETTY_FUNCTION__,this, net_block);
1215 
1216 #ifdef BPDEBUG
1217  printf("netinput: ");
1218 
1219  for (int i = 0; i<ninputs; i++) {
1220  printf("%f ",input[i]);
1221  }
1222  printf("\n");
1223 
1224  printWeights();
1225  printIO();
1226 #endif
1227 
1228  bpdebug("nbiases: %d\n\n", nbiases);
1229 
1230  //*
1231  // Storing offsets to easily obtain the staring position
1232  // of the weights for each connection block
1233  //*
1234 
1235  bpdebug("Offsets:");
1236 
1237  QVector<int> offsets;
1238  offsets.push_back(nbiases);
1239  bpdebug("\t%d", offsets[0]);
1240 
1241  int p = 0;
1242 
1243  for (b = 0; b<net_nblocks; b++) {
1244  if (net_block[b][0]==0) {
1245  offsets.push_back(offsets[p++] + net_block[b][2]*net_block[b][4]);
1246  bpdebug("\t%d", offsets[p]);
1247  break;
1248  }
1249  }
1250 
1251 
1252  int bias_ptr = offsets[0]-1;
1253  bpdebug("\nbias_ptr: %d\n\n", bias_ptr);
1254 
1255  //*
1256  // Compute the error for the output neurons
1257  //
1258  // NOTE : this assumes that the last connection block to train (net_block[b][5]==1) is the
1259  // one of the outputs. This code should be modified to consider the case in which the
1260  // connections to the output neurons are divided in two (or more) blocks.
1261  // It may be useful to have a function/flag to indicate wether a connection block involves
1262  // output neurons or not.
1263  //*
1264 
1265 
1266  bpdebug("Error for last block\n");
1267  for (b=net_nblocks-1; b>=0; b--) {
1268  if (! (net_block[b][0]==0 && net_block[b][5]==1) )
1269  continue;
1270 
1271  int end = net_block[b][1]+net_block[b][2];
1272 
1273  for (int i = net_block[b][1]; i<end; i++) {
1274 
1275  float d = tInput[i-net_block[b][1]] - act[i];
1276 
1277  error[i] = act[i]*(1-act[i]) * d;
1278  delta[i] = error[i] * rate;
1279 
1280  bpdebug("\terror[%d]= gradient*(tInput[i - net_block[%d][3]]-act[%d]) = act[%d]*(1-act[%d]) * (tInput[%d - %d = %d]-act[%d]) = %f*(1-%f) * (%f - %f) = %f * %f = %f\n",i,b,i,i,i,i,net_block[b][1],i-net_block[b][1],i,act[i],act[i],tInput[i-net_block[b][1]],act[i],act[i]*(1-act[i]),(tInput[i-net_block[b][1]] - act[i]), error[i]);
1281  bpdebug("\terror[%d]= %f\n",i,error[i]);
1282 
1283  globalError+= d*d;
1284  }
1285 
1286  break;
1287  }
1288 
1289 
1290  //*
1291  // Backpropagate the error
1292  //*
1293 
1294  bpdebug("\nBackpropagate\n");
1295 
1296  p=offsets.size()-1;
1297  for (; b>=0; b--) {
1298 
1299  if (! (net_block[b][0]==0 && net_block[b][5]==1) )
1300  continue;
1301 
1302  //*
1303  // First compute the error for the lower layer. This must be done
1304  // before updating the weights (since the error depends on the weights).
1305  // This step is not necessary if the lower layer is the input one.
1306  //*
1307 
1308  //*
1309  // i iterates over the "lower" layer, j over the "upper"
1310  //*
1311 
1312  bpdebug("\tb: %d\n", b);
1313 
1314  if (net_block[b][3]+net_block[b][4] -1 > ninputs) {
1315 
1316  bpdebug("\tnetblock[%d][3]+net_block[%d][4] -1 = %d > %d => Computing the error\n", b,b,net_block[b][3]+net_block[b][4]-1, ninputs);
1317 
1318  for (int i = net_block[b][3]; i<net_block[b][3]+net_block[b][4]; i++) {
1319 
1320  bpdebug("\ti: %d\n", i);
1321  error[i] = 0;
1322 
1323  for (int j= net_block[b][1]; j<net_block[b][1]+net_block[b][2]; j++) {
1324 
1325  error[i]+= error[j] * freep[ offsets[p] + (i-net_block[b][3]) + (j-net_block[b][1])*net_block[b][2]];
1326  bpdebug("\t\tj: %d\n", j);
1327  bpdebug("\t\terror[%d] += act[j] * freep[ offset[p] + (i-net_block[b][3]) + j*net_block[b][2] ] = act[%d] * freep[ %d + %d + %d*%d ] = %f * %f = %f\n", i,j,offsets[p],(i-net_block[b][3]),(j-net_block[b][1]),net_block[b][2], act[j], freep[ offsets[p] + (i-net_block[b][3]) + (j-net_block[b][1])*net_block[b][2]], act[j]*freep[ offsets[p] + (i-net_block[b][3]) + (j-net_block[b][1])*net_block[b][2]] );
1328  bpdebug("\t\terror[%d]: %f\n", i,error[i]);
1329  }
1330 
1331  error[i]*= act[i]*(1-act[i]);
1332  bpdebug("\t\terror[%d]: %f\n", i,error[i]);
1333  delta[i] = error[i]*rate;
1334  bpdebug("\t\tdelta[%d]: %f\n", i,delta[i]);
1335  }
1336 
1337  }
1338 
1339  //*
1340  // Then modify the weights of the connections from the lower to the upper layer
1341  //*
1342 
1343  bpdebug("\n\tUpdating weights\n");
1344  for (int j= net_block[b][1]+net_block[b][2]-1; j>=net_block[b][1]; j--) {
1345  bpdebug("\tj: %d\n", j);
1346 
1347  if (neuronbias[j]) {
1348  freep[bias_ptr--] += delta[j];
1349  bpdebug("\t\tNeuron has bias\n");
1350  bpdebug("\t\t\tfreep[bias_ptr] = freep[ %d ] += delta[%d] = %f\n", bias_ptr+1, j, delta[j]);
1351  bpdebug("\t\t\tfreep[%d]: %f\n", bias_ptr+1, freep[bias_ptr+1]);
1352  }
1353 
1354  for (int i = net_block[b][3]; i<net_block[b][3]+net_block[b][4]; i++) {
1355  freep[ offsets[p] + (i-net_block[b][3]) + (j-net_block[b][1])*net_block[b][4]] += delta[j] * act[i];
1356  bpdebug("\t\ti: %d\n", i);
1357  bpdebug("\t\t\tfreep[ offset[%d] + (i-net_block[%d][3]) + j*net_block[%d][2] ] = freep[ %d + %d + %d*%d = %d ] += delta[%d] * act[%d] = %f * %f = %f\n", p, b,b, offsets[p], i-net_block[b][3], j-net_block[b][1],net_block[b][4],offsets[p] + (i-net_block[b][3]) + (j-net_block[b][1])*net_block[b][4],j,i,delta[j],act[i],delta[j]*act[i] );
1358  bpdebug("\t\t\tfreep[ %d ] = %f\n", offsets[p] + (i-net_block[b][3]) + (j-net_block[b][1])*net_block[b][4], freep[offsets[p] + (i-net_block[b][3]) + (j-net_block[b][1])*net_block[b][4]]);
1359  }
1360 
1361 
1362  }
1363 
1364  p--;
1365 
1366  }
1367 
1368  bpdebug("\n\n");
1369  return globalError;
1370 
1371 
1372  }
1373 */
1374 
1375  void Evonet::printAct() {
1376 
1377  for (int i = 0; i<nneurons; i++) {
1378  printf("act[%d]: %f\n",i,act[i]);
1379  }
1380  }
1381 
1382 
1383 
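 // Mean squared error of the network over a training set: each pattern is fed forward
 // and the squared error of every trained output is accumulated, weighted by the
 // per-output error weights set up in prepareForTraining(), then normalized by the
 // number of patterns and the sum of the weights.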
1384  float Evonet::computeMeanSquaredError(QVector<float> trainingSet, QVector<float> desiredOutput) {
1385 
1386  float err = 0;
1387  int size = trainingSet.size()/ninputs;
1388 // int nOutputToTrain = desiredOutput.size() / size;
1389 
1390  int ptr = 0;
1391  double tmp;
1392 
1393  for (int i = 0; i<nneurons; i++) {
1394  act[i] = 0;
1395  }
1396 
1397 
1398  for (int i = 0; i<size; i++) {
1399 
1400  for (int j = 0; j<ninputs; j++) {
1401  setInput(j, trainingSet[i*ninputs + j]);
1402  }
1403 
1404  updateNet();
1405 
1406  for (int j=0; j<noutputs; j++) {
1407  if (!outputsToTrain[j])
1408  continue;
1409 
1410  tmp = desiredOutput[ptr++] - act[j+ninputs+nhiddens];
1411 // printf("d[%d] - act[%d] = %f - %f = %f\n",ptr-1,j+ninputs+nhiddens,desiredOutput[ptr-1 ], act[j+ninputs+nhiddens], tmp);
1412  err += tmp*tmp*err_weights[j]*err_weights[j];
1413 
1414  if (isinf(err)) {
1415  printf("INF!!\n");
1416  }
1417 
1418  }
1419 
1420  }
1421 
1422 // printf("err: %f\n",err);
1423 
1424  return err / (err_weight_sum*size);
1425 
1426  }
1427 
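 // Packs the trainable parameters into w in a fixed order: for every trainable hidden
 // connection block, the bias (if present) and the incoming weights of each receiving
 // neuron; then, for every output neuron marked in outputsToTrain, its bias (if present)
 // followed by its incoming weights. importWeightsFromVector() assumes the same ordering.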
1428  int Evonet::extractWeightsFromNet(Eigen::VectorXf& w) {
1429 
1430  //*
1431  // This code assumes that input neurons have no bias and
1432  // none of the neurons has gain.
1433  //*
1434 
1435  int wPtr = 0, paramPtr = 0;
1436 
1437  int nbiases = 0;
1438  for (int i = 0; i<nneurons; i++) {
1439  nbiases += (neuronbias[i]==1);
1440  }
1441  paramPtr = nbiases;
1442 
1443  //*
1444  // Search for neurons to train
1445  //*
1446 
1447  for (int b = 0; b<net_nblocks; b++) {
1448 
1449  //*
1450  // Output neurons are treated separately. You still need to go through
1451  // all the blocks, since the output blocks are not guaranteed to all be at the end.
1452  //*
1453 
1454  if (net_block[b][0] != 0)
1455  continue;
1456 
1457  if (net_block[b][5]==1 && !(net_block[b][1]>=ninputs+nhiddens)) {
1458  for (int i = net_block[b][1]; i<net_block[b][1]+net_block[b][2]; i++) {
1459 
1460  if (neuronbias[i]) {
1461 
1462  //*
1463  // If the neuron has a bias, in order to obtain the index of the
1464  // corresponding bias in vector freep, we iterate over all "previous"
1465  // neurons, counting those who have a bias.
1466  //*
1467 
1468  int ptr = 0;
1469  for (int j = 0; j<i; j++) {
1470  if(neuronbias[j])
1471  ptr++;
1472  }
1473 
1474  debug("Adding bias of neuron %d (freep[%d]) in w[%d]\n",i,ptr,wPtr);
1475  w[wPtr++] = freep[ptr];
1476  }
1477 
1478  //*
1479  // Adding weights of the connections of the i-th neurons with neurons
1480  // in the "lower" block
1481  //*
1482 
1483  for (int j = 0; j<net_block[b][4]; j++) {
1484  debug("Adding connection %d of neuron %d (freep[%d]) in w[%d]\n",j,i,paramPtr,wPtr);
1485  w[wPtr++] = freep[paramPtr++];
1486  }
1487  }
1488  }
1489  else
1490  paramPtr+= net_block[b][2]*net_block[b][4];
1491  }
1492 
1493 
1494  //*
1495  // Output neurons are stored sequentially, from the first to the last.
1496  //*
1497 
1498 
1499  for (int i = 0; i<noutputs; i++) {
1500  paramPtr = nbiases;
1501  if (!outputsToTrain[i])
1502  continue;
1503 
1504  int i_freep = i+ninputs+nhiddens;
1505 
1506  if (neuronbias[i_freep]) {
1507 
1508  //*
1509  // If the neuron has a bias, in order to obtain the index of the
1510  // corresponding bias in vector freep, we iterate over all "previous"
1511  // neurons, counting those who have a bias.
1512  //*
1513 
1514  int ptr = 0;
1515  for (int j = 0; j<i_freep; j++) {
1516  if(neuronbias[j])
1517  ptr++;
1518  }
1519 
1520  debug("Adding bias of output %d (freep[%d]) in w[%d]\n",i,ptr,wPtr);
1521  w[wPtr++] = freep[ptr];
1522  }
1523 
1524 
1525 
1526  for (int b = 0; b<net_nblocks; b++) {
1527 
1528  debug("Accessing trainingHiddenBlock[net_block[%d][3] = %d][%d]\n",b,net_block[b][3],i);
1529  if(!(trainingHiddenBlock[net_block[b][3]][i] && inRange(net_block[b][1], ninputs+nhiddens, noutputs) )) {
1530 // if (net_block[b][0] == 0) {
1531  paramPtr+= net_block[b][2]*net_block[b][4];
1532 // }
1533  debug("\tparamPtr: %d\n", paramPtr);
1534  continue;
1535  }
1536 
1537  //*
1538  // Iterate over hidden neurons in the current block
1539  //*
1540 
1541  for (int j = 0; j<net_block[b][4]; j++) {
1542  debug("Adding connection %d of output %d (freep[%d]) in w[%d]\n",j,i_freep,(i_freep-net_block[b][1])*net_block[b][4] + paramPtr,wPtr);
1543  w[wPtr++] = freep[(i_freep-net_block[b][1])*net_block[b][4] + paramPtr++];
1544  }
1545  }
1546 
1547  }
1548 
1549  for (int i = 0; i<w.size(); i++) {
1550  debug("%f\n",w[i]);
1551  }
1552 
1553 
1554  return wPtr;
1555  }
1556 
1557  int Evonet::importWeightsFromVector(Eigen::VectorXf& w) {
1558 
1559  //*
1560  // Inverse procedure of the extraction of the weights (see extractWeightsFromNet()).
1561  //*
1562 
1563  int wPtr = 0, paramPtr = 0;
1564 
1565  int nbiases = 0;
1566  for (int i = 0; i<nneurons; i++) {
1567  nbiases += (neuronbias[i]==1);
1568  }
1569  paramPtr = nbiases;
1570 
1571  //*
1572  // Search for neurons to train
1573  //*
1574 
1575  for (int b = 0; b<net_nblocks; b++) {
1576 
1577  //*
1578  // Output neurons are treated separately. You still need to go through
1579  // all the blocks, since the output blocks are not guaranteed to all be at the end.
1580  //*
1581 
1582  if (net_block[b][0] != 0)
1583  continue;
1584 
1585  if (net_block[b][5]==1 && !(net_block[b][1]>=ninputs+nhiddens)) {
1586  for (int i = net_block[b][1]; i<net_block[b][1]+net_block[b][2]; i++) {
1587 
1588  if (neuronbias[i]) {
1589 
1590  //*
1591  // If the neuron has a bias, in order to obtain the index of the
1592  // corresponding bias in vector freep, we iterate over all "previous"
1593  // neurons, counting those who have a bias.
1594  //*
1595 
1596  int ptr = 0;
1597  for (int j = 0; j<i; j++) {
1598  if(neuronbias[j])
1599  ptr++;
1600  }
1601 
1602  debug("Adding bias of neuron %d (w[%d]) in freep[%d]\n",i,wPtr,ptr);
1603  freep[ptr] = w[wPtr++];
1604  }
1605 
1606  //*
1607  // Adding weights of the connections of the i-th neurons with neurons
1608  // in the "lower" block
1609  //*
1610 
1611  for (int j = 0; j<net_block[b][4]; j++) {
1612  debug("Adding connection %d of neuron %d (w[%d]) in freep[%d]\n",j,i,wPtr,paramPtr);
1613  freep[paramPtr++] = w[wPtr++];
1614  }
1615  }
1616  }
1617  else
1618  paramPtr+= net_block[b][2]*net_block[b][4];
1619  }
1620 
1621 
1622  //*
1623  // Output neurons are stored sequentially, from the first to the last.
1624  //*
1625 
1626 
1627  for (int i = 0; i<noutputs; i++) {
1628  paramPtr = nbiases;
1629  if (!outputsToTrain[i])
1630  continue;
1631 
1632  int i_freep = i+ninputs+nhiddens;
1633 
1634  if (neuronbias[i_freep]) {
1635 
1636  //*
1637  // If the neuron has a bias, in order to obtain the index of the
1638  // corresponding bias in vector freep, we iterate over all "previous"
1639  // neurons, counting those who have a bias.
1640  //*
1641 
1642  int ptr = 0;
1643  for (int j = 0; j<i_freep; j++) {
1644  if(neuronbias[j])
1645  ptr++;
1646  }
1647  debug("Adding bias of output %d (w[%d]) in freep[%d]\n",i,wPtr,ptr);
1648  freep[ptr] = w[wPtr++];
1649  }
1650 
1651 
1652 
1653  for (int b = 0; b<net_nblocks; b++) {
1654 
1655  if(!(trainingHiddenBlock[net_block[b][3]][i] && inRange(net_block[b][1], ninputs+nhiddens, noutputs) )) {
1656 // if(! trainingHiddenBlock[net_block[b][3]][i]) {
1657  paramPtr+= net_block[b][2]*net_block[b][4];
1658  continue;
1659  }
1660 
1661  //*
1662  // Iterate over hidden neurons in the current block
1663  //*
1664 
1665  for (int j = 0; j<net_block[b][4]; j++) {
1666  debug("Adding connection %d of output %d (w[%d]) in freep[%d]\n",j,i_freep,wPtr,(i_freep-net_block[b][1])*net_block[b][4] + paramPtr);
1667  freep[(i_freep-net_block[b][1])*net_block[b][4] + paramPtr++] = w[wPtr++];
1668  }
1669  }
1670 
1671  }
1672 
1673  return wPtr;
1674  }
1675 
1676  float Evonet::getWeight(int to, int from) {
1677 
1678  debug("Getting w to %d from %d\n", to,from);
1679  int ptr = 0;
1680  for (int i = 0; i<nneurons; i++) {
1681  ptr += neuronbias[i]==1;
1682  }
1683 
1684  for (int b = 0; b<net_nblocks; b++) {
1685  if (inRange(to, net_block[b][1], net_block[b][2]) && inRange(from, net_block[b][3], net_block[b][4])) {
1686  ptr+= (to-net_block[b][1])*net_block[b][4]+(from-net_block[b][3]);
 break; // stop at the matching block, otherwise the following blocks would push ptr past the weight
1687  }
1688  else {
1689  ptr+= net_block[b][2]*net_block[b][4];
1690  }
1691  }
1692 
1693  debug("Returning freep[%d]\n", ptr);
1694  if (ptr >= freeParameters()) {
1695  return 0;
1696  }
1697 
1698  return freep[ptr];
1699  }
1700 
1701  void Evonet::setWeight(int to, int from, float w) {
1702 
1703  int ptr = 0;
1704  for (int i = 0; i<nneurons; i++) {
1705  ptr += neuronbias[i]==1;
1706  }
1707 
1708  for (int b = 0; b<net_nblocks; b++) {
1709  if (inRange(to, net_block[b][1], net_block[b][2]) && inRange(from, net_block[b][3], net_block[b][4])) {
1710  ptr+= (to-net_block[b][1])*net_block[b][4]+(from-net_block[b][3]);
 break; // stop at the matching block, as in getWeight()
1711  }
1712  else {
1713  ptr+= net_block[b][2]*net_block[b][4];
1714  }
1715  }
1716 
1717  freep[ptr] = w;
1718 
1719 
1720  }
1721 
1722  float Evonet::derivative(int n, float x) {
1723 
1724  return x*(1-x);
1725 
1726  }
1727 
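 // Typical training flow (a sketch): call prepareForTraining() once to mark the
 // trainable blocks and allocate the bookkeeping arrays, run one or more calls to
 // trainLevembergMarquardt() on the training data, then call endTraining() to
 // release the arrays allocated here.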
1728  void Evonet::prepareForTraining(QVector<float> &err_w) {
1729 
1730  nconnections = 0;
1731 
1732  //*
1733  // Initializing some variables in order to make the computation easier:
1734  //
1735  // nconnections: number of weights subject to training
1736  // outputsToTrain: outputsToTrain[i]==1 iff the i-th output is subject to training
1737  // n_outputsToTrain: number of output neurons to train
1738  //
1739  //*
1740 
1741 
1742  outputsToTrain = (char*)calloc(noutputs, sizeof(char));
1743  n_outputsToTrain = 0;
1744 
1745  trainingHiddenBlock = (char**)calloc(nneurons, sizeof(char*));
1746  for (int i = 0; i<nneurons; i++) {
1747  trainingHiddenBlock[i] = (char*) calloc(noutputs, sizeof(char));
1748  }
1749 
1750  for (int b=0; b<net_nblocks; b++)
1751 
1752  if (net_block[b][0]==0 && net_block[b][5]==1) {
1753  //*
1754  // If the current block is a connection block and the block is subject to training
1755  // count the connections of the current block
1756  //*
1757 
1758  nconnections += (net_block[b][2]*net_block[b][4]);
1759 
1760  //*
1761  // Sum the bias for the neurons in the current block
1762  //*
1763 
1764  for (int i = net_block[b][1]; i<net_block[b][1]+net_block[b][2]; i++) {
1765  nconnections += (neuronbias[i] == 1);
1766  }
1767 
1768  if (net_block[b][1] >= ninputs+nhiddens) {
1769  memset(outputsToTrain+net_block[b][1]-ninputs-nhiddens, 1, net_block[b][2]*sizeof(char));
1770  n_outputsToTrain += net_block[b][2];
1771 
1772  for(int j=0;j<net_block[b][4];j++)
1773  memset(&trainingHiddenBlock[net_block[b][3]+j][net_block[b][1]-ninputs-nhiddens], 1, net_block[b][2]*sizeof(char));
1774  }
1775 
1776  }
1777 
1778 #ifdef BPDEBUG
1779  printf("n_outputToTrain: %d\n",n_outputsToTrain);
1780  printf("output to train: ");
1781 
1782  for (int i = 0; i<noutputs; i++) {
1783  printf("%d ",outputsToTrain[i]);
1784  }
1785  printf("\n");
1786 
1787  for (int j = 0; j<nneurons; j++) {
1788  for (int i = 0; i<noutputs; i++) {
1789  printf("%d ",trainingHiddenBlock[j][i]);
1790  }
1791  printf("\n");
1792  }
1793 #endif
1794  debug("nconnections: %d\n", nconnections);
1795 
1796  err_weight_sum = 0;
1797  for (int i = 0; i<err_w.size(); i++) {
1798  err_weights.push_back(err_w[i]*err_w[i]);
1799  err_weight_sum+=err_w[i];
1800  }
1801 
1802  printf("err_weight_sum : %f\n",err_weight_sum);
1803 
1804  }
1805 
1806  void Evonet::endTraining() {
1807  free(outputsToTrain);
1808  for(int i=0;i<nneurons;i++)
1809  free(trainingHiddenBlock[i]);
1810  free(trainingHiddenBlock);
1811 
1812  }
1813 
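 // Levenberg-Marquardt training (a sketch of the idea): at every iteration the Jacobian
 // of the weighted output errors with respect to the trainable weights is assembled
 // pattern by pattern, and a weight update is obtained by solving
 // (J^T J + lambda * I) dw = J^T e, with lambda increased when the error grows and
 // decreased when it shrinks.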
1814  float Evonet::trainLevembergMarquardt(QVector<float> trainingSet, QVector<float> desiredOutput, float maxError) {
1815 
1816  training = true;
1817 
1818  int pattern, b;
1819 // int lastBlock;
1820  int cycles,i, j;
1821 
1822  double lambda=0.001;
1823  double currentError = 0, previousError= 0;
1824  double delta;
1825 
1826  if (nconnections == 0) {
1827  training = false;
1828  printf("nconnections: 0\nnothing to train\n");
1829  return 0;
1830  }
1831 
1832  int nbiases = 0;
1833  for (i = 0; i<nneurons; i++) {
1834  nbiases += neuronbias[i]==1;
1835  }
1836 
1837  int size = trainingSet.size() / ninputs;
1838  debug("npatters: %d\n", size);
1839 
1840  Eigen::VectorXf err(size*n_outputsToTrain);
1841  Eigen::MatrixXf jacobian(size*n_outputsToTrain, nconnections );
1842 // Eigen::MatrixXf inv_error_weight(size*n_outputsToTrain,size*n_outputsToTrain);
1843 // Eigen::MatrixXf jt_we(size*n_outputsToTrain, nconnections );
1844  Eigen::MatrixXf jj(nconnections,nconnections);
1845 
1846  Eigen::VectorXf new_weights(nconnections);
1847  Eigen::VectorXf old_weights(nconnections);
1848  Eigen::VectorXf ww_err(nconnections);
1849 
1850 
1851  previousError = computeMeanSquaredError(trainingSet, desiredOutput);
1852  printf("Initial error: %f\n",previousError);
1853 
1854 /* if (previousError < maxError) {
1855  printf("Error already below threshold - Nothing to do.\n");
1856  return previousError;
1857  }
1858  */
1859 
1860 // int end = (maxIterations > 0 ? maxIterations : INFINITY);
1861  int end = (maxIterations > 0 ? maxIterations : std::numeric_limits<int>::max());
1862  for (cycles = 0; cycles<end; cycles++) {
1863 
1864  jacobian.setZero();
1865 
1866  extractWeightsFromNet(old_weights);
1867  debug("weights extracted\n");
1868 
1869  //*
1870  // Iterating over patterns. This can be easily parallelized using OpenMP
1871  //*
1872 
1873  for (pattern=0; pattern<size; pattern++) {
1874 
1875  debug("\n\n------------\n\n");
1876  debug("\tpattern: %d\n", pattern);
1877 
1878  //*
1879  // Forward computation
1880  //
1881  // Beware in case of parallel computation: this is a critical section
1882  //*
1883 
1884  for (i = 0; i<ninputs; i++) {
1885  setInput(i, trainingSet[pattern*ninputs + i]);
1886  }
1887 // debug("Before update (%s):\n",__PRETTY_FUNCTION__);
1888 // printAct();
1889  updateNet();
1890 
1891 // debug("after update:\n");
1892 // printAct();
1893 
1894 
1895  //*
1896  // Backward computation
1897  //*
1898 
1899  for(int m = noutputs-1; m>=0; m--) {
1900  if (!outputsToTrain[m])
1901  continue;
1902 
1903  int m_freep = m+ninputs+nhiddens;
1904 
1905  int col_idx = nconnections - 1;
1906  int row_idx = n_outputsToTrain*pattern-1;
1907 
1908  for (i = 0; i<=m; i++) {
1909  row_idx+= outputsToTrain[i];
1910  }
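			//*
			// row_idx = pattern*n_outputsToTrain + (rank of m among the trained
			// outputs): one jacobian/error row per trained output per pattern.
			//*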
1911 
1912  //*
1913  // Computing error and jacobian relative to the output layer
1914  //*
1915 
1916  err[row_idx] = (desiredOutput[row_idx] - act[m_freep])*err_weights[m];
1917  delta = -derivative(m_freep, act[m_freep])*err_weights[m];
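			//*
			// The residual for this row is the weighted output error; delta is its
			// derivative with respect to the net input of output m, so each weight
			// w(j->m) contributes delta*act[j] to this jacobian row and the bias of
			// m contributes delta.
			//*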
1918 
1919  //*
1920  // Iterating over output neurons
1921  //*
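			//*
			// All trained outputs are visited so that col_idx stays aligned with the
			// global ordering of the weight vector: columns belonging to weights of
			// outputs other than m are written as explicit zeros.
			//*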
1922 
1923  for(i = noutputs-1; i>=0; i--) {
1924 
1925  if (!outputsToTrain[i])
1926  continue;
1927  //*
1928  // Iterating over net blocks in order to get the neurons connected with i-th output neuron
1929  //*
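				//*
				// Reminder on the net_block fields as used here: [0] block type (0 =
				// connection block), [1] first receiving neuron, [2] number of
				// receiving neurons, [3] first sending neuron, [4] number of sending
				// neurons, [5] set to 1 for blocks subject to backpropagation training.
				//*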
1930 
1931  for (b=net_nblocks-1; b>=0; b--) {
1932 
1933  //*
1934  // Check if current block is connected to i-th output
1935  //*
1936 // if (net_block[b][5]!=1 && inRange(i+ninputs+nhiddens,net_block[b][3],net_block[b][4]) ) {
1937  if (trainingHiddenBlock[net_block[b][3]][m] && net_block[b][5]==1) {
1938 
1939  for (j=net_block[b][3]+net_block[b][4] -1; j>=net_block[b][3]; j--) {
1940  if (i==m) {
1941  jacobian(row_idx, col_idx--) = delta *act[j];
1942  debug("\t\tcol_idx: %d\n", col_idx+1);
1943  debug("\t\tjacobian(%d,%d) = %f * %f = %f\n", row_idx,col_idx+1,delta,act[j],delta*act[j]);
1944  }
1945  else {
1946  jacobian(row_idx, col_idx--) = 0;
1947  debug("\t\tcol_idx: %d\n", col_idx+1);
1948  debug("\t\tjacobian(%d,%d) = 0\n", row_idx,col_idx+1);
1949  }
1950 
1951 
1952  } //end loop over j
1953 
1954  } // end if
1955 
1956  } //end loop over b
1957 
1958  //*
1959  // Consider the derivative of the error with respect to the bias of the output neuron
1960  //*
1961  if (neuronbias[i+ninputs+nhiddens]) {
1962 				debug("\t\tjacobian(%d,%d) = %f\n", row_idx,col_idx,(i==m ? delta : 0));
1963  jacobian(row_idx, col_idx--) = (i==m ? delta : 0);
1964  }
1965 
1966  } // end loop over i
1967 
1968  //*
1969  // Backpropagating the error over hidden neurons.
1970  // This follows the order of the blocks, from the last to the first.
1971  //
1972  // NOTE: this code assumes the net is a 3 layer net (input-hidden-output).
1973  // It will not work in case of nets with more than 1 hidden layer.
1974  //*
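			//*
			// For a hidden neuron j the chain rule gives
			//   delta_h = delta * w(j->m) * f'(act[j])
			// so the column of a weight w(k->j) gets delta_h*act[k] and the bias of
			// j gets delta_h (see the loops below).
			//*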
1975  debug("\nBackward computation: hidden layer\n");
1976 
1977 
1978  for (b=net_nblocks-1; b>=0; b--) {
1979 
1980  //*
1981  // If it's not a hidden block subject to training connected to the current (m-th) output
1982  //*
1983 
1984 // if ( !( net_block[b][0]==0 && net_block[b][5]==1 && isHidden(net_block[b][1]) && inRange(m_freep,net_block[b][3],net_block[b][4]) ) )
1985  debug("\ttrainingHiddenBlock[%d][%d]: %d\n", net_block[b][1],m,trainingHiddenBlock[net_block[b][1]][m]);
1986  if (net_block[b][0]!=0 || net_block[b][5] !=1 || ! trainingHiddenBlock[net_block[b][1]][m] )
1987  continue;
1988 
1989  //*
1990  // Iterate over hidden neurons in the current block. The computation is clear knowing the algorithm.
1991  //*
1992 
1993  for(j = net_block[b][1]+net_block[b][2]-1; j>= net_block[b][1]; j--) {
1994 
1995  double delta_h = delta* getWeight(m_freep, j) * derivative(j, act[j]);
1996 
1997  for (int k = net_block[b][3] + net_block[b][4]-1; k>= net_block[b][3]; k--) {
1998  jacobian(row_idx, col_idx--) = delta_h * act[k];
1999 					debug("\t\tjacobian(%d,%d) = %f * %f = %f\n", row_idx,col_idx+1,delta_h,act[k],delta_h*act[k]);
2000  }
2001 
2002  if (neuronbias[j]) {
2003  debug("\t\tjacobian(%d,%d) = %f\n", row_idx,col_idx,delta_h);
2004  jacobian(row_idx, col_idx--) = delta_h;
2005  }
2006 
2007 
2008  }
2009 
2010 
2011 
2012  } //end loop over b
2013 
2014  } // end loop over m (output neurons)
2015 
2016  } //end pattern
2017 
2018 
2019 
2020  debug("\tAll rows analyzed\n");
2021 
2022 
2023 #ifdef BPDEBUG
2024 // std::cout<<"jacobian:\n"<<jacobian;
2025 // std::cout<<"\n\ndet(j^Tj) :\n"<<(jacobian.transpose()*jacobian + lambda*Eigen::MatrixXf::Identity(nconnections,nconnections) ).determinant() << "\n";
2026 
2027 // std::cout<<"\n\nerror: " << err.transpose() << "\n";
2028 #endif
2029 
2030  //*
2031  // new_weights = old_weights - (J^T J + lambda I)^-1 J^T e ;
2032  //*
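		//*
		// The update below solves the damped normal equations
		//   (J^T J + lambda*I) * step = J^T e
		// with an LDL^T factorization instead of forming the inverse explicitly;
		// the matrix is symmetric and, for lambda > 0, positive definite.
		//*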
2033 
2034  if (lambda > 100000000 || lambda < 0.000001) {
2035  lambda = 1;
2036  }
2037 
2038 
2039 // jt_we = jacobian.transpose();//*inv_error_weight;
2040  ww_err = jacobian.transpose()*err;
2041  jj = jacobian.transpose()*jacobian;
2042 // printf("det: %lf\n",jj.determinant());
2043 
2044 /* if (std::isnan((jacobian.transpose()*jacobian).determinant()) ) {
2045 
2046  printf("nan determinant : %f\n", (jacobian.transpose()*jacobian + lambda*Eigen::MatrixXf::Identity(nconnections,nconnections) ).determinant());
2047 
2048 
2049 
2050  FILE *fp = fopen("/Users/Manlio/Desktop/matrix.txt", "w");
2051 
2052  Eigen::MatrixXf pj(nconnections,nconnections);
2053 
2054  pj = jacobian.transpose()*jacobian;
2055 
2056 
2057  for(int i = 0;i<nconnections; i++)
2058  for(int j=0;j<nconnections; j++)
2059  if (!(std::isnormal(pj(i,j)) || pj(i,j)==0 ) ) {
2060  printf("(%d,%d) : %f\n",i,j,pj(i,j));
2061  }
2062 
2063 
2064 
2065  for (int a =0; a<nconnections; a++) {
2066  for (int b = 0; b<nconnections; b++) {
2067  fprintf(fp, "%f ", pj(a,b));
2068  }
2069  fprintf(fp, "\n");
2070  }
2071 
2072  fclose(fp);
2073  exit(0);
2074 
2075  }
2076 */
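		//*
		// Damping schedule: each retry multiplies lambda by 10 (more like gradient
		// descent); if the new weights reduce the error, lambda is divided by 10
		// (more like Gauss-Newton) and the outer iteration continues, otherwise the
		// old weights are restored and a larger lambda is tried (up to 6 times).
		//*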
2077  for (int retry = 0; retry<6; retry++, lambda*=10) {
2078 
2079  debug("\tlambda: %f\n", lambda);
2080 
2081 // new_weights = old_weights - (jacobian.transpose()*jacobian + lambda*Eigen::MatrixXf::Identity(nconnections,nconnections) ).inverse()*jacobian.transpose()*err;
2082  new_weights = old_weights - (jj + lambda*Eigen::MatrixXf::Identity(nconnections,nconnections) ).ldlt().solve(ww_err);
2083 
2084  // printf("\tdet(j^Tj) : %f -- norm(j^T e) : %f\n",(jacobian.transpose()*jacobian).determinant(), (jacobian.transpose()*err).norm() );
2085 // printf("norm wb2: %f\n",new_weights.norm());
2086 // exit(0);
2087 
2088 // std::cout<<"\n\nnew_weights: " << new_weights.transpose() << "\n";
2089 
2090 
2091  importWeightsFromVector(new_weights);
2092 
2093  currentError = computeMeanSquaredError(trainingSet, desiredOutput);
2094 
2095  printf("iteration: %d err: %f lambda: %f\n",cycles,currentError,lambda);
2096 
2097  debug("currentError: %f\n",currentError);
2098 
2099 			if (currentError <= maxError) {
2100 				training = false; return currentError; }
2101 			if ((new_weights-old_weights).norm() < 0.0001) {
2102 				printf("Minimum gradient reached\n");
2103 				training = false; return currentError;
2104 			}
2105 
2106  if (currentError > previousError) {
2107  importWeightsFromVector(old_weights);
2108  }
2109  else {
2110  previousError = currentError;
2111  lambda/=10;
2112  break;
2113  }
2114 
2115  }
2116 // exit(1);
2117  }
2118 
2119  training = false;
2120  return currentError;
2121 
2122  }
2123 
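 //*
 // Variant of trainLevembergMarquardt for recurrent hidden layers: when the
 // presynaptic neuron of a connection into a hidden unit is itself a hidden
 // unit, the jacobian uses the activation it had on the previous pattern (see
 // oldActivations below). trainingSet, desiredOutput and maxError follow the
 // same conventions as in trainLevembergMarquardt.
 //*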
2124  float Evonet::trainLevembergMarquardtThroughTime(QVector<float> trainingSet, QVector<float> desiredOutput, int time, float maxError) {
2125 
2126  training = true;
2127 
2128  int pattern, b;
2129  // int lastBlock;
2130  int cycles,i, j;
2131 
2132  double lambda=0.001;
2133  double currentError = 0, previousError= 0;
2134  double delta;
2135 
2136  if (nconnections == 0) {
2137  training = false;
2138  printf("nconnections: 0\nnothing to train\n");
2139  return 0;
2140  }
2141 
2142  int nbiases = 0;
2143  for (i = 0; i<nneurons; i++) {
2144  nbiases += neuronbias[i]==1;
2145  }
2146 
2147  int size = trainingSet.size() / ninputs;
2148 	debug("npatterns: %d\n", size);
2149 
2150  Eigen::VectorXf oldActivations(time*nhiddens);
2151  oldActivations.setZero();
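	//*
	// oldActivations keeps the hidden activations of the previous pattern, used
	// as the presynaptic values of recurrent connections in the jacobian.
	// Although it is sized time*nhiddens, only the first nhiddens entries are
	// read and written below (a single step of history).
	//*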
2152 
2153  Eigen::VectorXf err(size*n_outputsToTrain);
2154  Eigen::MatrixXf jacobian(size*n_outputsToTrain, nconnections );
2155 // Eigen::MatrixXf inv_error_weight(size*n_outputsToTrain,size*n_outputsToTrain);
2156 // Eigen::MatrixXf jt_we(size*n_outputsToTrain, nconnections );
2157  Eigen::MatrixXf jj(nconnections,nconnections);
2158 
2159  Eigen::VectorXf new_weights(nconnections);
2160  Eigen::VectorXf old_weights(nconnections);
2161  Eigen::VectorXf ww_err(nconnections);
2162 
2163 
2164 
2165  previousError = computeMeanSquaredError(trainingSet, desiredOutput);
2166  printf("Initial error: %f\n",previousError);
2167 
2168  /* if (previousError < maxError) {
2169  printf("Error already below threshold - Nothing to do.\n");
2170  return previousError;
2171  }
2172  */
2173 
2174 // int end = (maxIterations > 0 ? maxIterations : INFINITY);
2175  int end = (maxIterations > 0 ? maxIterations : std::numeric_limits<int>::max());
2176  for (cycles = 0; cycles<end; cycles++) {
2177 
2178  jacobian.setZero();
2179 
2180  extractWeightsFromNet(old_weights);
2181  debug("weights extracted\n");
2182 
2183  //*
2184  // Iterating over patterns. This can be easily parallelized using OpenMP
2185  //*
2186 
2187  for (pattern=0; pattern<size; pattern++) {
2188 
2189  debug("\n\n------------\n\n");
2190  debug("\tpattern: %d\n", pattern);
2191 
2192  //*
2193  // Forward computation
2194  //
2195  // Beware in case of parallel computation: this is a critical section
2196  //*
2197 
2198  for (i = 0; i<ninputs; i++) {
2199  setInput(i, trainingSet[pattern*ninputs + i]);
2200  }
2201  // debug("Before update (%s):\n",__PRETTY_FUNCTION__);
2202  // printAct();
2203  updateNet();
2204 
2205  // debug("after update:\n");
2206  // printAct();
2207 
2208 
2209  //*
2210  // Backward computation
2211  //*
2212 
2213  for(int m = noutputs-1; m>=0; m--) {
2214 
2215  debug("m: %d\n", m);
2216  if (!outputsToTrain[m])
2217  continue;
2218 
2219  int m_freep = m+ninputs+nhiddens;
2220 
2221  int col_idx = nconnections - 1;
2222  int row_idx = n_outputsToTrain*pattern-1;
2223 
2224  debug("row_idx: %d\n", row_idx);
2225  for (i = 0; i<=m; i++) {
2226  row_idx+= outputsToTrain[i];
2227  }
2228  debug("row_idx: %d\n", row_idx);
2229  //*
2230  // Computing error and jacobian relative to the output layer
2231  //*
2232 
2233  err[row_idx] = (desiredOutput[row_idx] - act[m_freep])*err_weights[m];
2234  delta = -derivative(m_freep, act[m_freep])*err_weights[m];
2235 
2236  //*
2237  // Iterating over output neurons
2238  //*
2239 
2240  for(i = noutputs-1; i>=0; i--) {
2241 
2242  debug("\toutput: %d\n", i);
2243  if (!outputsToTrain[i])
2244  continue;
2245  //*
2246  // Iterating over net blocks in order to get the neurons connected with i-th output neuron
2247  //*
2248 
2249  for (b=net_nblocks-1; b>=0; b--) {
2250 
2251  //*
2252  // Check if current block is connected to i-th output
2253  //*
2254  // if (net_block[b][5]!=1 && inRange(i+ninputs+nhiddens,net_block[b][3],net_block[b][4]) ) {
2255  if (trainingHiddenBlock[net_block[b][3]][m]) {
2256 
2257  for (j=net_block[b][3]+net_block[b][4] -1; j>=net_block[b][3]; j--) {
2258  if (i==m) {
2259  jacobian(row_idx, col_idx--) = delta *act[j];
2260  debug("\t\tcol_idx: %d\n", col_idx+1);
2261  debug("\t\tjacobian(%d,%d) = %f * %f = %f\n", row_idx,col_idx+1,delta,act[j],delta*act[j]);
2262  }
2263  else {
2264  jacobian(row_idx, col_idx--) = 0;
2265  debug("\t\tcol_idx: %d\n", col_idx+1);
2266  debug("\t\tjacobian(%d,%d) = 0\n", row_idx,col_idx+1);
2267  }
2268 
2269 
2270  } //end loop over j
2271 
2272  } // end if
2273 
2274  } //end loop over b
2275 
2276  //*
2277  // Consider the derivative of the error with respect to the bias of the output neuron
2278  //*
2279  if (neuronbias[i+ninputs+nhiddens]) {
2280  debug("\t\tjacobian(%d,%d) = %f\n", row_idx,col_idx,(i==m ? delta : 0));
2281  jacobian(row_idx, col_idx--) = (i==m ? delta : 0);
2282  }
2283 
2284  } // end loop over i
2285 
2286  //*
2287  // Backpropagating the error over hidden neurons.
2288  // This follows the order of the blocks, from the last to the first.
2289  //
2290  // NOTE: this code assumes the net is a 3 layer net (input-hidden-output).
2291  // It will not work in case of nets with more than 1 hidden layer.
2292  //*
2293  debug("\nBackward computation: hidden layer\n");
2294 
2295 
2296  for (b=net_nblocks-1; b>=0; b--) {
2297 
2298  //*
2299  // If it's not a hidden block subject to training connected to the current (m-th) output
2300  //*
2301 
2302  // if ( !( net_block[b][0]==0 && net_block[b][5]==1 && isHidden(net_block[b][1]) && inRange(m_freep,net_block[b][3],net_block[b][4]) ) )
2303  debug("\ttrainingHiddenBlock[%d][%d]: %d\n", net_block[b][1],m,trainingHiddenBlock[net_block[b][1]][m]);
2304  if (net_block[b][0]!=0 || ! trainingHiddenBlock[net_block[b][1]][m] )
2305  continue;
2306 
2307  //*
2308  // Iterate over hidden neurons in the current block. The computation is clear knowing the algorithm.
2309  //*
2310 #ifdef __GNUC__
2311  #warning The trainLevembergMarquardtThroughTime method requires that all the connections to a particular hidden block are in the same net_block.
2312 #endif
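				//*
				// Through-time part: in the loop below, if the presynaptic neuron k is
				// a hidden unit its contribution uses oldActivations (its activation on
				// the previous pattern) rather than the current act[k].
				//*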
2313 
2314  for(j = net_block[b][1]+net_block[b][2]-1; j>= net_block[b][1]; j--) {
2315 
2316  double delta_h = delta* getWeight(m_freep, j) * derivative(j, act[j]);
2317 
2318  for (int k = net_block[b][3] + net_block[b][4]-1; k>= net_block[b][3]; k--) {
2319 
2320  jacobian(row_idx, col_idx--) = delta_h * (isHidden(k) ? oldActivations[k-ninputs] : act[k]);
2321 					debug("\t\tjacobian(%d,%d) = %f * %f = %f\n", row_idx,col_idx+1,delta_h,(isHidden(k) ? oldActivations[k-ninputs] : act[k]),delta_h*(isHidden(k) ? oldActivations[k-ninputs] : act[k]));
2322  }
2323 
2324  if (neuronbias[j]) {
2325  debug("\t\tjacobian(%d,%d) = %f\n", row_idx,col_idx,delta_h);
2326  jacobian(row_idx, col_idx--) = delta_h;
2327  }
2328 
2329 
2330  }
2331 
2332 
2333 
2334  } //end loop over b
2335 
2336  } // end loop over m (output neurons)
2337 
2338 
2339  //*
2340  // Updating the old activations
2341  //*
2342 
2343  for (int i = 0; i<nhiddens; i++) {
2344  oldActivations[i] = act[i+ninputs];
2345  }
2346 
2347 
2348 
2349 
2350  } //end pattern
2351 
2352 
2353 
2354  debug("\tAll rows analyzed\n");
2355 
2356 
2357 #ifdef BPDEBUG
2358  // std::cout<<"jacobian:\n"<<jacobian;
2359  // std::cout<<"\n\ndet(j^Tj) :\n"<<(jacobian.transpose()*jacobian + lambda*Eigen::MatrixXf::Identity(nconnections,nconnections) ).determinant() << "\n";
2360 
2361  // std::cout<<"\n\nerror: " << err.transpose() << "\n";
2362 #endif
2363 
2364  //*
2365  // new_weights = old_weights - (J^T J + lambda I)^-1 J^T e ;
2366  //*
2367 
2368  if (lambda > 100000000 || lambda < 0.000001) {
2369  lambda = 1;
2370  }
2371 
2372 
2373  // jt_we = jacobian.transpose();//*inv_error_weight;
2374  ww_err = jacobian.transpose()*err;
2375  jj = jacobian.transpose()*jacobian;
2376  // printf("det: %lf\n",jj.determinant());
2377 
2378  /* if (std::isnan((jacobian.transpose()*jacobian).determinant()) ) {
2379 
2380  printf("nan determinant : %f\n", (jacobian.transpose()*jacobian + lambda*Eigen::MatrixXf::Identity(nconnections,nconnections) ).determinant());
2381 
2382 
2383 
2384  FILE *fp = fopen("/Users/Manlio/Desktop/matrix.txt", "w");
2385 
2386  Eigen::MatrixXf pj(nconnections,nconnections);
2387 
2388  pj = jacobian.transpose()*jacobian;
2389 
2390 
2391  for(int i = 0;i<nconnections; i++)
2392  for(int j=0;j<nconnections; j++)
2393  if (!(std::isnormal(pj(i,j)) || pj(i,j)==0 ) ) {
2394  printf("(%d,%d) : %f\n",i,j,pj(i,j));
2395  }
2396 
2397 
2398 
2399  for (int a =0; a<nconnections; a++) {
2400  for (int b = 0; b<nconnections; b++) {
2401  fprintf(fp, "%f ", pj(a,b));
2402  }
2403  fprintf(fp, "\n");
2404  }
2405 
2406  fclose(fp);
2407  exit(0);
2408 
2409  }
2410  */
2411  for (int retry = 0; retry<6; retry++, lambda*=10) {
2412 
2413  debug("\tlambda: %f\n", lambda);
2414 
2415  // new_weights = old_weights - (jacobian.transpose()*jacobian + lambda*Eigen::MatrixXf::Identity(nconnections,nconnections) ).inverse()*jacobian.transpose()*err;
2416  new_weights = old_weights - (jj + lambda*Eigen::MatrixXf::Identity(nconnections,nconnections) ).ldlt().solve(ww_err);
2417 
2418  // printf("\tdet(j^Tj) : %f -- norm(j^T e) : %f\n",(jacobian.transpose()*jacobian).determinant(), (jacobian.transpose()*err).norm() );
2419  // printf("norm wb2: %f\n",new_weights.norm());
2420  // exit(0);
2421 
2422  // std::cout<<"\n\nnew_weights: " << new_weights.transpose() << "\n";
2423 
2424 
2425  importWeightsFromVector(new_weights);
2426 
2427  currentError = computeMeanSquaredError(trainingSet, desiredOutput);
2428 
2429  printf("iteration: %d err: %f lambda: %f\n",cycles,currentError,lambda);
2430 
2431  debug("currentError: %f\n",currentError);
2432 
2433 			if (currentError <= maxError) {
2434 				training = false; return currentError; }
2435 			if ((new_weights-old_weights).norm() < 0.0001) {
2436 				printf("Minimum gradient reached\n");
2437 				training = false; return currentError;
2438 			}
2439 
2440  if (currentError > previousError) {
2441  importWeightsFromVector(old_weights);
2442  }
2443  else {
2444  previousError = currentError;
2445  lambda/=10;
2446  break;
2447  }
2448 
2449  }
2450  // exit(1);
2451  }
2452 
2453  training = false;
2454  return currentError;
2455 
2456  }
2457 
2458  int Evonet::importWeightsFromMATLABFile(char *path) {
2459 
2460  //*
2461  // NOTE: This code has been written just to work. It assumes the net has
2462  // only two blocks to train: one connecting inputs with hidden neurons
2463  // and the other one connecting hidden with outputs.
2464  //*
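	//*
	// Layout implied by the loops below: for every block with the training flag
	// set, the file contains net_block[b][4]*net_block[b][2] weights (for each
	// presynaptic neuron, one value per postsynaptic neuron), followed by one
	// bias per postsynaptic neuron; all values are plain whitespace-separated
	// floats.
	//*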
2465 
2466  FILE *fp = fopen(path, "r");
2467 
2468  if (!fp)
2469  return -1 ;
2470 
2471  int biasptr = 0;
2472  int wptr = 0;
2473 
2474  for (int i = 0; i<nneurons; i++) {
2475  wptr += (neuronbias[i]==1);
2476  }
2477 
2478 
2479  int b;
2480 
2481  for (b = 0; b<net_nblocks; b++) {
2482  if (net_block[b][5]==1) {
2483  for (int i = 0; i<net_block[b][4]; i++) {
2484  for (int j = 0; j<net_block[b][2]; j++) {
2485  fscanf(fp, "%f", &freep[ wptr+i+j*net_block[b][4] ]);
2486 // printf("setting weight to %d from %d in freep[%d] (%f)\n",j+net_block[b][1],i+net_block[b][3],wptr+i+j*net_block[b][4],freep[wptr+i+j*net_block[b][4]]);
2487  }
2488  }
2489 
2490  wptr+=net_block[b][2]*net_block[b][4];
2491 
2492  biasptr=0;
2493 
2494  for (int j=0; j<net_block[b][1]; j++) {
2495  if (neuronbias[j]) {
2496  biasptr++;
2497  }
2498  }
2499 
2500  for (int i =0; i<net_block[b][2]; i++) {
2501  fscanf(fp, "%f", &freep[biasptr++]);
2502 // printf("setting bias of %d in freep[%d] (%f)\n",i+net_block[b][1],biasptr-1,freep[biasptr-1]);
2503  }
2504  }
2505  else if (net_block[b][0]==0) {
2506  wptr+= net_block[b][2]*net_block[b][4];
2507 /* for (int i = net_block[b][3]; i<net_block[b][3]+ net_block[b][4]; i++) {
2508  if (neuronbias[i]) {
2509  biasptr++;
2510  }
2511  }
2512 */
2513  }
2514  }
2515 
2516  fclose(fp);
2517 
2518  return 0;
2519  }
2520 
2521  int Evonet::exportWeightsToMATLABFile(char *path) {
2522 
2523  //*
2524  // NOTE: This code has been written just to work. It assumes the net has
2525  // only two blocks to train: one connecting inputs with hidden neurons
2526  // and the other one connecting hidden with outputs.
2527  //*
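	//*
	// Writes one value per line, in the same order importWeightsFromMATLABFile
	// reads them, so the two functions can be used as inverses of each other.
	//*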
2528 
2529  FILE *fp = fopen(path, "w");
2530 
2531  if (!fp)
2532  return -1 ;
2533 
2534  int biasptr = 0;
2535  int wptr = 0;
2536 
2537  for (int i = 0; i<nneurons; i++){
2538  wptr += (neuronbias[i]==1);
2539  }
2540 
2541 
2542  int b;
2543 
2544  for (b = 0; b<net_nblocks; b++) {
2545  if (net_block[b][5]==1) {
2546  for (int i = 0; i<net_block[b][4]; i++) {
2547  for (int j = 0; j<net_block[b][2]; j++) {
2548  fprintf(fp, "%f\n", freep[ wptr+i+j*net_block[b][4] ]);
2549 // printf("setting weight to %d from %d in freep[%d] (%f)\n",j+net_block[b][1],i+net_block[b][3],wptr+i+j*net_block[b][4],freep[wptr+i+j*net_block[b][4]]);
2550  }
2551  }
2552 
2553  wptr+=net_block[b][2]*net_block[b][4];
2554 
2555  biasptr=0;
2556 
2557  for (int j=0; j<net_block[b][1]; j++) {
2558  if (neuronbias[j]) {
2559  biasptr++;
2560  }
2561  }
2562 
2563  for (int i =0; i<net_block[b][2]; i++) {
2564  fprintf(fp, "%f\n", freep[biasptr++]);
2565 // printf("setting bias of %d in freep[%d] (%f)\n",i+net_block[b][1],biasptr-1,freep[biasptr-1]);
2566  }
2567  }
2568  else if (net_block[b][0]==0) {
2569  wptr+= net_block[b][2]*net_block[b][4];
2570  /* for (int i = net_block[b][3]; i<net_block[b][3]+ net_block[b][4]; i++) {
2571  if (neuronbias[i]) {
2572  biasptr++;
2573  }
2574  }
2575  */
2576  }
2577  }
2578 
2579  fclose(fp);
2580 
2581  return 0;
2582  }
2583 
2584  void Evonet::setNeckReflex() {
2585 	//Sets the weights of the neck reflex.
2586 	//
2587 	//This relies on specific assumptions about the network structure. Not general!
2588  //
2589 
2590  int p = 0;
2591  for (int i = 0; i<nneurons; i++) {
2592  p += neuronbias[i]==1;
2593  }
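	// p now indexes the first connection weight in freep (biases are stored
	// before the weights, following the same layout used by
	// importWeightsFromMATLABFile), so the assignments below overwrite the
	// first four weights of the reflex circuit.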
2594 
2595  freep[p++] = 0;
2596  freep[p++] = 5;
2597  freep[p++] = -5;
2598  freep[p] = 0;
2599 
2600 // printf("%s: p: %d\n",__PRETTY_FUNCTION__,p);
2601 // freep[14] = 0;
2602 // freep[15] = 0;
2603 
2604  }
2605 
2606 } // end namespace farsa
2607 
2608 
2609 // All the stuff below is to restore the warning state on Windows
2610 #if defined(_MSC_VER)
2611  #pragma warning(pop)
2612 #endif
2613 
2614 
2615 