evonet.cpp
1 /********************************************************************************
2  * FARSA Experiments Library *
3  * Copyright (C) 2007-2012 *
4  * Stefano Nolfi <stefano.nolfi@istc.cnr.it> *
5  * Onofrio Gigliotta <onofrio.gigliotta@istc.cnr.it> *
6  * Gianluca Massera <emmegian@yahoo.it> *
7  * Tomassino Ferrauto <tomassino.ferrauto@istc.cnr.it> *
8  * *
9  * This program is free software; you can redistribute it and/or modify *
10  * it under the terms of the GNU General Public License as published by *
11  * the Free Software Foundation; either version 2 of the License, or *
12  * (at your option) any later version. *
13  * *
14  * This program is distributed in the hope that it will be useful, *
15  * but WITHOUT ANY WARRANTY; without even the implied warranty of *
16  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
17  * GNU General Public License for more details. *
18  * *
19  * You should have received a copy of the GNU General Public License *
20  * along with this program; if not, write to the Free Software *
21  * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA *
22  ********************************************************************************/
23 
24 #include "evonet.h"
25 #include "logger.h"
26 #include "configurationhelper.h"
27 #include "evonetui.h"
28 #include "mathutils.h"
29 #include <QFileInfo>
30 #include <cstring>
31 #include <limits>
32 #include <algorithm>
33 
34 #include <Eigen/Dense>
35 
36 #ifndef FARSA_MAC
37  #include <malloc.h> // DEBUG: to be substituted with new
38 #endif
39 
40 // All the stuff below is to avoid warnings on Windows about the use of unsafe
41 // functions. This should only be a temporary workaround; the real solution is to stop
42 // using C string and file functions...
43 #if defined(_MSC_VER)
44  #pragma warning(push)
45  #pragma warning(disable:4996)
46 #endif
47 
48 namespace farsa {
49 
50 // MAXN and MAXSTOREDACTIVATIONS are declared in .h and their value is set there, but that is not a
51 // definition. This means that if you try to get the address of MAXN or MAXSTOREDACTIVATIONS you get
52 // a linker error (this also happens if you pass one of them to a function that takes a reference
53 // or const reference). However, we cannot move their initialization here because they are used to define
54 // arrays, so their value must be present in the .h
55 // We must however not define them on Visual Studio 2008...
56 #if !defined(_MSC_VER) || _MSC_VER > 1600
57 const int Evonet::MAXSTOREDACTIVATIONS;
58 const int Evonet::MAXN;
59 #endif
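// For instance (illustrative example, not code from this file): binding MAXN to a
// const int& parameter, e.g. std::min<int>(Evonet::MAXN, n), needs the out-of-class
// definitions above, otherwise the linker reports an undefined reference.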
60 const float Evonet::DEFAULT_VALUE = -99.0f;
61 
62 Evonet::Evonet()
63  : neuronsMonitorUploader(20, DataUploader<ActivationsToGui>::IncreaseQueueSize /*BlockUploader*/, NULL) // we can be ahead of GUI by at most 20 steps, then we are blocked
64 {
65  wrange = 5.0; // weight range
66  grange = 5.0; // gain range
67  brange = 5.0; // bias range
68  neuronlesions = 0;
69  freep = new float[1000];
70  backpropfreep = new float[1000];
71  teachingInput.fill(0.0, MAXN);
72  phep = NULL;
73  muts = NULL;
74  geneMaxValue = 255;
75  pheloaded = false;
76  selectedp = (float **) malloc(100 * sizeof(float *));
77  for (int i = 0; i < MAXN; i++) {
78  neuronl[i][0] = '\0';
79  neurondisplay[i] = 1;
80  neuronrange[i][0] = 0.0;
81  neuronrange[i][1] = 1.0;
82  neurondcolor[i] = Qt::black;
83  neuronlesion[i] = false;
84  neuronlesionVal[i] = 0.0;
85  }
86  net_nblocks = 0;
87 
88  nextStoredActivation = 0;
89  firstStoredActivation = 0;
90  updatescounter = 0;
91 
92  training = false;
93 }
94 
95 void Evonet::setNetworkName(const QString& name)
96 {
97  networkName = name;
98 }
99 
100 const QString& Evonet::getNetworkName() const
101 {
102  return networkName;
103 }
104 
105 void Evonet::configure(ConfigurationParameters& params, QString prefix) {
106  int nSensors = ConfigurationHelper::getInt( params, prefix+"nSensors", 0 );
107  int nHiddens = ConfigurationHelper::getInt( params, prefix+"nHiddens", 0 );
108  int nMotors = ConfigurationHelper::getInt( params, prefix+"nMotors", 0 );
109  QString netFile = ConfigurationHelper::getString( params, prefix+"netFile", "" );
110  // --- some parameters are in conflict
111  if ( netFile != "" && (nSensors+nHiddens+nMotors)>0 ) {
112  Logger::error( "Evonet - The information inside netFile will override the values of all other Evonet parameters" );
113  }
114  wrange = ConfigurationHelper::getDouble(params, prefix + "weightRange", 5.0); // the range of synaptic weights
115  grange = ConfigurationHelper::getDouble(params, prefix + "gainRange", 5.0); // the range of gains
116  brange = ConfigurationHelper::getDouble(params, prefix + "biasRange", 5.0); // the range of biases
117  showTInput = ConfigurationHelper::getBool(params, prefix + "showTeachingInput", false); // flag for showing teaching input
118 
119  if ( netFile.isEmpty() ) {
120  // generate a neural network from parameters
121  ninputs = nSensors;
122  nhiddens = nHiddens;
123  noutputs = nMotors;
124  nneurons = ninputs + nhiddens + noutputs;
125  if (this->nneurons > MAXN) {
126  ConfigurationHelper::throwUserConfigError(prefix + "(nSensors + nHiddens + nMotors)", QString::number(nneurons), "Too many neurons: increase MAXN to support more than " + QString::number(MAXN) + " neurons");
127  }
128  int inputNeuronType = 0;
129  QString str = ConfigurationHelper::getString( params, prefix + "inputNeuronType", "no_delta" );
130  if ( str == QString("no_delta") ) {
131  inputNeuronType = 0;
132  } else if ( str == QString("with_delta") ) {
133  inputNeuronType = 1;
134  } else {
135  ConfigurationHelper::throwUserConfigError(prefix + "inputNeuronType", str, "Wrong value (use \"no_delta\" or \"with_delta\")");
136  }
137  int hiddenNeuronType = 0;
138  str = ConfigurationHelper::getString( params, prefix + "hiddenNeuronType", "logistic" );
139  if ( str == QString("logistic") ) {
140  hiddenNeuronType = 0;
141  } else if ( str == QString("logistic+delta") ) {
142  hiddenNeuronType = 1;
143  } else if ( str == QString("binary") ) {
144  hiddenNeuronType = 2;
145  } else if ( str == QString("logistic_0.2") ) {
146  hiddenNeuronType = 3;
147  } else {
148  ConfigurationHelper::throwUserConfigError(prefix + "hiddenNeuronType", str, "Wrong value (use \"logistic\", \"logistic+delta\", \"binary\" or \"logistic_0.2\")");
149  }
150  int outputNeuronType = 0;
151  str = ConfigurationHelper::getString( params, prefix + "outputNeuronType", "no_delta" );
152  if ( str == QString("no_delta") ) {
153  outputNeuronType = 0;
154  } else if ( str == QString("with_delta") ) {
155  outputNeuronType = 1;
156  } else {
157  ConfigurationHelper::throwUserConfigError(prefix + "outputNeuronType", str, "Wrong value (use \"no_delta\" or \"with_delta\")");
158  }
159  bool recurrentHiddens = ConfigurationHelper::getBool( params, prefix + "recurrentHiddens", false );
160  bool inputOutputConnections = ConfigurationHelper::getBool( params, prefix + "inputOutputConnections", false );
161  bool recurrentOutputs = ConfigurationHelper::getBool( params, prefix + "recurrentOutputs", false );
162  bool biasOnHidden = ConfigurationHelper::getBool( params, prefix + "biasOnHiddenNeurons", false );
163  bool biasOnOutput = ConfigurationHelper::getBool( params, prefix + "biasOnOutputNeurons", false );
164  create_net_block( inputNeuronType, hiddenNeuronType, outputNeuronType, recurrentHiddens, inputOutputConnections, recurrentOutputs, biasOnHidden, biasOnOutput );
165  } else {
166  // load the neural network from file; if the file doesn't exist, throw an exception
167  if (load_net_blocks(netFile.toLatin1().data(), 0) == 0) {
168  ConfigurationHelper::throwUserConfigError(prefix + "netFile", netFile, "Could not open the specified network configuration file");
169  }
170  }
171 
173  // --- reallocate data on the basis of number of parameters
174  delete[] freep;
175  freep=new float[nparameters+1000]; // we allocate more space to handle network variations introduced by the user
176  for(int r=0;r<nparameters;r++)
177  freep[r]=0.0f;
178 
179  delete[] backpropfreep;
180  backpropfreep=new float[nparameters+1000]; // we allocate more space to handle network variations introduced by the user
181  for(int r=0;r<nparameters;r++)
182  backpropfreep[r]=0.0f;
183 
184  delete[] phep;
185  phep=new float[nparameters+1000]; // we allocate more space to handle network variations introduced by the user
186  for(int r=0;r<nparameters;r++)
187  phep[r]=DEFAULT_VALUE; // the default value corresponds to don't care
188 
189  delete[] muts;
190  muts=new float[nparameters+1000]; // we allocate more space to handle network variations introduced by the user
191  for(int r=0;r<nparameters;r++)
192  muts[r]=DEFAULT_VALUE; // the default value corresponds to don't care
193 
194  if ( !netFile.isEmpty() ) {
195  // Try to load the file filename.phe if present in the current directory
196 
197 /* char cCurrentPath[300];
198 
199  if (!getcwd(cCurrentPath, sizeof(cCurrentPath)))
200  {
201  printf("error\n");
202  }
203  else {
204  cCurrentPath[sizeof(cCurrentPath) - 1] = '\0';
205 
206  printf ("The current working directory is %s\n", cCurrentPath);
207  }
208 */
209  QFileInfo fileNet( netFile );
210  QString filePhe = fileNet.baseName() + ".phe";
211  load_net_blocks(filePhe.toLatin1().data(), 1);
212  }
213 
214  //resetting net
215  resetNet();
216 
217  printBlocks();
218 
219  // we create the labels of the hidden neurons
220  for(int i = 0; i < nhiddens; i++) {
221  sprintf(neuronl[ninputs+i], "h%d", i);
222  neuronrange[ninputs+i][0] = 0.0;
223  neuronrange[ninputs+i][1] = 1.0;
224  neurondcolor[ninputs+i] = QColor(125,125,125);
225  }
226 }
227 
228 void Evonet::save(ConfigurationParameters& params, QString prefix) {
229  params.startObjectParameters( prefix, "Evonet", this );
230  if ( netFile.isEmpty() ) {
231  params.createParameter( prefix, "nSensors", QString::number(ninputs) );
232  params.createParameter( prefix, "nHiddens", QString::number(nhiddens) );
233  params.createParameter( prefix, "nMotors", QString::number(noutputs) );
234  } else {
235  params.createParameter( prefix, "netFile", netFile );
236  }
237  params.createParameter( prefix, "weightRange", QString::number(wrange) );
238  params.createParameter( prefix, "gainRange", QString::number(grange) );
239  params.createParameter( prefix, "biasRange", QString::number(brange) );
240 }
241 
242 void Evonet::describe( QString type ) {
243  Descriptor d = addTypeDescription( type, "Neural Network imported from Evorobot" );
244  d.describeInt( "nSensors" ).limits( 1, MAXN ).help( "The number of sensor neurons" );
245  d.describeInt( "nHiddens" ).limits( 1, MAXN ).help( "The number of hidden neurons" );
246  d.describeInt( "nMotors" ).limits( 1, MAXN ).help( "The number of motor neurons" );
247  d.describeString( "netFile" ).help( "The .net file that defines the architecture to load. WARNING: when this parameter is specified, all other parameters are ignored" );
248  d.describeReal( "weightRange" ).def(5.0f).limits(1,+Infinity).help( "The synaptic weights of the neural network can only assume values in [-weightRange, +weightRange]" );
249  d.describeReal( "gainRange" ).def(5.0f).limits(0,+Infinity).help( "The gain of a neuron can only assume values in [0, +gainRange]" );
250  d.describeReal( "biasRange" ).def(5.0f).limits(0,+Infinity).help( "The bias of a neuron can only assume values in [-biasRange, +biasRange]" );
251  d.describeEnum( "inputNeuronType" ).def("no_delta").values( QStringList() << "no_delta" << "with_delta" ).help( "The type of input neurons when the network is auto generated");
252  d.describeEnum( "hiddenNeuronType" ).def("logistic").values( QStringList() << "logistic" << "logistic+delta" << "binary" << "logistic_0.2" ).help( "The type of hidden neurons when the network is auto generated");
253  d.describeEnum( "outputNeuronType" ).def("no_delta").values( QStringList() << "no_delta" << "with_delta" ).help( "The type of output neurons when the network is auto generated");
254  d.describeBool( "recurrentHiddens" ).def(false).help( "when true, generates a network with recurrent hidden neurons");
255  d.describeBool( "inputOutputConnections" ).def(false).help( "when true, generates a network with input-output connections in addition to input-hidden-output connections");
256  d.describeBool( "recurrentOutputs" ).def(false).help( "when true, generates a network with recurrent output neurons");
257  d.describeBool( "biasOnHiddenNeurons" ).def(false).help( "when true, generates a network whose hidden neurons have a bias");
258  d.describeBool( "biasOnOutputNeurons" ).def(false).help( "when true, generates a network whose output neurons have a bias");
259  d.describeBool( "showTeachingInput" ).def(false).help( "Whether the teaching input has to be shown in the UI");
260 }
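// A minimal example of a configuration group using the parameters described above
// (group name, layout and values are purely illustrative):
//
//   [NET]
//   type = Evonet
//   nSensors = 8
//   nHiddens = 4
//   nMotors = 2
//   hiddenNeuronType = logistic
//   recurrentHiddens = false
//   biasOnHiddenNeurons = true
//   weightRange = 5.0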
261 
263  return new EvonetUI( this, &neuronsMonitorUploader );
264 }
265 
266 void Evonet::create_net_block( int inputNeuronType, int hiddenNeuronType, int outputNeuronType, bool recurrentHiddens, bool inputOutputConnections, bool recurrentOutputs, bool biasOnHidden, bool biasOnOutput )
267 {
268  int n;
269  int i;
270  int startx;
271  int dx;
272 
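 // Layout of a net_block row, as used below and when the network is loaded/updated:
 //   [0] block type: 0 = connection, 1 = update, 2 = gain, 3 = modulated gain
 //   [1] index of the first (receiving) neuron of the block
 //   [2] number of (receiving) neurons in the block
 //   [3] index of the first sending neuron (for connection blocks; the modulating neuron for type 3)
 //   [4] number of sending neurons (for connection blocks)
 //   [5] flag used elsewhere to mark blocks subject to backpropagation training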
273  // setting the neuron types
274  for(i = 0; i < this->ninputs; i++) {
275  this->neurontype[i]= inputNeuronType;
276  neuronbias[i] = 0;
277  }
278  for(i = this->ninputs; i < (this->nneurons - this->noutputs); i++) {
279  this->neurontype[i]= hiddenNeuronType;
280  neuronbias[i] = (biasOnHidden) ? 1 : 0;
281  }
282  for(i = (this->nneurons - this->noutputs); i < this->nneurons; i++) {
283  this->neurontype[i]= outputNeuronType;
284  neuronbias[i] = (biasOnOutput) ? 1 : 0;
285  }
286 
287  // gain
288  for(i=0; i < this->nneurons; i++) {
289  this->neurongain[i]= 0;
290  }
291 
292  this->net_nblocks = 0;
293  // input update block
294  this->net_block[this->net_nblocks][0] = 1;
295  this->net_block[this->net_nblocks][1] = 0;
296  this->net_block[this->net_nblocks][2] = this->ninputs;
297  this->net_block[this->net_nblocks][3] = 0;
298  this->net_block[this->net_nblocks][4] = 0;
299  this->net_block[this->net_nblocks][5] = 0;
300  this->net_nblocks++;
301 
302  // input-hidden connections
303  if (this->nhiddens > 0) {
304  this->net_block[this->net_nblocks][0] = 0;
305  this->net_block[this->net_nblocks][1] = this->ninputs;
306  this->net_block[this->net_nblocks][2] = this->nhiddens;
307  this->net_block[this->net_nblocks][3] = 0;
308  this->net_block[this->net_nblocks][4] = this->ninputs;
309  this->net_block[this->net_nblocks][5] = 0;
310  this->net_nblocks++;
311  }
312 
313  // hidden-hidden connections
314  if (recurrentHiddens) {
315  this->net_block[this->net_nblocks][0] = 0;
316  this->net_block[this->net_nblocks][1] = this->ninputs;
317  this->net_block[this->net_nblocks][2] = this->nhiddens;
318  this->net_block[this->net_nblocks][3] = this->ninputs;
319  this->net_block[this->net_nblocks][4] = this->nhiddens;
320  this->net_block[this->net_nblocks][5] = 0;
321  this->net_nblocks++;
322  }
323 
324  // hidden update block
325  if (this->nhiddens > 0) {
326  this->net_block[this->net_nblocks][0] = 1;
327  this->net_block[this->net_nblocks][1] = this->ninputs;
328  this->net_block[this->net_nblocks][2] = this->nhiddens;
329  this->net_block[this->net_nblocks][3] = 0;
330  this->net_block[this->net_nblocks][4] = 0;
331  this->net_block[this->net_nblocks][5] = 0;
332  this->net_nblocks++;
333  }
334 
335  // input-output connections
336  if (this->nhiddens == 0 || inputOutputConnections) {
337  this->net_block[this->net_nblocks][0] = 0;
338  this->net_block[this->net_nblocks][1] = this->ninputs + this->nhiddens;
339  this->net_block[this->net_nblocks][2] = this->noutputs;
340  this->net_block[this->net_nblocks][3] = 0;
341  this->net_block[this->net_nblocks][4] = this->ninputs;
342  this->net_block[this->net_nblocks][5] = 0;
343  this->net_nblocks++;
344  }
345 
346  // hidden-output connections
347  if (this->nhiddens > 0) {
348  this->net_block[net_nblocks][0] = 0;
349  this->net_block[net_nblocks][1] = this->ninputs + this->nhiddens;
350  this->net_block[net_nblocks][2] = this->noutputs;
351  this->net_block[net_nblocks][3] = this->ninputs;
352  this->net_block[net_nblocks][4] = this->nhiddens;
353  this->net_block[this->net_nblocks][5] = 0;
354  this->net_nblocks++;
355  }
356 
357  // output-output connections
358  if (recurrentOutputs) {
359  this->net_block[this->net_nblocks][0] = 0;
360  this->net_block[this->net_nblocks][1] = this->ninputs + this->nhiddens;
361  this->net_block[this->net_nblocks][2] = this->noutputs;
362  this->net_block[this->net_nblocks][3] = this->ninputs + this->nhiddens;
363  this->net_block[this->net_nblocks][4] = this->noutputs;
364  this->net_block[this->net_nblocks][5] = 0;
365  this->net_nblocks++;
366  }
367 
368  // output update block
369  this->net_block[this->net_nblocks][0] = 1;
370  this->net_block[this->net_nblocks][1] = this->ninputs + this->nhiddens;
371  this->net_block[this->net_nblocks][2] = this->noutputs;
372  this->net_block[this->net_nblocks][3] = 0;
373  this->net_block[this->net_nblocks][4] = 0;
374  this->net_block[this->net_nblocks][5] = 0;
375  this->net_nblocks++;
376 
377  // cartesian xy coordinate for sensory neurons for display (y=400)
378  n = 0;
379  dx = 30;//25
380  if (this->ninputs > this->noutputs) {
381  startx = 50;
382  } else {
383  startx = ((this->noutputs - this->ninputs) / 2) * dx + 50;
384  }
385  for(i = 0; i < this->ninputs; i++, n++) {
386  this->neuronxy[n][0] = (i * dx) + startx;
387  this->neuronxy[n][1] = 400;
388  }
389 
390  // cartesian xy coordinate for internal neurons for display (y=225)
391  startx = this->ninputs * dx;
392  for(i=0; i < (this->nneurons - (this->ninputs + this->noutputs)); i++, n++) {
393  this->neuronxy[n][0] = startx + (i * dx);
394  this->neuronxy[n][1] = 225;
395  }
396 
397  // cartesian xy coordinate for motor neurons for display (y=50)
398  if (this->ninputs > this->noutputs) {
399  startx = ((this->ninputs - this->noutputs) / 2) * dx + 50;
400  } else {
401  startx = 50;
402  }
403  for(i=0; i < this->noutputs; i++, n++) {
404  this->neuronxy[n][0] = startx + (i * dx);
405  this->neuronxy[n][1] = 50;
406  }
407 
408  // set neurons whose activation should be displayed
409  for(i=0; i < this->nneurons; i++) {
410  this->neurondisplay[i] = 1;
411  }
412 
413  // calculate the height and width necessary to display all created neurons (drawnymax, drawnxmax)
414  drawnymax = 400 + 30;
415  for(i = 0, drawnxmax = 0; i < nneurons; i++) {
416  if (neuronxy[i][0] > drawnxmax) {
417  drawnxmax = neuronxy[i][0];
418  }
419  }
420  drawnxmax += 60;
421 
422  // compute the number of parameters
424 
425  //i = this->ninputs;
426  //if (this->ninputs > i)
427  // i = this->noutputs;
428  //if ((this->nneurons - this->noutputs) > i)
429  // i = (this->nneurons - this->noutputs);
430  //drawnxmax = (i * dx) + dx + 30;
431 }
432 
433 int Evonet::load_net_blocks(const char *filename, int mode)
434 {
435 
436  FILE *fp;
437  int b;
438  int n;
439  int i;
440  float *ph;
441  float *mu;
442  float *p;
443  int np;
444  const int bufferSize = 128;
445  char cbuffer[bufferSize];
446 
447  if ((fp = fopen(filename,"r")) != NULL)
448  {
449  fscanf(fp,"ARCHITECTURE\n");
450  fscanf(fp,"nneurons %d\n", &nneurons);
451  fscanf(fp,"nsensors %d\n", &ninputs);
452  fscanf(fp,"nmotors %d\n", &noutputs);
453  if (nneurons > MAXN)
454  Logger::error( "Evonet - increase MAXN to support more than "+QString::number(MAXN)+" neurons" );
455  nhiddens = nneurons - (ninputs + noutputs);
456  fscanf(fp,"nblocks %d\n", &net_nblocks);
457  for (b=0; b < net_nblocks; b++)
458  {
459  fscanf(fp,"%d %d %d %d %d %d", &net_block[b][0],&net_block[b][1],&net_block[b][2],&net_block[b][3],&net_block[b][4], &net_block[b][5]);
460  if (net_block[b][0] == 0)
461  fscanf(fp," // connections block\n");
462  if (net_block[b][0] == 1)
463  fscanf(fp," // block to be updated\n");
464  if (net_block[b][0] == 2)
465  fscanf(fp," // gain block\n");
466  if (net_block[b][0] == 3)
467  fscanf(fp," // modulated gain block\n");
468  }
469 
470  fscanf(fp,"neurons bias, delta, gain, xy position, display\n");
471  drawnxmax = 0;
472  drawnymax = 0;
473  for(n=0; n < nneurons; n++)
474  {
475  fscanf(fp,"%d %d %d %d %d %d\n", &neuronbias[n], &neurontype[n], &neurongain[n], &neuronxy[n][0], &neuronxy[n][1], &neurondisplay[n]);
476  if(drawnxmax < neuronxy[n][0])
477  drawnxmax = neuronxy[n][0];
478  if(drawnymax < neuronxy[n][1])
479  drawnymax = neuronxy[n][1];
480  }
481  drawnxmax += 30;
482  drawnymax += 30;
483 
484  if (mode == 1)
485  {
486  fscanf(fp,"FREE PARAMETERS %d\n", &np);
487  if (nparameters != np) {
488  Logger::error(QString("ERROR: parameters defined are %1 while %2 contains %3 parameters").arg(nparameters).arg(filename).arg(np));
489  }
490  i = 0;
491  ph = phep;
492  mu = muts;
493  p = freep;
494 
495  while (fgets(cbuffer,bufferSize,fp) != NULL && i < np)
496  {
497  //read values from line
498  QString line = cbuffer;
499  QStringList lineContent = line.split(QRegExp("\\s+"), QString::SkipEmptyParts);
500 
501  bool floatOnSecondPlace = false;
502  lineContent[1].toFloat(&floatOnSecondPlace);
503 
504  if(lineContent.contains("*") || floatOnSecondPlace)
505  readNewPheLine(lineContent, ph, mu);
506  else
507  readOldPheLine(lineContent, ph, mu);
508 
509  *p = *ph;
510 
511  i++;
512  mu++;
513  ph++;
514  p++;
515  }
516  pheloaded = true;
517  }
518  fclose(fp);
519 
520  Logger::info( "Evonet - loaded file " + QString(filename) );
521  return(1);
522  }
523  else
524  {
525  Logger::warning( "Evonet - File " + QString(filename) + " not found" );
526  return(0);
527  }
528 }
529 
530 void Evonet::readOldPheLine(QStringList line, float* par, float* mut)
531 {
532  *par = line[0].toFloat();
533 
534  if(*par != DEFAULT_VALUE) { //no mutations
535  *mut = 0;
536  }
537 }
538 
539 void Evonet::readNewPheLine(QStringList line, float* par, float* mut)
540 {
541  if(line[0] == "*") {
542  *par = DEFAULT_VALUE; //start at random
543  } else {
544  //error handling
545 
546 /* char *tmp = line[0].toLatin1().data();
547  printf("read : %s\n",line[0].toLatin1().data());
548  printf("tmp : %s\n",tmp);
549 
550  for (int i = 0; i<line[0].length(); i++) {
551  if (tmp[i]==',') {
552  tmp[i] = '.';
553 
554  }
555  }
556 */
557 // *par = strtof(tmp, NULL);
558 
559 
560 
561 // sscanf(tmp, "%f", par);
562  *par = line[0].toFloat();
563  }
564 
565 
566  if(line[1] == "*") {
567  *mut = DEFAULT_VALUE;
568  } else {
569  *mut = line[1].toFloat();
570  }
571 }
572 
573 /*
574  * It saves the architecture and also the parameters (when mode = 1)
575  */
576 void Evonet::save_net_blocks(const char *filename, int mode)
577 {
578  FILE *fp;
579  int b;
580  int n;
581  int i;
582  int t;
583 
584  char* default_string = "*\t\t";
585  char **p = new char*[freeParameters()];
586  char **mu = new char*[freeParameters()];
587  for(int h=0; h<freeParameters(); h++) {
588  mu[h] = new char[50];
589  p[h] = new char[50];
590 
591  if(muts[h] == DEFAULT_VALUE) {
592  mu[h] = default_string;
593  } else {
594  sprintf(mu[h], "%f", muts[h]);
595  }
596 
597  if(freep[h] == DEFAULT_VALUE) {
598  p[h] = default_string;
599  } else {
600  sprintf(p[h], "%f", freep[h]);
601  }
602  }
603 
604  if ((fp = fopen(filename,"w")) != NULL) {
605  fprintf(fp,"ARCHITECTURE\n");
606  fprintf(fp,"nneurons %d\n", nneurons);
607  fprintf(fp,"nsensors %d\n", ninputs);
608  fprintf(fp,"nmotors %d\n", noutputs);
609  fprintf(fp,"nblocks %d\n", net_nblocks);
610  for (b = 0; b < net_nblocks; b++) {
611  fprintf(fp,"%d %d %d %d %d %d", net_block[b][0],net_block[b][1],net_block[b][2],net_block[b][3],net_block[b][4],net_block[b][5]);
612  if (net_block[b][0] == 0) {
613  fprintf(fp," // connections block\n");
614  } else if (net_block[b][0] == 1) {
615  fprintf(fp," // block to be updated\n");
616  } else if (net_block[b][0] == 2) {
617  fprintf(fp," // gain block\n");
618  } else if (net_block[b][0] == 3) {
619  fprintf(fp," // modulated gain block\n");
620  }
621  }
622  fprintf(fp,"neurons bias, delta, gain, xy position, display\n");
623  for(n = 0; n < nneurons; n++) {
624  fprintf(fp,"%d %d %d %d %d %d\n", neuronbias[n], neurontype[n], neurongain[n], neuronxy[n][0], neuronxy[n][1], neurondisplay[n]);
625  }
626 
628  if (mode == 1) {
629  fprintf(fp,"FREE PARAMETERS %d\n", nparameters);
630  for(i = 0; i < nneurons; i++) {
631  if (neurongain[i] == 1) {
632  fprintf(fp,"%s \t %s \tgain %s\n",*p, *mu, neuronl[i]);
633  p++;
634  mu++;
635  }
636  }
637  for(i=0; i<nneurons; i++) {
638  if (neuronbias[i] == 1) {
639  fprintf(fp,"%s \t %s \tbias %s\n",*p, *mu, neuronl[i]);
640  p++;
641  mu++;
642  }
643  }
644  for (b=0; b < net_nblocks; b++) {
645  if (net_block[b][0] == 0) {
646  for(t=net_block[b][1]; t < net_block[b][1] + net_block[b][2];t++) {
647  for(i=net_block[b][3]; i < net_block[b][3] + net_block[b][4];i++) {
648  fprintf(fp,"%s \t %s \tweight %s from %s\n",*p, *mu, neuronl[t], neuronl[i]);
649  p++;
650  mu++;
651  }
652  }
653  } else if (net_block[b][0] == 1) {
654  for(t=net_block[b][1]; t < (net_block[b][1] + net_block[b][2]); t++) {
655  if (neurontype[t] == 1) {
656  float timeC = 0;
657  if(*p != default_string) {
658  timeC = atof(*p);
659  timeC = fabs(timeC)/wrange; //(timeC + wrange)/(wrange*2);
660  }
661 
662  fprintf(fp,"%s \t %s \ttimeconstant %s (%f)\n", *p, *mu, neuronl[t], timeC);
663  p++;
664  mu++;
665  }
666  }
667  }
668  }
669  }
670  fprintf(fp,"END\n");
671 
672  Logger::info( "Evonet - controller saved on file " + QString(filename) );
673  fclose(fp);
674  } else {
675  Logger::error( "Evonet - unable to create the file " + QString(filename) );
676  }
677 }
678 
679 /*
680  * standard logistic
681  */
682 float Evonet::logistic(float f)
683 {
684  return((float) (1.0 / (1.0 + exp(0.0 - f))));
685 }
686 
687  float Evonet::tansig(float f) {
688 
689  return 2.0/(1.0+exp(-2.0*f))-1.0;
690  }
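 // Note: tansig(f) = 2/(1+exp(-2f)) - 1 is mathematically equivalent to tanh(f).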
691 
692 /*
693  * compute the number of free parameters
694  */
696 {
697  int i;
698  int t;
699  int b;
700  int updated[MAXN];
701  int ng;
702  int nwarnings;
703 
704  ng = 0;
705  for(i=0;i < nneurons;i++) {
706  updated[i] = 0;
707  }
708  // gain
709  for(i=0;i < nneurons;i++) {
710  if (neurongain[i] == 1) {
711  ng++;
712  }
713  }
714  // biases
715  for(i=0;i < nneurons;i++) {
716  if (neuronbias[i] == 1) {
717  ng++;
718  }
719  }
720  // timeconstants
721  for(i=0;i < nneurons;i++) {
722  if (neurontype[i] == 1) {
723  ng++;
724  }
725  }
726  // blocks
727  for (b=0; b < net_nblocks; b++) {
728  // connection block
729  if (net_block[b][0] == 0) {
730  for(t=net_block[b][1]; t < net_block[b][1] + net_block[b][2];t++) {
731  for(i=net_block[b][3]; i < net_block[b][3] + net_block[b][4];i++) {
732  ng++;
733  }
734  }
735  }
736  }
737 
738  nwarnings = 0;
739  for(i=0;i < nneurons;i++) {
740  if (updated[i] < 1 && nwarnings == 0) {
741  Logger::warning( "Evonet - neuron " + QString::number(i) + " will never be activated according to the current architecture" );
742  nwarnings++;
743  }
744  if (updated[i] > 1 && nwarnings == 0) {
745  Logger::warning( "Evonet - neuron " + QString::number(i) + " will be activated more than once according to the current architecture" );
746  nwarnings++;
747  }
748  }
749  nparameters=ng; // number of parameters
750 }
751 
752 void Evonet::updateNet()
753 {
754  int i;
755  int t;
756  int b;
757  float *p;
758  float delta;
759  float netinput[MAXN];
760  float gain[MAXN];
761 
762  p = freep;
763  //nl = neuronlesion;
764 
765  // gain
766  for(i=0;i < nneurons;i++) {
767  if (neurongain[i] == 1) {
768  gain[i] = (float) (fabs((double) *p) / wrange) * grange;
769  p++;
770  } else {
771  gain[i] = 1.0f;
772  }
773  }
774  // biases
775 
776 /* printf("weights: ");
777  printWeights();
778  printf("\n");
779 */
780  for(i=0;i < nneurons;i++) {
781  if (neuronbias[i] == 1) {
782 // printf("netinput[%d]= [%ld] %f/%f*%f = %f*%f = %f",i,p-freep, *p,wrange,brange,((double)*p/wrange),brange,((double)*p/wrange)*brange);
783  netinput[i] = ((double)*p/wrange)*brange;
784  p++;
785  } else {
786  netinput[i] = 0.0f;
787  }
788 // printf("\tnetinput[%d]: %f -- wrange: %f\n",i,netinput[i],wrange);
789  }
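 // Note: free parameters are stored in [-wrange, +wrange]; the two loops above map them
 // to a gain in [0, grange] (via the absolute value) and to a bias in [-brange, +brange].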
790 
791 
792  // blocks
793  for (b=0; b < net_nblocks; b++) {
794  // connection block
795  if (net_block[b][0] == 0) {
796  for(t=net_block[b][1]; t < net_block[b][1] + net_block[b][2];t++) {
797  for(i=net_block[b][3]; i < net_block[b][3] + net_block[b][4];i++) {
798  netinput[t] += act[i] * gain[i] * *p;
799 // printf("netinput[%d] += act[%d] * gain[%d] * %f += %f * %f * %f += %f = %f\n",t,i,i,*p,act[i],gain[i], *p,act[i] * gain[i] * *p, netinput[t] );
800  p++;
801  }
802  }
803  }
804  // gain block (gain of neuron a-b set equal to gain of neuron a)
805  if (net_block[b][0] == 2) {
806  for(t=net_block[b][1]; t < net_block[b][1] + net_block[b][2];t++) {
807  gain[t] = gain[net_block[b][1]];
808  }
809  }
810  // gain block (gain of neuron a-b set equal to act[c])
811  if (net_block[b][0] == 3) {
812  for(t=net_block[b][1]; t < net_block[b][1] + net_block[b][2];t++) {
813  gain[t] = act[net_block[b][3]];
814  }
815  }
816  // update block
817  if (net_block[b][0] == 1) {
818  for(t=net_block[b][1]; t < (net_block[b][1] + net_block[b][2]); t++) {
819  if (t < ninputs) {
820  switch(neurontype[t]) {
821  case 0: // simple relay units
822  act[t] = input[t];
823  break;
824  case 1: // delta neurons
825  delta = (float) (fabs((double) *p) / wrange);
826  p++;
827  act[t] = (act[t] * delta) + (input[t] * (1.0f - delta));
828  break;
829  }
830  if(neuronlesions > 0 && neuronlesion[t]) {
831  act[t]= (float)neuronlesionVal[t];
832  }
833  } else {
834  switch(neurontype[t]) {
835  case 0: // simple logistic
836  default:
837  act[t] = logistic(netinput[t]);
838  delta = 0.0;
839  break;
840  case 1: // delta neurons
841  delta = (float) (fabs((double) *p) / wrange);
842  p++;
843  act[t] = (act[t] * delta) + (logistic(netinput[t]) * (1.0f - delta));
844  break;
845  case 2: // binary neurons
846  if (netinput[t] >= 0.0) {
847  act[t] = 1.0;
848  } else {
849  act[t] = 0.0;
850  }
851  break;
852  case 3: // logistic_0.2 neurons
853  act[t] = logistic(netinput[t]*0.2f);
854  delta = 0.0;
855  break;
856  }
857  if(neuronlesions > 0 && neuronlesion[t]) {
858  act[t]= (float)neuronlesionVal[t];
859  }
860  }
861  }
862  }
863  }
864 
865  // Storing the current activations
866  memcpy(storedActivations[nextStoredActivation], act, nneurons * sizeof(float));
867  nextStoredActivation = (nextStoredActivation + 1) % MAXSTOREDACTIVATIONS;
868  if (firstStoredActivation == nextStoredActivation) {
869  // We have filled the circular buffer, discarding the oldest activation
870  firstStoredActivation = (firstStoredActivation + 1) % MAXSTOREDACTIVATIONS;
871  }
872 
873  // increment the counter
874  updatescounter++;
875 
876  emit evonetUpdated();
877 
878  // If a downloader is associated with the neuronsMonitorUploader, uploading activations
879  if (neuronsMonitorUploader.getDownloader() != NULL) {
880  // This call can return NULL if GUI is too slow
881  DatumToUpload<ActivationsToGui> d(neuronsMonitorUploader);
882 
883  d->activations = true;
884 
885  // Reserving the correct number of elements, for efficiency reasons
886  d->data.reserve(nneurons);
887  d->data.resize(nneurons);
888 
889  // Copying data
890  std::copy(act, &(act[nneurons]), d->data.begin());
891 
892  // Adding the current step
893  d->updatesCounter = updatescounter;
894  }
895 }
896 
897 int Evonet::setInput(int inp, float value)
898 {
899  if (inp>=ninputs || inp<0) {
900  return -1; // exceeding sensor number
901  }
902  input[inp]=value;
903  return 0;
904 }
905 
906 float Evonet::getOutput(int out)
907 {
908  if(out>=noutputs) {
909  return -1; // exceeding output number
910  }
911  return act[ninputs+nhiddens+out];
912 }
913 
914 float Evonet::getInput(int in)
915 {
916  return this->input[in];
917 }
918 
919 float Evonet::getNeuron(int in)
920 {
921  return act[in];
922 }
923 
924 void Evonet::resetNet()
925 {
926  int i;
927  for (i = 0; i < MAXN; i++) {
928  act[i]=0.0;
929  netinput[i]=0.0;
930  input[i]=0.0;
931  }
932  updatescounter = 0;
933 }
934 
935 void Evonet::injectHidden(int nh, float val)
936 {
937  if(nh<nhiddens) {
938  act[this->ninputs+nh] = val;
939  }
940 }
941 
942 float Evonet::getHidden(int h)
943 {
944  if(h<nhiddens && h>=0) {
945  return act[this->ninputs+h];
946  } else {
947  return -999;
948  }
949 }
950 
951 int Evonet::freeParameters()
952 {
953  return this->nparameters;
954 }
955 
957 {
958  return freep[i];
959 }
960 
962 {
963  return pheloaded;
964 }
965 
966 /*
967  * Copy parameters from genotype
968  */
969 void Evonet::setParameters(const int *dt)
970 {
971  int i;
972  float *p;
973 
974  p = freep;
975  for (i=0; i<freeParameters(); i++, p++) {
976  *p = wrange - ((float)dt[i]/geneMaxValue)*wrange*2;
977  }
978  emit evonetUpdated();
979 }
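// Worked example of the mapping above (illustrative): with geneMaxValue = 255 and
// wrange = 5, a gene value of 0 maps to +5, 255 maps to -5, and 127 maps to about +0.02,
// i.e. genes span [-wrange, +wrange] linearly from the largest to the smallest value.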
980 
981 void Evonet::getMutations(float* GAmut)
982 {
983  //copy mutation vector
984  for(int i=0; i<freeParameters(); i++) {
985  GAmut[i] = muts[i];
986  }
987 }
988 
989 void Evonet::copyPheParameters(int* pheGene)
990 {
991  for(int i=0; i<freeParameters(); i++)
992  {
993  if(phep[i] == DEFAULT_VALUE) {
994  pheGene[i] = DEFAULT_VALUE;
995  } else {
996  pheGene[i] = (int)((wrange - phep[i])*geneMaxValue/(2*wrange));
997  }
998  }
999 }
1000 
1001 void Evonet::printIO()
1002 {
1003  QString output;
1004 
1005  output = "In: ";
1006  for (int in = 0; in < this->ninputs; in++) {
1007  output += QString("%1 ").arg(this->input[in], 0, 'f', 10);
1008  }
1009  output += "Hid: ";
1010  for (int hi = this->ninputs; hi < (this->nneurons - this->noutputs); hi++) {
1011  output += QString("%1 ").arg(this->act[hi], 0, 'f', 10);
1012  }
1013  output += "Out: ";
1014  for (int out = 0; out < this->noutputs; out++) {
1015  output += QString("%1 ").arg(this->act[this->ninputs+this->nhiddens+out], 0, 'f', 10);
1016  }
1017 
1018  Logger::info(output);
1019 
1020 }
1021 
1022 int Evonet::getParamBias(int nbias)
1023 {
1024  int pb=-999; // if it remains -999, nbias is out of range
1025  if (nbias<nparambias && nbias>-1) {
1026  pb=(int) freep[nparambias+nbias];
1027  }
1028  return pb;
1029 }
1030 
1032 {
1033  return wrange;
1034 }
1035 
1037 {
1038  return brange;
1039 }
1040 
1042 {
1043  return grange;
1044 }
1045 
1046 
1047 void Evonet::printBlocks()
1048 {
1049  Logger::info("Evonet - ninputs " + QString::number(this->ninputs));
1050  Logger::info("Evonet - nhiddens " + QString::number(this->nhiddens));
1051  Logger::info("Evonet - noutputs " + QString::number(this->noutputs));
1052  Logger::info("Evonet - nneurons " + QString::number(this->nneurons));
1053 
1054  for(int i=0;i<this->net_nblocks;i++) {
1055  Logger::info( QString( "Evonet Block - %1 | %2 - %3 -> %4 - %5 | %6" )
1056  .arg(net_block[i][0])
1057  .arg(net_block[i][1])
1058  .arg(net_block[i][2])
1059  .arg(net_block[i][3])
1060  .arg(net_block[i][4])
1061  .arg(net_block[i][5]));
1062  }
1063 }
1064 
1066 {
1067  return ninputs;
1068 }
1069 
1071 {
1072  return nhiddens;
1073 }
1074 
1076 {
1077  return noutputs;
1078 }
1079 
1081 {
1082  return nneurons;
1083 }
1084 
1085 void Evonet::setRanges(double weight, double bias, double gain)
1086 {
1087  wrange=weight;
1088  brange=bias;
1089  grange=gain;
1090 }
1091 
1093 {
1094  if (firstStoredActivation == nextStoredActivation) {
1095  return NULL;
1096  }
1097 
1098  const int ret = firstStoredActivation;
1099  firstStoredActivation = (firstStoredActivation + 1) % MAXSTOREDACTIVATIONS;
1100  return storedActivations[ret];
1101 }
1102 
1104 {
1105  return updatescounter;
1106 }
1107 
1108 void Evonet::printWeights()
1109 {
1110  for (int i = 0; i<freeParameters(); i++) {
1111  printf("%.2f ",freep[i]);
1112  }
1113  printf("\n");
1114 }
1115 
1117 {
1118  float range = max-min;
1119 
1120  for (int i = 0; i<freeParameters(); i++) {
1121  freep[i] = (((float) rand())/RAND_MAX)*range +min;
1122 // freep[i] = 0.2;
1123  }
1124 
1125 /*
1126  freep[0] = 0.26;
1127  freep[1] = 0.27;
1128  freep[2] = 0.28;
1129  freep[3] = -0.23;
1130  freep[4] = 0.2;
1131  freep[5] = 0.21;
1132  freep[6] = 0.22;
1133  freep[7] = 0.23;
1134  freep[8] = 0.24;
1135  freep[9] = 0.25;
1136  freep[10] = -0.2;
1137  freep[11] = -0.21;
1138  freep[12] = -0.22;
1139 */
1140 }
1141 
1142 void Evonet::initWeightsInRange(float minBias, float maxBias, float minWeight, float maxWeight)
1143 {
1144  const float brange = maxBias - minBias;
1145  const float wrange = maxWeight - minWeight;
1146 
1147  // Pointer to free parameters to randomize them
1148  float* p = freep;
1149 
1150  // Randomizing gains. We use the biases range for them
1151  for(int i = 0; i < nneurons; ++i) {
1152  if (neurongain[i] == 1) {
1153  *(p++) = (((float) rand()) / float(RAND_MAX)) * brange + minBias;
1154  }
1155  }
1156 
1157  // Now randomizing biases
1158  for(int i = 0; i < nneurons; ++i) {
1159  if (neuronbias[i] == 1) {
1160  *(p++) = (((float) rand()) / float(RAND_MAX)) * brange + minBias;
1161  }
1162  }
1163 
1164  // Finally randomizing all the rest (there should be only weights, here)
1165  for (; p != &freep[freeParameters()]; ++p) {
1166  *p = (((float) rand()) / float(RAND_MAX)) * wrange + minWeight;
1167  }
1168 }
1169 
1171 
1172  initWeightsInRange(min, max);
1173 
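 // The scaling below appears to follow the Nguyen-Widrow initialization scheme:
 // beta = 0.7 * nhiddens^(1/ninputs), and the freshly randomized input-to-hidden weights
 // (and hidden biases) are rescaled by k = beta / norm, where norm is their Euclidean norm.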
1174  double beta = 0.7 * pow(nhiddens, 1.0/ninputs);
1175  double norm = 0;
1176 
1177  double tmp;
1178 
1179  for (int i =0; i<nhiddens; i++) {
1180  for (int j = 0; j<ninputs; j++) {
1181  tmp = getWeight(i+ninputs, j);
1182  norm += tmp*tmp;
1183  }
1184 
1185  if (neuronbias[i+ninputs]) {
1186 
1187  int ptr = 0;
1188  for (int j = 0; j<i+ninputs; j++) {
1189  if(neuronbias[j])
1190  ptr++;
1191  }
1192 
1193  norm += freep[ptr]*freep[ptr];
1194  }
1195 
1196 
1197  }
1198 
1199  norm = sqrt(norm);
1200 
1201  double k = beta/norm;
1202 
1203  for (int i =0; i<nhiddens; i++) {
1204  for (int j = 0; j<ninputs; j++) {
1205  setWeight(i+ninputs, j, getWeight(i+ninputs, j)*k);
1206  }
1207 
1208  if (neuronbias[i+ninputs]) {
1209 
1210  int ptr = 0;
1211  for (int j = 0; j<i+ninputs; j++) {
1212  if(neuronbias[j])
1213  ptr++;
1214  }
1215 
1216  freep[ptr]*=k;
1217  }
1218  }
1219 
1220 
1221  }
1222 
1223 
1225 
1226  //for (int i = 0; i<freeParameters(); i++) {
1227  // freep[i] = 0;
1228  //}
1229 
1230  int ptr = 0;
1231 
1232  for (int i = 0; i<nneurons; i++) {
1233  if (neuronbias[i]) {
1234  if (i>16 && i<23) {
1235  freep[ptr++]=0;
1236  }
1237  else {
1238 // freep[ptr++]=;
1239  ptr++;
1240  }
1241  }
1242  }
1243 
1244 
1245  for (int b=0; b<net_nblocks; b++) {
1246  for (int i=0; i<net_block[b][2]*net_block[b][4]; i++) {
1247  if (net_block[b][0] == 0 && net_block[b][5]==1){
1248 // freep[ptr++]=-1;
1249  ptr++;
1250  }
1251  else if (net_block[b][0] == 0 && net_block[b][5]==0){
1252  freep[ptr++]=0;
1253  }
1254  }
1255  }
1256  // This is used to set the weights of the reflex
1257 
1258  int p = 0;
1259  for (int i = 0; i<nneurons; i++)
1260  p += neuronbias[i];
1261 
1262 
1263  freep[p++] = 0;
1264  freep[p++] = 15;
1265  freep[p++] = -15;
1266  freep[p] = 0;
1267 
1268 
1269 
1270  }
1271 
1272 //#define BPDEBUG
1273 
1274 #ifdef BPDEBUG
1275 #define bpdebug(x,...) printf(x,##__VA_ARGS__)
1276 #define debug(x,...) printf(x,##__VA_ARGS__)
1277 #else
1278 #define bpdebug(x,...)
1279 #define debug(x,...)
1280 #endif
1281 
1282 #define inRange(x,y,z) ( (x) >= (y) && (x) < (y)+(z) )
1283 
1284  int Evonet::isHidden(int neuron){
1285  return neuron >= ninputs && neuron < ninputs+nhiddens;
1286  }
1287 
1288 /* float Evonet::backPropStep(QVector<float> tInput, double rate) {
1289 
1290  float globalError = 0;
1291 
1292  float delta[MAXN];
1293  float error[MAXN];
1294 
1295  int b;
1296  int nbiases = 0;
1297 
1298  for (int i = 0; i<nneurons; i++, nbiases += neuronbias[i]);
1299 
1300  bpdebug("%s net: %p, net_blocks: %p\n",__PRETTY_FUNCTION__,this, net_block);
1301 
1302 #ifdef BPDEBUG
1303  printf("netinput: ");
1304 
1305  for (int i = 0; i<ninputs; i++) {
1306  printf("%f ",input[i]);
1307  }
1308  printf("\n");
1309 
1310  printWeights();
1311  printIO();
1312 #endif
1313 
1314  bpdebug("nbiases: %d\n\n", nbiases);
1315 
1316  //
1317  // Storing offsets to easily obtain the starting position
1318  // of the weights for each connection block
1319  //
1320 
1321  bpdebug("Offsets:");
1322 
1323  QVector<int> offsets;
1324  offsets.push_back(nbiases);
1325  bpdebug("\t%d", offsets[0]);
1326 
1327  int p = 0;
1328 
1329  for (b = 0; b<net_nblocks; b++) {
1330  if (net_block[b][0]==0) {
1331  offsets.push_back(offsets[p++] + net_block[b][2]*net_block[b][4]);
1332  bpdebug("\t%d", offsets[p]);
1333  break;
1334  }
1335  }
1336 
1337 
1338  int bias_ptr = offsets[0]-1;
1339  bpdebug("\nbias_ptr: %d\n\n", bias_ptr);
1340 
1341  //
1342  // Compute the error for the output neurons
1343  //
1344  // NOTE : this assumes that the last connection block to train (net_block[b][5]==1) is the
1345  // one of the outputs. This code should be modified to consider the case in which the
1346  // connections to the output neurons are divided in two (or more) blocks.
1347  // It may be useful to have a function/flag to indicate whether a connection block involves
1348  // output neurons or not.
1349  //
1350 
1351 
1352  bpdebug("Error for last block\n");
1353  for (b=net_nblocks-1; b>=0; b--) {
1354  if (! (net_block[b][0]==0 && net_block[b][5]==1) )
1355  continue;
1356 
1357  int end = net_block[b][1]+net_block[b][2];
1358 
1359  for (int i = net_block[b][1]; i<end; i++) {
1360 
1361  float d = tInput[i-net_block[b][1]] - act[i];
1362 
1363  error[i] = act[i]*(1-act[i]) * d;
1364  delta[i] = error[i] * rate;
1365 
1366  bpdebug("\terror[%d]= gradient*(tInput[i - net_block[%d][3]]-act[%d]) = act[%d]*(1-act[%d]) * (tInput[%d - %d = %d]-act[%d]) = %f*(1-%f) * (%f - %f) = %f * %f = %f\n",i,b,i,i,i,i,net_block[b][1],i-net_block[b][1],i,act[i],act[i],tInput[i-net_block[b][1]],act[i],act[i]*(1-act[i]),(tInput[i-net_block[b][1]] - act[i]), error[i]);
1367  bpdebug("\terror[%d]= %f\n",i,error[i]);
1368 
1369  globalError+= d*d;
1370  }
1371 
1372  break;
1373  }
1374 
1375 
1376  //
1377  // Backpropagate the error
1378  //
1379 
1380  bpdebug("\nBackpropagate\n");
1381 
1382  p=offsets.size()-1;
1383  for (; b>=0; b--) {
1384 
1385  if (! (net_block[b][0]==0 && net_block[b][5]==1) )
1386  continue;
1387 
1388  //
1389  // First compute the error for the lower layer. This must be done
1390  // before updating the weights (since the error depends on the weights).
1391  // This step is not necessary if the lower layer is the input one.
1392  //
1393 
1394  //
1395  // i iterates over the "lower" layer, j over the "upper"
1396  //
1397 
1398  bpdebug("\tb: %d\n", b);
1399 
1400  if (net_block[b][3]+net_block[b][4] -1 > ninputs) {
1401 
1402  bpdebug("\tnetblock[%d][3]+net_block[%d][4] -1 = %d > %d => Computing the error\n", b,b,net_block[b][3]+net_block[b][4]-1, ninputs);
1403 
1404  for (int i = net_block[b][3]; i<net_block[b][3]+net_block[b][4]; i++) {
1405 
1406  bpdebug("\ti: %d\n", i);
1407  error[i] = 0;
1408 
1409  for (int j= net_block[b][1]; j<net_block[b][1]+net_block[b][2]; j++) {
1410 
1411  error[i]+= error[j] * freep[ offsets[p] + (i-net_block[b][3]) + (j-net_block[b][1])*net_block[b][2]];
1412  bpdebug("\t\tj: %d\n", j);
1413  bpdebug("\t\terror[%d] += act[j] * freep[ offset[p] + (i-net_block[b][3]) + j*net_block[b][2] ] = act[%d] * freep[ %d + %d + %d*%d ] = %f * %f = %f\n", i,j,offsets[p],(i-net_block[b][3]),(j-net_block[b][1]),net_block[b][2], act[j], freep[ offsets[p] + (i-net_block[b][3]) + (j-net_block[b][1])*net_block[b][2]], act[j]*freep[ offsets[p] + (i-net_block[b][3]) + (j-net_block[b][1])*net_block[b][2]] );
1414  bpdebug("\t\terror[%d]: %f\n", i,error[i]);
1415  }
1416 
1417  error[i]*= act[i]*(1-act[i]);
1418  bpdebug("\t\terror[%d]: %f\n", i,error[i]);
1419  delta[i] = error[i]*rate;
1420  bpdebug("\t\tdelta[%d]: %f\n", i,delta[i]);
1421  }
1422 
1423  }
1424 
1425  //
1426  // Then modify the weights of the connections from the lower to the upper layer
1427  //
1428 
1429  bpdebug("\n\tUpdating weights\n");
1430  for (int j= net_block[b][1]+net_block[b][2]-1; j>=net_block[b][1]; j--) {
1431  bpdebug("\tj: %d\n", j);
1432 
1433  if (neuronbias[j]) {
1434  freep[bias_ptr--] += delta[j];
1435  bpdebug("\t\tNeuron has bias\n");
1436  bpdebug("\t\t\tfreep[bias_ptr] = freep[ %d ] += delta[%d] = %f\n", bias_ptr+1, j, delta[j]);
1437  bpdebug("\t\t\tfreep[%d]: %f\n", bias_ptr+1, freep[bias_ptr+1]);
1438  }
1439 
1440  for (int i = net_block[b][3]; i<net_block[b][3]+net_block[b][4]; i++) {
1441  freep[ offsets[p] + (i-net_block[b][3]) + (j-net_block[b][1])*net_block[b][4]] += delta[j] * act[i];
1442  bpdebug("\t\ti: %d\n", i);
1443  bpdebug("\t\t\tfreep[ offset[%d] + (i-net_block[%d][3]) + j*net_block[%d][2] ] = freep[ %d + %d + %d*%d = %d ] += delta[%d] * act[%d] = %f * %f = %f\n", p, b,b, offsets[p], i-net_block[b][3], j-net_block[b][1],net_block[b][4],offsets[p] + (i-net_block[b][3]) + (j-net_block[b][1])*net_block[b][4],j,i,delta[j],act[i],delta[j]*act[i] );
1444  bpdebug("\t\t\tfreep[ %d ] = %f\n", offsets[p] + (i-net_block[b][3]) + (j-net_block[b][1])*net_block[b][4], freep[offsets[p] + (i-net_block[b][3]) + (j-net_block[b][1])*net_block[b][4]]);
1445  }
1446 
1447 
1448  }
1449 
1450  p--;
1451 
1452  }
1453 
1454  bpdebug("\n\n");
1455  return globalError;
1456 
1457 
1458  }
1459 */
1460 
1462 
1463  for (int i = 0; i<nneurons; i++) {
1464  printf("act[%d]: %f\n",i,act[i]);
1465  }
1466  }
1467 
1468 
1469 
1470  float Evonet::computeMeanSquaredError(QVector<float> trainingSet, QVector<float> desiredOutput) {
1471 
1472  float err = 0;
1473  int size = trainingSet.size()/ninputs;
1474 // int nOutputToTrain = desiredOutput.size() / size;
1475 
1476  int ptr = 0;
1477  double tmp;
1478 
1479  for (int i = 0; i<nneurons; i++) {
1480  act[i] = 0;
1481  }
1482 
1483 
1484  for (int i = 0; i<size; i++) {
1485 
1486  for (int j = 0; j<ninputs; j++) {
1487  setInput(j, trainingSet[i*ninputs + j]);
1488  }
1489 
1490  updateNet();
1491 
1492  for (int j=0; j<noutputs; j++) {
1493  if (!outputsToTrain[j])
1494  continue;
1495 
1496  tmp = desiredOutput[ptr++] - act[j+ninputs+nhiddens];
1497 // printf("d[%d] - act[%d] = %f - %f = %f\n",ptr-1,j+ninputs+nhiddens,desiredOutput[ptr-1 ], act[j+ninputs+nhiddens], tmp);
1498  err += tmp*tmp*err_weights[j]*err_weights[j];
1499 
1500  if (isinf(err)) {
1501  printf("INF!!\n");
1502  }
1503 
1504  }
1505 
1506  }
1507 
1508 // printf("err: %f\n",err);
1509 
1510  return err / (err_weight_sum*size);
1511 
1512  }
1513 
1514  int Evonet::extractWeightsFromNet(Eigen::VectorXf& w) {
1515 
1516  //*
1517  // This code assumes that input neurons have no bias and
1518  // none of the neurons has gain.
1519  //*
1520 
1521  int wPtr = 0, paramPtr = 0;
1522 
1523  int nbiases = 0;
1524  for (int i = 0; i<nneurons; i++) {
1525  nbiases += (neuronbias[i]==1);
1526  }
1527  paramPtr = nbiases;
1528 
1529  //*
1530  // Search for neurons to train
1531  //*
1532 
1533  for (int b = 0; b<net_nblocks; b++) {
1534 
1535  //*
1536  // Output neurons are treated separately. You still need to go through
1537  // all the blocks, since it is not guaranteed that the output blocks are all at the end.
1538  //*
1539 
1540  if (net_block[b][0] != 0)
1541  continue;
1542 
1543  if (net_block[b][5]==1 && !(net_block[b][1]>=ninputs+nhiddens)) {
1544  for (int i = net_block[b][1]; i<net_block[b][1]+net_block[b][2]; i++) {
1545 
1546  if (neuronbias[i]) {
1547 
1548  //*
1549  // If the neuron has a bias, in order to obtain the index of the
1550  // corresponding bias in vector freep, we iterate over all "previous"
1551  // neurons, counting those who have a bias.
1552  //*
1553 
1554  int ptr = 0;
1555  for (int j = 0; j<i; j++) {
1556  if(neuronbias[j])
1557  ptr++;
1558  }
1559 
1560  debug("Adding bias of neuron %d (freep[%d]) in w[%d]\n",i,ptr,wPtr);
1561  w[wPtr++] = freep[ptr];
1562  }
1563 
1564  //*
1565  // Adding weights of the connections of the i-th neurons with neurons
1566  // in the "lower" block
1567  //*
1568 
1569  for (int j = 0; j<net_block[b][4]; j++) {
1570  debug("Adding connection %d of neuron %d (freep[%d]) in w[%d]\n",j,i,paramPtr,wPtr);
1571  w[wPtr++] = freep[paramPtr++];
1572  }
1573  }
1574  }
1575  else
1576  paramPtr+= net_block[b][2]*net_block[b][4];
1577  }
1578 
1579 
1580  //*
1581  // Output neurons are stored sequentially, from the first to the last.
1582  //*
1583 
1584 
1585  for (int i = 0; i<noutputs; i++) {
1586  paramPtr = nbiases;
1587  if (!outputsToTrain[i])
1588  continue;
1589 
1590  int i_freep = i+ninputs+nhiddens;
1591 
1592  if (neuronbias[i_freep]) {
1593 
1594  //*
1595  // If the neuron has a bias, in order to obtain the index of the
1596  // corresponding bias in vector freep, we iterate over all "previous"
1597  // neurons, counting those who have a bias.
1598  //*
1599 
1600  int ptr = 0;
1601  for (int j = 0; j<i_freep; j++) {
1602  if(neuronbias[j])
1603  ptr++;
1604  }
1605 
1606  debug("Adding bias of output %d (freep[%d]) in w[%d]\n",i,ptr,wPtr);
1607  w[wPtr++] = freep[ptr];
1608  }
1609 
1610 
1611 
1612  for (int b = 0; b<net_nblocks; b++) {
1613 
1614  debug("Accessing trainingHiddenBlock[net_block[%d][3] = %d][%d]\n",b,net_block[b][3],i);
1615  if(!(trainingHiddenBlock[net_block[b][3]][i] && inRange(net_block[b][1], ninputs+nhiddens, noutputs) )) {
1616 // if (net_block[b][0] == 0) {
1617  paramPtr+= net_block[b][2]*net_block[b][4];
1618 // }
1619  debug("\tparamPtr: %d\n", paramPtr);
1620  continue;
1621  }
1622 
1623  //*
1624  // Iterate over hidden neurons in the current block
1625  //*
1626 
1627  for (int j = 0; j<net_block[b][4]; j++) {
1628  debug("Adding connection %d of output %d (freep[%d]) in w[%d]\n",j,i_freep,(i_freep-net_block[b][1])*net_block[b][4] + paramPtr,wPtr);
1629  w[wPtr++] = freep[(i_freep-net_block[b][1])*net_block[b][4] + paramPtr++];
1630  }
1631  }
1632 
1633  }
1634 
1635  for (int i = 0; i<w.size(); i++) {
1636  debug("%f\n",w[i]);
1637  }
1638 
1639 
1640  return wPtr;
1641  }
1642 
1643  int Evonet::importWeightsFromVector(Eigen::VectorXf& w) {
1644 
1645  //*
1646  // Inverse procedure of the extraction of the weights (see extractWeightsFromNet() ).
1647  //*
1648 
1649  int wPtr = 0, paramPtr = 0;
1650 
1651  int nbiases = 0;
1652  for (int i = 0; i<nneurons; i++) {
1653  nbiases += (neuronbias[i]==1);
1654  }
1655  paramPtr = nbiases;
1656 
1657  //*
1658  // Search for neurons to train
1659  //*
1660 
1661  for (int b = 0; b<net_nblocks; b++) {
1662 
1663  //*
1664  // Output neurons are treated separately. You still need to go through
1665  // all the blocks, since it is not guaranteed that the output blocks are all at the end.
1666  //*
1667 
1668  if (net_block[b][0] != 0)
1669  continue;
1670 
1671  if (net_block[b][5]==1 && !(net_block[b][1]>=ninputs+nhiddens)) {
1672  for (int i = net_block[b][1]; i<net_block[b][1]+net_block[b][2]; i++) {
1673 
1674  if (neuronbias[i]) {
1675 
1676  //*
1677  // If the neuron has a bias, in order to obtain the index of the
1678  // corresponding bias in vector freep, we iterate over all "previous"
1679  // neurons, counting those who have a bias.
1680  //*
1681 
1682  int ptr = 0;
1683  for (int j = 0; j<i; j++) {
1684  if(neuronbias[j])
1685  ptr++;
1686  }
1687 
1688  debug("Adding bias of neuron %d (w[%d]) in freep[%d]\n",i,wPtr,ptr);
1689  freep[ptr] = w[wPtr++];
1690  }
1691 
1692  //*
1693  // Adding weights of the connections of the i-th neurons with neurons
1694  // in the "lower" block
1695  //*
1696 
1697  for (int j = 0; j<net_block[b][4]; j++) {
1698  debug("Adding connection %d of neuron %d (w[%d]) in freep[%d]\n",j,i,wPtr,paramPtr);
1699  freep[paramPtr++] = w[wPtr++];
1700  }
1701  }
1702  }
1703  else
1704  paramPtr+= net_block[b][2]*net_block[b][4];
1705  }
1706 
1707 
1708  //*
1709  // Output neurons are stored sequentially, from the first to the last.
1710  //*
1711 
1712 
1713  for (int i = 0; i<noutputs; i++) {
1714  paramPtr = nbiases;
1715  if (!outputsToTrain[i])
1716  continue;
1717 
1718  int i_freep = i+ninputs+nhiddens;
1719 
1720  if (neuronbias[i_freep]) {
1721 
1722  //*
1723  // If the neuron has a bias, in order to obtain the index of the
1724  // corresponding bias in vector freep, we iterate over all "previous"
1725  // neurons, counting those who have a bias.
1726  //*
1727 
1728  int ptr = 0;
1729  for (int j = 0; j<i_freep; j++) {
1730  if(neuronbias[j])
1731  ptr++;
1732  }
1733  debug("Adding bias of output %d (w[%d]) in freep[%d]\n",i,wPtr,ptr);
1734  freep[ptr] = w[wPtr++];
1735  }
1736 
1737 
1738 
1739  for (int b = 0; b<net_nblocks; b++) {
1740 
1741  if(!(trainingHiddenBlock[net_block[b][3]][i] && inRange(net_block[b][1], ninputs+nhiddens, noutputs) )) {
1742 // if(! trainingHiddenBlock[net_block[b][3]][i]) {
1743  paramPtr+= net_block[b][2]*net_block[b][4];
1744  continue;
1745  }
1746 
1747  //*
1748  // Iterate over hidden neurons in the current block
1749  //*
1750 
1751  for (int j = 0; j<net_block[b][4]; j++) {
1752  debug("Adding connection %d of output %d (w[%d]) in freep[%d]\n",j,i_freep,wPtr,(i_freep-net_block[b][1])*net_block[b][4] + paramPtr);
1753  freep[(i_freep-net_block[b][1])*net_block[b][4] + paramPtr++] = w[wPtr++];
1754  }
1755  }
1756 
1757  }
1758 
1759  return wPtr;
1760  }
1761 
1762  float Evonet::getWeight(int to, int from) {
1763 
1764  debug("Getting w to %d from %d\n", to,from);
1765  int ptr = 0;
1766  for (int i = 0; i<nneurons; i++) {
1767  ptr += neuronbias[i]==1;
1768  }
1769 
1770  for (int b = 0; b<net_nblocks; b++) {
1771  if (inRange(to, net_block[b][1], net_block[b][2]) && inRange(from, net_block[b][3], net_block[b][4])) {
1772  ptr+= (to-net_block[b][1])*net_block[b][4]+(from-net_block[b][3]);
1773  }
1774  else {
1775  ptr+= net_block[b][2]*net_block[b][4];
1776  }
1777  }
1778 
1779  debug("Returning freep[%d]\n", ptr);
1780  if (ptr >= freeParameters()) {
1781  return 0;
1782  }
1783 
1784  return freep[ptr];
1785  }
1786 
1787  void Evonet::setWeight(int to, int from, float w) {
1788 
1789  int ptr = 0;
1790  for (int i = 0; i<nneurons; i++) {
1791  ptr += neuronbias[i]==1;
1792  }
1793 
1794  for (int b = 0; b<net_nblocks; b++) {
1795  if (inRange(to, net_block[b][1], net_block[b][2]) && inRange(from, net_block[b][3], net_block[b][4])) {
1796  ptr+= (to-net_block[b][1])*net_block[b][4]+(from-net_block[b][3]);
1797  }
1798  else {
1799  ptr+= net_block[b][2]*net_block[b][4];
1800  }
1801  }
1802 
1803  freep[ptr] = w;
1804 
1805 
1806  }
1807 
1808  float Evonet::derivative(int /*n*/, float x) {
1809 
1810  return x*(1-x);
1811 
1812  }
1813 
1814  void Evonet::prepareForTraining(QVector<float> &err_w) {
1815 
1816  nconnections = 0;
1817 
1818  //*
1819  // Initializing some variables in order to make the computation easier:
1820  //
1821  // nconnections: number of weights subject to training
1822  // outputsToTrain: outputsToTrain[i]==1 iff the i-th output is subject to training
1823  // n_outputsToTrain: number of output neurons to train
1824  //
1825  //*
1826 
1827 
1828  outputsToTrain = (char*)calloc(noutputs, sizeof(char));
1829  n_outputsToTrain = 0;
1830 
1831  trainingHiddenBlock = (char**)calloc(nneurons, sizeof(char*));
1832  for (int i = 0; i<nneurons; i++) {
1833  trainingHiddenBlock[i] = (char*) calloc(noutputs, sizeof(char));
1834  }
1835 
1836  for (int b=0; b<net_nblocks; b++)
1837 
1838  if (net_block[b][0]==0 && net_block[b][5]==1) {
1839  //*
1840  // If the current block is a connection block and the block is subject to training
1841  // count the connections of the current block
1842  //*
1843 
1844  nconnections += (net_block[b][2]*net_block[b][4]);
1845 
1846  //*
1847  // Sum the bias for the neurons in the current block
1848  //*
1849 
1850  for (int i = net_block[b][1]; i<net_block[b][1]+net_block[b][2]; i++) {
1851  nconnections += (neuronbias[i] == 1);
1852  }
1853 
1854  if (net_block[b][1] >= ninputs+nhiddens) {
1855  memset(outputsToTrain+net_block[b][1]-ninputs-nhiddens, 1, net_block[b][2]*sizeof(char));
1856  n_outputsToTrain += net_block[b][2];
1857 
1858  for(int j=0;j<net_block[b][4];j++)
1859  memset(&trainingHiddenBlock[net_block[b][3]+j][net_block[b][1]-ninputs-nhiddens], 1, net_block[b][2]*sizeof(char));
1860  }
1861 
1862  }
1863 
1864 #ifdef BPDEBUG
1865  printf("n_outputToTrain: %d\n",n_outputsToTrain);
1866  printf("output to train: ");
1867 
1868  for (int i = 0; i<noutputs; i++) {
1869  printf("%d ",outputsToTrain[i]);
1870  }
1871  printf("\n");
1872 
1873  for (int j = 0; j<nneurons; j++) {
1874  for (int i = 0; i<noutputs; i++) {
1875  printf("%d ",trainingHiddenBlock[j][i]);
1876  }
1877  printf("\n");
1878  }
1879 #endif
1880  debug("nconnections: %d\n", nconnections);
1881 
1882  err_weight_sum = 0;
1883  for (int i = 0; i<err_w.size(); i++) {
1884  err_weights.push_back(err_w[i]*err_w[i]);
1885  err_weight_sum+=err_w[i];
1886  }
1887 
1888  printf("err_weight_sum : %f\n",err_weight_sum);
1889 
1890  }
1891 
1893  free(outputsToTrain);
1894  for(int i=0;i<nneurons;i++)
1895  free(trainingHiddenBlock[i]);
1896  free(trainingHiddenBlock);
1897 
1898  }
1899 
1901  {
1902  return showTInput;
1903  }
1904 
1905  /* BACKPROPAGATION ALGORITHMS */
1907  {
1908  return teachingInput[id];
1909  }
1910 
1912  {
1913  return backproperror;
1914  }
1915 
1916  float Evonet::backPropStep(QVector<float> tInput, double rate)
1917  {
1918  int i, t, b;
1919  float diff, temp;
1920  float *p;
1921 
1922  teachingInput = tInput;
1923 
1924  Eigen::MatrixXf weightMatrix(MAXN,MAXN); //a matrix representation of the weights
1925  float delta[MAXN];
1926  float global_error = 0.0;
1927 
1928  // Compute the weightMatrix
1929  p = freep;
1930 
1931  // Skip the gain
1932  for(i = 0; i < nneurons; i++) {
1933  if (neurongain[i] == 1) {
1934  p++;
1935  }
1936  }
1937  // Skip the biases
1938  for(i = 0; i < nneurons; i++) {
1939  if (neuronbias[i] == 1) {
1940  p++;
1941  }
1942  }
1943  // blocks
1944  for (b=0; b < net_nblocks; b++) {
1945  if (net_block[b][0] == 0) {
1946  for(t = net_block[b][1]; t < net_block[b][1] + net_block[b][2]; t++) {
1947  for(i = net_block[b][3]; i < net_block[b][3] + net_block[b][4]; i++) {
1948  weightMatrix(i,t) = *p;
1949  p++;
1950  }
1951  }
1952  }
1953  }
1954 
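	// The deltas below assume logistic units: for an output o with target t,
	// delta_o = (t - a_o) * a_o * (1 - a_o); for a hidden unit h,
	// delta_h = a_h * (1 - a_h) * sum_o delta_o * w(h->o)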
1955  // First compute the deltas for the output neurons
1956  for (i = ninputs + nhiddens; i < nneurons; i++) {
1957  diff = (tInput[i - (ninputs + nhiddens)] - act[i]);
1958 
1959  delta[i] = diff * act[i] * ((float) 1.0 - act[i]);
1960 
1961  global_error += diff * diff;
1962  }
1963 
1964  // Then compute the deltas for the hidden neurons
1965  for (i = ninputs; i < ninputs + nhiddens; i++) {
1966  temp = (float) 0.0;
1967  for (t = ninputs + nhiddens; t < nneurons; t++) {
1968  temp += delta[t] * (weightMatrix(i,t));
1969  }
1970  delta[i] = ((float) 1.0 - act[i]) * act[i] * temp;
1971  }
1972 
1973  // Lastly, modify the weights
1974  p = freep;
1975 
1976  // Skip the gain
1977  for(i = 0; i < nneurons; i++) {
1978  if (neurongain[i] == 1) {
1979  p++;
1980  }
1981  }
1982 
1983  // Modify the biases
1984  for(i = 0; i < nneurons; i++) {
1985  if (neuronbias[i] == 1) {
1986  float dp_rate = delta[i] * rate;
1987  // modify the weight
1988  *p += (float) 1.0 * dp_rate;
1989  p++;
1990  }
1991  }
1992 
1993  // Update weight blocks
1994  for (b=0; b < net_nblocks; b++) {
1995  if (net_block[b][0] == 0) {
1996  for (t = net_block[b][1]; t < net_block[b][1] + net_block[b][2]; t++) {
1997  for (i = net_block[b][3]; i < net_block[b][3] + net_block[b][4]; i++) {
1998  if (t >= ninputs) {
1999  float dp_rate = delta[t] * rate;
2000  // modify the weight
2001  *p += act[i] * dp_rate;
2002  }
2003  p++;
2004  }
2005  }
2006  }
2007  }
2008 
2009  backproperror = global_error;
2010 
 2011  // If a downloader is associated with the neuronsMonitorUploader, upload the teaching input and the backpropagation error
2012  if (showTInput && (neuronsMonitorUploader.getDownloader() != NULL)) {
2013  // This call can block if GUI is too slow
2014  DatumToUpload<ActivationsToGui> d(neuronsMonitorUploader);
2015 
2016  d->activations = false;
2017 
2018  // Reserving the correct number of elements, for efficiency reasons
2019  d->data.reserve(nneurons);
2020  d->data = tInput;
2021  d->data.append(backproperror);
2022 
2023  // Adding the current step
2024  d->updatesCounter = updatescounter;
2025  }
2026 
2027  return global_error;
2028  }
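
// A minimal usage sketch (not part of the library) of the online training loop
// built on prepareForTraining() and backPropStep(). It assumes a plain
// three-layer net ("net") whose output connection blocks are flagged for
// training; trainingSet, desiredOutput, nPatterns and the 0.1 learning rate are
// placeholders, and the getNoInputs()/getNoOutputs() accessors are assumed to
// be available.
//
//   QVector<float> errorWeights(net->getNoOutputs(), 1.0f);
//   net->prepareForTraining(errorWeights);
//   for (int p = 0; p < nPatterns; p++) {
//       for (int i = 0; i < net->getNoInputs(); i++) {
//           net->setInput(i, trainingSet[p * net->getNoInputs() + i]);
//       }
//       net->updateNet();
//       QVector<float> target = desiredOutput.mid(p * net->getNoOutputs(), net->getNoOutputs());
//       net->backPropStep(target, 0.1);
//   }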
2029 
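// backPropStep2 computes the same deltas as backPropStep above, but it
// accumulates the resulting weight and bias changes into the backpropfreep
// buffer instead of modifying freep in place; the accumulated changes are
// applied to freep in a single batch by the routine further below that adds
// backpropfreep to freep and then clears it.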
2030  float Evonet::backPropStep2(QVector<float> tInput, double rate)
2031  {
2032  int i, t, b;
2033  float diff, temp;
2034  float *p;
2035  float *bp;
2036 
2037  teachingInput = tInput;
2038 
2039  Eigen::MatrixXf weightMatrix(MAXN,MAXN); //a matrix representation of the weights
2040  float delta[MAXN];
2041  float global_error = 0.0;
2042 
2043  // Compute the weightMatrix
2044  p = freep;
2045 
2046  // Skip the gain
2047  for(i = 0; i < nneurons; i++) {
2048  if (neurongain[i] == 1) {
2049  p++;
2050  }
2051  }
2052  // Skip the biases
2053  for(i = 0; i < nneurons; i++) {
2054  if (neuronbias[i] == 1) {
2055  p++;
2056  }
2057  }
2058  // blocks
2059  for (b=0; b < net_nblocks; b++) {
2060  if (net_block[b][0] == 0) {
2061  for(t = net_block[b][1]; t < net_block[b][1] + net_block[b][2]; t++) {
2062  for(i = net_block[b][3]; i < net_block[b][3] + net_block[b][4]; i++) {
2063  weightMatrix(i,t) = *p;
2064  p++;
2065  }
2066  }
2067  }
2068  }
2069 
2070  // First compute the deltas for the output neurons
2071  for (i = ninputs + nhiddens; i < nneurons; i++) {
2072  diff = (tInput[i - (ninputs + nhiddens)] - act[i]);
2073 
2074  delta[i] = diff * act[i] * ((float) 1.0 - act[i]);
2075 
2076  global_error += diff * diff;
2077  }
2078 
2079  // Then compute the deltas for the hidden neurons
2080  for (i = ninputs; i < ninputs + nhiddens; i++) {
2081  temp = (float) 0.0;
2082  for (t = ninputs + nhiddens; t < nneurons; t++) {
2083  temp += delta[t] * (weightMatrix(i,t));
2084  }
2085  delta[i] = ((float) 1.0 - act[i]) * act[i] * temp;
2086  }
2087 
2088  // Lastly, modify the weights
2089  bp = backpropfreep;
2090 
2091  // Skip the gain
2092  for(i = 0; i < nneurons; i++) {
2093  if (neurongain[i] == 1) {
2094  bp++;
2095  }
2096  }
2097 
2098  // Modify the biases
2099  for(i = 0; i < nneurons; i++) {
2100  if (neuronbias[i] == 1) {
2101  float dp_rate = delta[i] * rate;
2102  // modify the weight
2103  *bp += (float) 1.0 * dp_rate;
2104  bp++;
2105  }
2106  }
2107 
2108  // Update weight blocks
2109  for (b=0; b < net_nblocks; b++) {
2110  if (net_block[b][0] == 0) {
2111  for (t = net_block[b][1]; t < net_block[b][1] + net_block[b][2]; t++) {
2112  for (i = net_block[b][3]; i < net_block[b][3] + net_block[b][4]; i++) {
2113  if (t >= ninputs) {
2114  float dp_rate = delta[t] * rate;
2115  // modify the weight
2116  *bp += act[i] * dp_rate;
2117  }
2118  bp++;
2119  }
2120  }
2121  }
2122  }
2123 
2124  backproperror = global_error;
2125 
 2126  // If a downloader is associated with the neuronsMonitorUploader, upload the teaching input and the backpropagation error
2127  if (showTInput && (neuronsMonitorUploader.getDownloader() != NULL)) {
2128  // This call can block if GUI is too slow
2129  DatumToUpload<ActivationsToGui> d(neuronsMonitorUploader);
2130 
2131  d->activations = false;
2132 
2133  // Reserving the correct number of elements, for efficiency reasons
2134  d->data.reserve(nneurons);
2135  d->data = tInput;
2136  d->data.append(backproperror);
2137 
2138  // Adding the current step
2139  d->updatesCounter = updatescounter;
2140  }
2141 
2142  return global_error;
2143  }
2144 
2145  void Evonet::calculateBackPropagationError(QVector<float> tInput)
2146  {
2147  int i;
2148  float diff;
2149  float global_error = 0.0;
2150 
2151  // Calculate the backpropagation error
2152  for (i = ninputs + nhiddens; i < nneurons; i++) {
2153  diff = (tInput[i - (ninputs + nhiddens)] - act[i]);
2154 
2155  global_error += diff * diff;
2156  }
2157 
2158  // Save the backpropagation error
2159  backproperror = global_error;
2160  }
2161 
2163  {
2164  float* p;
2165  int i;
2166  p = backpropfreep;
2167 
2168  for (i = 0; i < nparameters; i++)
2169  {
2170  *p = 0.0f;
2171  p++;
2172  }
2173  }
2174 
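	// The routine below applies the weight changes accumulated in backpropfreep
	// (by backPropStep2) to the actual parameters in freep and clears the
	// accumulator, implementing a simple batch update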
2176  {
2177  float* p;
2178  float* origp;
2179  float dp;
2180  int i;
2181 
2182  p = freep;
2183  origp = backpropfreep;
2184 
2185  for (i = 0; i < nparameters; i++)
2186  {
2187  // Update freep
2188  dp = *origp;
2189  if (dp != 0)
2190  {
2191  //printf("Index: %d, delta: %.2f\n", i, dp);
2192  }
2193  *p += dp;
2194  // Reset backpropfreep vector
2195  *origp = 0.0f;
2196  origp++;
2197  p++;
2198  }
2199  }
2200  /* END BACKPROPAGATION ALGORITHMS SECTION */
2201 
2202  float Evonet::trainLevembergMarquardt(QVector<float> trainingSet, QVector<float> desiredOutput, float maxError) {
2203 
2204  training = true;
2205 
2206  int pattern, b;
2207 // int lastBlock;
2208  int cycles,i, j;
2209 
2210  double lambda=0.001;
2211  double currentError = 0, previousError= 0;
2212  double delta;
2213 
2214  if (nconnections == 0) {
2215  training = false;
2216  printf("nconnections: 0\nnothing to train\n");
2217  return 0;
2218  }
2219 
2220  int nbiases = 0;
2221  for (i = 0; i<nneurons; i++) {
2222  nbiases += neuronbias[i]==1;
2223  }
2224 
2225  int size = trainingSet.size() / ninputs;
2226  debug("npatters: %d\n", size);
2227 
2228  Eigen::VectorXf err(size*n_outputsToTrain);
2229  Eigen::MatrixXf jacobian(size*n_outputsToTrain, nconnections );
2230 // Eigen::MatrixXf inv_error_weight(size*n_outputsToTrain,size*n_outputsToTrain);
2231 // Eigen::MatrixXf jt_we(size*n_outputsToTrain, nconnections );
2232  Eigen::MatrixXf jj(nconnections,nconnections);
2233 
2234  Eigen::VectorXf new_weights(nconnections);
2235  Eigen::VectorXf old_weights(nconnections);
2236  Eigen::VectorXf ww_err(nconnections);
2237 
2238 
2239  previousError = computeMeanSquaredError(trainingSet, desiredOutput);
2240  printf("Initial error: %f\n",previousError);
2241 
2242 /* if (previousError < maxError) {
2243  printf("Error already below threshold - Nothing to do.\n");
2244  return previousError;
2245  }
2246  */
2247 
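	// Levenberg-Marquardt iteration: at every cycle the Jacobian of the weighted
	// output errors with respect to the trainable parameters is built row by row
	// (one row per pattern and trained output, one column per trainable weight or
	// bias) and the damped update (J^T J + lambda I) dw = J^T e is solved below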
2248 // int end = (maxIterations > 0 ? maxIterations : INFINITY);
2250  for (cycles = 0; cycles<end; cycles++) {
2251 
2252  jacobian.setZero();
2253 
2254  extractWeightsFromNet(old_weights);
2255  debug("weights extracted\n");
2256 
2257  //*
2258  // Iterating over patterns. This can be easily parallelized using OpenMP
2259  //*
2260 
2261  for (pattern=0; pattern<size; pattern++) {
2262 
2263  debug("\n\n------------\n\n");
2264  debug("\tpattern: %d\n", pattern);
2265 
2266  //*
2267  // Forward computation
2268  //
2269  // Beware in case of parallel computation: this is a critical section
2270  //*
2271 
2272  for (i = 0; i<ninputs; i++) {
2273  setInput(i, trainingSet[pattern*ninputs + i]);
2274  }
2275 // debug("Before update (%s):\n",__PRETTY_FUNCTION__);
2276 // printAct();
2277  updateNet();
2278 
2279 // debug("after update:\n");
2280 // printAct();
2281 
2282 
2283  //*
2284  // Backward computation
2285  //*
2286 
2287  for(int m = noutputs-1; m>=0; m--) {
2288  if (!outputsToTrain[m])
2289  continue;
2290 
2291  int m_freep = m+ninputs+nhiddens;
2292 
2293  int col_idx = nconnections - 1;
2294  int row_idx = n_outputsToTrain*pattern-1;
2295 
2296  for (i = 0; i<=m; i++) {
2297  row_idx+= outputsToTrain[i];
2298  }
2299 
2300  //*
2301  // Computing error and jacobian relative to the output layer
2302  //*
2303 
2304  err[row_idx] = (desiredOutput[row_idx] - act[m_freep])*err_weights[m];
2305  delta = -derivative(m_freep, act[m_freep])*err_weights[m];
2306 
2307  //*
2308  // Iterating over output neurons
2309  //*
2310 
2311  for(i = noutputs-1; i>=0; i--) {
2312 
2313  if (!outputsToTrain[i])
2314  continue;
2315  //*
 2316  // Iterating over net blocks in order to get the neurons connected to the i-th output neuron
2317  //*
2318 
2319  for (b=net_nblocks-1; b>=0; b--) {
2320 
2321  //*
2322  // Check if current block is connected to i-th output
2323  //*
2324 // if (net_block[b][5]!=1 && inRange(i+ninputs+nhiddens,net_block[b][3],net_block[b][4]) ) {
2325  if (trainingHiddenBlock[net_block[b][3]][m] && net_block[b][5]==1) {
2326 
2327  for (j=net_block[b][3]+net_block[b][4] -1; j>=net_block[b][3]; j--) {
2328  if (i==m) {
2329  jacobian(row_idx, col_idx--) = delta *act[j];
2330  debug("\t\tcol_idx: %d\n", col_idx+1);
2331  debug("\t\tjacobian(%d,%d) = %f * %f = %f\n", row_idx,col_idx+1,delta,act[j],delta*act[j]);
2332  }
2333  else {
2334  jacobian(row_idx, col_idx--) = 0;
2335  debug("\t\tcol_idx: %d\n", col_idx+1);
2336  debug("\t\tjacobian(%d,%d) = 0\n", row_idx,col_idx+1);
2337  }
2338 
2339 
2340  } //end loop over j
2341 
2342  } // end if
2343 
2344  } //end loop over b
2345 
2346  //*
2347  // Consider the derivative of the error with respect to the bias of the output neuron
2348  //*
2349  if (neuronbias[i+ninputs+nhiddens]) {
2350  debug("\t\tjacobian(%d,%d) = %f\n", row_idx,col_idx,delta);
2351  jacobian(row_idx, col_idx--) = (i==m ? delta : 0);
2352  }
2353 
2354  } // end loop over i
2355 
2356  //*
2357  // Backpropagating the error over hidden neurons.
2358  // This follows the order of the blocks, from the last to the first.
2359  //
2360  // NOTE: this code assumes the net is a 3 layer net (input-hidden-output).
2361  // It will not work in case of nets with more than 1 hidden layer.
2362  //*
2363  debug("\nBackward computation: hidden layer\n");
2364 
2365 
2366  for (b=net_nblocks-1; b>=0; b--) {
2367 
2368  //*
 2369  // Skip blocks that are not hidden connection blocks subject to training and connected to the current (m-th) output
2370  //*
2371 
2372 // if ( !( net_block[b][0]==0 && net_block[b][5]==1 && isHidden(net_block[b][1]) && inRange(m_freep,net_block[b][3],net_block[b][4]) ) )
2373  debug("\ttrainingHiddenBlock[%d][%d]: %d\n", net_block[b][1],m,trainingHiddenBlock[net_block[b][1]][m]);
2374  if (net_block[b][0]!=0 || net_block[b][5] !=1 || ! trainingHiddenBlock[net_block[b][1]][m] )
2375  continue;
2376 
2377  //*
 2378  // Iterate over the hidden neurons in the current block and fill in the corresponding Jacobian entries
2379  //*
2380 
2381  for(j = net_block[b][1]+net_block[b][2]-1; j>= net_block[b][1]; j--) {
2382 
2383  double delta_h = delta* getWeight(m_freep, j) * derivative(j, act[j]);
2384 
2385  for (int k = net_block[b][3] + net_block[b][4]-1; k>= net_block[b][3]; k--) {
2386  jacobian(row_idx, col_idx--) = delta_h * act[k];
2387  debug("\t\tjacobian(%d,%d) = %f * %f = %f\n", row_idx,col_idx+1,delta_h,act[k],delta*act[k]);
2388  }
2389 
2390  if (neuronbias[j]) {
2391  debug("\t\tjacobian(%d,%d) = %f\n", row_idx,col_idx,delta_h);
2392  jacobian(row_idx, col_idx--) = delta_h;
2393  }
2394 
2395 
2396  }
2397 
2398 
2399 
2400  } //end loop over b
2401 
2402  } // end loop over m (output neurons)
2403 
2404  } //end pattern
2405 
2406 
2407 
2408  debug("\tAll rows analyzed\n");
2409 
2410 
2411 #ifdef BPDEBUG
2412 // std::cout<<"jacobian:\n"<<jacobian;
2413 // std::cout<<"\n\ndet(j^Tj) :\n"<<(jacobian.transpose()*jacobian + lambda*Eigen::MatrixXf::Identity(nconnections,nconnections) ).determinant() << "\n";
2414 
2415 // std::cout<<"\n\nerror: " << err.transpose() << "\n";
2416 #endif
2417 
2418  //*
2419  // new_weights = old_weights - (J^T J + lambda I)^-1 J^T e ;
2420  //*
2421 
2422  if (lambda > 100000000 || lambda < 0.000001) {
2423  lambda = 1;
2424  }
2425 
2426 
2427 // jt_we = jacobian.transpose();//*inv_error_weight;
2428  ww_err = jacobian.transpose()*err;
2429  jj = jacobian.transpose()*jacobian;
2430 // printf("det: %lf\n",jj.determinant());
2431 
2432 /* if (std::isnan((jacobian.transpose()*jacobian).determinant()) ) {
2433 
2434  printf("nan determinant : %f\n", (jacobian.transpose()*jacobian + lambda*Eigen::MatrixXf::Identity(nconnections,nconnections) ).determinant());
2435 
2436 
2437 
2438  FILE *fp = fopen("/Users/Manlio/Desktop/matrix.txt", "w");
2439 
2440  Eigen::MatrixXf pj(nconnections,nconnections);
2441 
2442  pj = jacobian.transpose()*jacobian;
2443 
2444 
2445  for(int i = 0;i<nconnections; i++)
2446  for(int j=0;j<nconnections; j++)
2447  if (!(std::isnormal(pj(i,j)) || pj(i,j)==0 ) ) {
2448  printf("(%d,%d) : %f\n",i,j,pj(i,j));
2449  }
2450 
2451 
2452 
2453  for (int a =0; a<nconnections; a++) {
2454  for (int b = 0; b<nconnections; b++) {
2455  fprintf(fp, "%f ", pj(a,b));
2456  }
2457  fprintf(fp, "\n");
2458  }
2459 
2460  fclose(fp);
2461  exit(0);
2462 
2463  }
2464 */
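	// Damping schedule: try the step with the current lambda; if the error does
	// not decrease, restore the old weights and retry with lambda*10 (at most 6
	// times), otherwise accept the step, divide lambda by 10 and start a new cycle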
2465  for (int retry = 0; retry<6; retry++, lambda*=10) {
2466 
2467  debug("\tlambda: %f\n", lambda);
2468 
2469 // new_weights = old_weights - (jacobian.transpose()*jacobian + lambda*Eigen::MatrixXf::Identity(nconnections,nconnections) ).inverse()*jacobian.transpose()*err;
2470  new_weights = old_weights - (jj + lambda*Eigen::MatrixXf::Identity(nconnections,nconnections) ).ldlt().solve(ww_err);
2471 
2472  // printf("\tdet(j^Tj) : %f -- norm(j^T e) : %f\n",(jacobian.transpose()*jacobian).determinant(), (jacobian.transpose()*err).norm() );
2473 // printf("norm wb2: %f\n",new_weights.norm());
2474 // exit(0);
2475 
2476 // std::cout<<"\n\nnew_weights: " << new_weights.transpose() << "\n";
2477 
2478 
2479  importWeightsFromVector(new_weights);
2480 
2481  currentError = computeMeanSquaredError(trainingSet, desiredOutput);
2482 
2483  printf("iteration: %d err: %f lambda: %f\n",cycles,currentError,lambda);
2484 
2485  debug("currentError: %f\n",currentError);
2486 
 2487  if (currentError <= maxError) {
	training = false; // training has finished: reset the flag before returning
 2488  return currentError;
	}
 2489  if ((new_weights-old_weights).norm() < 0.0001) {
 2490  printf("Minimum gradient reached\n");
	training = false; // training has finished: reset the flag before returning
 2491  return currentError;
 2492  }
2493 
2494  if (currentError > previousError) {
2495  importWeightsFromVector(old_weights);
2496  }
2497  else {
2498  previousError = currentError;
2499  lambda/=10;
2500  break;
2501  }
2502 
2503  }
2504 // exit(1);
2505  }
2506 
2507  training = false;
2508  return currentError;
2509 
2510  }
2511 
2512  float Evonet::trainLevembergMarquardtThroughTime(QVector<float> trainingSet, QVector<float> desiredOutput, int time, float maxError) {
2513 
2514  training = true;
2515 
2516  int pattern, b;
2517  // int lastBlock;
2518  int cycles,i, j;
2519 
2520  double lambda=0.001;
2521  double currentError = 0, previousError= 0;
2522  double delta;
2523 
2524  if (nconnections == 0) {
2525  training = false;
2526  printf("nconnections: 0\nnothing to train\n");
2527  return 0;
2528  }
2529 
2530  int nbiases = 0;
2531  for (i = 0; i<nneurons; i++) {
2532  nbiases += neuronbias[i]==1;
2533  }
2534 
2535  int size = trainingSet.size() / ninputs;
2536  debug("npatters: %d\n", size);
2537 
2538  Eigen::VectorXf oldActivations(time*nhiddens);
2539  oldActivations.setZero();
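	// "Through time" variant: for recurrent connections whose source is a hidden
	// neuron, the Jacobian uses the hidden activation of the previous pattern
	// (oldActivations, updated at the end of every pattern), i.e. a one-step
	// approximation of backpropagation through time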
2540 
2541  Eigen::VectorXf err(size*n_outputsToTrain);
2542  Eigen::MatrixXf jacobian(size*n_outputsToTrain, nconnections );
2543 // Eigen::MatrixXf inv_error_weight(size*n_outputsToTrain,size*n_outputsToTrain);
2544 // Eigen::MatrixXf jt_we(size*n_outputsToTrain, nconnections );
2545  Eigen::MatrixXf jj(nconnections,nconnections);
2546 
2547  Eigen::VectorXf new_weights(nconnections);
2548  Eigen::VectorXf old_weights(nconnections);
2549  Eigen::VectorXf ww_err(nconnections);
2550 
2551 
2552 
2553  previousError = computeMeanSquaredError(trainingSet, desiredOutput);
2554  printf("Initial error: %f\n",previousError);
2555 
2556  /* if (previousError < maxError) {
2557  printf("Error already below threshold - Nothing to do.\n");
2558  return previousError;
2559  }
2560  */
2561 
2562 // int end = (maxIterations > 0 ? maxIterations : INFINITY);
2564  for (cycles = 0; cycles<end; cycles++) {
2565 
2566  jacobian.setZero();
2567 
2568  extractWeightsFromNet(old_weights);
2569  debug("weights extracted\n");
2570 
2571  //*
2572  // Iterating over patterns. This can be easily parallelized using OpenMP
2573  //*
2574 
2575  for (pattern=0; pattern<size; pattern++) {
2576 
2577  debug("\n\n------------\n\n");
2578  debug("\tpattern: %d\n", pattern);
2579 
2580  //*
2581  // Forward computation
2582  //
2583  // Beware in case of parallel computation: this is a critical section
2584  //*
2585 
2586  for (i = 0; i<ninputs; i++) {
2587  setInput(i, trainingSet[pattern*ninputs + i]);
2588  }
2589  // debug("Before update (%s):\n",__PRETTY_FUNCTION__);
2590  // printAct();
2591  updateNet();
2592 
2593  // debug("after update:\n");
2594  // printAct();
2595 
2596 
2597  //*
2598  // Backward computation
2599  //*
2600 
2601  for(int m = noutputs-1; m>=0; m--) {
2602 
2603  debug("m: %d\n", m);
2604  if (!outputsToTrain[m])
2605  continue;
2606 
2607  int m_freep = m+ninputs+nhiddens;
2608 
2609  int col_idx = nconnections - 1;
2610  int row_idx = n_outputsToTrain*pattern-1;
2611 
2612  debug("row_idx: %d\n", row_idx);
2613  for (i = 0; i<=m; i++) {
2614  row_idx+= outputsToTrain[i];
2615  }
2616  debug("row_idx: %d\n", row_idx);
2617  //*
2618  // Computing error and jacobian relative to the output layer
2619  //*
2620 
2621  err[row_idx] = (desiredOutput[row_idx] - act[m_freep])*err_weights[m];
2622  delta = -derivative(m_freep, act[m_freep])*err_weights[m];
2623 
2624  //*
2625  // Iterating over output neurons
2626  //*
2627 
2628  for(i = noutputs-1; i>=0; i--) {
2629 
2630  debug("\toutput: %d\n", i);
2631  if (!outputsToTrain[i])
2632  continue;
2633  //*
 2634  // Iterating over net blocks in order to get the neurons connected to the i-th output neuron
2635  //*
2636 
2637  for (b=net_nblocks-1; b>=0; b--) {
2638 
2639  //*
2640  // Check if current block is connected to i-th output
2641  //*
2642  // if (net_block[b][5]!=1 && inRange(i+ninputs+nhiddens,net_block[b][3],net_block[b][4]) ) {
2643  if (trainingHiddenBlock[net_block[b][3]][m]) {
2644 
2645  for (j=net_block[b][3]+net_block[b][4] -1; j>=net_block[b][3]; j--) {
2646  if (i==m) {
2647  jacobian(row_idx, col_idx--) = delta *act[j];
2648  debug("\t\tcol_idx: %d\n", col_idx+1);
2649  debug("\t\tjacobian(%d,%d) = %f * %f = %f\n", row_idx,col_idx+1,delta,act[j],delta*act[j]);
2650  }
2651  else {
2652  jacobian(row_idx, col_idx--) = 0;
2653  debug("\t\tcol_idx: %d\n", col_idx+1);
2654  debug("\t\tjacobian(%d,%d) = 0\n", row_idx,col_idx+1);
2655  }
2656 
2657 
2658  } //end loop over j
2659 
2660  } // end if
2661 
2662  } //end loop over b
2663 
2664  //*
2665  // Consider the derivative of the error with respect to the bias of the output neuron
2666  //*
2667  if (neuronbias[i+ninputs+nhiddens]) {
2668  debug("\t\tjacobian(%d,%d) = %f\n", row_idx,col_idx,(i==m ? delta : 0));
2669  jacobian(row_idx, col_idx--) = (i==m ? delta : 0);
2670  }
2671 
2672  } // end loop over i
2673 
2674  //*
2675  // Backpropagating the error over hidden neurons.
2676  // This follows the order of the blocks, from the last to the first.
2677  //
2678  // NOTE: this code assumes the net is a 3 layer net (input-hidden-output).
2679  // It will not work in case of nets with more than 1 hidden layer.
2680  //*
2681  debug("\nBackward computation: hidden layer\n");
2682 
2683 
2684  for (b=net_nblocks-1; b>=0; b--) {
2685 
2686  //*
 2687  // Skip blocks that are not hidden connection blocks subject to training and connected to the current (m-th) output
2688  //*
2689 
2690  // if ( !( net_block[b][0]==0 && net_block[b][5]==1 && isHidden(net_block[b][1]) && inRange(m_freep,net_block[b][3],net_block[b][4]) ) )
2691  debug("\ttrainingHiddenBlock[%d][%d]: %d\n", net_block[b][1],m,trainingHiddenBlock[net_block[b][1]][m]);
2692  if (net_block[b][0]!=0 || ! trainingHiddenBlock[net_block[b][1]][m] )
2693  continue;
2694 
2695  //*
 2696  // Iterate over the hidden neurons in the current block and fill in the corresponding Jacobian entries
2697  //*
2698 #if defined(__GNUC__) && defined(DEVELOPER_WARNINGS)
2699  #warning The trainLevembergMarquardtThroughTime method requires that all the connections to a particular hidden block are in the same net_block.
2700 #endif
2701 
2702  for(j = net_block[b][1]+net_block[b][2]-1; j>= net_block[b][1]; j--) {
2703 
2704  double delta_h = delta* getWeight(m_freep, j) * derivative(j, act[j]);
2705 
2706  for (int k = net_block[b][3] + net_block[b][4]-1; k>= net_block[b][3]; k--) {
2707 
2708  jacobian(row_idx, col_idx--) = delta_h * (isHidden(k) ? oldActivations[k-ninputs] : act[k]);
2709  debug("\t\tjacobian(%d,%d) = %f * %f = %f\n", row_idx,col_idx+1,delta_h,act[k],delta*act[k]);
2710  }
2711 
2712  if (neuronbias[j]) {
2713  debug("\t\tjacobian(%d,%d) = %f\n", row_idx,col_idx,delta_h);
2714  jacobian(row_idx, col_idx--) = delta_h;
2715  }
2716 
2717 
2718  }
2719 
2720 
2721 
2722  } //end loop over b
2723 
2724  } // end loop over m (output neurons)
2725 
2726 
2727  //*
2728  // Updating the old activations
2729  //*
2730 
2731  for (int i = 0; i<nhiddens; i++) {
2732  oldActivations[i] = act[i+ninputs];
2733  }
2734 
2735 
2736 
2737 
2738  } //end pattern
2739 
2740 
2741 
2742  debug("\tAll rows analyzed\n");
2743 
2744 
2745 #ifdef BPDEBUG
2746  // std::cout<<"jacobian:\n"<<jacobian;
2747  // std::cout<<"\n\ndet(j^Tj) :\n"<<(jacobian.transpose()*jacobian + lambda*Eigen::MatrixXf::Identity(nconnections,nconnections) ).determinant() << "\n";
2748 
2749  // std::cout<<"\n\nerror: " << err.transpose() << "\n";
2750 #endif
2751 
2752  //*
2753  // new_weights = old_weights - (J^T J + lambda I)^-1 J^T e ;
2754  //*
2755 
2756  if (lambda > 100000000 || lambda < 0.000001) {
2757  lambda = 1;
2758  }
2759 
2760 
2761  // jt_we = jacobian.transpose();//*inv_error_weight;
2762  ww_err = jacobian.transpose()*err;
2763  jj = jacobian.transpose()*jacobian;
2764  // printf("det: %lf\n",jj.determinant());
2765 
2766  /* if (std::isnan((jacobian.transpose()*jacobian).determinant()) ) {
2767 
2768  printf("nan determinant : %f\n", (jacobian.transpose()*jacobian + lambda*Eigen::MatrixXf::Identity(nconnections,nconnections) ).determinant());
2769 
2770 
2771 
2772  FILE *fp = fopen("/Users/Manlio/Desktop/matrix.txt", "w");
2773 
2774  Eigen::MatrixXf pj(nconnections,nconnections);
2775 
2776  pj = jacobian.transpose()*jacobian;
2777 
2778 
2779  for(int i = 0;i<nconnections; i++)
2780  for(int j=0;j<nconnections; j++)
2781  if (!(std::isnormal(pj(i,j)) || pj(i,j)==0 ) ) {
2782  printf("(%d,%d) : %f\n",i,j,pj(i,j));
2783  }
2784 
2785 
2786 
2787  for (int a =0; a<nconnections; a++) {
2788  for (int b = 0; b<nconnections; b++) {
2789  fprintf(fp, "%f ", pj(a,b));
2790  }
2791  fprintf(fp, "\n");
2792  }
2793 
2794  fclose(fp);
2795  exit(0);
2796 
2797  }
2798  */
2799  for (int retry = 0; retry<6; retry++, lambda*=10) {
2800 
2801  debug("\tlambda: %f\n", lambda);
2802 
2803  // new_weights = old_weights - (jacobian.transpose()*jacobian + lambda*Eigen::MatrixXf::Identity(nconnections,nconnections) ).inverse()*jacobian.transpose()*err;
2804  new_weights = old_weights - (jj + lambda*Eigen::MatrixXf::Identity(nconnections,nconnections) ).ldlt().solve(ww_err);
2805 
2806  // printf("\tdet(j^Tj) : %f -- norm(j^T e) : %f\n",(jacobian.transpose()*jacobian).determinant(), (jacobian.transpose()*err).norm() );
2807  // printf("norm wb2: %f\n",new_weights.norm());
2808  // exit(0);
2809 
2810  // std::cout<<"\n\nnew_weights: " << new_weights.transpose() << "\n";
2811 
2812 
2813  importWeightsFromVector(new_weights);
2814 
2815  currentError = computeMeanSquaredError(trainingSet, desiredOutput);
2816 
2817  printf("iteration: %d err: %f lambda: %f\n",cycles,currentError,lambda);
2818 
2819  debug("currentError: %f\n",currentError);
2820 
 2821  if (currentError <= maxError) {
	training = false; // training has finished: reset the flag before returning
 2822  return currentError;
	}
 2823  if ((new_weights-old_weights).norm() < 0.0001) {
 2824  printf("Minimum gradient reached\n");
	training = false; // training has finished: reset the flag before returning
 2825  return currentError;
 2826  }
2827 
2828  if (currentError > previousError) {
2829  importWeightsFromVector(old_weights);
2830  }
2831  else {
2832  previousError = currentError;
2833  lambda/=10;
2834  break;
2835  }
2836 
2837  }
2838  // exit(1);
2839  }
2840 
2841  training = false;
2842  return currentError;
2843 
2844  }
2845 
2846  int Evonet::importWeightsFromMATLABFile(char *path) {
2847 
2848  //*
2849  // NOTE: This code has been written just to work. It assumes the net has
2850  // only two blocks to train: one connecting inputs with hidden neurons
2851  // and the other one connecting hidden with outputs.
2852  //*
2853 
2854  FILE *fp = fopen(path, "r");
2855 
2856  if (!fp)
2857  return -1 ;
2858 
2859  int biasptr = 0;
2860  int wptr = 0;
2861 
2862  for (int i = 0; i<nneurons; i++) {
2863  wptr += (neuronbias[i]==1);
2864  }
2865 
2866 
2867  int b;
2868 
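	// For every block flagged for training, the file is expected to contain the
	// block's weights in column-major order (all target neurons for the first
	// source neuron, then for the second source, and so on), followed by the
	// biases of the block's target neurons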
2869  for (b = 0; b<net_nblocks; b++) {
2870  if (net_block[b][5]==1) {
2871  for (int i = 0; i<net_block[b][4]; i++) {
2872  for (int j = 0; j<net_block[b][2]; j++) {
2873  fscanf(fp, "%f", &freep[ wptr+i+j*net_block[b][4] ]);
2874 // printf("setting weight to %d from %d in freep[%d] (%f)\n",j+net_block[b][1],i+net_block[b][3],wptr+i+j*net_block[b][4],freep[wptr+i+j*net_block[b][4]]);
2875  }
2876  }
2877 
2878  wptr+=net_block[b][2]*net_block[b][4];
2879 
2880  biasptr=0;
2881 
2882  for (int j=0; j<net_block[b][1]; j++) {
2883  if (neuronbias[j]) {
2884  biasptr++;
2885  }
2886  }
2887 
2888  for (int i =0; i<net_block[b][2]; i++) {
2889  fscanf(fp, "%f", &freep[biasptr++]);
2890 // printf("setting bias of %d in freep[%d] (%f)\n",i+net_block[b][1],biasptr-1,freep[biasptr-1]);
2891  }
2892  }
2893  else if (net_block[b][0]==0) {
2894  wptr+= net_block[b][2]*net_block[b][4];
2895 /* for (int i = net_block[b][3]; i<net_block[b][3]+ net_block[b][4]; i++) {
2896  if (neuronbias[i]) {
2897  biasptr++;
2898  }
2899  }
2900 */
2901  }
2902  }
2903 
2904  fclose(fp);
2905 
2906  return 0;
2907  }
2908 
2909  int Evonet::exportWeightsToMATLABFile(char *path) {
2910 
2911  //*
2912  // NOTE: This code has been written just to work. It assumes the net has
2913  // only two blocks to train: one connecting inputs with hidden neurons
2914  // and the other one connecting hidden with outputs.
2915  //*
2916 
2917  FILE *fp = fopen(path, "w");
2918 
2919  if (!fp)
2920  return -1 ;
2921 
2922  int biasptr = 0;
2923  int wptr = 0;
2924 
2925  for (int i = 0; i<nneurons; i++){
2926  wptr += (neuronbias[i]==1);
2927  }
2928 
2929 
2930  int b;
2931 
2932  for (b = 0; b<net_nblocks; b++) {
2933  if (net_block[b][5]==1) {
2934  for (int i = 0; i<net_block[b][4]; i++) {
2935  for (int j = 0; j<net_block[b][2]; j++) {
2936  fprintf(fp, "%f\n", freep[ wptr+i+j*net_block[b][4] ]);
2937 // printf("setting weight to %d from %d in freep[%d] (%f)\n",j+net_block[b][1],i+net_block[b][3],wptr+i+j*net_block[b][4],freep[wptr+i+j*net_block[b][4]]);
2938  }
2939  }
2940 
2941  wptr+=net_block[b][2]*net_block[b][4];
2942 
2943  biasptr=0;
2944 
2945  for (int j=0; j<net_block[b][1]; j++) {
2946  if (neuronbias[j]) {
2947  biasptr++;
2948  }
2949  }
2950 
2951  for (int i =0; i<net_block[b][2]; i++) {
2952  fprintf(fp, "%f\n", freep[biasptr++]);
2953 // printf("setting bias of %d in freep[%d] (%f)\n",i+net_block[b][1],biasptr-1,freep[biasptr-1]);
2954  }
2955  }
2956  else if (net_block[b][0]==0) {
2957  wptr+= net_block[b][2]*net_block[b][4];
2958  /* for (int i = net_block[b][3]; i<net_block[b][3]+ net_block[b][4]; i++) {
2959  if (neuronbias[i]) {
2960  biasptr++;
2961  }
2962  }
2963  */
2964  }
2965  }
2966 
2967  fclose(fp);
2968 
2969  return 0;
2970  }
2971 
 2973  // Sets the weights of the reflex
 2974  //
 2975  // It relies on some assumptions about the structure of the network. Not general!
 2976  //
2977 
2978  int p = 0;
2979  for (int i = 0; i<nneurons; i++) {
2980  p += neuronbias[i]==1;
2981  }
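	// p now points just past the biases in freep: hard-code the first four connection weights of the reflex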
2982 
2983  freep[p++] = 0;
2984  freep[p++] = 5;
2985  freep[p++] = -5;
2986  freep[p] = 0;
2987 
2988 // printf("%s: p: %d\n",__PRETTY_FUNCTION__,p);
2989 // freep[14] = 0;
2990 // freep[15] = 0;
2991 
2992  }
2993 
2994 } // end namespace farsa
2995 
2996 
 2997 // All the stuff below is to restore the warning state on Windows
2998 #if defined(_MSC_VER)
2999  #pragma warning(pop)
3000 #endif
3001 
3002 
3003 
3004 /*
3005  for (i = net_block[b][3]; i<net_block[b][3]+net_block[b][4]; i++) {
3006  err[i] = 0;
3007 
3008  for (j = net_block[b][1]; j<net_block[b][1]+net_block[b][2]; j++) {
3009  err[i] += freep[ngains+nbias+]
3010  }
3011  }
3012  for (i = net_block[b][3]; i < net_block[b][3] + net_block[b][4]; i++) {
3013  if (t >= ninputs) {
3014  float dp_rate = delta[t] * rate;
3015  // calculate the delta for the lower neuron
3016  delta[i] += delta[t] * *p;
3017  // modify the weight
3018  *p += act[i] * dp_rate;
3019  }
3020  p++;
3021  }
3022 
3023  */