sensors.cpp
1 /********************************************************************************
2  * FARSA Experiments Library *
3  * Copyright (C) 2007-2012 *
4  * Gianluca Massera <emmegian@yahoo.it> *
5  * Stefano Nolfi <stefano.nolfi@istc.cnr.it> *
6  * Tomassino Ferrauto <tomassino.ferrauto@istc.cnr.it> *
7  * Onofrio Gigliotta <onofrio.gigliotta@istc.cnr.it> *
8  * *
9  * This program is free software; you can redistribute it and/or modify *
10  * it under the terms of the GNU General Public License as published by *
11  * the Free Software Foundation; either version 2 of the License, or *
12  * (at your option) any later version. *
13  * *
14  * This program is distributed in the hope that it will be useful, *
15  * but WITHOUT ANY WARRANTY; without even the implied warranty of *
16  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
17  * GNU General Public License for more details. *
18  * *
19  * You should have received a copy of the GNU General Public License *
20  * along with this program; if not, write to the Free Software *
21  * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA *
22  ********************************************************************************/
23 
#include "sensors.h"
#include "configurationhelper.h"
#include "motorcontrollers.h"
#include "logger.h"
#include "graphicalwobject.h"
#include "arena.h"
#include <QStringList>
#include <QList>
#include <QLinkedList>
#include <QtAlgorithms>
#include <cmath>
#include <limits>
#include <list>
36 
37 namespace farsa {
38 
39 //ObjectPositionSensor : begin implementation
// it returns the absolute coordinate of an object into the world
// NOTE(review): the constructor declaration line is missing from this garbled
// source; what follows is the tail of the initializer list (delegation to the
// Sensor base) and the body of the ObjectPositionSensor constructor.
 Sensor(params, prefix) {
	// Name of the resource holding the neural-network iterator (default "neuronsIterator")
	neuronsIteratorResource = ConfigurationHelper::getString(params, prefix + "neuronsIterator", "neuronsIterator");
	// Name of the resource holding the object whose position we report
	objectName = ConfigurationHelper::getString( params, prefix+"object", "object" );
	// Optional bounding box: when both corners are given as 3D points, the
	// position is linearly mapped into [0,1] in update()
	QVector<double> vec1 = ConfigurationHelper::getVector( params, prefix+"bbMin" );
	QVector<double> vec2 = ConfigurationHelper::getVector( params, prefix+"bbMax" );
	if ( vec1.size() == 3 && vec2.size() == 3 ) {
		linearize = true;
		bbMin = wVector( vec1[0], vec1[1], vec1[2] );
		bbMax = wVector( vec2[0], vec2[1], vec2[2] );
	} else {
		linearize = false;
		// Warn only if the user supplied a partial/ill-formed bounding box
		if ( ! (vec1.isEmpty() && vec2.isEmpty()) ) {
			Logger::warning( QString("ObjectPositionSensor %1 - bbMin and/or bbMax parameters are not well specified; they will be ignored").arg(name()) );
		}
	}

	// Declaring the resources that are needed here
	usableResources( QStringList() << objectName << neuronsIteratorResource );
}
61 
63  // nothing to do
64 }
65 
66 void ObjectPositionSensor::describe( QString type ) {
67  Sensor::describe( type );
68  Descriptor d = addTypeDescription( type, "Sensor for reading the three absolute coordinate (position into the worlf frame) of an object" );
69  d.describeString("neuronsIterator").def("neuronsIterator").help("the name of the resource associated with the neural network iterator (default is \"neuronsIterator\")");
70  d.describeString( "object" ).def( "object" ).props( IsMandatory ).help( "The name of the resource associated with the object to track with this sensor" );
71  d.describeReal( "bbMin" ).props( IsList ).help( "The minimum 3D point used for linearize the object position into [0,1]" );
72  d.describeReal( "bbMax" ).props( IsList ).help( "The maximum 3D point used for linearize the object position into [0,1]" );
73 }
74 
 // Checking all resources we need exist
 // NOTE(review): the update() declaration line and (apparently) the
 // resource-existence check call are missing from this garbled source;
 // this is the body of ObjectPositionSensor::update().

 // Acquiring the lock to get resources
 ResourcesLocker locker( this );

 // Reading the tracked object's absolute position (world frame)
 WObject* object = getResource<WObject>( objectName );
 wVector pos = object->matrix().w_pos;
 // Writing one value per axis (x, y, z) into this sensor's three neurons
 NeuronsIterator* evonetIt = getResource<NeuronsIterator>( neuronsIteratorResource );
 evonetIt->setCurrentBlock( name() );
 for( int i=0; i<3; i++, evonetIt->nextNeuron() ) {
 if ( linearize ) {
 // linearize into [0,1] using the bounding box read by the constructor
 evonetIt->setInput( linearMap( pos[i], bbMin[i], bbMax[i], 0, 1 ) );
 } else {
 // raw world coordinate, no rescaling
 evonetIt->setInput( pos[i] );
 }
 }
}
95 
97  return 3;
98 }
99 
void ObjectPositionSensor::resourceChanged(QString resourceName, ResourceChangeType changeType) {
	// Called whenever one of our declared resources is created/modified/deleted
	if (changeType == Deleted) {
		// NOTE(review): a statement appears to be missing here in this garbled
		// source (original line 102) — possibly a needed-resources reset; confirm
		// against the original file.
		return;
	}

	if (resourceName == objectName) {
		// Nothing to do here, we get the object with getResource() in update()
	} else if (resourceName == neuronsIteratorResource) {
		// Labelling the three neurons of our block ("obj0".."obj2") and setting
		// their graphing range for the UI
		NeuronsIterator* evonetIt = getResource<NeuronsIterator>();
		evonetIt->setCurrentBlock( name() );
		for( int i=0; i<3; i++, evonetIt->nextNeuron() ) {
			evonetIt->setGraphicProperties( QString("obj")+QString::number(i), -10.0, 10.0, Qt::red );
		}
	} else {
		Logger::info("Unknown resource " + resourceName + " for " + name());
	}
}
118 
// NOTE(review): the save() declaration line is missing from this garbled
// source; this body serializes back exactly the parameters the constructor reads.
{
	Sensor::save( params, prefix );
	params.startObjectParameters( prefix, "ObjectPositionSensor", this );
	params.createParameter(prefix, "neuronsIterator", neuronsIteratorResource);
	params.createParameter( prefix, "object", objectName );
	if ( linearize ) {
		// Bounding-box corners are saved as space-separated triplets
		params.createParameter( prefix, "bbMin", QString("%1 %2 %3").arg(bbMin[0]).arg(bbMin[1]).arg(bbMin[2]) );
		params.createParameter( prefix, "bbMax", QString("%1 %2 %3").arg(bbMax[0]).arg(bbMax[1]).arg(bbMax[2]) );
	}
}
130 //ObjectPositionSensor : end implementation
131 
// Internal helpers for the graphical representation of the linear camera
namespace __LinearCamera_internal {
	#ifndef GLMultMatrix
	#define GLMultMatrix glMultMatrixf
	// for double-precision matrices use glMultMatrixd instead
	#endif

	// Side of the small cube drawn to represent the camera body (world units)
	const float linearCameraCubeSide = 0.02f;

	// Length of the lines/patches drawn to represent the receptors (world units)
	const float linearCameraReceptorsLength = 0.1f;
153  {
154  public:
	/**
	 * Builds the graphical representation of a linear camera and attaches
	 * it to the given object (which becomes the owner of this graphics).
	 *
	 * \param object the object the camera is attached to
	 * \param transformation displacement of the camera w.r.t. the object
	 * \param minAngle lower bound of the camera aperture
	 * \param maxAngle upper bound of the camera aperture
	 * \param numReceptors number of receptors of the camera
	 * \param name the name of this WObject
	 */
	LinearCameraGraphic(WObject *object, const wMatrix& transformation, double minAngle, double maxAngle, unsigned int numReceptors, QString name = "unamed") :
		GraphicalWObject(object->world(), name),
		m_object(object),
		m_transformation(transformation),
		m_minAngle(minAngle),
		m_maxAngle(maxAngle),
		m_numReceptors(numReceptors),
		// NOTE(review): the m_receptorRange initializer is missing from this
		// garbled source (original line 175) — presumably
		// (maxAngle - minAngle) / numReceptors; confirm against the original file.
		m_receptors(m_numReceptors, Qt::black)
	{
		// Attaching to object (which also becomes our owner)
		attachToObject(m_object, true);

		// We also use our own color and texture
		// NOTE(review): a statement may be missing here (original line 182)
		setTexture("");
		setColor(Qt::white);
	}
186 
191  {
192  }
193 
	/**
	 * Stores the colors currently perceived by the receptors, to be drawn at
	 * the next render() call. The mutex guards m_receptors because this is
	 * presumably called from the simulation thread while render() runs on the
	 * GUI thread — confirm the threading model in GraphicalWObject.
	 *
	 * \param receptors one perceived color per receptor
	 */
	void setPerceivedColors(const QVector<QColor>& receptors)
	{
		m_receptorsMutex.lock();
		m_receptors = receptors;
		m_receptorsMutex.unlock();
	}
206 
207  protected:
	/**
	 * Draws the camera: a small cube for the body (front face half green to
	 * mark the view direction/up-vector), white lines separating the receptor
	 * sectors, and three colored patches (R, G, B components) per receptor
	 * showing the currently perceived color.
	 *
	 * NOTE(review): "tm" is presumably the attach transformation provided by
	 * the GraphicalWObject base class — confirm there.
	 *
	 * \param renderer the object rendering us
	 * \param gw the GL context to draw into
	 */
	virtual void render(RenderWObject* renderer, QGLContext* gw)
	{
		// Bringing the frame of reference at the center of the camera
		wMatrix mtr = m_transformation * tm;
		glPushMatrix();
		renderer->container()->setupColorTexture(gw, renderer);
		GLMultMatrix(&mtr[0][0]);

		// First of all drawing the camera as a small white box. The face in the
		// direction of view (X axis) is painted half green: the green part is the
		// one in the direction of the upvector (Z axis)
		glBegin(GL_QUADS);
		const float hside = linearCameraCubeSide / 2.0;

		// front (top part)
		glColor3f(0.0, 1.0, 0.0);
		glNormal3f(1.0, 0.0, 0.0);
		glVertex3f( hside, -hside, hside);
		glVertex3f( hside, -hside, 0.0);
		glVertex3f( hside, hside, 0.0);
		glVertex3f( hside, hside, hside);

		// front (bottom part)
		glColor3f(1.0, 1.0, 1.0);
		glNormal3f(1.0, 0.0, 0.0);
		glVertex3f( hside, -hside, 0.0);
		glVertex3f( hside, -hside, -hside);
		glVertex3f( hside, hside, -hside);
		glVertex3f( hside, hside, 0.0);

		// back
		glNormal3f(-1.0, 0.0, 0.0);
		glVertex3f(-hside, -hside, -hside);
		glVertex3f(-hside, -hside, hside);
		glVertex3f(-hside, hside, hside);
		glVertex3f(-hside, hside, -hside);

		// top
		glNormal3f(0.0, 1.0, 0.0);
		glVertex3f(-hside, hside, hside);
		glVertex3f( hside, hside, hside);
		glVertex3f( hside, hside, -hside);
		glVertex3f(-hside, hside, -hside);

		// bottom
		glNormal3f(0.0, -1.0, 0.0);
		glVertex3f(-hside, -hside, -hside);
		glVertex3f( hside, -hside, -hside);
		glVertex3f( hside, -hside, hside);
		glVertex3f(-hside, -hside, hside);

		// right
		glNormal3f(0.0, 0.0, 1.0);
		glVertex3f( hside, -hside, hside);
		glVertex3f(-hside, -hside, hside);
		glVertex3f(-hside, hside, hside);
		glVertex3f( hside, hside, hside);

		// left
		glNormal3f(0.0, 0.0, -1.0);
		glVertex3f( hside, -hside, -hside);
		glVertex3f(-hside, -hside, -hside);
		glVertex3f(-hside, hside, -hside);
		glVertex3f( hside, hside, -hside);

		glEnd();

		// Now we draw white lines to separate the various sectors of the camera
		// Disabling lighting here (we want pure lines no matter from where we look at them)
		glPushAttrib(GL_LIGHTING_BIT);
		glDisable(GL_LIGHTING);
		glLineWidth(2.5);
		glColor3f(1.0, 1.0, 1.0);

		// Drawing the lines (numReceptors + 1 boundaries)
		glBegin(GL_LINES);
		for (unsigned int i = 0; i <= m_numReceptors; i++) {
			const double curAngle = m_minAngle + double(i) * m_receptorRange;

			const wVector lineEnd = wVector(cos(curAngle), sin(curAngle), 0.0).scale(linearCameraReceptorsLength);

			glVertex3f(0.0, 0.0, 0.0);
			glVertex3f(lineEnd.x, lineEnd.y, lineEnd.z);
		}
		glEnd();

		// Now drawing the state of receptors. Here we also have to lock the semaphore for
		// the m_receptors vector
		m_receptorsMutex.lock();

		// Drawing the status: each receptor sector is split in three angular
		// sub-patches showing the red, green and blue components separately
		glBegin(GL_QUADS);
		glNormal3f(0.0, 1.0, 0.0);
		const double colorPatchAngle = m_receptorRange / 3.0;
		const double colorPatchMinLength = linearCameraReceptorsLength / 3.0;
		const double colorPatchMaxLength = 2.0 * linearCameraReceptorsLength / 3.0;
		for (unsigned int i = 0; i < m_numReceptors; i++) {
			const double curAngle = m_minAngle + double(i) * m_receptorRange;

			for (unsigned int c = 0; c < 3; c++) {
				const double startAngle = curAngle + double(c) * colorPatchAngle;
				const double endAngle = curAngle + double(c + 1) * colorPatchAngle;

				// Computing the four vertexes of the annular patch
				const wVector v1 = wVector(cos(startAngle), sin(startAngle), 0.0).scale(colorPatchMinLength);
				const wVector v2 = wVector(cos(startAngle), sin(startAngle), 0.0).scale(colorPatchMaxLength);
				const wVector v3 = wVector(cos(endAngle), sin(endAngle), 0.0).scale(colorPatchMaxLength);
				const wVector v4 = wVector(cos(endAngle), sin(endAngle), 0.0).scale(colorPatchMinLength);

				// Setting the color: patch 0 shows red, 1 green, 2 blue
				switch (c) {
					case 0:
						glColor3f(m_receptors[i].redF(), 0.0, 0.0);
						break;
					case 1:
						glColor3f(0.0, m_receptors[i].greenF(), 0.0);
						break;
					case 2:
						glColor3f(0.0, 0.0, m_receptors[i].blueF());
						break;
					default:
						break;
				}

				// Drawing the patch
				glVertex3f(v1.x, v1.y, v1.z);
				glVertex3f(v2.x, v2.y, v2.z);
				glVertex3f(v3.x, v3.y, v3.z);
				glVertex3f(v4.x, v4.y, v4.z);
			}
		}
		glEnd();
		m_receptorsMutex.unlock();

		// Restoring lighting status
		glPopAttrib();

		glPopMatrix();
	}
354 
359 
365 
	// NOTE(review): the declarations of m_object and m_transformation appear to
	// be missing from this garbled source (they are referenced by the
	// constructor and render() above).

	// The lower bound of the camera aperture (radians: used with cos/sin in render())
	const double m_minAngle;

	// The upper bound of the camera aperture (radians)
	const double m_maxAngle;

	// The number of receptors of the camera
	const unsigned int m_numReceptors;

	// The angular extent of a single receptor
	// NOTE(review): its initializer is missing in this garbled source
	const double m_receptorRange;

	// The color currently perceived by each receptor; written by
	// setPerceivedColors() and read by render(), guarded by m_receptorsMutex
	QVector<QColor> m_receptors;
400  };
401 }
402 
403 using namespace __LinearCamera_internal;
404 
// NOTE(review): one initializer-list line is missing from this garbled source
// (original line 406, right after the signature) — possibly a base-class
// initializer; confirm against the original file.
LinearCamera::LinearCamera(WObject* obj, wMatrix mtr, double aperture, unsigned int numReceptors, QColor backgroundColor) :
	m_receptors(numReceptors),
	m_object(obj),
	m_transformation(mtr),
	// Clamping the aperture to [0, 2*PI]
	m_aperture((aperture > (2.0 * PI_GRECO)) ? (2.0 * PI_GRECO) : ((aperture < 0.0) ? 0.0 : aperture)),
	m_numReceptors(numReceptors),
	m_backgroundColor(backgroundColor),
	// The aperture is centered around zero
	m_apertureMin(-m_aperture / 2.0),
	m_apertureMax(m_aperture / 2.0),
	m_receptorRange(m_aperture / double(m_numReceptors)),
	m_arena(NULL),
	m_drawCamera(false),
	m_graphicalCamera(NULL)

{
	// Stating which resources we use here
	addUsableResource("arena");
}
424 
426 {
427  // Nothing to do here
428 }
429 
430 namespace {
431  // This namespace contains some structures used in the LinearCamera::update() function
432 
433  // The structure containing a color and the range of the camera field hit by this color.
434  // It also contains the distance from the camera for ordering.
435  struct ColorRangeAndDistance
436  {
437  ColorRangeAndDistance() :
438  color(),
439  minAngle(0.0),
440  maxAngle(0.0),
441  distance(0.0)
442  {
443  }
444 
445  ColorRangeAndDistance(QColor c, double min, double max, double d) :
446  color(c),
447  minAngle(min),
448  maxAngle(max),
449  distance(d)
450  {
451  }
452 
453  // This is to order objects of this type
454  bool operator<(const ColorRangeAndDistance& other) const
455  {
456  return (distance < other.distance);
457  }
458 
459  QColor color;
460  double minAngle;
461  double maxAngle;
462  double distance;
463  };
464 
465  // An helper class to ease computations with multiple intervals. This class starts with a single
466  // interval and the allows to remove portions. When removing an interval, returns the portion
467  // of the initial range that was actually removed
468  class MultiInterval
469  {
470  private:
471  struct SingleInterval
472  {
473  double start;
474  double end;
475  };
476 
477  public:
478  MultiInterval() :
479  m_originalSize(0.0),
480  m_intervals()
481  {
482  }
483 
484  void initMultiInterval(double start, double end)
485  {
486  m_originalSize = end - start;
487 
488  SingleInterval i;
489  i.start = start;
490  i.end = end;
491  m_intervals.append(i);
492  }
493 
494  double removeInterval(double start, double end)
495  {
496  double removedSize = 0.0;
497 
498  // We exit from the cycle when both these variables are true: intervals are ordered so,
499  // if we have found both the interval for start and the one for end we can exit
500  bool foundStartInIntervals = false;
501  bool foundEndInIntervals = false;
502  QLinkedList<SingleInterval>::iterator it = m_intervals.begin();
503  while ((it != m_intervals.end()) && (!foundStartInIntervals || !foundEndInIntervals)) {
504  if ((start <= it->start) && (end >= it->end)) {
505  // Removing the whole interval and continuing
506  removedSize += it->end - it->start;
507  it = m_intervals.erase(it);
508  } else if ((start >= it->start) && (start < it->end) && (end > it->start) && (end <= it->end)) {
509  // Here we have to split the interval in two. We put the two new intervals in place
510  // of the old one
511  removedSize += end - start;
512  SingleInterval i1, i2;
513  i1.start = it->start;
514  i1.end = start;
515  i2.start = end;
516  i2.end = it->end;
517  it = m_intervals.erase(it);
518  // Going one step back to insert the two new items
519  --it;
520  it = m_intervals.insert(it, i1);
521  it = m_intervals.insert(it, i2);
522 
523  // This interval was completely inside another interval, so no other interval will
524  // be intersected and we can exit from the cycle
525  foundStartInIntervals = true;
526  foundEndInIntervals = true;
527  } else if ((start > it->start) && (start < it->end)) {
528  // Here we have to reduce the interval by setting the new end
529  removedSize += it->end - start;
530  it->end = start;
531  foundStartInIntervals = true;
532  ++it;
533  } else if ((end > it->start) && (end < it->end)) {
534  // Here we have to reduce the interval, by setting the new start
535  removedSize += end - it->start;
536  it->start = end;
537  foundEndInIntervals = true;
538  ++it;
539  } else {
540  // Simply incrementing the iterator
541  ++it;
542  }
543  }
544 
545  return removedSize / m_originalSize;
546  }
547 
548  private:
549  double m_originalSize;
550  // Intervals will always be ordered from the one with the lowest start to the one with the highest start.
551  // Moreover two intervals will never intersect
552  QLinkedList<SingleInterval> m_intervals;
553  };
554 
555  // An helper structure memorizing information about colors in a single receptor. minAngle and maxAngle
556  // are used to store the current portion of the receptor for which we already know the color, while
557  // colorsAndFractions is the list of colors and the portion of the receptor occupied by that color
558  struct ColorsInReceptor
559  {
560  MultiInterval curInterval;
561 
562  struct ColorAndFraction {
563  ColorAndFraction() :
564  color(),
565  fraction(0.0)
566  {
567  }
568 
569  ColorAndFraction(QColor c, double f) :
570  color(c),
571  fraction(f)
572  {
573  }
574 
575  QColor color;
576  double fraction;
577  };
578  QList<ColorAndFraction> colorsAndFractions;
579  };
580 }
581 
// NOTE(review): the LinearCamera::update() declaration line is missing from
// this garbled source; this is its body. It fills m_receptors with the color
// perceived by each receptor of the 1D camera.
{
#ifdef __GNUC__
	// (Italian) TODO: once robots are in the objects list, remember to exclude
	// the object the camera is attached to when computing the activation
	#warning APPENA I ROBOT SONO NELLA LISTA DEGLI OGGETTI, BISOGNA RICORDARSI DI ESCLUDERE L OGGETTO CUI LA CAMERA È ATTACCATA QUANDO SI CALCOLA L ATTIVAZIONE
#endif
	// Getting the list of objects from the arena (if we have the pointer to the arena)
	if (m_arena == NULL) {
		m_receptors.fill(m_backgroundColor);

		return;
	}
	const QVector<PhyObject2DWrapper*>& objectsList = m_arena->getObjects();

	// If no object is present, we can fill the receptors list with background colors and return
	if (objectsList.size() == 0) {
		m_receptors.fill(m_backgroundColor);

		return;
	}

	// Updating the matrix with the current camera position
	wMatrix currentMtr = m_transformation * m_object->matrix();

	// First of all we need to compute which color hits each receptor

	// Now filling the list with colors, ranges and distances. If an object is perceived at the
	// extremities of the aperture, it is split in two different ColorRangeAndDistance objects
	QList<ColorRangeAndDistance> colorsRangesAndDistances;

	// For the moment we use the distance to order objects (see ColorRangeAndDistance::operator<), however
	// this is not correct (occlusion doesn't work well) and so should be changed
	for (int i = 0; i < objectsList.size(); i++) {
		const QColor color = objectsList[i]->color();
		double minAngle;
		double maxAngle;
		double distance;
		objectsList[i]->computeLinearViewFieldOccupiedRange(currentMtr, minAngle, maxAngle, distance);

		// computeLinearViewFieldOccupiedRange returns a negative distance if the object is outside the view field
		if (distance < 0.0) {
			continue;
		}

		// If the minAngle is greater than the maxAngle, splitting in two, so that we do not have to
		// make special cases in the subsequent part of the function. Here we also check if the object
		// is completely outside the view field or not (in the first case we don't add it to the list)
		// We just check if the object is at least partially visible, we don't set the limits to be
		// within the view field
		if (minAngle > maxAngle) {
			// Wrap-around case: the object crosses the aperture boundary
			if ((minAngle > m_apertureMin) && (minAngle < m_apertureMax)) {
				colorsRangesAndDistances.append(ColorRangeAndDistance(color, minAngle, m_apertureMax, distance));
			}
			if ((maxAngle > m_apertureMin) && (maxAngle < m_apertureMax)) {
				colorsRangesAndDistances.append(ColorRangeAndDistance(color, m_apertureMin, maxAngle, distance));
			}
		} else {
			if (((minAngle > m_apertureMin) && (minAngle < m_apertureMax)) || ((maxAngle > m_apertureMin) && (maxAngle < m_apertureMax))) {
				colorsRangesAndDistances.append(ColorRangeAndDistance(color, minAngle, maxAngle, distance));
			}
		}
	}

	// Ordering colors by distance from the camera
	qSort(colorsRangesAndDistances);

	// Now we can add the background color at the end of the list. It covers all receptors to be sure to fill
	// the whole field with a valid color
	colorsRangesAndDistances.append(ColorRangeAndDistance(m_backgroundColor, m_apertureMin, m_apertureMax, std::numeric_limits<double>::infinity()));

	// The next step is to calculate the percentage of each color in the colorsRangesAndDistances list
	// in each receptor
	QVector<ColorsInReceptor> colorsInReceptors(m_numReceptors);
	for (QList<ColorRangeAndDistance>::const_iterator it = colorsRangesAndDistances.begin(); it != colorsRangesAndDistances.end(); ++it) {
		// Computing the index of receptors which are interested by this color
		// (indices clamped to the valid receptor range)
		const int minIndex = max(0, floor((it->minAngle - m_apertureMin) / m_receptorRange));
		const int maxIndex = min(m_numReceptors - 1, floor((it->maxAngle - m_apertureMin) / m_receptorRange));

		// Now cycling over the computed receptors in the colorsInReceptors list to fill it
		for (int i = minIndex; i <= maxIndex; i++) {
			if (colorsInReceptors[i].colorsAndFractions.size() == 0) {
				// This is the first color in the receptor, we have to initialize the interval
				const double receptorMin = m_apertureMin + m_receptorRange * double(i);
				const double receptorMax = m_apertureMin + m_receptorRange * double(i + 1);
				colorsInReceptors[i].curInterval.initMultiInterval(receptorMin, receptorMax);
			}

			// The fraction of the receptor this (nearer-first) color actually covers
			const double fraction = min(1.0, colorsInReceptors[i].curInterval.removeInterval(it->minAngle, it->maxAngle));
			colorsInReceptors[i].colorsAndFractions.append(ColorsInReceptor::ColorAndFraction(it->color, fraction));
		}
	}

	// The final step is to compute the resulting color for each receptor. See class description for a comment
	// on this procedure
	// NOTE(review): signed int i compared against unsigned m_numReceptors below
	for (int i = 0; i < m_numReceptors; i++) {
		double red = 0.0;
		double green = 0.0;
		double blue = 0.0;
		for (QList<ColorsInReceptor::ColorAndFraction>::const_iterator it = colorsInReceptors[i].colorsAndFractions.begin(); it != colorsInReceptors[i].colorsAndFractions.end(); ++it) {
			red += it->color.redF() * it->fraction;
			green += it->color.greenF() * it->fraction;
			blue += it->color.blueF() * it->fraction;
		}
		m_receptors[i] = QColor::fromRgbF(min(1.0, red), min(1.0, green), min(1.0, blue));
	}

	// Updating graphics if we have to
	if (m_drawCamera) {
		m_graphicalCamera->setPerceivedColors(m_receptors);
	}
}
692 
694 {
695  if (m_drawCamera == d) {
696  return;
697  }
698 
699  m_drawCamera = d;
700  if (m_drawCamera) {
701  m_graphicalCamera = new LinearCameraGraphic(m_object, m_transformation, m_apertureMin, m_apertureMax, m_numReceptors, "linearCamera");
702  } else {
703  delete m_graphicalCamera;
704  }
705 }
706 
707 void LinearCamera::resourceChanged(QString resourceName, ResourceChangeType changeType)
708 {
709  if (resourceName == "arena") {
710  switch (changeType) {
711  case Created:
712  case Modified:
713  m_arena = getResource<Arena>();
714  break;
715  case Deleted:
716  m_arena = NULL;
717  break;
718  }
719  } else {
720  Logger::info("Unknown resource " + resourceName + " (in LinearCamera)");
721  }
722 }
723 
724 } // end namespace farsa
725