MOOSE - Multiscale Object Oriented Simulation Environment
SeqSynHandler.cpp
/**********************************************************************
** This program is part of 'MOOSE', the
** Messaging Object Oriented Simulation Environment.
**           Copyright (C) 2016 Upinder S. Bhalla and NCBS
** It is made available under the terms of the
** GNU Lesser General Public License version 2.1
** See the file COPYING.LIB for the full notice.
**********************************************************************/

#include <queue>
#include "global.h"
#include "header.h"
#include "Synapse.h"
#include "SynEvent.h"
#include "SynHandlerBase.h"
#include "RollingMatrix.h"
#include "SeqSynHandler.h"
#include "muParser.h"

const Cinfo* SeqSynHandler::initCinfo()
{
    static string doc[] =
    {
        "Name", "SeqSynHandler",
        "Author", "Upi Bhalla",
        "Description",
        "The SeqSynHandler handles synapses that recognize sequentially "
        "ordered input, where the ordering is both in space and time. "
        "It assumes that the N input synapses are ordered and "
        "equally spaced along a single linear vector.\n"
        "To do this it maintains a record of recent synaptic input, "
        "for a duration of *historyTime*, at a time interval *seqDt*. "
        "*SeqDt* is typically longer than the simulation "
        "timestep *dt* for the synapse, and cannot be shorter. "
        "*SeqDt* should represent the characteristic time of advance "
        "of the sequence.\n"
        "The SeqSynHandler uses a 2-D kernel to define how to recognize "
        "a sequence, with dependence both on space and history. "
        "This kernel is defined by the *kernelEquation* as a "
        "mathematical expression in x (synapse number) and t (time). "
        "It computes a vector with the local *response* term for each "
        "point along all inputs, by taking a 2-D convolution of the "
        "kernel with the history[time][synapse#] matrix."
        "\nThe local response can affect the synapse in three ways: "
        "1. It can sum the entire response vector, scale it by the "
        "*sequenceScale* term, and send it to the synapse as a steady "
        "activation. Consider this a cell-wide immediate response to "
        "a sequence that it likes.\n"
        "2. It can do an instantaneous scaling of the weight of each "
        "individual synapse by the corresponding entry in the response "
        "vector. It uses the *plasticityScale* term to do this. "
        "Consider this a short-term plasticity effect on specific "
        "synapses.\n"
        "3. It can do long-term plasticity of each individual synapse, "
        "using the matched local entries in the response vector and "
        "individual synapse history as inputs to the learning rule. "
        "This is not yet implemented.\n"
        "In addition to all these, the SeqSynHandler can act just like "
        "a regular synapse, where it responds to individual synaptic "
        "input according to the weight of the synapse. The size of "
        "this component of the output is scaled by *baseScale*.\n"
    };
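
    // Example kernel (illustrative only, not a prescribed choice): with
    // the default seqDt of 1 s, a kernelEquation of "exp( -(x - t)^2 )"
    // peaks where the synapse offset x matches the age of the input in
    // units of seqDt, so the response is largest for inputs that sweep
    // across the synapse array at roughly one synapse per seqDt.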

    static FieldElementFinfo< SynHandlerBase, Synapse > synFinfo(
        "synapse",
        "Sets up field Elements for synapse",
        Synapse::initCinfo(),
        &SynHandlerBase::getSynapse,
        &SynHandlerBase::setNumSynapses,
        &SynHandlerBase::getNumSynapses
    );

    static ValueFinfo< SeqSynHandler, string > kernelEquation(
        "kernelEquation",
        "Equation in x and t to define kernel for sequence recognition",
        &SeqSynHandler::setKernelEquation,
        &SeqSynHandler::getKernelEquation
    );
    static ValueFinfo< SeqSynHandler, unsigned int > kernelWidth(
        "kernelWidth",
        "Width of kernel, i.e., number of synapses taking part in seq.",
        &SeqSynHandler::setKernelWidth,
        &SeqSynHandler::getKernelWidth
    );
    static ValueFinfo< SeqSynHandler, double > seqDt(
        "seqDt",
        "Characteristic time for advancing the sequence.",
        &SeqSynHandler::setSeqDt,
        &SeqSynHandler::getSeqDt
    );
    static ValueFinfo< SeqSynHandler, double > historyTime(
        "historyTime",
        "Duration to keep track of history of inputs to all synapses.",
        &SeqSynHandler::setHistoryTime,
        &SeqSynHandler::getHistoryTime
    );
    static ValueFinfo< SeqSynHandler, double > baseScale(
        "baseScale",
        "Basal scaling factor for regular synaptic activation.",
        &SeqSynHandler::setBaseScale,
        &SeqSynHandler::getBaseScale
    );
    static ValueFinfo< SeqSynHandler, double > sequenceScale(
        "sequenceScale",
        "Scaling factor for sustained activation of synapse by seq",
        &SeqSynHandler::setSequenceScale,
        &SeqSynHandler::getSequenceScale
    );
    static ValueFinfo< SeqSynHandler, vector< unsigned int > > synapseOrder(
        "synapseOrder",
        "Mapping of synapse input order to spatial order on syn array. "
        "Entries in this vector are indices which must remain smaller "
        "than numSynapses. The system will fix up if you mess up. "
        "It does not insist on unique mappings, but these are "
        "desirable as the outcome is undefined for repeated entries.",
        &SeqSynHandler::setSynapseOrder,
        &SeqSynHandler::getSynapseOrder
    );
    static ValueFinfo< SeqSynHandler, int > synapseOrderOption(
        "synapseOrderOption",
        "How to do the synapse order remapping. This rule stays in "
        "place and guarantees safe mappings even if the number of "
        "synapses is altered. Options:\n"
        "-2: User ordering.\n"
        "-1: Sequential ordering, 0 to numSynapses-1.\n"
        "0: Random ordering using existing system seed.\n"
        ">0: Random ordering using seed specified by this number.\n"
        "Default is -1, sequential ordering.",
        &SeqSynHandler::setSynapseOrderOption,
        &SeqSynHandler::getSynapseOrderOption
    );
    static ReadOnlyValueFinfo< SeqSynHandler, double > seqActivation(
        "seqActivation",
        "Reports summed activation of synaptic channel by sequence",
        &SeqSynHandler::getSeqActivation
    );
    static ValueFinfo< SeqSynHandler, double > plasticityScale(
        "plasticityScale",
        "Scaling factor for doing plasticity by scaling each synapse "
        "by the corresponding entry in the response vector",
        &SeqSynHandler::setPlasticityScale,
        &SeqSynHandler::getPlasticityScale
    );

    static ValueFinfo< SeqSynHandler, double > sequencePower(
        "sequencePower",
        "Exponent for the outcome of the sequential calculations. "
        "This is needed because linear summation of terms in the kernel "
        "means that a brief strong sequence match is no better than lots "
        "of successive low matches. In other words, 12345 is no better "
        "than 11111. Using an exponent lets us select the former. "
        "Defaults to 1.0.",
        &SeqSynHandler::setSequencePower,
        &SeqSynHandler::getSequencePower
    );
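
    // Worked example for sequencePower (illustrative): the response
    // vectors {5,0,0,0,0} and {1,1,1,1,1} both sum to 5 when
    // sequencePower is 1, but with sequencePower = 2 they sum to 25
    // and 5 respectively, so the brief strong match is selected.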

    static ReadOnlyValueFinfo< SeqSynHandler, vector< double > >
        weightScaleVec(
        "weightScaleVec",
        "Vector of weight scaling for each synapse",
        &SeqSynHandler::getWeightScaleVec
    );
    static ReadOnlyValueFinfo< SeqSynHandler, vector< double > > kernel(
        "kernel",
        "All entries of kernel, as a linear vector",
        &SeqSynHandler::getKernel
    );
    static ReadOnlyValueFinfo< SeqSynHandler, vector< double > > history(
        "history",
        "All entries of history, as a linear vector",
        &SeqSynHandler::getHistory
    );

    static Finfo* seqSynHandlerFinfos[] = {
        &synFinfo,              // FieldElement
        &kernelEquation,        // Field
        &kernelWidth,           // Field
        &seqDt,                 // Field
        &historyTime,           // Field
        &sequenceScale,         // Field
        &baseScale,             // Field
        &synapseOrder,          // Field
        &synapseOrderOption,    // Field
        &seqActivation,         // ReadOnlyField
        &plasticityScale,       // Field
        &sequencePower,         // Field
        &weightScaleVec,        // ReadOnlyField
        &kernel,                // ReadOnlyField
        &history                // ReadOnlyField
    };

    static Dinfo< SeqSynHandler > dinfo;
    static Cinfo seqSynHandlerCinfo (
        "SeqSynHandler",
        SynHandlerBase::initCinfo(),
        seqSynHandlerFinfos,
        sizeof( seqSynHandlerFinfos ) / sizeof ( Finfo* ),
        &dinfo,
        doc,
        sizeof( doc ) / sizeof( string )
    );

    return &seqSynHandlerCinfo;
}

static const Cinfo* seqSynHandlerCinfo = SeqSynHandler::initCinfo();

SeqSynHandler::SeqSynHandler()
    :
    kernelEquation_( "" ),
    kernelWidth_( 5 ),
    historyTime_( 2.0 ),
    seqDt_( 1.0 ),
    baseScale_( 0.0 ),
    sequenceScale_( 1.0 ),
    plasticityScale_( 0.0 ),
    sequencePower_( 1.0 ),
    seqActivation_( 0.0 ),
    synapseOrderOption_( -1 ) // sequential ordering
{
    history_.resize( numHistory(), 0 );
}

SeqSynHandler::~SeqSynHandler()
{ ; }

SeqSynHandler& SeqSynHandler::operator=( const SeqSynHandler& ssh )
{
    synapses_ = ssh.synapses_;
    for ( vector< Synapse >::iterator
            i = synapses_.begin(); i != synapses_.end(); ++i )
        i->setHandler( this );

    // For no apparent reason, priority queues don't have a clear operation.
    while ( !events_.empty() )
        events_.pop();

    return *this;
}

void SeqSynHandler::vSetNumSynapses( const unsigned int v )
{
    unsigned int prevSize = synapses_.size();
    synapses_.resize( v );
    for ( unsigned int i = prevSize; i < v; ++i )
        synapses_[i].setHandler( this );

    history_.resize( numHistory(), v );
    latestSpikes_.resize( v, 0.0 );
    weightScaleVec_.resize( v, 0.0 );
    refillSynapseOrder( v );
    updateKernel();
}

unsigned int SeqSynHandler::vGetNumSynapses() const
{
    return synapses_.size();
}

Synapse* SeqSynHandler::vGetSynapse( unsigned int i )
{
    static Synapse dummy;
    if ( i < synapses_.size() )
        return &synapses_[i];
    cout << "Warning: SeqSynHandler::getSynapse: index: " << i <<
        " is out of range: " << synapses_.size() << endl;
    return &dummy;
}


// Checks for numbers bigger than the size. Replaces them with
// values within the range that have not yet been used.
void SeqSynHandler::fixSynapseOrder()
{
    unsigned int sz = synapseOrder_.size();
    vector< unsigned int > availableEntries( sz );
    iota( availableEntries.begin(), availableEntries.end(), 0 );
    for ( unsigned int i = 0; i < sz; ++i ) {
        if ( synapseOrder_[i] < sz )
            availableEntries[ synapseOrder_[i] ] = sz;
    }
    vector< unsigned int > ae;
    for ( unsigned int i = 0; i < sz; ++i )
        if ( availableEntries[i] < sz )
            ae.push_back( availableEntries[i] );

    auto jj = ae.begin();
    for ( unsigned int i = 0; i < sz; ++i ) {
        if ( synapseOrder_[i] >= sz )
            synapseOrder_[i] = *jj++;
    }
}
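
// Worked example: with 4 synapses and synapseOrder_ = {2, 9, 2, 7}, the
// in-range entry 2 marks index 2 as used, so the unused indices {0, 1, 3}
// replace the out-of-range entries in order, giving {2, 0, 2, 1}. The
// repeated 2 is left alone: mappings are made safe but not forced to be
// unique.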

// Beautiful snippet from Lukasz Wiklendt on StackOverflow. Returns order
// of entries in a vector.
template <typename T> vector<size_t> sort_indexes(const vector<T> &v) {
    // initialize original index locations
    vector<size_t> idx(v.size());
    iota(idx.begin(), idx.end(), 0);
    // sort indexes based on comparing values in v
    sort(idx.begin(), idx.end(),
        [&v](size_t i1, size_t i2) {return v[i1] < v[i2];});
    return idx;
}
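
// Example: sort_indexes( {0.3, 0.1, 0.2} ) returns {1, 2, 0}, the indices
// of the entries in ascending order of value.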

void SeqSynHandler::refillSynapseOrder( unsigned int newSize )
{
    if ( synapseOrderOption_ <= -2 ) { // User order
        synapseOrder_.resize( newSize, newSize );
        fixSynapseOrder();
    } else if ( synapseOrderOption_ == -1 ) { // Ordered
        synapseOrder_.resize( newSize );
        for ( unsigned int i = 0; i < newSize; ++i )
            synapseOrder_[i] = i;
    } else {
        synapseOrder_.resize( newSize );
        if ( synapseOrderOption_ > 0 ) { // Specify seed explicitly
            moose::mtseed( synapseOrderOption_ );
        }
        vector< double > x;
        for ( unsigned int i = 0; i < newSize; ++i )
            x.push_back( moose::mtrand() );
        auto idx = sort_indexes< double >( x );
        for ( unsigned int i = 0; i < newSize; ++i )
            synapseOrder_[i] = idx[i];
    }
}

void SeqSynHandler::updateKernel()
{
    if ( kernelEquation_ == "" || seqDt_ < 1e-9 || historyTime_ < 1e-9 )
        return;
    double x = 0;
    double t = 0;
    mu::Parser p;
    p.DefineVar( "x", &x );
    p.DefineVar( "t", &t );
    p.DefineConst( _T("pi"), (mu::value_type) M_PI );
    p.DefineConst( _T("e"), (mu::value_type) M_E );
    p.SetExpr( kernelEquation_ );
    kernel_.clear();
    int nh = numHistory();
    kernel_.resize( nh );
    for ( int i = 0; i < nh; ++i ) {
        kernel_[i].resize( kernelWidth_ );
        t = i * seqDt_;
        for ( unsigned int j = 0; j < kernelWidth_; ++j ) {
            x = j;
            kernel_[i][j] = p.Eval();
        }
    }
}
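
// Worked example: with the defaults historyTime_ = 2.0, seqDt_ = 1.0 and
// kernelWidth_ = 5, numHistory() is 2, so kernel_ has 2 rows (t = 0 and
// t = 1) of 5 columns (x = 0..4). A kernelEquation of "x*t" would then
// evaluate to {0,0,0,0,0} and {0,1,2,3,4}.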

void SeqSynHandler::setKernelEquation( string eq )
{
    kernelEquation_ = eq;
    updateKernel();
}

string SeqSynHandler::getKernelEquation() const
{
    return kernelEquation_;
}

void SeqSynHandler::setKernelWidth( unsigned int v )
{
    kernelWidth_ = v;
    updateKernel();
}

unsigned int SeqSynHandler::getKernelWidth() const
{
    return kernelWidth_;
}

void SeqSynHandler::setSeqDt( double v )
{
    seqDt_ = v;
    updateKernel();
    history_.resize( numHistory(), vGetNumSynapses() );
}

double SeqSynHandler::getSeqDt() const
{
    return seqDt_;
}

void SeqSynHandler::setHistoryTime( double v )
{
    historyTime_ = v;
    history_.resize( numHistory(), vGetNumSynapses() );
    updateKernel();
}

double SeqSynHandler::getHistoryTime() const
{
    return historyTime_;
}

void SeqSynHandler::setBaseScale( double v )
{
    baseScale_ = v;
}

double SeqSynHandler::getBaseScale() const
{
    return baseScale_;
}

void SeqSynHandler::setSequenceScale( double v )
{
    sequenceScale_ = v;
}

double SeqSynHandler::getSequenceScale() const
{
    return sequenceScale_;
}

double SeqSynHandler::getSeqActivation() const
{
    return seqActivation_;
}

double SeqSynHandler::getPlasticityScale() const
{
    return plasticityScale_;
}

void SeqSynHandler::setPlasticityScale( double v )
{
    plasticityScale_ = v;
}

double SeqSynHandler::getSequencePower() const
{
    return sequencePower_;
}

void SeqSynHandler::setSequencePower( double v )
{
    sequencePower_ = v;
}

vector< double > SeqSynHandler::getWeightScaleVec() const
{
    return weightScaleVec_;
}

vector< double > SeqSynHandler::getKernel() const
{
    int nh = numHistory();
    vector< double > ret;
    for ( int i = 0; i < nh; ++i ) {
        ret.insert( ret.end(), kernel_[i].begin(), kernel_[i].end() );
    }
    return ret;
}

vector< double > SeqSynHandler::getHistory() const
{
    int nh = numHistory();
    int numX = vGetNumSynapses();
    vector< double > ret( numX * nh, 0.0 );
    vector< double >::iterator k = ret.begin();
    for ( int i = 0; i < nh; ++i ) {
        for ( int j = 0; j < numX; ++j )
            *k++ = history_.get( i, j );
    }
    return ret;
}

void SeqSynHandler::setSynapseOrder( vector< unsigned int > v )
{
    synapseOrder_ = v;
    fixSynapseOrder();
    synapseOrderOption_ = -2; // Set the flag to say it is user defined.
}

vector< unsigned int > SeqSynHandler::getSynapseOrder() const
{
    return synapseOrder_;
}

void SeqSynHandler::setSynapseOrderOption( int v )
{
    synapseOrderOption_ = v;
    refillSynapseOrder( synapseOrder_.size() );
}

int SeqSynHandler::getSynapseOrderOption() const
{
    return synapseOrderOption_;
}


void SeqSynHandler::addSpike( unsigned int index, double time, double weight )
{
    assert( index < synapses_.size() );
    events_.push( PreSynEvent( index, time, weight ) );
    // Strictly speaking this isn't right. If we have a long time lag
    // then the correct placement of the spike may be in another time
    // slice. For now, to get it going for LIF neurons, this will do.
    // Even in the general case we will probably have a very wide window
    // for the latestSpikes slice.
    //
    // Here we reorder the entries in latestSpikes by the synapse order.
    latestSpikes_[ synapseOrder_[index] ] += weight;
}

double SeqSynHandler::getTopSpike( unsigned int index ) const
{
    if ( events_.empty() )
        return 0.0;
    return events_.top().time;
}

unsigned int SeqSynHandler::addSynapse()
{
    unsigned int newSynIndex = synapses_.size();
    synapses_.resize( newSynIndex + 1 );
    synapses_[newSynIndex].setHandler( this );
    return newSynIndex;
}

void SeqSynHandler::dropSynapse( unsigned int msgLookup )
{
    assert( msgLookup < synapses_.size() );
    synapses_[msgLookup].setWeight( -1.0 );
}

void SeqSynHandler::vProcess( const Eref& e, ProcPtr p )
{
    // Here we look at the correlations and do something with them.
    int nh = numHistory();

    // Check if we need to do correlations at all.
    if ( nh > 0 && kernel_.size() > 0 ) {
        // Check if this timestep rolls over a seqDt boundary
        if ( static_cast< int >( p->currTime / seqDt_ ) >
                static_cast< int >( (p->currTime - p->dt) / seqDt_ ) ) {
            history_.rollToNextRow();
            history_.sumIntoRow( latestSpikes_, 0 );
            latestSpikes_.assign( vGetNumSynapses(), 0.0 );

            // Build up the sum of correlations over time
            vector< double > correlVec( vGetNumSynapses(), 0.0 );
            for ( int i = 0; i < nh; ++i )
                history_.correl( correlVec, kernel_[i], i );
            if ( sequenceScale_ > 0.0 ) { // Sum all responses, send to chan
                seqActivation_ = 0.0;
                for ( vector< double >::iterator y = correlVec.begin();
                        y != correlVec.end(); ++y )
                    seqActivation_ += pow( *y, sequencePower_ );

                // We'll use the seqActivation_ to send a special msg.
                seqActivation_ *= sequenceScale_;
            }
            if ( plasticityScale_ > 0.0 ) { // Short-term changes in individual wts
                weightScaleVec_ = correlVec;
                for ( vector< double >::iterator y = weightScaleVec_.begin();
                        y != weightScaleVec_.end(); ++y )
                    *y *= plasticityScale_;
            }
        }
    }

    // Here we go through the regular synapse activation calculations.
    // We can't leave it to the base class vProcess, because we need
    // to scale the weights individually in some cases.
    double activation = seqActivation_; // Start with seq activation
    if ( plasticityScale_ > 0.0 ) {
        while ( !events_.empty() && events_.top().time <= p->currTime ) {
            activation += events_.top().weight * baseScale_ *
                weightScaleVec_[ events_.top().synIndex ] / p->dt;
            events_.pop();
        }
    } else {
        while ( !events_.empty() && events_.top().time <= p->currTime ) {
            activation += baseScale_ * events_.top().weight / p->dt;
            events_.pop();
        }
    }
    if ( activation != 0.0 )
        SynHandlerBase::activationOut()->send( e, activation );
}
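
// Illustrative numbers for the activation computed above: with
// seqActivation_ = 0, plasticityScale_ = 0, baseScale_ = 1.0 and
// dt = 1e-4 s, a single pending event of weight 0.5 gives
// activation = 1.0 * 0.5 / 1e-4 = 5000, which activationOut() sends
// out for this timestep.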

void SeqSynHandler::vReinit( const Eref& e, ProcPtr p )
{
    // For no apparent reason, priority queues don't have a clear operation.
    while ( !events_.empty() )
        events_.pop();
}

int SeqSynHandler::numHistory() const
{
    return static_cast< int >( 1.0 + floor( historyTime_ * (1.0 - 1e-6) / seqDt_ ) );
}