path: root/core/convert_topology.cpp
/* Input format, one connection per line:
     "src dst delay weight\n"
   Several lines for the same (src, dst) pair appear to be allowed.
 */
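// A minimal example of the expected input (illustrative values, not taken from
// any real data set):
//   0 1 1.5  0.25
//   0 2 2.0 -0.75
// i.e. neuron 0 connects to neuron 1 (delay 1.5, weight 0.25) and to neuron 2
// (delay 2.0, weight -0.75).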
#include <assert.h>
#include <string.h>

#include <iostream>
#include <limits>
#include <map>

#include <boost/tuple/tuple.hpp>
#include <boost/mpl/pair.hpp>
#include <boost/mpl/list.hpp>

#include "pla_set.hpp"
#include "pointers.hpp"
#include "property_composition.hpp"
#include "simlimits.hpp"
#include "time.hpp"
#include "topology.hpp"

#include "model.hpp"

#include "mempool.hpp"

using namespace std;

typedef Ptr<Neuron>::ptr_t     np_t;
typedef Ptr<Synapse>::ptr_t    sp_t;
typedef Ptr<Synapse>::offset_t op_t;

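// A single parsed input line: a connection from src to dst with its weight and delay.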
struct Connection {
  np_t src, dst;
  Weight::type weight;
  Time::type delay;
};

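// Per-source-neuron connection buckets; the multimap keys by delay, so each
// neuron's connections come out sorted by increasing delay.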
map<np_t, multimap<Time, Connection>*> cons;

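// Property storage written by this converter: per-synapse weights plus the
// per-neuron weight sums produced in writeWeightSums().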
PropertyComposition<boost::mpl::list<
  boost::mpl::pair<Weight, boost::mpl::bool_<true>>,
  boost::mpl::pair<TargetSumWeight, boost::mpl::bool_<true>>,
  boost::mpl::pair<SumWeight, boost::mpl::bool_<true>>
>> pc;


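// Allocate one (delay -> connection) multimap per possible source neuron.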
void init() {
  // check that pc time is 0.0
  assert(pc.properties.data.data.timeLimit == 0.0);
  for (np_t i=0; i<maxNeurons; i++)
    cons[i] = new multimap<Time, Connection>();
}

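// Parse "src dst delay weight" lines from stdin until EOF, apply basic sanity
// checks, and bucket each connection under its source neuron.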
void read() {
  cin >> skipws;
  while (!cin.eof()) {
    Connection c;

    // read from stream
    cin >> c.src >> c.dst >> c.delay >> c.weight;
    assert(!cin.fail());
    cin >> ws;

    // first sanity check
    assert(c.src < maxNeurons);
    assert(c.dst < maxNeurons);
    assert(c.delay > 0.0);
    assert(c.weight != 0.0);

    // store (to sort)
    cons[c.src]->insert(make_pair(c.delay, c));
  }
}

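// Connect every neuron in the upper half of the id range (the pseudo/trainer
// neurons) to its counterpart in the lower half, using a minimal delay and the
// fixed ModelConsts::TrainerInput weight.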
void addPseudo() {
  const Ptr<Neuron>::ptr_t half = maxNeurons/2;
  assert(numActualNeurons <= half);
  for (Ptr<Neuron>::ptr_t i=0; i<half; i++) {
    Connection c;
    c.src = i + half;
    c.dst = i;
    c.delay = Time::epsilon()();
    c.weight = ModelConsts::TrainerInput;
    cons[c.src]->insert(make_pair(c.delay, c));
  }
}

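// Write a single synapse weight at time 0 through the property composition.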
void setWeight(const sp_t synapse, Weight::type weight) {
  PLA_Set<Weight> pla{Time{0}, Ptr<Synapse>{synapse}, weight};
  pc.call(pla);
}

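// Build the flat delay/target tables (maxSynapsesPerNeuron slots per neuron),
// hand them to the Topology ctor, and set the synapse weights. Unused slots get
// a nil target, a negative delay and an infinite weight so that accidental use
// trips an error downstream.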
void writeTopology() {
  // fill topology tables
  Array<Time, maxNeurons * maxSynapsesPerNeuron> &delay
    = *(new Array<Time, maxNeurons * maxSynapsesPerNeuron>());
  Array<Ptr<Synapse>::ptr_t, maxNeurons * maxSynapsesPerNeuron> &target
    = *(new Array<Ptr<Synapse>::ptr_t, maxNeurons * maxSynapsesPerNeuron>());

  op_t currentSynapse[maxNeurons];
  memset(currentSynapse, 0, sizeof(currentSynapse));

  for (np_t i=0; i<maxNeurons; i++) {
    op_t j=0;
    multimap<Time, Connection> &l = *(cons[i]);
    assert(l.size() < maxSynapsesPerNeuron); // last synapse required for nil

    for (multimap<Time, Connection>::iterator k = l.begin(); k != l.end(); k++) {
      sp_t t = i * maxSynapsesPerNeuron + j;
      Connection &c = (*k).second;
      Time::type dt = c.delay;
      sp_t s = c.dst * maxSynapsesPerNeuron + currentSynapse[c.dst]++;

      delay.set (t, dt);
      target.set(t, s);
      setWeight(s, c.weight);
      j++;
    }

    // fill unused synapses
    for (; j<maxSynapsesPerNeuron; j++) {
      sp_t t = i * maxSynapsesPerNeuron + j;
      target.set(t, Topology::nil()());
      delay.set(t, Time(-666)); // HINT: this should raise a decreasing time error
    }
  }

  // write topology via special ctor
  { Topology(delay, target); }

  // initialize unused synapse weights to infinity to provoke errors if they are ever used
  for (np_t i=0; i<maxNeurons; i++) {
    while (currentSynapse[i] < maxSynapsesPerNeuron) {
      setWeight(i * maxSynapsesPerNeuron + currentSynapse[i],
                std::numeric_limits<Weight::type>::infinity());
      currentSynapse[i]++;
    }
  }
}

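// Sum the positive, finite weights stored on each neuron's synapses and record
// the result as both TargetSumWeight and SumWeight at time 0.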
void writeWeightSums() {
  Topology t;
  PropertyInstance<Weight, true> weights;
  for (auto neuron : Ptr<Global>().childs()) {
    SumWeight::type weightSum = 0;
    for (auto synapse : neuron.childs()) {
      Weight::type weight = weights.data.getValue(Time{0}, synapse());
      if (weight > 0 and weight < std::numeric_limits<Weight::type>::infinity())
        weightSum += weight;
    }
    PLA_Set<TargetSumWeight> s1{Time{0}, neuron, weightSum};
    PLA_Set<SumWeight>       s2{Time{0}, neuron, weightSum};
    pc.call(s1); pc.call(s2);
  }
}

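// Conversion pipeline: read the connection list from stdin, add the pseudo
// connections, then write the topology tables, weights and weight sums.
// Typical invocation (assumed, not documented here): ./convert_topology < connections.txt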
int main() {
  init();
  read();
  addPseudo();
  writeTopology();
  writeWeightSums();
}