Diffstat (limited to 'model/Rand.model')
-rw-r--r-- | model/Rand.model | 187 |
1 file changed, 187 insertions, 0 deletions
diff --git a/model/Rand.model b/model/Rand.model
new file mode 100644
index 0000000..54c3dd6
--- /dev/null
+++ b/model/Rand.model
@@ -0,0 +1,187 @@
+const const {
+    double FireThreshold = 0.015;     // [V], fasimu
+    double Tau_Voltage = 0.05;        // [s], fasimu
+    double RefractoryPeriod = 0.001;  // [s], fasimu
+    double Tau_Dopamine = 0.005;      // [s], fasimu
+    double TauEligibility = 0.2;      // [s], fasimu
+    double Tau_LTP = 0.014;           // [s], fasimu
+    double Tau_LTD = 0.034;           // [s], fasimu
+    double Delta_LTP = 1;             // [V]
+    double Delta_LTD = 1;             // [V]
+    double DeltaET_LTP = 0.000103;    // christina 2008 (poster)
+    double DeltaET_LTD = 0.000055;    // christina 2008 (poster)
+
+    double Tau_MomEst = 0.01;         // [s]
+    double Tau_SlowMomEst = 20;       // [s]
+
+    double TargetFreq = 10;           // [Hz]
+
+    double RandomFreq = 3.15;         // [Hz] (per neuron); is the break-even
+                                      // point (rand freq = neuron freq) w/o IP
+    double RandomSpikeWeight = 0.01;  // [V]
+
+    double MaxWeight = 0.004;         // [V]
+
+    int TrainerNumSymbols = 2;
+    double TrainerInput = 0.008;      // [V]
+    double TrainerInputWindow = 0.05; // [ms]
+    double TrainerInputFreq = 40;     // [Hz]
+    double TrainerWinAdv = 0;
+    double TrainerReward = 50;        // reduced to half as Tau_ET will be increased
+    double TrainerPunish = 0;
+    double TrainerReadoutDelay = 0.25;
+    double TrainerReadoutRandDelay = 0.05;
+    double TrainerReadoutWindow = 0.05;
+    double TrainerReadoutFreq = 40;   // [Hz]
+    double TrainerInterTrialDelay = 0.75;
+    double TrainerInterTrialRandDelay = 0.5;
+
+    double TrainerInitialDelay = 50;
+
+    int FanIn = 100;
+    int FanOut = 100;
+    int NumExcitatory = 800;
+
+    double LambdaIP1 = 0.00005;       // achieves steady state after ~50s
+}
+
+discrete GlobalMsg {
+    TrainerT NextTrainer;
+}
+
+continuous Global {
+    double Dopamine = 0.0;
+    TrainerT Trainer = TrainerT();
+    double Performance = 1.0 / TrainerNumSymbols;
+    uint16_t LastInput = 2;
+    uint16_t LastOutput = 2;
+    bool ResetSpikeCounter = true;
+
+    Dopamine' = Dopamine * exp(-dt / Tau_Dopamine);
+
+    on GlobalMsg {
+        Trainer' = NextTrainer;
+        Performance' = NextTrainer.performance;
+        LastInput' = NextTrainer.input;
+        LastOutput' = NextTrainer.output;
+        ResetSpikeCounter' = NextTrainer.resetCounter;
+    }
+
+    emit GlobalMsg {
+        default true;
+        after Trainer'.delay;
+
+        Dopamine' = Dopamine + Trainer.reward;
+        NextTrainer' = Trainer.update(pc, indices, queues, t);
+    }
+}
+
+continuous Neuron {
+    double Voltage = 0.0;
+    double LTDTrace = 0.0;
+    double RefractoryLeft = 0.0;
+    double SpikeRate = 0.0;
+    double Moment1 = TargetFreq;
+    double SlowMoment1 = TargetFreq;
+    double IPCoeff1 = 0.0;
+    double SumWeight = 0.0;        // set by convert_topology
+    double TargetSumWeight = 0.0;  // set by convert_topology
+    Time LastSpike = 0.0;
+    uint16_t SpikeCounter = 0;
+    RNG::seed_t RandomSeed = 0;    // initialized separately during bootstrap
+    bool RandomEnabled = false;    // an incoming randspike turns this to true,
+                                   // on outgoing to false again -> only
+                                   // incoming randspikes generate new ones
+
+    Voltage' = Voltage * exp(-dt / Tau_Voltage);
+    LTDTrace' = LTDTrace * exp(-dt / Tau_LTD);
+    RefractoryLeft' = fmax(0.0, RefractoryLeft - dt);
+    Moment1' = Moment1 * exp(-dt / Tau_MomEst);
+    SpikeCounter' = ResetSpikeCounter ? 0 : SpikeCounter;
+
+    on SpikeArrival {
+        SumWeight' = SumWeight + DeltaWeight;
+        Voltage' = Voltage + Weight;
+    }
+
+    on RandomSpike {
+        Voltage' = Voltage + RandomSpikeWeight + FireThreshold
+                   * (context.template getptr<Neuron>()() > NumExcitatory);
+        RandomEnabled' = context.template getptr<Neuron>()() < NumExcitatory;
+    }
+
+    emit Spike {
+        default true;
+        if Voltage' > FireThreshold + _CP(IPCoeff1);
+        if RefractoryLeft' == 0.0;
+
+        LTDTrace' = Delta_LTD;
+        RefractoryLeft' = RefractoryPeriod * (context.template getptr<Neuron>()() < NumExcitatory);
+        Voltage' = 0;
+        LastSpike' = t;
+        SpikeRate' = 1 / (t() - LastSpike());
+
+        Moment1' = Moment1 + SpikeRate' * (1 - exp(- (t() - LastSpike()) / Tau_MomEst));
+        SlowMoment1' = SlowMoment1 * exp(- (t() - LastSpike()) / Tau_SlowMomEst)
+                       + SpikeRate' * (1 - exp(- (t() - LastSpike()) / Tau_SlowMomEst));
+        IPCoeff1' = (context.template getptr<Neuron>()() < NumExcitatory)
+                    ? fmax(-FireThreshold, IPCoeff1 - (t() - LastSpike()) * LambdaIP1 * (TargetFreq - SpikeRate'))
+                    : IPCoeff1;
+
+        SpikeCounter' = ResetSpikeCounter ? 0 : SpikeCounter + 1;
+    }
+
+    emit RandomSpike {
+        default false;
+        if RandomEnabled' == true;
+        after RNG::expo(RandomSeed, 1.0 / RandomFreq);
+
+        RandomEnabled' = false;
+        RandomSeed' = RNG::next(RandomSeed);
+    }
+}
+
+continuous Synapse {
+    double Weight = 0.0;  // set by convert_topology
+    double DeltaWeight = 0;
+    double TmpDeltaWeight = 0;
+    double EligibilityTrace = 0.0;
+    double LTPTrace = 0.0;
+
+    TmpDeltaWeight' = (Weight >= 0 && Weight <= MaxWeight
+                       && (context.template getptr<Neuron>()() < NumExcitatory))
+                      ? TmpDeltaWeight
+                        + EligibilityTrace * Dopamine
+                          * Tau_Dopamine * TauEligibility
+                          / (Tau_Dopamine + TauEligibility)
+                          * (1.0 - exp( (-(Tau_Dopamine + TauEligibility))
+                                        / (Tau_Dopamine * TauEligibility)
+                                        * dt))
+                      : 0;
+    // HACK: we compare the begin of the time interval to evolve with the time
+    // of the last outgoing spike; if equal, this is the first time that the
+    // synapse is evolved since that spike and we can add LTDTrace
+    EligibilityTrace' = EligibilityTrace * exp(-dt / TauEligibility)
+                        + (LastSpike == t) * DeltaET_LTP * LTPTrace';
+    LTPTrace' = LTPTrace * exp(-dt / Tau_LTP);
+
+    on Spike {
+        LTPTrace' = LTPTrace + Delta_LTP;
+        EligibilityTrace' = EligibilityTrace - DeltaET_LTD * LTDTrace;
+    }
+
+    emit SpikeArrival {
+        default true;  // unconditionally emit SA event (it is more like
+                       // a pseudo event)
+        Weight' = (Weight >= 0 && Weight <= MaxWeight
+                   && (context.template getptr<Neuron>()() < NumExcitatory))
+                  ? fmin(MaxWeight, fmax(0,
+                        (Weight + TmpDeltaWeight)
+                        / (SumWeight + TmpDeltaWeight)
+                        * TargetSumWeight))
+                  : Weight;
+        DeltaWeight' = Weight' - Weight;
+        TmpDeltaWeight' = 0;
+    }
+}
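
The continuous rules in the model (Dopamine', Voltage', LTDTrace', LTPTrace', EligibilityTrace') multiply each trace by exp(-dt / Tau), which is the exact solution of the leaky ODE dX/dt = -X / Tau over a step of arbitrary length, not a forward-Euler approximation. The following C++ sketch restates that update together with the exponential inter-arrival sampling behind `emit RandomSpike`; the helper names (decay_exact, next_random_spike_interval) and the assumption that RNG::expo is parameterized by the mean interval are illustrative, not part of the model.

// Minimal sketch (not part of the model): exact exponential decay of a trace
// over one step dt, and sampling of the next random-spike interval.
#include <cmath>
#include <random>

// Closed-form solution of dx/dt = -x / tau; exact for any step size dt.
double decay_exact(double x, double dt, double tau) {
    return x * std::exp(-dt / tau);
}

// Inter-arrival time with mean 1/random_freq, analogous to
// RNG::expo(RandomSeed, 1.0 / RandomFreq) in `emit RandomSpike`
// (assuming RNG::expo takes the mean interval as its second argument).
double next_random_spike_interval(std::mt19937_64& rng, double random_freq) {
    std::exponential_distribution<double> iat(random_freq);  // rate = random_freq
    return iat(rng);
}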
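In Synapse, TmpDeltaWeight' accumulates the integral of EligibilityTrace(s) * Dopamine(s) over the step: with both traces decaying exponentially from their start-of-step values e0 and d0, that integral has the closed form e0 * d0 * (Tau_Dopamine * TauEligibility) / (Tau_Dopamine + TauEligibility) * (1 - exp(-(Tau_Dopamine + TauEligibility) / (Tau_Dopamine * TauEligibility) * dt)), which is exactly the factor appearing in the model. A small sketch of that term, with illustrative names:

#include <cmath>

// Sketch only: closed-form integral of e0*exp(-s/tau_e) * d0*exp(-s/tau_d)
// for s in [0, dt] -- the reward-modulated increment that TmpDeltaWeight'
// accumulates for excitatory weights inside [0, MaxWeight].
double eligibility_dopamine_integral(double e0, double d0,
                                     double tau_e, double tau_d, double dt) {
    const double tau_prod = tau_e * tau_d;
    const double tau_sum = tau_e + tau_d;
    return e0 * d0 * tau_prod / tau_sum
           * (1.0 - std::exp(-tau_sum / tau_prod * dt));
}

With the constants above (TauEligibility = 0.2 s, Tau_Dopamine = 0.005 s) the prefactor tau_prod / tau_sum is roughly 0.0049 s, so the much faster dopamine decay sets the effective integration window.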
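The `emit Spike` block also drives intrinsic plasticity for excitatory neurons: the effective threshold is FireThreshold + _CP(IPCoeff1), and on each spike IPCoeff1 changes by -(t - LastSpike) * LambdaIP1 * (TargetFreq - SpikeRate'), clamped at -FireThreshold so the effective threshold never becomes negative. A hedged sketch of that rule (function and parameter names are illustrative):

#include <algorithm>

// Sketch of the intrinsic-plasticity update applied on each spike of an
// excitatory neuron.  The effective threshold is fire_threshold + ip_coeff;
// firing faster than target_freq raises it, firing slower lowers it, and the
// clamp at -fire_threshold keeps the effective threshold non-negative.
double update_ip_coeff(double ip_coeff, double inter_spike_interval,
                       double spike_rate, double target_freq,
                       double lambda_ip, double fire_threshold) {
    double updated = ip_coeff
                     - inter_spike_interval * lambda_ip * (target_freq - spike_rate);
    return std::max(-fire_threshold, updated);
}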
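Finally, `emit SpikeArrival` folds the accumulated TmpDeltaWeight into the weight and rescales it so that the summed weight tracked in SumWeight is pulled toward TargetSumWeight, clipping to [0, MaxWeight]; DeltaWeight' then carries the per-synapse change to the receiving neuron's SumWeight (see `on SpikeArrival` in Neuron). Below is a minimal sketch of that normalize-and-clip step, with illustrative names and no claim about how the simulator resolves SumWeight and TargetSumWeight across neurons:

#include <algorithm>

// Sketch of the normalize-and-clip step in `emit SpikeArrival`, assuming an
// excitatory weight currently inside [0, MaxWeight].  Variable names mirror
// the model; the struct and function are illustrative only.
struct WeightUpdate {
    double new_weight;    // Weight'
    double delta_weight;  // DeltaWeight', added to SumWeight on arrival
};

WeightUpdate normalize_weight(double weight, double tmp_delta_weight,
                              double sum_weight, double target_sum_weight,
                              double max_weight) {
    // Apply the accumulated reward-modulated change, then rescale so the
    // summed weight moves toward target_sum_weight, clipped to [0, max_weight].
    double scaled = (weight + tmp_delta_weight)
                    / (sum_weight + tmp_delta_weight)
                    * target_sum_weight;
    double new_weight = std::min(max_weight, std::max(0.0, scaled));
    return {new_weight, new_weight - weight};
}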