From 7b19d3827db8f538ae87ae8026202b8712bc8211 Mon Sep 17 00:00:00 2001
From: "Randall C. O'Reilly"
Date: Tue, 8 Oct 2024 14:38:31 -0700
Subject: [PATCH] axon: finally building just the axon dir

---
 axon/act.go          | 452 +++++++++++++++++++++----------------------
 axon/act_path.go     |   2 +-
 axon/helpers.go      |   3 +-
 axon/hip_net.go      |   4 +-
 axon/layerparams.go  | 314 +++++++++++++++---------------
 axon/learn.go        |  74 +++----
 axon/logging.go      |  43 ++--
 axon/looper.go       |   3 +-
 axon/path.go         |  60 +++---
 axon/path_compute.go |  56 +++---
 axon/pathparams.go   | 212 ++++++++++----------
 kinase/linear.go     |  31 +--
 12 files changed, 629 insertions(+), 625 deletions(-)

diff --git a/axon/act.go b/axon/act.go
index 868f02f2..0c45ccde 100644
--- a/axon/act.go
+++ b/axon/act.go
@@ -831,41 +831,41 @@ func (ac *ActParams) Update() {
 // of the decay parameter that then has impacts on learning rates etc.
 // see Act.Decay.LearnCa param controlling this
 func (ac *ActParams) DecayLearnCa(ctx *Context, ni, di uint32, decay float32) {
-	Neurons.SetSub(decay*Neurons[GnmdaLrn, ni, di], GnmdaLrn, ni, di)
-	Neurons.SetSub(decay*Neurons[NmdaCa, ni, di], NmdaCa, ni, di)
+	Neurons.SetSub(decay*Neurons[GnmdaLrn, ni, di], int(GnmdaLrn), int(ni), int(di))
+	Neurons.SetSub(decay*Neurons[NmdaCa, ni, di], int(NmdaCa), int(ni), int(di))
 
-	Neurons.SetSub(decay*Neurons[VgccCa, ni, di], VgccCa, ni, di)
-	Neurons.SetSub(decay*Neurons[VgccCaInt, ni, di], VgccCaInt, ni, di)
+	Neurons.SetSub(decay*Neurons[VgccCa, ni, di], int(VgccCa), int(ni), int(di))
+	Neurons.SetSub(decay*Neurons[VgccCaInt, ni, di], int(VgccCaInt), int(ni), int(di))
 
-	Neurons.SetSub(decay*Neurons[CaLrn, ni, di], CaLrn, ni, di)
+	Neurons.SetSub(decay*Neurons[CaLrn, ni, di], int(CaLrn), int(ni), int(di))
 
-	Neurons.SetSub(decay*Neurons[CaSpkM, ni, di], CaSpkM, ni, di)
-	Neurons.SetSub(decay*Neurons[CaSpkP, ni, di], CaSpkP, ni, di)
-	Neurons.SetSub(decay*Neurons[CaSpkD, ni, di], CaSpkD, ni, di)
+	Neurons.SetSub(decay*Neurons[CaSpkM, ni, di], int(CaSpkM), int(ni), int(di))
+	Neurons.SetSub(decay*Neurons[CaSpkP, ni, di], int(CaSpkP), int(ni), int(di))
+	Neurons.SetSub(decay*Neurons[CaSpkD, ni, di], int(CaSpkD), int(ni), int(di))
 
-	Neurons.SetSub(decay*Neurons[NrnCaM, ni, di], NrnCaM, ni, di)
-	Neurons.SetSub(decay*Neurons[NrnCaP, ni, di], NrnCaP, ni, di)
-	Neurons.SetSub(decay*Neurons[NrnCaD, ni, di], NrnCaD, ni, di)
+	Neurons.SetSub(decay*Neurons[NrnCaM, ni, di], int(NrnCaM), int(ni), int(di))
+	Neurons.SetSub(decay*Neurons[NrnCaP, ni, di], int(NrnCaP), int(ni), int(di))
+	Neurons.SetSub(decay*Neurons[NrnCaD, ni, di], int(NrnCaD), int(ni), int(di))
 	// recovers
-	Neurons.SetAdd(decay*(1.0-Neurons[SKCaIn, ni, di]), SKCaIn, ni, di)
-	Neurons.SetSub(decay*Neurons[SKCaR, ni, di], SKCaR, ni, di)
-	Neurons.SetSub(decay*Neurons[SKCaM, ni, di], SKCaM, ni, di)
+	Neurons.SetAdd(decay*(1.0-Neurons[SKCaIn, ni, di]), int(SKCaIn), int(ni), int(di))
+	Neurons.SetSub(decay*Neurons[SKCaR, ni, di], int(SKCaR), int(ni), int(di))
+	Neurons.SetSub(decay*Neurons[SKCaM, ni, di], int(SKCaM), int(ni), int(di))
 }
 
 // DecayAHP decays after-hyperpolarization variables
 // by given factor (typically Decay.AHP)
 func (ac *ActParams) DecayAHP(ctx *Context, ni, di uint32, decay float32) {
-	Neurons.SetSub(decay*Neurons[MahpN, ni, di], MahpN, ni, di)
-	Neurons.SetSub(decay*Neurons[Gmahp, ni, di], Gmahp, ni, di)
-	Neurons.SetSub(decay*Neurons[SahpCa, ni, di], SahpCa, ni, di)
-	Neurons.SetSub(decay*Neurons[SahpN, ni, di], SahpN, ni, di)
-	Neurons.SetSub(decay*Neurons[Gsahp, ni, di], Gsahp, ni, di)
-	Neurons.SetSub(decay*Neurons[GknaMed, ni, di], GknaMed, ni, di)
-	Neurons.SetSub(decay*Neurons[GknaSlow, ni, di], GknaSlow, ni, di)
+	Neurons.SetSub(decay*Neurons[MahpN, ni, di], int(MahpN), int(ni), int(di))
+	Neurons.SetSub(decay*Neurons[Gmahp, ni, di], int(Gmahp), int(ni), int(di))
+	Neurons.SetSub(decay*Neurons[SahpCa, ni, di], int(SahpCa), int(ni), int(di))
+	Neurons.SetSub(decay*Neurons[SahpN, ni, di], int(SahpN), int(ni), int(di))
+	Neurons.SetSub(decay*Neurons[Gsahp, ni, di], int(Gsahp), int(ni), int(di))
+	Neurons.SetSub(decay*Neurons[GknaMed, ni, di], int(GknaMed), int(ni), int(di))
+	Neurons.SetSub(decay*Neurons[GknaSlow, ni, di], int(GknaSlow), int(ni), int(di))
 	kirMrest := ac.Kir.Mrest
-	Neurons.SetAdd(decay*(kirMrest-Neurons[KirM, ni, di]), KirM, ni, di)
-	Neurons.SetSub(decay*Neurons[Gkir, ni, di], Gkir, ni, di)
+	Neurons.SetAdd(decay*(kirMrest-Neurons[KirM, ni, di]), int(KirM), int(ni), int(di))
+	Neurons.SetSub(decay*Neurons[Gkir, ni, di], int(Gkir), int(ni), int(di))
 }
 
 // DecayState decays the activation state toward initial values
@@ -875,68 +875,68 @@ func (ac *ActParams) DecayAHP(ctx *Context, ni, di uint32, decay float32) {
 // Called with ac.Decay.Act by Layer during NewState
 func (ac *ActParams) DecayState(ctx *Context, ni, di uint32, decay, glong, ahp float32) {
 	// always reset these -- otherwise get insanely large values that take forever to update
-	Neurons.Set(-1, ISIAvg, ni, di)
-	Neurons.Set(ac.Init.Act, ActInt, ni, di)
-	Neurons.Set(0, Spiked, ni, di)
+	Neurons.Set(-1, int(ISIAvg), int(ni), int(di))
+	Neurons.Set(ac.Init.Act, int(ActInt), int(ni), int(di))
+	Neurons.Set(0, int(Spiked), int(ni), int(di))
 	if decay > 0 { // no-op for most, but not all..
-		Neurons.Set(0, Spike, ni, di)
-		Neurons.SetSub(decay*(Neurons[Act, ni, di]-ac.Init.Act), Act, ni, di)
-		Neurons.SetSub(decay*(Neurons[ActInt, ni, di]-ac.Init.Act), ActInt, ni, di)
-		Neurons.SetSub(decay*(Neurons[GeSyn, ni, di]-NeuronAvgs[GeBase, ni]), GeSyn, ni, di)
-		Neurons.SetSub(decay*(Neurons[Ge, ni, di]-NeuronAvgs[GeBase, ni]), Ge, ni, di)
-		Neurons.SetSub(decay*(Neurons[Gi, ni, di]-NeuronAvgs[GiBase, ni]), Gi, ni, di)
-		Neurons.SetSub(decay*Neurons[Gk, ni, di], Gk, ni, di)
+		Neurons.Set(0, int(Spike), int(ni), int(di))
+		Neurons.SetSub(decay*(Neurons[Act, ni, di]-ac.Init.Act), int(Act), int(ni), int(di))
+		Neurons.SetSub(decay*(Neurons[ActInt, ni, di]-ac.Init.Act), int(ActInt), int(ni), int(di))
+		Neurons.SetSub(decay*(Neurons[GeSyn, ni, di]-NeuronAvgs[GeBase, ni]), int(GeSyn), int(ni), int(di))
+		Neurons.SetSub(decay*(Neurons[Ge, ni, di]-NeuronAvgs[GeBase, ni]), int(Ge), int(ni), int(di))
+		Neurons.SetSub(decay*(Neurons[Gi, ni, di]-NeuronAvgs[GiBase, ni]), int(Gi), int(ni), int(di))
+		Neurons.SetSub(decay*Neurons[Gk, ni, di], int(Gk), int(ni), int(di))
 
-		Neurons.SetSub(decay*(Neurons[Vm, ni, di]-ac.Init.Vm), Vm, ni, di)
+		Neurons.SetSub(decay*(Neurons[Vm, ni, di]-ac.Init.Vm), int(Vm), int(ni), int(di))
 
-		Neurons.SetSub(decay*Neurons[GeNoise, ni, di], GeNoise, ni, di)
-		Neurons.SetSub(decay*Neurons[GiNoise, ni, di], GiNoise, ni, di)
+		Neurons.SetSub(decay*Neurons[GeNoise, ni, di], int(GeNoise), int(ni), int(di))
+		Neurons.SetSub(decay*Neurons[GiNoise, ni, di], int(GiNoise), int(ni), int(di))
 
-		Neurons.SetSub(decay*Neurons[GiSyn, ni, di], GiSyn, ni, di)
+		Neurons.SetSub(decay*Neurons[GiSyn, ni, di], int(GiSyn), int(ni), int(di))
 
-		Neurons.SetSub(decay*Neurons[GeInt, ni, di], GeInt, ni, di)
-		Neurons.SetSub(decay*Neurons[GiInt, ni, di], GiInt, ni, di)
-		Neurons.SetSub(decay*Neurons[GeIntNorm, ni, di], GeIntNorm, ni, di)
+		Neurons.SetSub(decay*Neurons[GeInt, ni, di], int(GeInt), int(ni), int(di))
+		Neurons.SetSub(decay*Neurons[GiInt, ni, di], int(GiInt), int(ni), int(di))
+		Neurons.SetSub(decay*Neurons[GeIntNorm, ni, di], int(GeIntNorm), int(ni), int(di))
 	}
 
-	Neurons.SetSub(glong*(Neurons[VmDend, ni, di]-ac.Init.Vm), VmDend, ni, di)
+	Neurons.SetSub(glong*(Neurons[VmDend, ni, di]-ac.Init.Vm), int(VmDend), int(ni), int(di))
 
 	if ahp > 0 {
 		ac.DecayAHP(ctx, ni, di, ahp)
 	}
 
-	Neurons.SetSub(glong*Neurons[GgabaB, ni, di], GgabaB, ni, di)
-	Neurons.SetSub(glong*Neurons[GABAB, ni, di], GABAB, ni, di)
-	Neurons.SetSub(glong*Neurons[GABABx, ni, di], GABABx, ni, di)
+	Neurons.SetSub(glong*Neurons[GgabaB, ni, di], int(GgabaB), int(ni), int(di))
+	Neurons.SetSub(glong*Neurons[GABAB, ni, di], int(GABAB), int(ni), int(di))
+	Neurons.SetSub(glong*Neurons[GABABx, ni, di], int(GABABx), int(ni), int(di))
 
-	Neurons.SetSub(glong*Neurons[GnmdaSyn, ni, di], GnmdaSyn, ni, di)
-	Neurons.SetSub(glong*Neurons[Gnmda, ni, di], Gnmda, ni, di)
-	Neurons.SetSub(glong*Neurons[GMaintSyn, ni, di], GMaintSyn, ni, di)
-	Neurons.SetSub(glong*Neurons[GnmdaMaint, ni, di], GnmdaMaint, ni, di)
+	Neurons.SetSub(glong*Neurons[GnmdaSyn, ni, di], int(GnmdaSyn), int(ni), int(di))
+	Neurons.SetSub(glong*Neurons[Gnmda, ni, di], int(Gnmda), int(ni), int(di))
+	Neurons.SetSub(glong*Neurons[GMaintSyn, ni, di], int(GMaintSyn), int(ni), int(di))
+	Neurons.SetSub(glong*Neurons[GnmdaMaint, ni, di], int(GnmdaMaint), int(ni), int(di))
 
-	Neurons.SetSub(glong*Neurons[Gvgcc, ni, di], Gvgcc, ni, di)
-	Neurons.SetSub(glong*Neurons[VgccM, ni, di], VgccM, ni, di)
-	Neurons.SetSub(glong*Neurons[VgccH, ni, di], VgccH, ni, di)
-	Neurons.SetSub(glong*Neurons[Gak, ni, di], Gak, ni, di)
+	Neurons.SetSub(glong*Neurons[Gvgcc, ni, di], int(Gvgcc), int(ni), int(di))
+	Neurons.SetSub(glong*Neurons[VgccM, ni, di], int(VgccM), int(ni), int(di))
+	Neurons.SetSub(glong*Neurons[VgccH, ni, di], int(VgccH), int(ni), int(di))
+	Neurons.SetSub(glong*Neurons[Gak, ni, di], int(Gak), int(ni), int(di))
 	// don't mess with SKCa -- longer time scale
-	Neurons.SetSub(glong*Neurons[Gsk, ni, di], Gsk, ni, di)
+	Neurons.SetSub(glong*Neurons[Gsk, ni, di], int(Gsk), int(ni), int(di))
 
 	if ac.Decay.LearnCa > 0 { // learning-based Ca values -- not usual
 		ac.DecayLearnCa(ctx, ni, di, ac.Decay.LearnCa)
 	}
 
-	Neurons.Set(0, Inet, ni, di)
-	Neurons.Set(0, GeRaw, ni, di)
-	Neurons.Set(0, GiRaw, ni, di)
-	Neurons.Set(0, GModRaw, ni, di)
-	Neurons.Set(0, GModSyn, ni, di)
-	Neurons.Set(0, GMaintRaw, ni, di)
-	Neurons.Set(0, SSGi, ni, di)
-	Neurons.Set(0, SSGiDend, ni, di)
-	Neurons.Set(0, GeExt, ni, di)
+	Neurons.Set(0, int(Inet), int(ni), int(di))
+	Neurons.Set(0, int(GeRaw), int(ni), int(di))
+	Neurons.Set(0, int(GiRaw), int(ni), int(di))
+	Neurons.Set(0, int(GModRaw), int(ni), int(di))
+	Neurons.Set(0, int(GModSyn), int(ni), int(di))
+	Neurons.Set(0, int(GMaintRaw), int(ni), int(di))
+	Neurons.Set(0, int(SSGi), int(ni), int(di))
+	Neurons.Set(0, int(SSGiDend), int(ni), int(di))
+	Neurons.Set(0, int(GeExt), int(ni), int(di))
 
-	Neurons.SetSub(glong*Neurons[CtxtGeOrig, ni, di], CtxtGeOrig, ni, di)
+	Neurons.SetSub(glong*Neurons[CtxtGeOrig, ni, di], int(CtxtGeOrig), int(ni), int(di))
 }
 
 //gosl:end
@@ -944,85 +944,85 @@ func (ac *ActParams) DecayState(ctx *Context, ni, di uint32, decay, glong, ahp f
 // InitActs initializes activation state in neuron -- called during InitWeights but otherwise not
 // automatically called (DecayState is used instead)
 func (ac *ActParams) InitActs(ctx *Context, ni, di uint32) {
-	Neurons.Set(0, Spike, ni, di)
-	Neurons.Set(0, Spiked, ni, di)
-	Neurons.Set(-1, ISI, ni, di)
-	Neurons.Set(-1, ISIAvg, ni, di)
-	Neurons.Set(ac.Init.Act, Act, ni, di)
-	Neurons.Set(ac.Init.Act, ActInt, ni, di)
-	Neurons.Set(NeuronAvgs[GeBase, ni], GeSyn, ni, di)
-	Neurons.Set(NeuronAvgs[GeBase, ni], Ge, ni, di)
-	Neurons.Set(NeuronAvgs[GiBase, ni], Gi, ni, di)
-	Neurons.Set(0, Gk, ni, di)
-	Neurons.Set(0, Inet, ni, di)
-	Neurons.Set(ac.Init.Vm, Vm, ni, di)
-	Neurons.Set(ac.Init.Vm, VmDend, ni, di)
-	Neurons.Set(0, Target, ni, di)
-	Neurons.Set(0, Ext, ni, di)
-
-	Neurons.Set(0, SpkMaxCa, ni, di)
-	Neurons.Set(0, SpkMax, ni, di)
-	Neurons.Set(1, RLRate, ni, di)
-
-	Neurons.Set(1, GeNoiseP, ni, di)
-	Neurons.Set(0, GeNoise, ni, di)
-	Neurons.Set(1, GiNoiseP, ni, di)
-	Neurons.Set(0, GiNoise, ni, di)
-	Neurons.Set(0, GiSyn, ni, di)
-	Neurons.Set(1, SMaintP, ni, di)
-
-	Neurons.Set(0, GeInt, ni, di)
-	Neurons.Set(0, GeIntNorm, ni, di)
-	Neurons.Set(0, GiInt, ni, di)
-
-	Neurons.Set(0, MahpN, ni, di)
-	Neurons.Set(0, Gmahp, ni, di)
-	Neurons.Set(0, SahpCa, ni, di)
-	Neurons.Set(0, SahpN, ni, di)
-	Neurons.Set(0, Gsahp, ni, di)
-	Neurons.Set(0, GknaMed, ni, di)
-	Neurons.Set(0, GknaSlow, ni, di)
-	Neurons.Set(ac.Kir.Mrest, KirM, ni, di)
-	Neurons.Set(0, Gkir, ni, di)
-
-	Neurons.Set(0, GnmdaSyn, ni, di)
-	Neurons.Set(0, Gnmda, ni, di)
-	Neurons.Set(0, GnmdaMaint, ni, di)
-	Neurons.Set(0, GnmdaLrn, ni, di)
-	Neurons.Set(0, NmdaCa, ni, di)
-
-	Neurons.Set(0, GgabaB, ni, di)
-	Neurons.Set(0, GABAB, ni, di)
-	Neurons.Set(0, GABABx, ni, di)
-
-	Neurons.Set(0, Gvgcc, ni, di)
-	Neurons.Set(0, VgccM, ni, di)
-	Neurons.Set(0, VgccH, ni, di)
-	Neurons.Set(0, Gak, ni, di)
-	Neurons.Set(0, VgccCaInt, ni, di)
-
-	Neurons.Set(1, SKCaIn, ni, di)
-	Neurons.Set(0, SKCaR, ni, di)
-	Neurons.Set(0, SKCaM, ni, di)
-	Neurons.Set(0, Gsk, ni, di)
-
-	Neurons.Set(0, GeExt, ni, di)
-	Neurons.Set(0, GeRaw, ni, di)
-	Neurons.Set(0, GiRaw, ni, di)
-	Neurons.Set(0, GModRaw, ni, di)
-	Neurons.Set(0, GModSyn, ni, di)
-	Neurons.Set(0, GMaintRaw, ni, di)
-	Neurons.Set(0, GMaintSyn, ni, di)
-
-	Neurons.Set(0, SSGi, ni, di)
-	Neurons.Set(0, SSGiDend, ni, di)
-
-	Neurons.Set(0, Burst, ni, di)
-	Neurons.Set(0, BurstPrv, ni, di)
-
-	Neurons.Set(0, CtxtGe, ni, di)
-	Neurons.Set(0, CtxtGeRaw, ni, di)
-	Neurons.Set(0, CtxtGeOrig, ni, di)
+	Neurons.Set(0, int(Spike), int(ni), int(di))
+	Neurons.Set(0, int(Spiked), int(ni), int(di))
+	Neurons.Set(-1, int(ISI), int(ni), int(di))
+	Neurons.Set(-1, int(ISIAvg), int(ni), int(di))
+	Neurons.Set(ac.Init.Act, int(Act), int(ni), int(di))
+	Neurons.Set(ac.Init.Act, int(ActInt), int(ni), int(di))
+	Neurons.Set(NeuronAvgs[GeBase, ni], int(GeSyn), int(ni), int(di))
+	Neurons.Set(NeuronAvgs[GeBase, ni], int(Ge), int(ni), int(di))
+	Neurons.Set(NeuronAvgs[GiBase, ni], int(Gi), int(ni), int(di))
+	Neurons.Set(0, int(Gk), int(ni), int(di))
+	Neurons.Set(0, int(Inet), int(ni), int(di))
+	Neurons.Set(ac.Init.Vm, int(Vm), int(ni), int(di))
+	Neurons.Set(ac.Init.Vm, int(VmDend), int(ni), int(di))
+	Neurons.Set(0, int(Target), int(ni), int(di))
+	Neurons.Set(0, int(Ext), int(ni), int(di))
+
+	Neurons.Set(0, int(SpkMaxCa), int(ni), int(di))
+	Neurons.Set(0, int(SpkMax), int(ni), int(di))
+	Neurons.Set(1, int(RLRate), int(ni), int(di))
+
+	Neurons.Set(1, int(GeNoiseP), int(ni), int(di))
+	Neurons.Set(0, int(GeNoise), int(ni), int(di))
+	Neurons.Set(1, int(GiNoiseP), int(ni), int(di))
+	Neurons.Set(0, int(GiNoise), int(ni), int(di))
+	Neurons.Set(0, int(GiSyn), int(ni), int(di))
+	Neurons.Set(1, int(SMaintP), int(ni), int(di))
+
+	Neurons.Set(0, int(GeInt), int(ni), int(di))
+	Neurons.Set(0, int(GeIntNorm), int(ni), int(di))
+	Neurons.Set(0, int(GiInt), int(ni), int(di))
+
+	Neurons.Set(0, int(MahpN), int(ni), int(di))
+	Neurons.Set(0, int(Gmahp), int(ni), int(di))
+	Neurons.Set(0, int(SahpCa), int(ni), int(di))
+	Neurons.Set(0, int(SahpN), int(ni), int(di))
+	Neurons.Set(0, int(Gsahp), int(ni), int(di))
+	Neurons.Set(0, int(GknaMed), int(ni), int(di))
+	Neurons.Set(0, int(GknaSlow), int(ni), int(di))
+	Neurons.Set(ac.Kir.Mrest, int(KirM), int(ni), int(di))
+	Neurons.Set(0, int(Gkir), int(ni), int(di))
+
+	Neurons.Set(0, int(GnmdaSyn), int(ni), int(di))
+	Neurons.Set(0, int(Gnmda), int(ni), int(di))
+	Neurons.Set(0, int(GnmdaMaint), int(ni), int(di))
+	Neurons.Set(0, int(GnmdaLrn), int(ni), int(di))
+	Neurons.Set(0, int(NmdaCa), int(ni), int(di))
+
+	Neurons.Set(0, int(GgabaB), int(ni), int(di))
+	Neurons.Set(0, int(GABAB), int(ni), int(di))
+	Neurons.Set(0, int(GABABx), int(ni), int(di))
+
+	Neurons.Set(0, int(Gvgcc), int(ni), int(di))
+	Neurons.Set(0, int(VgccM), int(ni), int(di))
+	Neurons.Set(0, int(VgccH), int(ni), int(di))
+	Neurons.Set(0, int(Gak), int(ni), int(di))
+	Neurons.Set(0, int(VgccCaInt), int(ni), int(di))
+
+	Neurons.Set(1, int(SKCaIn), int(ni), int(di))
+	Neurons.Set(0, int(SKCaR), int(ni), int(di))
+	Neurons.Set(0, int(SKCaM), int(ni), int(di))
+	Neurons.Set(0, int(Gsk), int(ni), int(di))
+
+	Neurons.Set(0, int(GeExt), int(ni), int(di))
+	Neurons.Set(0, int(GeRaw), int(ni), int(di))
+	Neurons.Set(0, int(GiRaw), int(ni), int(di))
+	Neurons.Set(0, int(GModRaw), int(ni), int(di))
+	Neurons.Set(0, int(GModSyn), int(ni), int(di))
+	Neurons.Set(0, int(GMaintRaw), int(ni), int(di))
+	Neurons.Set(0, int(GMaintSyn), int(ni), int(di))
+
+	Neurons.Set(0, int(SSGi), int(ni), int(di))
+	Neurons.Set(0, int(SSGiDend), int(ni), int(di))
+
+	Neurons.Set(0, int(Burst), int(ni), int(di))
+	Neurons.Set(0, int(BurstPrv), int(ni), int(di))
+
+	Neurons.Set(0, int(CtxtGe), int(ni), int(di))
+	Neurons.Set(0, int(CtxtGeRaw), int(ni), int(di))
+	Neurons.Set(0, int(CtxtGeOrig), int(ni), int(di))
 
 	ac.InitLongActs(ctx, ni, di)
 }
 
@@ -1033,11 +1033,11 @@ func (ac *ActParams) InitActs(ctx *Context, ni, di uint32) {
 // but otherwise not automatically called
 // (DecayState is used instead)
 func (ac *ActParams) InitLongActs(ctx *Context, ni, di uint32) {
-	Neurons.Set(0, SpkPrv, ni, di)
-	Neurons.Set(0, SpkSt1, ni, di)
-	Neurons.Set(0, SpkSt2, ni, di)
-	Neurons.Set(0, ActM, ni, di)
-	Neurons.Set(0, ActP, ni, di)
+	Neurons.Set(0, int(SpkPrv), int(ni), int(di))
+	Neurons.Set(0, int(SpkSt1), int(ni), int(di))
+	Neurons.Set(0, int(SpkSt2), int(ni), int(di))
+	Neurons.Set(0, int(ActM), int(ni), int(di))
+	Neurons.Set(0, int(ActP), int(ni), int(di))
 }
 
 //gosl:start
@@ -1054,8 +1054,8 @@ func (ac *ActParams) NMDAFromRaw(ctx *Context, ni, di uint32, geTot float32) {
 	if geTot < 0 {
 		geTot = 0
 	}
-	Neurons.Set(ac.NMDA.NMDASyn(Neurons[GnmdaSyn, ni, di], geTot), GnmdaSyn, ni, di)
-	Neurons.Set(ac.NMDA.Gnmda(Neurons[GnmdaSyn, ni, di], Neurons[VmDend, ni, di]), Gnmda, ni, di)
+	Neurons.Set(ac.NMDA.NMDASyn(Neurons[GnmdaSyn, ni, di], geTot), int(GnmdaSyn), int(ni), int(di))
+	Neurons.Set(ac.NMDA.Gnmda(Neurons[GnmdaSyn, ni, di], Neurons[VmDend, ni, di]), int(Gnmda), int(ni), int(di))
 	// note: nrn.NmdaCa computed via Learn.LrnNMDA in learn.go, CaM method
 }
 
@@ -1068,25 +1068,25 @@ func (ac *ActParams) MaintNMDAFromRaw(ctx *Context, ni, di uint32) {
 	if ac.SMaint.On.IsTrue() {
 		ac.SMaintFromISI(ctx, ni, di)
 	}
-	Neurons.Set(ac.MaintNMDA.NMDASyn(Neurons[GMaintSyn, ni, di], Neurons[GMaintRaw, ni, di]), GMaintSyn, ni, di)
-	Neurons.Set(ac.MaintNMDA.Gnmda(Neurons[GMaintSyn, ni, di], Neurons[VmDend, ni, di]), GnmdaMaint, ni, di)
+	Neurons.Set(ac.MaintNMDA.NMDASyn(Neurons[GMaintSyn, ni, di], Neurons[GMaintRaw, ni, di]), int(GMaintSyn), int(ni), int(di))
+	Neurons.Set(ac.MaintNMDA.Gnmda(Neurons[GMaintSyn, ni, di], Neurons[VmDend, ni, di]), int(GnmdaMaint), int(ni), int(di))
 }
 
 // SMaintFromISI updates the SMaint self-maintenance current into GMaintRaw
 func (ac *ActParams) SMaintFromISI(ctx *Context, ni, di uint32) {
-	isi := Neurons.Value(ISIAvg, ni, di)
+	isi := Neurons.Value(int(ISIAvg), int(ni), int(di))
 	if isi < ac.SMaint.ISI.Min || isi > ac.SMaint.ISI.Max {
 		return
 	}
 	ndi := di*ctx.NetIndexes.NNeurons + ni
-	smp := Neurons.Value(SMaintP, ni, di)
+	smp := Neurons.Value(int(SMaintP), int(ni), int(di))
 	smp *= GetRandomNumber(ndi, ctx.RandCtr, RandFunActSMaintP)
 	trg := ac.SMaint.ExpInt(isi)
 	if smp <= trg {
 		smp = 1
-		Neurons.SetAdd(ac.SMaint.Gbar, GMaintRaw, ni, di)
+		Neurons.SetAdd(ac.SMaint.Gbar, int(GMaintRaw), int(ni), int(di))
 	}
-	Neurons.Set(smp, SMaintP, ni, di)
+	Neurons.Set(smp, int(SMaintP), int(ni), int(di))
 }
 
 // GvgccFromVm updates all the VGCC voltage-gated calcium channel variables
@@ -1095,52 +1095,52 @@ func (ac *ActParams) GvgccFromVm(ctx *Context, ni, di uint32) {
 	if ac.VGCC.Gbar == 0 {
 		return
 	}
-	Neurons.Set(ac.VGCC.Gvgcc(Neurons[VmDend, ni, di], Neurons[VgccM, ni, di], Neurons[VgccH, ni, di]), Gvgcc, ni, di)
+	Neurons.Set(ac.VGCC.Gvgcc(Neurons[VmDend, ni, di], Neurons[VgccM, ni, di], Neurons[VgccH, ni, di]), int(Gvgcc), int(ni), int(di))
 	var dm, dh float32
-	ac.VGCC.DMHFromV(Neurons.Value(VmDend, ni, di), Neurons.Value(VgccM, ni, di), Neurons.Value(VgccH, ni, di), &dm, &dh)
-	Neurons.SetAdd(dm, VgccM, ni, di)
-	Neurons.SetAdd(dh, VgccH, ni, di)
+	ac.VGCC.DMHFromV(Neurons.Value(int(VmDend), int(ni), int(di)), Neurons.Value(int(VgccM), int(ni), int(di)), Neurons.Value(int(VgccH), int(ni), int(di)), &dm, &dh)
+	Neurons.SetAdd(dm, int(VgccM), int(ni), int(di))
+	Neurons.SetAdd(dh, int(VgccH), int(ni), int(di))
 	// note: may be overwritten!
-	Neurons.Set(ac.VGCC.CaFromG(Neurons[VmDend, ni, di], Neurons[Gvgcc, ni, di], Neurons[VgccCa, ni, di]), VgccCa, ni, di)
+	Neurons.Set(ac.VGCC.CaFromG(Neurons[VmDend, ni, di], Neurons[Gvgcc, ni, di], Neurons[VgccCa, ni, di]), int(VgccCa), int(ni), int(di))
 }
 
 // GkFromVm updates all the Gk-based conductances: Mahp, KNa, Gak
 func (ac *ActParams) GkFromVm(ctx *Context, ni, di uint32) {
-	vm := Neurons.Value(Vm, ni, di)
-	vmd := Neurons.Value(VmDend, ni, di)
-	mahpN := Neurons.Value(MahpN, ni, di)
+	vm := Neurons.Value(int(Vm), int(ni), int(di))
+	vmd := Neurons.Value(int(VmDend), int(ni), int(di))
+	mahpN := Neurons.Value(int(MahpN), int(ni), int(di))
 	gmahp := ac.Mahp.GmAHP(vm, &mahpN)
-	Neurons.Set(gmahp, Gmahp, ni, di)
-	Neurons.Set(mahpN, MahpN, ni, di)
+	Neurons.Set(gmahp, int(Gmahp), int(ni), int(di))
+	Neurons.Set(mahpN, int(MahpN), int(ni), int(di))
 
-	gsahp := Neurons.Value(Gsahp, ni, di)
+	gsahp := Neurons.Value(int(Gsahp), int(ni), int(di))
 
 	gak := ac.AK.Gak(vmd)
-	Neurons.Set(gak, Gak, ni, di)
+	Neurons.Set(gak, int(Gak), int(ni), int(di))
 
-	nrnKirM := Neurons.Value(KirM, ni, di)
+	nrnKirM := Neurons.Value(int(KirM), int(ni), int(di))
 	gkir := ac.Kir.Gkir(vm, &nrnKirM)
-	Neurons.Set(gkir, Gkir, ni, di)
-	Neurons.Set(nrnKirM, KirM, ni, di)
+	Neurons.Set(gkir, int(Gkir), int(ni), int(di))
+	Neurons.Set(nrnKirM, int(KirM), int(ni), int(di))
 
 	gktot := gmahp + gsahp + gak + gkir
 	if ac.KNa.On.IsTrue() {
-		gknaMed := Neurons.Value(GknaMed, ni, di)
-		gknaSlow := Neurons.Value(GknaSlow, ni, di)
-		ac.KNa.GcFromSpike(&gknaMed, &gknaSlow, Neurons.Value(Spike, ni, di) > .5)
-		Neurons.Set(gknaMed, GknaMed, ni, di)
-		Neurons.Set(gknaSlow, GknaSlow, ni, di)
+		gknaMed := Neurons.Value(int(GknaMed), int(ni), int(di))
+		gknaSlow := Neurons.Value(int(GknaSlow), int(ni), int(di))
+		ac.KNa.GcFromSpike(&gknaMed, &gknaSlow, Neurons.Value(int(Spike), int(ni), int(di)) > .5)
+		Neurons.Set(gknaMed, int(GknaMed), int(ni), int(di))
+		Neurons.Set(gknaSlow, int(GknaSlow), int(ni), int(di))
 		gktot += gknaMed + gknaSlow
 	}
-	Neurons.Set(gktot, Gk, ni, di)
+	Neurons.Set(gktot, int(Gk), int(ni), int(di))
 }
 
 // KNaNewState does TrialSlow version of KNa during NewState if option is set
 func (ac *ActParams) KNaNewState(ctx *Context, ni, di uint32) {
 	if ac.KNa.On.IsTrue() && ac.KNa.TrialSlow.IsTrue() {
-		Neurons.SetAdd(ac.KNa.Slow.Max*Neurons[SpkPrv, ni, di], GknaSlow, ni, di)
+		Neurons.SetAdd(ac.KNa.Slow.Max*Neurons[SpkPrv, ni, di], int(GknaSlow), int(ni), int(di))
 	}
 }
 
@@ -1149,34 +1149,34 @@ func (ac *ActParams) GSkCaFromCa(ctx *Context, ni, di uint32) {
 	if ac.SKCa.Gbar == 0 {
 		return
 	}
-	skcar := Neurons.Value(SKCaR, ni, di)
-	skcain := Neurons.Value(SKCaIn, ni, di)
-	Neurons.Set(ac.SKCa.MFromCa(skcar, Neurons[SKCaM, ni, di]), SKCaM, ni, di)
-	ac.SKCa.CaInRFromSpike(Neurons.Value(Spike, ni, di), Neurons.Value(CaSpkD, ni, di), &skcain, &skcar)
-	Neurons.Set(skcar, SKCaR, ni, di)
-	Neurons.Set(skcain, SKCaIn, ni, di)
-	Neurons.Set(ac.SKCa.Gbar*Neurons[SKCaM, ni, di], Gsk, ni, di)
-	Neurons.SetAdd(Neurons[Gsk, ni, di], Gk, ni, di)
+	skcar := Neurons.Value(int(SKCaR), int(ni), int(di))
+	skcain := Neurons.Value(int(SKCaIn), int(ni), int(di))
+	Neurons.Set(ac.SKCa.MFromCa(skcar, Neurons[SKCaM, ni, di]), int(SKCaM), int(ni), int(di))
+	ac.SKCa.CaInRFromSpike(Neurons.Value(int(Spike), int(ni), int(di)), Neurons.Value(int(CaSpkD), int(ni), int(di)), &skcain, &skcar)
+	Neurons.Set(skcar, int(SKCaR), int(ni), int(di))
+	Neurons.Set(skcain, int(SKCaIn), int(ni), int(di))
+	Neurons.Set(ac.SKCa.Gbar*Neurons[SKCaM, ni, di], int(Gsk), int(ni), int(di))
+	Neurons.SetAdd(Neurons[Gsk, ni, di], int(Gk), int(ni), int(di))
 }
 
 // GeFromSyn integrates Ge excitatory conductance from GeSyn.
 // geExt is extra conductance to add to the final Ge value
 func (ac *ActParams) GeFromSyn(ctx *Context, ni, di uint32, geSyn, geExt float32) {
-	Neurons.Set(0, GeExt, ni, di)
+	Neurons.Set(0, int(GeExt), int(ni), int(di))
 	if ac.Clamp.Add.IsTrue() && NrnHasFlag(ctx, ni, di, NeuronHasExt) {
-		Neurons.Set(Neurons[Ext, ni, di]*ac.Clamp.Ge, GeExt, ni, di)
-		geSyn += Neurons.Value(GeExt, ni, di)
+		Neurons.Set(Neurons[Ext, ni, di]*ac.Clamp.Ge, int(GeExt), int(ni), int(di))
+		geSyn += Neurons.Value(int(GeExt), int(ni), int(di))
 	}
 
 	if ac.Clamp.Add.IsFalse() && NrnHasFlag(ctx, ni, di, NeuronHasExt) { // todo: this flag check is not working
-		geSyn = Neurons.Value(Ext, ni, di) * ac.Clamp.Ge
-		Neurons.Set(geSyn, GeExt, ni, di)
+		geSyn = Neurons.Value(int(Ext), int(ni), int(di)) * ac.Clamp.Ge
+		Neurons.Set(geSyn, int(GeExt), int(ni), int(di))
 		geExt = 0 // no extra in this case
 	}
 
-	Neurons.Set(geSyn+geExt, Ge, ni, di)
-	if Neurons.Value(Ge, ni, di) < 0.0 {
-		Neurons.Set(0, Ge, ni, di)
+	Neurons.Set(geSyn+geExt, int(Ge), int(ni), int(di))
+	if Neurons.Value(int(Ge), int(ni), int(di)) < 0.0 {
+		Neurons.Set(0, int(Ge), int(ni), int(di))
 	}
 	ac.AddGeNoise(ctx, ni, di)
 }
 
@@ -1186,11 +1186,11 @@ func (ac *ActParams) AddGeNoise(ctx *Context, ni, di uint32) {
 	if ac.Noise.On.IsFalse() || ac.Noise.Ge == 0 {
 		return
 	}
-	p := Neurons.Value(GeNoiseP, ni, di)
+	p := Neurons.Value(int(GeNoiseP), int(ni), int(di))
 	ge := ac.Noise.PGe(ctx, &p, ni, di)
-	Neurons.Set(p, GeNoiseP, ni, di)
-	Neurons.Set(ac.Dt.GeSynFromRaw(Neurons[GeNoise, ni, di], ge), GeNoise, ni, di)
-	Neurons.SetAdd(Neurons[GeNoise, ni, di], Ge, ni, di)
+	Neurons.Set(p, int(GeNoiseP), int(ni), int(di))
+	Neurons.Set(ac.Dt.GeSynFromRaw(Neurons[GeNoise, ni, di], ge), int(GeNoise), int(ni), int(di))
+	Neurons.SetAdd(Neurons[GeNoise, ni, di], int(Ge), int(ni), int(di))
 }
 
 // AddGiNoise updates nrn.GiNoise if active
@@ -1198,10 +1198,10 @@ func (ac *ActParams) AddGiNoise(ctx *Context, ni, di uint32) {
 	if ac.Noise.On.IsFalse() || ac.Noise.Gi == 0 {
 		return
 	}
-	p := Neurons.Value(GiNoiseP, ni, di)
+	p := Neurons.Value(int(GiNoiseP), int(ni), int(di))
 	gi := ac.Noise.PGi(ctx, &p, ni, di)
-	Neurons.Set(p, GiNoiseP, ni, di)
-	Neurons.Set(ac.Dt.GiSynFromRaw(Neurons[GiNoise, ni, di], gi), GiNoise, ni, di)
+	Neurons.Set(p, int(GiNoiseP), int(ni), int(di))
+	Neurons.Set(ac.Dt.GiSynFromRaw(Neurons[GiNoise, ni, di], gi), int(GiNoise), int(ni), int(di))
 }
 
 // GiFromSyn integrates GiSyn inhibitory synaptic conductance from GiRaw value
@@ -1246,20 +1246,20 @@ func (ac *ActParams) VmFromG(ctx *Context, ni, di uint32) {
 	updtVm := true
 	// note: nrn.ISI has NOT yet been updated at this point: 0 right after spike, etc
 	// so it takes a full 3 time steps after spiking for Tr period
-	isi := Neurons.Value(ISI, ni, di)
+	isi := Neurons.Value(int(ISI), int(ni), int(di))
 	if ac.Spikes.Tr > 0 && isi >= 0 && isi < float32(ac.Spikes.Tr) {
 		updtVm = false // don't update the spiking vm during refract
 	}
 
-	ge := Neurons.Value(Ge, ni, di) * ac.Gbar.E
-	gi := Neurons.Value(Gi, ni, di) * ac.Gbar.I
-	gk := Neurons.Value(Gk, ni, di) * ac.Gbar.K
+	ge := Neurons.Value(int(Ge), int(ni), int(di)) * ac.Gbar.E
+	gi := Neurons.Value(int(Gi), int(ni), int(di)) * ac.Gbar.I
+	gk := Neurons.Value(int(Gk), int(ni), int(di)) * ac.Gbar.K
 	var nvm, inet, expi float32
 	if updtVm {
-		ac.VmInteg(Neurons.Value(Vm, ni, di), ac.Dt.VmDt, ge, 1, gi, gk, &nvm, &inet)
+		ac.VmInteg(Neurons.Value(int(Vm), int(ni), int(di)), ac.Dt.VmDt, ge, 1, gi, gk, &nvm, &inet)
 		if updtVm && ac.Spikes.Exp.IsTrue() { // add spike current if relevant
 			var exVm float32
-			exVm = 0.5 * (nvm + Neurons.Value(Vm, ni, di)) // midpoint for this
+			exVm = 0.5 * (nvm + Neurons.Value(int(Vm), int(ni), int(di))) // midpoint for this
 			expi = ac.Gbar.L * ac.Spikes.ExpSlope *
 				math32.FastExp((exVm-ac.Spikes.Thr)/ac.Spikes.ExpSlope)
 			if expi > ac.Dt.VmTau {
@@ -1268,17 +1268,17 @@ func (ac *ActParams) VmFromG(ctx *Context, ni, di uint32) {
 			inet += expi
 			nvm = ac.VmFromInet(nvm, ac.Dt.VmDt, expi)
 		}
-		Neurons.Set(nvm, Vm, ni, di)
-		Neurons.Set(inet, Inet, ni, di)
+		Neurons.Set(nvm, int(Vm), int(ni), int(di))
+		Neurons.Set(inet, int(Inet), int(ni), int(di))
 	} else { // decay back to VmR
 		var dvm float32
 		if int32(isi) == ac.Spikes.Tr-1 {
-			dvm = ac.Spikes.VmR - Neurons.Value(Vm, ni, di)
+			dvm = ac.Spikes.VmR - Neurons.Value(int(Vm), int(ni), int(di))
 		} else {
-			dvm = ac.Spikes.RDt * (ac.Spikes.VmR - Neurons.Value(Vm, ni, di))
+			dvm = ac.Spikes.RDt * (ac.Spikes.VmR - Neurons.Value(int(Vm), int(ni), int(di)))
 		}
-		Neurons.Set(Neurons[Vm, ni, di]+dvm, Vm, ni, di)
-		Neurons.Set(dvm*ac.Dt.VmTau, Inet, ni, di)
+		Neurons.Set(Neurons[Vm, ni, di]+dvm, int(Vm), int(ni), int(di))
+		Neurons.Set(dvm*ac.Dt.VmTau, int(Inet), int(ni), int(di))
 	}
 
 	glEff := float32(1)
@@ -1286,12 +1286,12 @@ func (ac *ActParams) VmFromG(ctx *Context, ni, di uint32) {
 		glEff += ac.Dend.GbarR
 	}
 	var giEff float32
-	giEff = gi + ac.Gbar.I*Neurons.Value(SSGiDend, ni, di)
-	ac.VmInteg(Neurons.Value(VmDend, ni, di), ac.Dt.VmDendDt, ge, glEff, giEff, gk, &nvm, &inet)
+	giEff = gi + ac.Gbar.I*Neurons.Value(int(SSGiDend), int(ni), int(di))
+	ac.VmInteg(Neurons.Value(int(VmDend), int(ni), int(di)), ac.Dt.VmDendDt, ge, glEff, giEff, gk, &nvm, &inet)
 	if updtVm {
 		nvm = ac.VmFromInet(nvm, ac.Dt.VmDendDt, ac.Dend.GbarExp*expi)
 	}
-	Neurons.Set(nvm, VmDend, ni, di)
+	Neurons.Set(nvm, int(VmDend), int(ni), int(di))
 }
 
 // SpikeFromVmVars computes Spike from Vm and ISI-based activation, using pointers to variables
@@ -1342,18 +1342,18 @@ func (ac *ActParams) SpikeFromVmVars(nrnISI, nrnISIAvg, nrnSpike, nrnSpiked, nrn
 
 // SpikeFromVm computes Spike from Vm and ISI-based activation
 func (ac *ActParams) SpikeFromVm(ctx *Context, ni, di uint32) {
-	nrnISI := Neurons.Value(ISI, ni, di)
-	nrnISIAvg := Neurons.Value(ISIAvg, ni, di)
-	nrnSpike := Neurons.Value(Spike, ni, di)
-	nrnSpiked := Neurons.Value(Spiked, ni, di)
-	nrnAct := Neurons.Value(Act, ni, di)
-	nrnVm := Neurons.Value(Vm, ni, di)
+	nrnISI := Neurons.Value(int(ISI), int(ni), int(di))
+	nrnISIAvg := Neurons.Value(int(ISIAvg), int(ni), int(di))
+	nrnSpike := Neurons.Value(int(Spike), int(ni), int(di))
+	nrnSpiked := Neurons.Value(int(Spiked), int(ni), int(di))
+	nrnAct := Neurons.Value(int(Act), int(ni), int(di))
+	nrnVm := Neurons.Value(int(Vm), int(ni), int(di))
 	ac.SpikeFromVmVars(&nrnISI, &nrnISIAvg, &nrnSpike, &nrnSpiked, &nrnAct, nrnVm)
-	Neurons.Set(nrnISI, ISI, ni, di)
-	Neurons.Set(nrnISIAvg, ISIAvg, ni, di)
-	Neurons.Set(nrnSpike, Spike, ni, di)
-	Neurons.Set(nrnSpiked, Spiked, ni, di)
-	Neurons.Set(nrnAct, Act, ni, di)
+	Neurons.Set(nrnISI, int(ISI), int(ni), int(di))
+	Neurons.Set(nrnISIAvg, int(ISIAvg), int(ni), int(di))
+	Neurons.Set(nrnSpike, int(Spike), int(ni), int(di))
+	Neurons.Set(nrnSpiked, int(Spiked), int(ni), int(di))
+	Neurons.Set(nrnAct, int(Act), int(ni), int(di))
 }
 
 //gosl:end
diff --git a/axon/act_path.go b/axon/act_path.go
index ceb2a8ac..363d2136 100644
--- a/axon/act_path.go
+++ b/axon/act_path.go
@@ -199,7 +199,7 @@ func (sc *SynComParams) WtFail(ctx *Context, swt float32) bool {
 func (sc *SynComParams) Fail(ctx *Context, syni uint32, swt float32) {
 	if sc.PFail > 0 {
 		if sc.WtFail(ctx, swt) {
-			Synapses.Set(0, Wt, syni)
+			Synapses.Set(0, int(Wt), int(syni))
 		}
 	}
 }
diff --git a/axon/helpers.go b/axon/helpers.go
index ce52e72a..da66f719 100644
--- a/axon/helpers.go
+++ b/axon/helpers.go
@@ -9,7 +9,6 @@ import (
 	"cogentcore.org/core/base/mpi"
 	"cogentcore.org/core/core"
-	"github.com/emer/emergent/v2/ecmd"
 )
 
 ////////////////////////////////////////////////////
@@ -51,6 +50,7 @@ func SaveWeights(net *Network, ctrString, runName string) string {
 	return fnm
 }
 
+/*
 // SaveWeightsIfArgSet saves network weights if the "wts" arg has been set to true.
 // uses WeightsFilename information to identify the weights.
 // only for 0 rank MPI if running mpi
@@ -61,6 +61,7 @@ func SaveWeightsIfArgSet(net *Network, args *ecmd.Args, ctrString, runName strin
 	}
 	return ""
 }
+*/
 
 // SaveWeightsIfConfigSet saves network weights if the given config
 // bool value has been set to true.
diff --git a/axon/hip_net.go b/axon/hip_net.go
index ed0f8c07..355cca10 100644
--- a/axon/hip_net.go
+++ b/axon/hip_net.go
@@ -7,7 +7,8 @@ package axon
 import (
 	"cogentcore.org/core/base/errors"
 	"cogentcore.org/core/math32/vecint"
-	"cogentcore.org/core/tensor/stats/norm"
+	"cogentcore.org/core/tensor"
+	"cogentcore.org/core/tensor/stats/stats"
 	"github.com/emer/emergent/v2/emer"
 	"github.com/emer/emergent/v2/etime"
 	"github.com/emer/emergent/v2/looper"
@@ -258,7 +258,7 @@ func (net *Network) ConfigLoopsHip(ctx *Context, man *looper.Manager, hip *HipCo
 	for di := uint32(0); di < ctx.NetIndexes.NData; di++ {
 		clampSrc.UnitValues(&tmpValues, "Act", int(di))
 		if hip.EC5ClampThr > 0 {
-			norm.Binarize32(tmpValues, hip.EC5ClampThr, 1, 0)
+			stats.Binarize32(tmpValues, tensor.NewFloat64Scalar(hip.EC5ClampThr))
 		}
 		ec5.ApplyExt1D32(ctx, di, tmpValues)
 	}
diff --git a/axon/layerparams.go b/axon/layerparams.go
index 497ab295..3dc20edf 100644
--- a/axon/layerparams.go
+++ b/axon/layerparams.go
@@ -104,15 +104,15 @@ type LayerInhibIndexes struct {
 func SetNeuronExtPosNeg(ctx *Context, ni, di uint32, val float32) {
 	if ni == 0 {
 		if val >= 0 {
-			Neurons.Set(val, Ext, ni, di)
+			Neurons.Set(val, int(Ext), int(ni), int(di))
 		} else {
-			Neurons.Set(0, Ext, ni, di)
+			Neurons.Set(0, int(Ext), int(ni), int(di))
 		}
 	} else {
 		if val >= 0 {
-			Neurons.Set(0, Ext, ni, di)
+			Neurons.Set(0, int(Ext), int(ni), int(di))
 		} else {
-			Neurons.Set(-val, Ext, ni, di)
+			Neurons.Set(-val, int(Ext), int(ni), int(di))
 		}
 	}
 }
@@ -327,8 +327,8 @@ func (ly *LayerParams) ApplyExtFlags(clearMask, setMask *NeuronFlags, toTarg *bo
 
 // InitExt initializes external input state for given neuron
 func (ly *LayerParams) InitExt(ctx *Context, ni, di uint32) {
-	Neurons.Set(0, Ext, ni, di)
-	Neurons.Set(0, Target, ni, di)
+	Neurons.Set(0, int(Ext), int(ni), int(di))
+	Neurons.Set(0, int(Target), int(ni), int(di))
 	NrnClearFlag(ctx, ni, di, NeuronHasExt|NeuronHasTarg|NeuronHasCmpr)
 }
@@ -344,9 +344,9 @@ func (ly *LayerParams) ApplyExtValue(ctx *Context, ni, di uint32, val float32) {
 	var toTarg bool
 	ly.ApplyExtFlags(&clearMask, &setMask, &toTarg)
 	if toTarg {
-		Neurons.Set(val, Target, ni, di)
+		Neurons.Set(val, int(Target), int(ni), int(di))
 	} else {
-		Neurons.Set(val, Ext, ni, di)
+		Neurons.Set(val, int(Ext), int(ni), int(di))
 	}
 	NrnClearFlag(ctx, ni, di, clearMask)
 	NrnSetFlag(ctx, ni, di, setMask)
@@ -435,14 +435,14 @@ func (ly *LayerParams) SubPoolGiFromSpikes(ctx *Context, di uint32, pl *Pool, lp
 // GatherSpikesInit initializes G*Raw and G*Syn values for given neuron
 // prior to integration
 func (ly *LayerParams) GatherSpikesInit(ctx *Context, ni, di uint32) {
-	Neurons.Set(0, GeRaw, ni, di)
-	Neurons.Set(0, GiRaw, ni, di)
-	Neurons.Set(0, GModRaw, ni, di)
-	Neurons.Set(0, GModSyn, ni, di)
-	Neurons.Set(0, GMaintRaw, ni, di)
-	Neurons.Set(0, CtxtGeRaw, ni, di)
-	Neurons.Set(NeuronAvgs[GeBase, ni], GeSyn, ni, di)
-	Neurons.Set(NeuronAvgs[GiBase, ni], GiSyn, ni, di)
+	Neurons.Set(0, int(GeRaw), int(ni), int(di))
+	Neurons.Set(0, int(GiRaw), int(ni), int(di))
+	Neurons.Set(0, int(GModRaw), int(ni), int(di))
+	Neurons.Set(0, int(GModSyn), int(ni), int(di))
+	Neurons.Set(0, int(GMaintRaw), int(ni), int(di))
+	Neurons.Set(0, int(CtxtGeRaw), int(ni), int(di))
+	Neurons.Set(NeuronAvgs[GeBase, ni], int(GeSyn), int(ni), int(di))
+	Neurons.Set(NeuronAvgs[GiBase, ni], int(GiSyn), int(ni), int(di))
 }
 
 ////////////////////////
@@ -455,33 +455,33 @@ func (ly *LayerParams) SpecialPreGs(ctx *Context, ni, di uint32, pl *Pool, vals
 	saveVal := float32(0) // sometimes we need to use a value computed here, for the post Gs step
 	pi := NrnI(ctx, ni, NrnSubPool) - 1 // 0-n pool index
 	pni := NrnI(ctx, ni, NrnNeurIndex) - pl.StIndex
-	nrnCtxtGe := Neurons.Value(CtxtGe, ni, di)
-	nrnGeRaw := Neurons.Value(GeRaw, ni, di)
+	nrnCtxtGe := Neurons.Value(int(CtxtGe), int(ni), int(di))
+	nrnGeRaw := Neurons.Value(int(GeRaw), int(ni), int(di))
 	hasRew := GlbV(ctx, di, GvHasRew) > 0
 	switch ly.LayType {
 	case CTLayer:
 		fallthrough
 	case PTPredLayer:
 		geCtxt := ly.CT.GeGain * nrnCtxtGe
-		Neurons.SetAdd(geCtxt, GeRaw, ni, di)
+		Neurons.SetAdd(geCtxt, int(GeRaw), int(ni), int(di))
 		if ly.CT.DecayDt > 0 {
-			Neurons.SetSub(ly.CT.DecayDt*nrnCtxtGe, CtxtGe, ni, di)
+			Neurons.SetSub(ly.CT.DecayDt*nrnCtxtGe, int(CtxtGe), int(ni), int(di))
 		}
 		ctxExt := ly.Acts.Dt.GeSynFromRawSteady(geCtxt)
-		Neurons.SetAdd(ctxExt, GeSyn, ni, di)
+		Neurons.SetAdd(ctxExt, int(GeSyn), int(ni), int(di))
 		saveVal = ctxExt // used In PostGs to set nrn.GeExt
 	case PTMaintLayer:
 		if ly.Acts.SMaint.On.IsTrue() {
-			saveVal = ly.Acts.SMaint.Inhib * Neurons.Value(GMaintRaw, ni, di) // used In PostGs to set nrn.GeExt
+			saveVal = ly.Acts.SMaint.Inhib * Neurons.Value(int(GMaintRaw), int(ni), int(di)) // used In PostGs to set nrn.GeExt
 		}
 	case PulvinarLayer:
 		if ctx.PlusPhase.IsFalse() {
 			break
 		}
 		// geSyn, goes into nrn.GeExt in PostGs, so inhibition gets it
-		saveVal = nonDrivePct*Neurons.Value(GeSyn, ni, di) + ly.Acts.Dt.GeSynFromRawSteady(drvGe)
-		Neurons.Set(nonDrivePct*nrnGeRaw+drvGe, GeRaw, ni, di)
-		Neurons.Set(saveVal, GeSyn, ni, di)
+		saveVal = nonDrivePct*Neurons.Value(int(GeSyn), int(ni), int(di)) + ly.Acts.Dt.GeSynFromRawSteady(drvGe)
+		Neurons.Set(nonDrivePct*nrnGeRaw+drvGe, int(GeRaw), int(ni), int(di))
+		Neurons.Set(saveVal, int(GeSyn), int(ni), int(di))
 	case VSGatedLayer:
 		dr := float32(0)
 		if pi == 0 {
@@ -490,16 +490,16 @@ func (ly *LayerParams) SpecialPreGs(ctx *Context, ni, di uint32, pl *Pool, vals
 			dr = GlbV(ctx, di, GvVSMatrixHasGated)
 		}
 		dr = math32.Abs(dr)
-		Neurons.Set(dr, GeRaw, ni, di)
-		Neurons.Set(ly.Acts.Dt.GeSynFromRawSteady(dr), GeSyn, ni, di)
+		Neurons.Set(dr, int(GeRaw), int(ni), int(di))
+		Neurons.Set(ly.Acts.Dt.GeSynFromRawSteady(dr), int(GeSyn), int(ni), int(di))
 	case BLALayer:
 		if ly.Learn.NeuroMod.IsBLAExt() {
 			mod := max(-GlbV(ctx, di, GvDA), 0) // ext is modulated by negative da
-			geCtxt := mod * ly.CT.GeGain * Neurons.Value(CtxtGeOrig, ni, di)
-			Neurons.SetAdd(geCtxt, GeRaw, ni, di)
+			geCtxt := mod * ly.CT.GeGain * Neurons.Value(int(CtxtGeOrig), int(ni), int(di))
+			Neurons.SetAdd(geCtxt, int(GeRaw), int(ni), int(di))
 			ctxExt := ly.Acts.Dt.GeSynFromRawSteady(geCtxt)
-			Neurons.SetAdd(ctxExt, GeSyn, ni, di)
+			Neurons.SetAdd(ctxExt, int(GeSyn), int(ni), int(di))
 			saveVal = ctxExt // used In PostGs to set nrn.GeExt
 		}
 	case LHbLayer:
@@ -509,24 +509,24 @@ func (ly *LayerParams) SpecialPreGs(ctx *Context, ni, di uint32, pl *Pool, vals
 		} else {
 			geRaw = 0.2 * math32.Abs(GlbV(ctx, di, GvLHbBurst))
 		}
-		Neurons.Set(geRaw, GeRaw, ni, di)
-		Neurons.Set(ly.Acts.Dt.GeSynFromRawSteady(geRaw), GeSyn, ni, di)
+		Neurons.Set(geRaw, int(GeRaw), int(ni), int(di))
+		Neurons.Set(ly.Acts.Dt.GeSynFromRawSteady(geRaw), int(GeSyn), int(ni), int(di))
 	case DrivesLayer:
 		dr := GlbUSposV(ctx, di, GvDrives, uint32(pi))
 		geRaw := dr
 		if dr > 0 {
 			geRaw = ly.Acts.PopCode.EncodeGe(pni, uint32(pl.NNeurons()), dr)
 		}
-		Neurons.Set(geRaw, GeRaw, ni, di)
-		Neurons.Set(ly.Acts.Dt.GeSynFromRawSteady(geRaw), GeSyn, ni, di)
+		Neurons.Set(geRaw, int(GeRaw), int(ni), int(di))
+		Neurons.Set(ly.Acts.Dt.GeSynFromRawSteady(geRaw), int(GeSyn), int(ni), int(di))
 	case UrgencyLayer:
 		ur := GlbV(ctx, di, GvUrgency)
 		geRaw := ur
 		if ur > 0 {
 			geRaw = ly.Acts.PopCode.EncodeGe(pni, uint32(pl.NNeurons()), ur)
 		}
-		Neurons.Set(geRaw, GeRaw, ni, di)
-		Neurons.Set(ly.Acts.Dt.GeSynFromRawSteady(geRaw), GeSyn, ni, di)
+		Neurons.Set(geRaw, int(GeRaw), int(ni), int(di))
+		Neurons.Set(ly.Acts.Dt.GeSynFromRawSteady(geRaw), int(GeSyn), int(ni), int(di))
	case USLayer:
 		us := RubiconUSStimValue(ctx, di, pi, ly.Learn.NeuroMod.Valence)
 		geRaw := us
@@ -535,8 +535,8 @@ func (ly *LayerParams) SpecialPreGs(ctx *Context, ni, di uint32, pl *Pool, vals
 		}
 		// D2Mod = final
 		if ly.Learn.NeuroMod.DAMod == D1Mod || (ly.Learn.NeuroMod.DAMod == D2Mod && hasRew && ctx.PlusPhase.IsTrue()) {
-			Neurons.Set(geRaw, GeRaw, ni, di)
-			Neurons.Set(ly.Acts.Dt.GeSynFromRawSteady(geRaw), GeSyn, ni, di)
+			Neurons.Set(geRaw, int(GeRaw), int(ni), int(di))
+			Neurons.Set(ly.Acts.Dt.GeSynFromRawSteady(geRaw), int(GeSyn), int(ni), int(di))
 		}
 	case PVLayer:
 		if hasRew && ctx.PlusPhase.IsTrue() {
@@ -547,29 +547,29 @@ func (ly *LayerParams) SpecialPreGs(ctx *Context, ni, di uint32, pl *Pool, vals
 				pv = GlbV(ctx, di, GvPVneg)
 			}
 			pc := ly.Acts.PopCode.EncodeGe(pni, ly.Indexes.NeurN, pv)
-			Neurons.Set(pc, GeRaw, ni, di)
-			Neurons.Set(ly.Acts.Dt.GeSynFromRawSteady(pc), GeSyn, ni, di)
+			Neurons.Set(pc, int(GeRaw), int(ni), int(di))
+			Neurons.Set(ly.Acts.Dt.GeSynFromRawSteady(pc), int(GeSyn), int(ni), int(di))
 		}
 	case LDTLayer:
 		geRaw := 0.4 * GlbV(ctx, di, GvACh)
-		Neurons.Set(geRaw, GeRaw, ni, di)
-		Neurons.Set(ly.Acts.Dt.GeSynFromRawSteady(geRaw), GeSyn, ni, di)
+		Neurons.Set(geRaw, int(GeRaw), int(ni), int(di))
+		Neurons.Set(ly.Acts.Dt.GeSynFromRawSteady(geRaw), int(GeSyn), int(ni), int(di))
 	case VTALayer:
 		geRaw := ly.RWDa.GeFromDA(GlbV(ctx, di, GvVtaDA))
-		Neurons.Set(geRaw, GeRaw, ni, di)
-		Neurons.Set(ly.Acts.Dt.GeSynFromRawSteady(geRaw), GeSyn, ni, di)
+		Neurons.Set(geRaw, int(GeRaw), int(ni), int(di))
+		Neurons.Set(ly.Acts.Dt.GeSynFromRawSteady(geRaw), int(GeSyn), int(ni), int(di))
 	case RewLayer:
 		NrnSetFlag(ctx, ni, di, NeuronHasExt)
 		SetNeuronExtPosNeg(ctx, ni, di, GlbV(ctx, di, GvRew)) // Rew must be set in Context!
	case RWDaLayer:
 		geRaw := ly.RWDa.GeFromDA(GlbV(ctx, di, GvDA))
-		Neurons.Set(geRaw, GeRaw, ni, di)
-		Neurons.Set(ly.Acts.Dt.GeSynFromRawSteady(geRaw), GeSyn, ni, di)
+		Neurons.Set(geRaw, int(GeRaw), int(ni), int(di))
+		Neurons.Set(ly.Acts.Dt.GeSynFromRawSteady(geRaw), int(GeSyn), int(ni), int(di))
 	case TDDaLayer:
 		geRaw := ly.TDDa.GeFromDA(GlbV(ctx, di, GvDA))
-		Neurons.Set(geRaw, GeRaw, ni, di)
-		Neurons.Set(ly.Acts.Dt.GeSynFromRawSteady(geRaw), GeSyn, ni, di)
+		Neurons.Set(geRaw, int(GeRaw), int(ni), int(di))
+		Neurons.Set(ly.Acts.Dt.GeSynFromRawSteady(geRaw), int(GeSyn), int(ni), int(di))
 	case TDIntegLayer:
 		NrnSetFlag(ctx, ni, di, NeuronHasExt)
 		SetNeuronExtPosNeg(ctx, ni, di, GlbV(ctx, di, GvRewPred))
@@ -589,12 +589,12 @@ func (ly *LayerParams) SpecialPostGs(ctx *Context, ni, di uint32, saveVal float3
 	case PTMaintLayer:
 		fallthrough
 	case PulvinarLayer:
-		Neurons.Set(saveVal, GeExt, ni, di)
+		Neurons.Set(saveVal, int(GeExt), int(ni), int(di))
 	case PTPredLayer:
-		Neurons.Set(saveVal, GeExt, ni, di)
-		orig := Neurons.Value(CtxtGeOrig, ni, di)
+		Neurons.Set(saveVal, int(GeExt), int(ni), int(di))
+		orig := Neurons.Value(int(CtxtGeOrig), int(ni), int(di))
 		if orig < 0.05 {
-			Neurons.Set(0, Ge, ni, di)
+			Neurons.Set(0, int(Ge), int(ni), int(di))
 		}
 	}
 }
@@ -605,8 +605,8 @@ func (ly *LayerParams) GFromRawSyn(ctx *Context, ni, di uint32) {
 	extraRaw := float32(0)
 	extraSyn := float32(0)
-	nrnGModRaw := Neurons.Value(GModRaw, ni, di)
-	nrnGModSyn := Neurons.Value(GModSyn, ni, di)
+	nrnGModRaw := Neurons.Value(int(GModRaw), int(ni), int(di))
+	nrnGModSyn := Neurons.Value(int(GModSyn), int(ni), int(di))
 	ach := GlbV(ctx, di, GvACh)
 	switch ly.LayType {
 	case PTMaintLayer:
@@ -616,8 +616,8 @@ func (ly *LayerParams) GFromRawSyn(ctx *Context, ni, di uint32) {
 		}
 		mod += ly.Acts.Dend.ModBase
 		// key: excluding GModMaint here, so active maintenance can persist
-		Neurons.SetMul(mod, GeRaw, ni, di)
-		Neurons.SetMul(mod, GeSyn, ni, di)
+		Neurons.SetMul(mod, int(GeRaw), int(ni), int(di))
+		Neurons.SetMul(mod, int(GeSyn), int(ni), int(di))
 		extraRaw = ly.Acts.Dend.ModGain * nrnGModRaw
 		if ly.Acts.Dend.ModACh.IsTrue() {
 			extraRaw *= ach
@@ -634,55 +634,55 @@ func (ly *LayerParams) GFromRawSyn(ctx *Context, ni, di uint32) {
 		if mod > 1 {
 			mod = 1
 		}
-		Neurons.SetMul(mod, GeRaw, ni, di)
-		Neurons.SetMul(mod, GeSyn, ni, di)
+		Neurons.SetMul(mod, int(GeRaw), int(ni), int(di))
+		Neurons.SetMul(mod, int(GeSyn), int(ni), int(di))
 		}
 	}
 
-	geRaw := Neurons.Value(GeRaw, ni, di)
-	geSyn := Neurons.Value(GeSyn, ni, di)
+	geRaw := Neurons.Value(int(GeRaw), int(ni), int(di))
+	geSyn := Neurons.Value(int(GeSyn), int(ni), int(di))
 	ly.Acts.NMDAFromRaw(ctx, ni, di, geRaw+extraRaw)
 	ly.Acts.MaintNMDAFromRaw(ctx, ni, di) // uses GMaintRaw directly
 	ly.Learn.LrnNMDAFromRaw(ctx, ni, di, geRaw)
 	ly.Acts.GvgccFromVm(ctx, ni, di)
-	ege := Neurons.Value(Gnmda, ni, di) + Neurons.Value(GnmdaMaint, ni, di) + Neurons.Value(Gvgcc, ni, di) + extraSyn
+	ege := Neurons.Value(int(Gnmda), int(ni), int(di)) + Neurons.Value(int(GnmdaMaint), int(ni), int(di)) + Neurons.Value(int(Gvgcc), int(ni), int(di)) + extraSyn
 	ly.Acts.GeFromSyn(ctx, ni, di, geSyn, ege) // sets nrn.GeExt too
 	ly.Acts.GkFromVm(ctx, ni, di)
 	ly.Acts.GSkCaFromCa(ctx, ni, di)
-	Neurons.Set(ly.Acts.GiFromSyn(ctx, ni, di, Neurons[GiSyn, ni, di]), GiSyn, ni, di)
+	Neurons.Set(ly.Acts.GiFromSyn(ctx, ni, di, Neurons[GiSyn, ni, di]), int(GiSyn), int(ni), int(di))
 }
 
 // GiInteg adds Gi values from all sources including SubPool computed inhib
 // and updates GABAB as well
 func (ly *LayerParams) GiInteg(ctx *Context, ni, di uint32, pl *Pool, vals *LayerValues) {
-	gi := vals.ActAvg.GiMult*pl.Inhib.Gi + Neurons.Value(GiSyn, ni, di) + Neurons.Value(GiNoise, ni, di) + ly.Learn.NeuroMod.GiFromACh(GlbV(ctx, di, GvACh))
-	Neurons.Set(gi, Gi, ni, di)
-	Neurons.Set(pl.Inhib.SSGi, SSGi, ni, di)
-	Neurons.Set(0, SSGiDend, ni, di)
+	gi := vals.ActAvg.GiMult*pl.Inhib.Gi + Neurons.Value(int(GiSyn), int(ni), int(di)) + Neurons.Value(int(GiNoise), int(ni), int(di)) + ly.Learn.NeuroMod.GiFromACh(GlbV(ctx, di, GvACh))
+	Neurons.Set(gi, int(Gi), int(ni), int(di))
+	Neurons.Set(pl.Inhib.SSGi, int(SSGi), int(ni), int(di))
+	Neurons.Set(0, int(SSGiDend), int(ni), int(di))
 	if ctx.PlusPhase.IsTrue() && ly.LayType == PulvinarLayer {
-		ext := Neurons.Value(Ext, ni, di) // nonDrivePct
-		Neurons.Set(ext*ly.Acts.Dend.SSGi*pl.Inhib.SSGi, SSGiDend, ni, di)
+		ext := Neurons.Value(int(Ext), int(ni), int(di)) // nonDrivePct
+		Neurons.Set(ext*ly.Acts.Dend.SSGi*pl.Inhib.SSGi, int(SSGiDend), int(ni), int(di))
 	} else {
 		if !(ly.Acts.Clamp.IsInput.IsTrue() || ly.Acts.Clamp.IsTarget.IsTrue()) {
-			Neurons.Set(ly.Acts.Dend.SSGi*pl.Inhib.SSGi, SSGiDend, ni, di)
+			Neurons.Set(ly.Acts.Dend.SSGi*pl.Inhib.SSGi, int(SSGiDend), int(ni), int(di))
 		}
 	}
-	vm := Neurons.Value(VmDend, ni, di)
-	nrnGABAB := Neurons.Value(GABAB, ni, di)
-	nrnGABABx := Neurons.Value(GABABx, ni, di)
+	vm := Neurons.Value(int(VmDend), int(ni), int(di))
+	nrnGABAB := Neurons.Value(int(GABAB), int(ni), int(di))
+	nrnGABABx := Neurons.Value(int(GABABx), int(ni), int(di))
 	ly.Acts.GabaB.GABAB(gi, &nrnGABAB, &nrnGABABx)
-	Neurons.Set(nrnGABAB, GABAB, ni, di)
-	Neurons.Set(nrnGABABx, GABABx, ni, di)
+	Neurons.Set(nrnGABAB, int(GABAB), int(ni), int(di))
+	Neurons.Set(nrnGABABx, int(GABABx), int(ni), int(di))
 	nrnGgabaB := ly.Acts.GabaB.GgabaB(nrnGABAB, vm)
-	Neurons.Set(nrnGgabaB, GgabaB, ni, di)
+	Neurons.Set(nrnGgabaB, int(GgabaB), int(ni), int(di))
 	// Gk was already init
-	Neurons.SetAdd(nrnGgabaB, Gk, ni, di)
+	Neurons.SetAdd(nrnGgabaB, int(Gk), int(ni), int(di))
 }
 
 // GNeuroMod does neuromodulation of conductances
 func (ly *LayerParams) GNeuroMod(ctx *Context, ni, di uint32, vals *LayerValues) {
 	ggain := ly.Learn.NeuroMod.GGain(GlbV(ctx, di, GvDA) + GlbV(ctx, di, GvDAtonic))
-	Neurons.SetMul(ggain, Ge, ni, di)
-	Neurons.SetMul(ggain, Gi, ni, di)
+	Neurons.SetMul(ggain, int(Ge), int(ni), int(di))
+	Neurons.SetMul(ggain, int(Gi), int(ni), int(di))
 }
 
 ////////////////////////
@@ -695,37 +695,37 @@ func (ly *LayerParams) SpikeFromG(ctx *Context, ni, di uint32, lpl *Pool) {
 	ly.Learn.CaFromSpike(ctx, ni, di)
 	lmax := lpl.AvgMax.GeInt.Cycle.Max
 	if lmax > 0 {
-		Neurons.Set(Neurons[GeInt, ni, di]/lmax, GeIntNorm, ni, di)
+		Neurons.Set(Neurons[GeInt, ni, di]/lmax, int(GeIntNorm), int(ni), int(di))
 	} else {
-		Neurons.Set(Neurons[GeInt, ni, di], GeIntNorm, ni, di)
+		Neurons.Set(Neurons[GeInt, ni, di], int(GeIntNorm), int(ni), int(di))
 	}
 	if ctx.Cycle >= ly.Acts.Dt.MaxCycStart {
-		Neurons.SetAdd(ly.Learn.CaSpk.Dt.PDt*(Neurons[CaSpkM, ni, di]-Neurons[SpkMaxCa, ni, di]), SpkMaxCa, ni, di)
-		spkmax := Neurons.Value(SpkMaxCa, ni, di)
-		if spkmax > Neurons.Value(SpkMax, ni, di) {
-			Neurons.Set(spkmax, SpkMax, ni, di)
+		Neurons.SetAdd(ly.Learn.CaSpk.Dt.PDt*(Neurons[CaSpkM, ni, di]-Neurons[SpkMaxCa, ni, di]), int(SpkMaxCa), int(ni), int(di))
+		spkmax := Neurons.Value(int(SpkMaxCa), int(ni), int(di))
+		if spkmax > Neurons.Value(int(SpkMax), int(ni), int(di)) {
+			Neurons.Set(spkmax, int(SpkMax), int(ni), int(di))
 		}
 	}
 	spksper := ctx.ThetaCycles / 8
 	bin := ctx.Cycle / spksper
-	spk := Neurons.Value(Spike, ni, di)
+	spk := Neurons.Value(int(Spike), int(ni), int(di))
 	switch bin {
 	case 0:
-		Neurons.SetAdd(spk, SpkBin0, ni, di)
+		Neurons.SetAdd(spk, int(SpkBin0), int(ni), int(di))
 	case 1:
-		Neurons.SetAdd(spk, SpkBin1, ni, di)
+		Neurons.SetAdd(spk, int(SpkBin1), int(ni), int(di))
 	case 2:
-		Neurons.SetAdd(spk, SpkBin2, ni, di)
+		Neurons.SetAdd(spk, int(SpkBin2), int(ni), int(di))
 	case 3:
-		Neurons.SetAdd(spk, SpkBin3, ni, di)
+		Neurons.SetAdd(spk, int(SpkBin3), int(ni), int(di))
 	case 4:
-		Neurons.SetAdd(spk, SpkBin4, ni, di)
+		Neurons.SetAdd(spk, int(SpkBin4), int(ni), int(di))
 	case 5:
-		Neurons.SetAdd(spk, SpkBin5, ni, di)
+		Neurons.SetAdd(spk, int(SpkBin5), int(ni), int(di))
 	case 6:
-		Neurons.SetAdd(spk, SpkBin6, ni, di)
+		Neurons.SetAdd(spk, int(SpkBin6), int(ni), int(di))
 	default:
-		Neurons.SetAdd(spk, SpkBin7, ni, di)
+		Neurons.SetAdd(spk, int(SpkBin7), int(ni), int(di))
 	}
 }
 
@@ -733,7 +733,7 @@ func (ly *LayerParams) SpikeFromG(ctx *Context, ni, di uint32, lpl *Pool) {
 // This is where special layer types add extra code.
 // warning: if more than 1 layer writes to vals, gpu will fail!
 func (ly *LayerParams) PostSpikeSpecial(ctx *Context, ni, di uint32, pl *Pool, lpl *Pool, vals *LayerValues) {
-	Neurons.Set(Neurons[CaSpkP, ni, di], Burst, ni, di)
+	Neurons.Set(Neurons[CaSpkP, ni, di], int(Burst), int(ni), int(di))
 	pi := NrnI(ctx, ni, NrnSubPool) - 1 // 0-n pool index
 	pni := NrnI(ctx, ni, NrnNeurIndex) - pl.StIndex
 	hasRew := GlbV(ctx, di, GvHasRew) > 0
@@ -743,8 +743,8 @@ func (ly *LayerParams) PostSpikeSpecial(ctx *Context, ni, di uint32, pl *Pool, l
 		actMax := lpl.AvgMax.CaSpkP.Cycle.Max
 		actAvg := lpl.AvgMax.CaSpkP.Cycle.Avg
 		thr := ly.Bursts.ThrFromAvgMax(actAvg, actMax)
-		if Neurons.Value(CaSpkP, ni, di) < thr {
-			Neurons.Set(0, Burst, ni, di)
+		if Neurons.Value(int(CaSpkP), int(ni), int(di)) < thr {
+			Neurons.Set(0, int(Burst), int(ni), int(di))
 		}
 	}
 	case CTLayer:
@@ -752,11 +752,11 @@ func (ly *LayerParams) PostSpikeSpecial(ctx *Context, ni, di uint32, pl *Pool, l
 	case PTPredLayer:
 		if ctx.Cycle == ctx.ThetaCycles-1 {
 			if ly.CT.DecayTau == 0 {
-				Neurons.Set(Neurons[CtxtGeRaw, ni, di], CtxtGe, ni, di)
+				Neurons.Set(Neurons[CtxtGeRaw, ni, di], int(CtxtGe), int(ni), int(di))
 			} else {
-				Neurons.SetAdd(Neurons[CtxtGeRaw, ni, di], CtxtGe, ni, di)
+				Neurons.SetAdd(Neurons[CtxtGeRaw, ni, di], int(CtxtGe), int(ni), int(di))
 			}
-			Neurons.Set(Neurons[CtxtGe, ni, di], CtxtGeOrig, ni, di)
+			Neurons.Set(Neurons[CtxtGe, ni, di], int(CtxtGeOrig), int(ni), int(di))
 		}
 	case VSGatedLayer:
 		dr := float32(0)
 		if pi == 0 {
@@ -765,39 +765,39 @@ func (ly *LayerParams) PostSpikeSpecial(ctx *Context, ni, di uint32, pl *Pool, l
 		} else {
 			dr = GlbV(ctx, di, GvVSMatrixHasGated)
 		}
-		Neurons.Set(dr, Act, ni, di)
+		Neurons.Set(dr, int(Act), int(ni), int(di))
 	case BLALayer:
 		if ctx.Cycle == ctx.ThetaCycles-1 {
 			if hasRew {
-				Neurons.Set(0, CtxtGe, ni, di)
-				Neurons.Set(0, CtxtGeOrig, ni, di)
+				Neurons.Set(0, int(CtxtGe), int(ni), int(di))
+				Neurons.Set(0, int(CtxtGeOrig), int(ni), int(di))
 			} else if GlbV(ctx, di, GvACh) > 0.1 {
-				Neurons.Set(Neurons[CtxtGeRaw, ni, di], CtxtGe, ni, di)
-				Neurons.Set(Neurons[CtxtGe, ni, di], CtxtGeOrig, ni, di)
+				Neurons.Set(Neurons[CtxtGeRaw, ni, di], int(CtxtGe), int(ni), int(di))
+				Neurons.Set(Neurons[CtxtGe, ni, di], int(CtxtGeOrig), int(ni), int(di))
 			}
 		}
 	case LHbLayer:
 		if pni == 0 {
-			Neurons.Set(GlbV(ctx, di, GvLHbDip), Act, ni, di)
+			Neurons.Set(GlbV(ctx, di, GvLHbDip), int(Act), int(ni), int(di))
 		} else {
-			Neurons.Set(GlbV(ctx, di, GvLHbBurst), Act, ni, di)
+			Neurons.Set(GlbV(ctx, di, GvLHbBurst), int(Act), int(ni), int(di))
 		}
-		Neurons.Set(ly.Acts.Dt.GeSynFromRawSteady(Neurons[GeRaw, ni, di]), GeSyn, ni, di)
+		Neurons.Set(ly.Acts.Dt.GeSynFromRawSteady(Neurons[GeRaw, ni, di]), int(GeSyn), int(ni), int(di))
 	case DrivesLayer:
 		dr := GlbUSposV(ctx, di, GvDrives, uint32(pi))
 		act := dr
 		if dr > 0 {
 			act = ly.Acts.PopCode.EncodeValue(pni, uint32(pl.NNeurons()), dr)
 		}
-		Neurons.Set(act, Act, ni, di)
+		Neurons.Set(act, int(Act), int(ni), int(di))
 	case UrgencyLayer:
 		ur := GlbV(ctx, di, GvUrgency)
 		act := ur
 		if ur > 0 {
 			act = ly.Acts.PopCode.EncodeValue(pni, uint32(pl.NNeurons()), ur)
 		}
-		Neurons.Set(act, Act, ni, di)
+		Neurons.Set(act, int(Act), int(ni), int(di))
 	case USLayer:
 		us := RubiconUSStimValue(ctx, di, pi, ly.Learn.NeuroMod.Valence)
 		act := us
@@ -806,7 +806,7 @@ func (ly *LayerParams) PostSpikeSpecial(ctx *Context, ni, di uint32, pl *Pool, l
 		}
 		// D2Mod = final
 		if ly.Learn.NeuroMod.DAMod == D1Mod || (ly.Learn.NeuroMod.DAMod == D2Mod && hasRew && ctx.PlusPhase.IsTrue()) {
-			Neurons.Set(act, Act, ni, di)
+			Neurons.Set(act, int(Act), int(ni), int(di))
 		}
 	case PVLayer:
 		if hasRew {
@@ -817,41 +817,41 @@ func (ly *LayerParams) PostSpikeSpecial(ctx *Context, ni, di uint32, pl *Pool, l
 				pv = GlbV(ctx, di, GvPVneg)
 			}
 			act := ly.Acts.PopCode.EncodeValue(pni, ly.Indexes.NeurN, pv)
-			Neurons.Set(act, Act, ni, di)
+			Neurons.Set(act, int(Act), int(ni), int(di))
 		}
 	case LDTLayer:
 		// I set this in CyclePost
-		Neurons.Set(GlbV(ctx, di, GvAChRaw), Act, ni, di)
+		Neurons.Set(GlbV(ctx, di, GvAChRaw), int(Act), int(ni), int(di))
 	case VTALayer:
 		// I set this in CyclePost
-		Neurons.Set(GlbV(ctx, di, GvVtaDA), Act, ni, di)
+		Neurons.Set(GlbV(ctx, di, GvVtaDA), int(Act), int(ni), int(di))
 	case RewLayer:
-		Neurons.Set(GlbV(ctx, di, GvRew), Act, ni, di)
+		Neurons.Set(GlbV(ctx, di, GvRew), int(Act), int(ni), int(di))
 	case RWPredLayer:
 		// clipped linear
-		Neurons.Set(ly.RWPred.PredRange.ClipValue(Neurons[Ge, ni, di]), Act, ni, di)
+		Neurons.Set(ly.RWPred.PredRange.ClipValue(Neurons[Ge, ni, di]), int(Act), int(ni), int(di))
 		if pni == 0 {
-			vals.Special.V1 = Neurons.Value(ActInt, ni, di) // warning: if more than 1 layer writes to vals, gpu will fail!
+			vals.Special.V1 = Neurons.Value(int(ActInt), int(ni), int(di)) // warning: if more than 1 layer writes to vals, gpu will fail!
 		} else {
-			vals.Special.V2 = Neurons.Value(ActInt, ni, di)
+			vals.Special.V2 = Neurons.Value(int(ActInt), int(ni), int(di))
 		}
 	case RWDaLayer:
 		// I set this in CyclePost
-		Neurons.Set(GlbV(ctx, di, GvDA), Act, ni, di)
+		Neurons.Set(GlbV(ctx, di, GvDA), int(Act), int(ni), int(di))
 	case TDPredLayer:
 		// linear
-		Neurons.Set(Neurons[Ge, ni, di], Act, ni, di)
+		Neurons.Set(Neurons[Ge, ni, di], int(Act), int(ni), int(di))
 		if pni == 0 {
-			vals.Special.V1 = Neurons.Value(ActInt, ni, di) // warning: if more than 1 layer writes to vals, gpu will fail!
+			vals.Special.V1 = Neurons.Value(int(ActInt), int(ni), int(di)) // warning: if more than 1 layer writes to vals, gpu will fail!
 		} else {
-			vals.Special.V2 = Neurons.Value(ActInt, ni, di)
+			vals.Special.V2 = Neurons.Value(int(ActInt), int(ni), int(di))
 		}
 	case TDIntegLayer:
-		Neurons.Set(GlbV(ctx, di, GvRewPred), Act, ni, di)
+		Neurons.Set(GlbV(ctx, di, GvRewPred), int(Act), int(ni), int(di))
 	case TDDaLayer:
 		// I set this in CyclePost
-		Neurons.Set(GlbV(ctx, di, GvDA), Act, ni, di)
+		Neurons.Set(GlbV(ctx, di, GvDA), int(Act), int(ni), int(di))
 	}
 }
 
@@ -860,14 +860,14 @@ func (ly *LayerParams) PostSpikeSpecial(ctx *Context, ni, di uint32, pl *Pool, l
 // It also updates the CaSpkPCyc stats.
 func (ly *LayerParams) PostSpike(ctx *Context, ni, di uint32, pl *Pool, vals *LayerValues) {
 	intdt := ly.Acts.Dt.IntDt
-	Neurons.SetAdd(intdt*(Neurons[Ge, ni, di]-Neurons[GeInt, ni, di]), GeInt, ni, di)
-	Neurons.SetAdd(intdt*(Neurons[GiSyn, ni, di]-Neurons[GiInt, ni, di]), GiInt, ni, di)
+	Neurons.SetAdd(intdt*(Neurons[Ge, ni, di]-Neurons[GeInt, ni, di]), int(GeInt), int(ni), int(di))
+	Neurons.SetAdd(intdt*(Neurons[GiSyn, ni, di]-Neurons[GiInt, ni, di]), int(GiInt), int(ni), int(di))
 	// act int is reset at start of the plus phase -- needs faster integration:
 	if ctx.PlusPhase.IsTrue() {
 		intdt *= 3.0
 	}
 	// using reg act here now
-	Neurons.SetAdd(intdt*(Neurons[Act, ni, di]-Neurons[ActInt, ni, di]), ActInt, ni, di)
+	Neurons.SetAdd(intdt*(Neurons[Act, ni, di]-Neurons[ActInt, ni, di]), int(ActInt), int(ni), int(di))
 }
 
 /////////////////////////////////////////////////////////////////////////
@@ -987,19 +987,19 @@ func (ly *LayerParams) NewStatePool(ctx *Context, pl *Pool) {
 
 // NewStateNeuron handles all initialization at start of new input pattern.
 // Should already have presented the external input to the network at this point.
 func (ly *LayerParams) NewStateNeuron(ctx *Context, ni, di uint32, vals *LayerValues, pl *Pool) {
-	Neurons.Set(Neurons[Burst, ni, di], BurstPrv, ni, di)
-	Neurons.Set(Neurons[CaSpkD, ni, di], SpkPrv, ni, di)
-	Neurons.Set(0, SpkMax, ni, di)
-	Neurons.Set(0, SpkMaxCa, ni, di)
-
-	Neurons.Set(0, SpkBin0, ni, di)
-	Neurons.Set(0, SpkBin1, ni, di)
-	Neurons.Set(0, SpkBin2, ni, di)
-	Neurons.Set(0, SpkBin3, ni, di)
-	Neurons.Set(0, SpkBin4, ni, di)
-	Neurons.Set(0, SpkBin5, ni, di)
-	Neurons.Set(0, SpkBin6, ni, di)
-	Neurons.Set(0, SpkBin7, ni, di)
+	Neurons.Set(Neurons[Burst, ni, di], int(BurstPrv), int(ni), int(di))
+	Neurons.Set(Neurons[CaSpkD, ni, di], int(SpkPrv), int(ni), int(di))
+	Neurons.Set(0, int(SpkMax), int(ni), int(di))
+	Neurons.Set(0, int(SpkMaxCa), int(ni), int(di))
+
+	Neurons.Set(0, int(SpkBin0), int(ni), int(di))
+	Neurons.Set(0, int(SpkBin1), int(ni), int(di))
+	Neurons.Set(0, int(SpkBin2), int(ni), int(di))
+	Neurons.Set(0, int(SpkBin3), int(ni), int(di))
+	Neurons.Set(0, int(SpkBin4), int(ni), int(di))
+	Neurons.Set(0, int(SpkBin5), int(ni), int(di))
+	Neurons.Set(0, int(SpkBin6), int(ni), int(di))
+	Neurons.Set(0, int(SpkBin7), int(ni), int(di))
 
 	ly.Acts.DecayState(ctx, ni, di, ly.Acts.Decay.Act, ly.Acts.Decay.Glong, ly.Acts.Decay.AHP)
 	// Note: synapse-level Ca decay happens in DWt
@@ -1023,21 +1023,21 @@ func (ly *LayerParams) AvgGeM(ctx *Context, vals *LayerValues, geIntMinusMax, gi
 
 // MinusPhaseNeuron does neuron level minus-phase updating
 func (ly *LayerParams) MinusPhaseNeuron(ctx *Context, ni, di uint32, pl *Pool, lpl *Pool, vals *LayerValues) {
-	Neurons.Set(Neurons[ActInt, ni, di], ActM, ni, di)
-	Neurons.Set(Neurons[CaSpkP, ni, di], CaSpkPM, ni, di)
+	Neurons.Set(Neurons[ActInt, ni, di], int(ActM), int(ni), int(di))
+	Neurons.Set(Neurons[CaSpkP, ni, di], int(CaSpkPM), int(ni), int(di))
} // PlusPhaseStartNeuron does neuron level plus-phase start: // applies Target inputs as External inputs. func (ly *LayerParams) PlusPhaseStartNeuron(ctx *Context, ni, di uint32, pl *Pool, lpl *Pool, vals *LayerValues) { if NrnHasFlag(ctx, ni, di, NeuronHasTarg) { // will be clamped in plus phase - Neurons.Set(Neurons[Target, ni, di], Ext, ni, di) + Neurons.Set(Neurons[Target, ni, di], int(Ext), int(ni), int(di)) NrnSetFlag(ctx, ni, di, NeuronHasExt) // get fresh update on plus phase output acts - Neurons.Set(-1, ISI, ni, di) - Neurons.Set(-1, ISIAvg, ni, di) + Neurons.Set(-1, int(ISI), int(ni), int(di)) + Neurons.Set(-1, int(ISIAvg), int(ni), int(di)) // reset for plus phase - Neurons.Set(ly.Acts.Init.Act, ActInt, ni, di) + Neurons.Set(ly.Acts.Init.Act, int(ActInt), int(ni), int(di)) } } @@ -1051,9 +1051,9 @@ func (ly *LayerParams) PlusPhaseNeuronSpecial(ctx *Context, ni, di uint32, pl *P // PlusPhaseNeuron does neuron level plus-phase updating func (ly *LayerParams) PlusPhaseNeuron(ctx *Context, ni, di uint32, pl *Pool, lpl *Pool, vals *LayerValues) { - Neurons.Set(Neurons[ActInt, ni, di], ActP, ni, di) - nrnCaSpkP := Neurons.Value(CaSpkP, ni, di) - nrnCaSpkD := Neurons.Value(CaSpkD, ni, di) + Neurons.Set(Neurons[ActInt, ni, di], int(ActP), int(ni), int(di)) + nrnCaSpkP := Neurons.Value(int(CaSpkP), int(ni), int(di)) + nrnCaSpkD := Neurons.Value(int(CaSpkD), int(ni), int(di)) da := GlbV(ctx, di, GvDA) ach := GlbV(ctx, di, GvACh) mlr := ly.Learn.RLRate.RLRateSigDeriv(nrnCaSpkD, lpl.AvgMax.CaSpkD.Cycle.Max) @@ -1063,14 +1063,14 @@ func (ly *LayerParams) PlusPhaseNeuron(ctx *Context, ni, di uint32, pl *Pool, lp switch ly.LayType { case BLALayer: - dlr = ly.Learn.RLRate.RLRateDiff(nrnCaSpkP, Neurons.Value(SpkPrv, ni, di)) // delta on previous trial - if !ly.Learn.NeuroMod.IsBLAExt() && pl.StIndex == 0 { // first pool + dlr = ly.Learn.RLRate.RLRateDiff(nrnCaSpkP, Neurons.Value(int(SpkPrv), int(ni), int(di))) // delta on previous trial + if !ly.Learn.NeuroMod.IsBLAExt() && pl.StIndex == 0 { // first pool dlr = 0 // first pool is novelty / curiosity -- no learn } case VSPatchLayer: da = GlbV(ctx, di, GvVSPatchPosRPE) // our own personal modlr = ly.Learn.NeuroMod.LRMod(da, ach) - mlr = ly.Learn.RLRate.RLRateSigDeriv(Neurons.Value(SpkPrv, ni, di), 1) // note: don't have proper max here + mlr = ly.Learn.RLRate.RLRateSigDeriv(Neurons.Value(int(SpkPrv), int(ni), int(di)), 1) // note: don't have proper max here case MatrixLayer: if hasRew { // reward time mlr = 1 // don't use sig deriv @@ -1080,15 +1080,15 @@ func (ly *LayerParams) PlusPhaseNeuron(ctx *Context, ni, di uint32, pl *Pool, lp default: dlr = ly.Learn.RLRate.RLRateDiff(nrnCaSpkP, nrnCaSpkD) } - Neurons.Set(mlr*dlr*modlr, RLRate, ni, di) + Neurons.Set(mlr*dlr*modlr, int(RLRate), int(ni), int(di)) var tau float32 - sahpN := Neurons.Value(SahpN, ni, di) - nrnSaphCa := Neurons.Value(SahpCa, ni, di) + sahpN := Neurons.Value(int(SahpN), int(ni), int(di)) + nrnSaphCa := Neurons.Value(int(SahpCa), int(ni), int(di)) ly.Acts.Sahp.NinfTauFromCa(nrnSaphCa, &sahpN, &tau) nrnSaphCa = ly.Acts.Sahp.CaInt(nrnSaphCa, nrnCaSpkD) - Neurons.Set(sahpN, SahpN, ni, di) - Neurons.Set(nrnSaphCa, SahpCa, ni, di) - Neurons.Set(ly.Acts.Sahp.GsAHP(sahpN), Gsahp, ni, di) + Neurons.Set(sahpN, int(SahpN), int(ni), int(di)) + Neurons.Set(nrnSaphCa, int(SahpCa), int(ni), int(di)) + Neurons.Set(ly.Acts.Sahp.GsAHP(sahpN), int(Gsahp), int(ni), int(di)) } //gosl:end diff --git a/axon/learn.go b/axon/learn.go index b7bb4085..d36cb813 100644 --- a/axon/learn.go +++
b/axon/learn.go @@ -75,9 +75,9 @@ func (np *CaLrnParams) Update() { // and performs time-integration of VgccCa func (np *CaLrnParams) VgccCaFromSpike(ctx *Context, ni, di uint32) { if np.SpkVGCC.IsTrue() { - Neurons.Set(np.SpkVgccCa*Neurons[Spike, ni, di], VgccCa, ni, di) + Neurons.Set(np.SpkVgccCa*Neurons[Spike, ni, di], int(VgccCa), int(ni), int(di)) } - Neurons.SetAdd(Neurons[VgccCa, ni, di]-np.VgccDt*Neurons[VgccCaInt, ni, di], VgccCaInt, ni, di) + Neurons.SetAdd(Neurons[VgccCa, ni, di]-np.VgccDt*Neurons[VgccCaInt, ni, di], int(VgccCaInt), int(ni), int(di)) // Dt only affects decay, not rise time } @@ -86,11 +86,11 @@ func (np *CaLrnParams) VgccCaFromSpike(ctx *Context, ni, di uint32) { // perform its time-integration. func (np *CaLrnParams) CaLrns(ctx *Context, ni, di uint32) { np.VgccCaFromSpike(ctx, ni, di) - Neurons.Set(np.NormInv*(Neurons[NmdaCa, ni, di]+Neurons[VgccCaInt, ni, di]), CaLrn, ni, di) - Neurons.SetAdd(np.Dt.MDt*(Neurons[CaLrn, ni, di]-Neurons[NrnCaM, ni, di]), NrnCaM, ni, di) - Neurons.SetAdd(np.Dt.PDt*(Neurons[NrnCaM, ni, di]-Neurons[NrnCaP, ni, di]), NrnCaP, ni, di) - Neurons.SetAdd(np.Dt.DDt*(Neurons[NrnCaP, ni, di]-Neurons[NrnCaD, ni, di]), NrnCaD, ni, di) - Neurons.Set(Neurons[NrnCaP, ni, di]-Neurons[NrnCaD, ni, di], CaDiff, ni, di) + Neurons.Set(np.NormInv*(Neurons[NmdaCa, ni, di]+Neurons[VgccCaInt, ni, di]), int(CaLrn), int(ni), int(di)) + Neurons.SetAdd(np.Dt.MDt*(Neurons[CaLrn, ni, di]-Neurons[NrnCaM, ni, di]), int(NrnCaM), int(ni), int(di)) + Neurons.SetAdd(np.Dt.PDt*(Neurons[NrnCaM, ni, di]-Neurons[NrnCaP, ni, di]), int(NrnCaP), int(ni), int(di)) + Neurons.SetAdd(np.Dt.DDt*(Neurons[NrnCaP, ni, di]-Neurons[NrnCaD, ni, di]), int(NrnCaD), int(ni), int(di)) + Neurons.Set(Neurons[NrnCaP, ni, di]-Neurons[NrnCaD, ni, di], int(CaDiff), int(ni), int(di)) } ////////////////////////////////////////////////////////////////////////////////////// @@ -304,23 +304,23 @@ func (ln *LearnNeurParams) Defaults() { // InitNeurCa initializes the neuron-level calcium learning and spiking variables. // Called by InitWeights (at start of learning).
func (ln *LearnNeurParams) InitNeurCa(ctx *Context, ni, di uint32) { - Neurons.Set(0, GnmdaLrn, ni, di) - Neurons.Set(0, NmdaCa, ni, di) + Neurons.Set(0, int(GnmdaLrn), int(ni), int(di)) + Neurons.Set(0, int(NmdaCa), int(ni), int(di)) - Neurons.Set(0, VgccCa, ni, di) - Neurons.Set(0, VgccCaInt, ni, di) + Neurons.Set(0, int(VgccCa), int(ni), int(di)) + Neurons.Set(0, int(VgccCaInt), int(ni), int(di)) - Neurons.Set(0, CaLrn, ni, di) + Neurons.Set(0, int(CaLrn), int(ni), int(di)) - Neurons.Set(0, CaSpkM, ni, di) - Neurons.Set(0, CaSpkP, ni, di) - Neurons.Set(0, CaSpkD, ni, di) - Neurons.Set(0, CaSpkPM, ni, di) + Neurons.Set(0, int(CaSpkM), int(ni), int(di)) + Neurons.Set(0, int(CaSpkP), int(ni), int(di)) + Neurons.Set(0, int(CaSpkD), int(ni), int(di)) + Neurons.Set(0, int(CaSpkPM), int(ni), int(di)) - Neurons.Set(0, NrnCaM, ni, di) - Neurons.Set(0, NrnCaP, ni, di) - Neurons.Set(0, NrnCaD, ni, di) - Neurons.Set(0, CaDiff, ni, di) + Neurons.Set(0, int(NrnCaM), int(ni), int(di)) + Neurons.Set(0, int(NrnCaP), int(ni), int(di)) + Neurons.Set(0, int(NrnCaD), int(ni), int(di)) + Neurons.Set(0, int(CaDiff), int(ni), int(di)) } // LrnNMDAFromRaw updates the separate NMDA conductance and calcium values @@ -331,23 +331,23 @@ func (ln *LearnNeurParams) LrnNMDAFromRaw(ctx *Context, ni, di uint32, geTot flo if geTot < 0 { geTot = 0 } - vmd := Neurons.Value(VmDend, ni, di) - Neurons.Set(ln.LrnNMDA.NMDASyn(Neurons[GnmdaLrn, ni, di], geTot), GnmdaLrn, ni, di) - gnmda := ln.LrnNMDA.Gnmda(Neurons.Value(GnmdaLrn, ni, di), vmd) - Neurons.Set(gnmda*ln.LrnNMDA.CaFromV(vmd), NmdaCa, ni, di) + vmd := Neurons.Value(int(VmDend), int(ni), int(di)) + Neurons.Set(ln.LrnNMDA.NMDASyn(Neurons[GnmdaLrn, ni, di], geTot), int(GnmdaLrn), int(ni), int(di)) + gnmda := ln.LrnNMDA.Gnmda(Neurons.Value(int(GnmdaLrn), int(ni), int(di)), vmd) + Neurons.Set(gnmda*ln.LrnNMDA.CaFromV(vmd), int(NmdaCa), int(ni), int(di)) } // CaFromSpike updates all spike-driven calcium variables, including CaLrn and CaSpk. // Computed after new activation for current cycle is updated. 
func (ln *LearnNeurParams) CaFromSpike(ctx *Context, ni, di uint32) { var caSyn float32 - caSpkM := Neurons.Value(CaSpkM, ni, di) - caSpkP := Neurons.Value(CaSpkP, ni, di) - caSpkD := Neurons.Value(CaSpkD, ni, di) - ln.CaSpk.CaFromSpike(Neurons.Value(Spike, ni, di), &caSyn, &caSpkM, &caSpkP, &caSpkD) - Neurons.Set(caSpkM, CaSpkM, ni, di) - Neurons.Set(caSpkP, CaSpkP, ni, di) - Neurons.Set(caSpkD, CaSpkD, ni, di) + caSpkM := Neurons.Value(int(CaSpkM), int(ni), int(di)) + caSpkP := Neurons.Value(int(CaSpkP), int(ni), int(di)) + caSpkD := Neurons.Value(int(CaSpkD), int(ni), int(di)) + ln.CaSpk.CaFromSpike(Neurons.Value(int(Spike), int(ni), int(di)), &caSyn, &caSpkM, &caSpkP, &caSpkD) + Neurons.Set(caSpkM, int(CaSpkM), int(ni), int(di)) + Neurons.Set(caSpkP, int(CaSpkP), int(ni), int(di)) + Neurons.Set(caSpkD, int(CaSpkD), int(ni), int(di)) ln.CaLearn.CaLrns(ctx, ni, di) } @@ -596,14 +596,14 @@ func (sp *SWtParams) WtFromDWt(wt, lwt *float32, dwt, swt float32) { func (sp *SWtParams) InitWeightsSyn(ctx *Context, syni uint32, rnd randx.Rand, mean, spct float32) { wtv := sp.Init.RandVar(rnd) wt := mean + wtv - Synapses.Set(wt, Wt, syni) - Synapses.Set(sp.ClipSWt(mean+spct*wtv), SWt, syni) + Synapses.Set(wt, int(Wt), int(syni)) + Synapses.Set(sp.ClipSWt(mean+spct*wtv), int(SWt), int(syni)) if spct == 0 { // this is critical for weak init wt, SPCt = 0 paths - Synapses.Set(0.5, SWt, syni) + Synapses.Set(0.5, int(SWt), int(syni)) } - Synapses.Set(sp.LWtFromWts(wt, Synapses[SWt, syni]), LWt, syni) - Synapses.Set(0, DWt, syni) - Synapses.Set(0, DSWt, syni) + Synapses.Set(sp.LWtFromWts(wt, Synapses[SWt, syni]), int(LWt), int(syni)) + Synapses.Set(0, int(DWt), int(syni)) + Synapses.Set(0, int(DSWt), int(syni)) } //gosl:start diff --git a/axon/logging.go b/axon/logging.go index d2ae212f..218c90f9 100644 --- a/axon/logging.go +++ b/axon/logging.go @@ -4,6 +4,7 @@ package axon +/* import ( "reflect" "strconv" @@ -14,8 +15,6 @@ import ( "cogentcore.org/core/plot/plotcore" "cogentcore.org/core/tensor" "cogentcore.org/core/tensor/stats/metric" - "cogentcore.org/core/tensor/stats/norm" - "cogentcore.org/core/tensor/stats/split" "cogentcore.org/core/tensor/stats/stats" "cogentcore.org/core/tensor/table" "github.com/emer/emergent/v2/egui" @@ -34,10 +33,10 @@ func LogTestErrors(lg *elog.Logs) { }) lg.MiscTables["TestErrors"] = ix.NewTable() - allsp := split.All(ix) - split.AggColumn(allsp, "UnitErr", stats.Sum) - // note: can add other stats to compute - lg.MiscTables["TestErrorStats"] = allsp.AggsToTable(table.AddAggName) + // allsp := split.All(ix) // todo: + // split.AggColumn(allsp, "UnitErr", stats.Sum) + // // note: can add other stats to compute + // lg.MiscTables["TestErrorStats"] = allsp.AggsToTable(table.AddAggName) } // PCAStats computes PCA statistics on recorded hidden activation patterns @@ -389,7 +388,7 @@ func LogAddCaLrnDiagnosticItems(lg *elog.Logs, mode etime.Modes, net *Network, t Write: elog.WriteMap{ etime.Scope(etime.Train, times[ntimes-1]): func(ctx *elog.Context) { tsr := ctx.GetLayerSampleTensor(clnm, "NmdaCa") - ctx.SetFloat64(stats.MeanTensor(tsr)) + ctx.SetFloat64(stats.Mean(tsr)) }}}) lg.AddStdAggs(itm, mode, times...) @@ -401,7 +400,7 @@ func LogAddCaLrnDiagnosticItems(lg *elog.Logs, mode etime.Modes, net *Network, t Write: elog.WriteMap{ etime.Scope(etime.Train, times[ntimes-1]): func(ctx *elog.Context) { tsr := ctx.GetLayerSampleTensor(clnm, "NmdaCa") - ctx.SetFloat64(stats.MeanTensor(tsr)) + ctx.SetFloat64(stats.Mean(tsr)) }}}) lg.AddStdAggs(itm, mode, times...) 
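The common thread in the hunks above (and throughout this patch): the Neurons and Synapses accessors now take plain variadic int indices, so enum constants (VgccCa, CaSpkM, SWt, ...) and the uint32 ni/di indexes need an explicit int(...) conversion at every call site. A minimal self-contained sketch of why Go forces this; Float32Tensor and NeuronVar below are illustrative stand-ins, not the actual axon or cogentcore tensor types:

package main

import "fmt"

// NeuronVar is an illustrative stand-in for axon's neuron variable enum.
type NeuronVar int32

const (
	CaSpkM NeuronVar = iota
	CaSpkP
	CaSpkD
)

// Float32Tensor is a minimal stand-in for a row-major tensor whose
// accessors take variadic plain-int indices.
type Float32Tensor struct {
	shape []int
	data  []float32
}

// offset computes the row-major offset of the given indices (Horner style).
func (t *Float32Tensor) offset(ix ...int) int {
	off := 0
	for d, i := range ix {
		off = off*t.shape[d] + i
	}
	return off
}

func (t *Float32Tensor) Value(ix ...int) float32     { return t.data[t.offset(ix...)] }
func (t *Float32Tensor) Set(v float32, ix ...int)    { t.data[t.offset(ix...)] = v }
func (t *Float32Tensor) SetAdd(v float32, ix ...int) { t.data[t.offset(ix...)] += v }

func main() {
	// [vars x neurons x data] layout, as in Neurons[Var, ni, di]
	neurons := &Float32Tensor{shape: []int{3, 4, 2}, data: make([]float32, 24)}
	var ni, di uint32 = 1, 0
	// NeuronVar and uint32 do not convert implicitly to int in Go,
	// hence the explicit int(...) at every call site in the patch:
	neurons.Set(0.5, int(CaSpkM), int(ni), int(di))
	neurons.SetAdd(0.1, int(CaSpkM), int(ni), int(di))
	fmt.Println(neurons.Value(int(CaSpkM), int(ni), int(di))) // ~0.6
}

Collapsing to untyped int indices presumably lets one accessor signature serve every variable enum, at the cost of the explicit conversions seen in each hunk.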
@@ -413,7 +412,7 @@ func LogAddCaLrnDiagnosticItems(lg *elog.Logs, mode etime.Modes, net *Network, t Write: elog.WriteMap{ etime.Scope(etime.Train, times[ntimes-1]): func(ctx *elog.Context) { tsr := ctx.GetLayerSampleTensor(clnm, "VgccCaInt") - ctx.SetFloat64(stats.MeanTensor(tsr)) + ctx.SetFloat64(stats.Mean(tsr)) }}}) lg.AddStdAggs(itm, mode, times...) @@ -437,7 +436,7 @@ func LogAddCaLrnDiagnosticItems(lg *elog.Logs, mode etime.Modes, net *Network, t Write: elog.WriteMap{ etime.Scope(etime.Train, times[ntimes-1]): func(ctx *elog.Context) { tsr := ctx.GetLayerSampleTensor(clnm, "CaLrn") - ctx.SetFloat64(stats.MeanTensor(tsr)) + ctx.SetFloat64(stats.Mean(tsr)) }}}) lg.AddStdAggs(itm, mode, times...) @@ -449,7 +448,7 @@ func LogAddCaLrnDiagnosticItems(lg *elog.Logs, mode etime.Modes, net *Network, t Write: elog.WriteMap{ etime.Scope(etime.Train, times[ntimes-1]): func(ctx *elog.Context) { tsr := ctx.GetLayerSampleTensor(clnm, "CaLrn") - ctx.SetFloat64(stats.MaxTensor(tsr)) + ctx.SetFloat64(stats.Max(tsr)) }}}) lg.AddStdAggs(itm, mode, times...) @@ -461,8 +460,8 @@ func LogAddCaLrnDiagnosticItems(lg *elog.Logs, mode etime.Modes, net *Network, t Write: elog.WriteMap{ etime.Scope(etime.Train, times[ntimes-1]): func(ctx *elog.Context) { tsr := ctx.GetLayerSampleTensor(clnm, "CaDiff") - norm.AbsTensor(tsr) - ctx.SetFloat64(stats.MeanTensor(tsr)) + tmath.Abs(tsr) + ctx.SetFloat64(stats.Mean(tsr)) }}}) lg.AddStdAggs(itm, mode, times...) @@ -474,8 +473,8 @@ func LogAddCaLrnDiagnosticItems(lg *elog.Logs, mode etime.Modes, net *Network, t Write: elog.WriteMap{ etime.Scope(etime.Train, times[ntimes-1]): func(ctx *elog.Context) { tsr := ctx.GetLayerSampleTensor(clnm, "CaDiff") - norm.AbsTensor(tsr) - ctx.SetFloat64(stats.MaxTensor(tsr)) + tmath.Abs(tsr) + ctx.SetFloat64(stats.Max(tsr)) }}}) lg.AddStdAggs(itm, mode, times...) @@ -487,7 +486,7 @@ func LogAddCaLrnDiagnosticItems(lg *elog.Logs, mode etime.Modes, net *Network, t Write: elog.WriteMap{ etime.Scope(etime.Train, times[ntimes-1]): func(ctx *elog.Context) { tsr := ctx.GetLayerSampleTensor(clnm, "CaD") - ctx.SetFloat64(stats.MeanTensor(tsr)) + ctx.SetFloat64(stats.Mean(tsr)) }}}) lg.AddStdAggs(itm, mode, times...) @@ -499,7 +498,7 @@ func LogAddCaLrnDiagnosticItems(lg *elog.Logs, mode etime.Modes, net *Network, t Write: elog.WriteMap{ etime.Scope(etime.Train, times[ntimes-1]): func(ctx *elog.Context) { tsr := ctx.GetLayerSampleTensor(clnm, "CaSpkD") - avg := stats.MeanTensor(tsr) + avg := stats.Mean(tsr) ctx.SetFloat64(avg) }}}) lg.AddStdAggs(itm, mode, times...) @@ -511,7 +510,7 @@ func LogAddCaLrnDiagnosticItems(lg *elog.Logs, mode etime.Modes, net *Network, t Write: elog.WriteMap{ etime.Scope(etime.Train, times[ntimes-1]): func(ctx *elog.Context) { tsr := ctx.GetLayerSampleTensor(clnm, "CaDiff") - avg := stats.MeanTensor(tsr) + avg := stats.Mean(tsr) ctx.SetFloat64(avg) }}}) lg.AddStdAggs(itm, mode, times...) 
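Each logging hunk above makes the same substitution: stats.MeanTensor and stats.MaxTensor become stats.Mean and stats.Max, and the in-place norm.AbsTensor becomes tmath.Abs (the tmath import is not visible in the import hunk, though that is moot while the whole file sits inside a block comment). A self-contained sketch of the semantics these calls have as used here, with plain slices standing in for the sample tensors:

package main

import (
	"fmt"
	"math"
)

// mean and maxVal stand in for stats.Mean / stats.Max as used in the hunks
// above: reduce a sample tensor to a single float64 for ctx.SetFloat64.
func mean(xs []float64) float64 {
	s := 0.0
	for _, x := range xs {
		s += x
	}
	return s / float64(len(xs))
}

func maxVal(xs []float64) float64 {
	m := xs[0]
	for _, x := range xs[1:] {
		m = math.Max(m, x)
	}
	return m
}

// absInPlace stands in for tmath.Abs, which (like the old norm.AbsTensor)
// rewrites the values in place before the reduction.
func absInPlace(xs []float64) {
	for i := range xs {
		xs[i] = math.Abs(xs[i])
	}
}

func main() {
	caDiff := []float64{-0.2, 0.1, -0.05} // e.g. a CaDiff sample tensor
	// mirrors: tmath.Abs(tsr); ctx.SetFloat64(stats.Mean(tsr))
	absInPlace(caDiff)
	fmt.Println(mean(caDiff), maxVal(caDiff))
}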
@@ -681,9 +680,9 @@ func LayerActsLogAvg(net *Network, lg *elog.Logs, gui *egui.GUI, recReset bool) return } ix := table.NewIndexView(dtRec) - spl := split.GroupBy(ix, "Layer") - split.AggAllNumericColumns(spl, stats.Mean) - ags := spl.AggsToTable(table.ColumnNameOnly) + // spl := split.GroupBy(ix, "Layer") // todo: + // split.AggAllNumericColumns(spl, stats.Mean) + // ags := spl.AggsToTable(table.ColumnNameOnly) cols := []string{"Nominal", "ActM", "ActP", "MaxGeM", "MaxGeP"} for li, ly := range net.Layers { rw := errors.Log1(ags.RowsByString("Layer", ly.Name, table.Equals, table.UseCase))[0] @@ -718,3 +717,5 @@ func LayerActsLogConfigGUI(lg *elog.Logs, gui *egui.GUI) { gui.Plots["LayerActsAvg"] = plt plt.SetTable(lg.MiscTables["LayerActsAvg"]) } + +*/ diff --git a/axon/looper.go b/axon/looper.go index 0ce811f1..8af3412b 100644 --- a/axon/looper.go +++ b/axon/looper.go @@ -6,7 +6,6 @@ package axon import ( "github.com/emer/emergent/v2/egui" - "github.com/emer/emergent/v2/elog" "github.com/emer/emergent/v2/etime" "github.com/emer/emergent/v2/looper" "github.com/emer/emergent/v2/netview" @@ -99,6 +98,7 @@ func LooperSimCycleAndLearn(man *looper.Manager, net *Network, ctx *Context, vie } } +/* // LooperResetLogBelow adds a function in OnStart to all stacks and loops // to reset the log at the level below each loop -- this is good default behavior. // Exceptions can be passed to exclude specific levels -- e.g., if except is Epoch @@ -122,6 +122,7 @@ func LooperResetLogBelow(man *looper.Manager, logs *elog.Logs, except ...etime.T } } } +*/ // LooperUpdateNetView adds netview update calls at each time level func LooperUpdateNetView(man *looper.Manager, viewupdt *netview.ViewUpdate, net *Network, ctrUpdateFunc func(tm etime.Times)) { diff --git a/axon/path.go b/axon/path.go index b222e694..d9c9075c 100644 --- a/axon/path.go +++ b/axon/path.go @@ -92,11 +92,11 @@ func (pt *Path) SetSynValue(varNm string, sidx, ridx int, val float32) error { } } if varNm == "Wt" { - wt := Synapses.Value(Wt, syni) - if Synapses.Value(SWt, syni) == 0 { - Synapses.Set(wt, SWt, syni) + wt := Synapses.Value(int(Wt), int(syni)) + if Synapses.Value(int(SWt), int(syni)) == 0 { + Synapses.Set(wt, int(SWt), int(syni)) } - Synapses.Set(pt.Params.SWts.LWtFromWts(wt, Synapses[SWt, syni]), LWt, syni) + Synapses.Set(pt.Params.SWts.LWtFromWts(wt, Synapses[SWt, syni]), int(LWt), int(syni)) } return nil } @@ -139,10 +139,10 @@ func (pt *Path) SetSWtsRPool(ctx *Context, swts tensor.Tensor) { for ci, syi := range syIndexes { syni := pt.SynStIndex + syi swt := float32(swts.Float1D((scst + ci) % wsz)) - Synapses.Set(float32(swt), SWt, syni) - wt := pt.Params.SWts.ClipWt(swt + (Synapses.Value(Wt, syni) - pt.Params.SWts.Init.Mean)) - Synapses.Set(wt, Wt, syni) - Synapses.Set(pt.Params.SWts.LWtFromWts(wt, swt), LWt, syni) + Synapses.Set(float32(swt), int(SWt), int(syni)) + wt := pt.Params.SWts.ClipWt(swt + (Synapses.Value(int(Wt), int(syni)) - pt.Params.SWts.Init.Mean)) + Synapses.Set(wt, int(Wt), int(syni)) + Synapses.Set(pt.Params.SWts.LWtFromWts(wt, swt), int(LWt), int(syni)) } } } @@ -164,9 +164,9 @@ func (pt *Path) SetWeightsFunc(ctx *Context, wtFun func(si, ri int, send, recv * syni := pt.SynStIndex + syi si := pt.Params.SynSendLayerIndex(ctx, syni) wt := wtFun(int(si), ri, ssh, rsh) - Synapses.Set(wt, SWt, syni) - Synapses.Set(wt, Wt, syni) - Synapses.Set(0.5, LWt, syni) + Synapses.Set(wt, int(SWt), int(syni)) + Synapses.Set(wt, int(Wt), int(syni)) + Synapses.Set(0.5, int(LWt), int(syni)) } } } @@ -184,10 +184,10 @@ func (pt *Path) 
SetSWtsFunc(ctx *Context, swtFun func(si, ri int, send, recv *te syni := pt.SynStIndex + syi si := int(pt.Params.SynSendLayerIndex(ctx, syni)) swt := swtFun(si, ri, ssh, rsh) - Synapses.Set(swt, SWt, syni) - wt := pt.Params.SWts.ClipWt(swt + (Synapses.Value(Wt, syni) - pt.Params.SWts.Init.Mean)) - Synapses.Set(wt, Wt, syni) - Synapses.Set(pt.Params.SWts.LWtFromWts(wt, swt), LWt, syni) + Synapses.Set(swt, int(SWt), int(syni)) + wt := pt.Params.SWts.ClipWt(swt + (Synapses.Value(int(Wt), int(syni)) - pt.Params.SWts.Init.Mean)) + Synapses.Set(wt, int(Wt), int(syni)) + Synapses.Set(pt.Params.SWts.LWtFromWts(wt, swt), int(LWt), int(syni)) } } } @@ -247,7 +247,7 @@ func (pt *Path) SWtRescale(ctx *Context) { } for _, syi := range syIndexes { syni := pt.SynStIndex + syi - swt := Synapses.Value(SWt, syni) + swt := Synapses.Value(int(SWt), int(syni)) sum += swt if swt <= pt.Params.SWts.Limit.Min { nmin++ @@ -267,10 +267,10 @@ func (pt *Path) SWtRescale(ctx *Context) { } for _, syi := range syIndexes { syni := pt.SynStIndex + syi - if Synapses.Value(SWt, syni) <= pt.Params.SWts.Limit.Max { - swt := pt.Params.SWts.ClipSWt(Synapses.Value(SWt, syni) + mdf) - Synapses.Set(swt, SWt, syni) - Synapses.Set(pt.Params.SWts.WtValue(swt, Synapses[LWt, syni]), Wt, syni) + if Synapses.Value(int(SWt), int(syni)) <= pt.Params.SWts.Limit.Max { + swt := pt.Params.SWts.ClipSWt(Synapses.Value(int(SWt), int(syni)) + mdf) + Synapses.Set(swt, int(SWt), int(syni)) + Synapses.Set(pt.Params.SWts.WtValue(swt, Synapses[LWt, syni]), int(Wt), int(syni)) } } } else { @@ -280,10 +280,10 @@ func (pt *Path) SWtRescale(ctx *Context) { } for _, syi := range syIndexes { syni := pt.SynStIndex + syi - if Synapses.Value(SWt, syni) >= pt.Params.SWts.Limit.Min { - swt := pt.Params.SWts.ClipSWt(Synapses.Value(SWt, syni) + mdf) - Synapses.Set(swt, SWt, syni) - Synapses.Set(pt.Params.SWts.WtValue(swt, Synapses[LWt, syni]), Wt, syni) + if Synapses.Value(int(SWt), int(syni)) >= pt.Params.SWts.Limit.Min { + swt := pt.Params.SWts.ClipSWt(Synapses.Value(int(SWt), int(syni)) + mdf) + Synapses.Set(swt, int(SWt), int(syni)) + Synapses.Set(pt.Params.SWts.WtValue(swt, Synapses[LWt, syni]), int(Wt), int(syni)) } } } @@ -339,9 +339,9 @@ func (pt *Path) InitWtSym(ctx *Context, rpj *Path) { recipSyni := rpj.SynStIndex + recipCi recipRi := rpj.Params.SynRecvLayerIndex(ctx, recipSyni) if recipRi == lni { - SetSynV(ctx, recipSyni, Wt, Synapses.Value(Wt, syni)) - SetSynV(ctx, recipSyni, LWt, Synapses.Value(LWt, syni)) - SetSynV(ctx, recipSyni, SWt, Synapses.Value(SWt, syni)) + SetSynV(ctx, recipSyni, Wt, Synapses.Value(int(Wt), int(syni))) + SetSynV(ctx, recipSyni, LWt, Synapses.Value(int(LWt), int(syni))) + SetSynV(ctx, recipSyni, SWt, Synapses.Value(int(SWt), int(syni))) // note: if we support SymFromTop then can have option to go other way break } @@ -353,9 +353,9 @@ func (pt *Path) InitWtSym(ctx *Context, rpj *Path) { recipSyni := rpj.SynStIndex + recipCi recipRi := rpj.Params.SynRecvLayerIndex(ctx, recipSyni) if recipRi == lni { - SetSynV(ctx, recipSyni, Wt, Synapses.Value(Wt, syni)) - SetSynV(ctx, recipSyni, LWt, Synapses.Value(LWt, syni)) - SetSynV(ctx, recipSyni, SWt, Synapses.Value(SWt, syni)) + SetSynV(ctx, recipSyni, Wt, Synapses.Value(int(Wt), int(syni))) + SetSynV(ctx, recipSyni, LWt, Synapses.Value(int(LWt), int(syni))) + SetSynV(ctx, recipSyni, SWt, Synapses.Value(int(SWt), int(syni))) // note: if we support SymFromTop then can have option to go other way break } diff --git a/axon/path_compute.go b/axon/path_compute.go index 23305612..592a2b72 
100644 --- a/axon/path_compute.go +++ b/axon/path_compute.go @@ -27,9 +27,9 @@ func (pj *Path) SendSpike(ctx *Context, ni, di, maxData uint32) { if ctx.Cycle != ctx.ThetaCycles-1-int32(pj.Params.Com.DelLen) { return } - scale *= Neurons.Value(Burst, ni, di) // Burst is regular CaSpkP for all non-SuperLayer neurons + scale *= Neurons.Value(int(Burst), int(ni), int(di)) // Burst is regular CaSpkP for all non-SuperLayer neurons } else { - if Neurons.Value(Spike, ni, di) == 0 { + if Neurons.Value(int(Spike), int(ni), int(di)) == 0 { return } } @@ -39,7 +39,7 @@ func (pj *Path) SendSpike(ctx *Context, ni, di, maxData uint32) { for syi := scon.Start; syi < scon.Start+scon.N; syi++ { syni := pj.SynStIndex + syi recvIndex := pj.Params.SynRecvLayerIndex(ctx, syni) // note: layer-specific is ok here - sv := int32(scale * Synapses.Value(Wt, syni)) + sv := int32(scale * Synapses.Value(int(Wt), int(syni))) bi := pjcom.WriteIndexOff(recvIndex, di, wrOff, pj.Params.Indexes.RecvNeurN, maxData) atomic.AddInt32(&pj.GBuf[bi], sv) } @@ -69,7 +69,7 @@ func (pj *Path) DWt(ctx *Context, si uint32) { dwt += SynCaV(ctx, syni, di, DiDWt) } // note: on GPU, this must be a separate kernel, but can be combined here - Synapses.SetAdd(dwt, DWt, syni) + Synapses.SetAdd(dwt, int(DWt), int(syni)) } } @@ -91,7 +91,7 @@ func (pj *Path) DWtSubMean(ctx *Context, ri uint32) { nnz := 0 // non-zero for _, syi := range syIndexes { syni := pj.SynStIndex + syi - dw := Synapses.Value(DWt, syni) + dw := Synapses.Value(int(DWt), int(syni)) if dw != 0 { sumDWt += dw nnz++ @@ -103,8 +103,8 @@ func (pj *Path) DWtSubMean(ctx *Context, ri uint32) { sumDWt /= float32(nnz) for _, syi := range syIndexes { syni := pj.SynStIndex + syi - if Synapses.Value(DWt, syni) != 0 { - Synapses.SetAdd(-sm*sumDWt, DWt, syni) + if Synapses.Value(int(DWt), int(syni)) != 0 { + Synapses.SetAdd(-sm*sumDWt, int(DWt), int(syni)) } } } @@ -152,29 +152,29 @@ func (pj *Path) SWtFromWt(ctx *Context) { avgDWt := float32(0) for _, syi := range syIndexes { syni := pj.SynStIndex + syi - swt := Synapses.Value(SWt, syni) + swt := Synapses.Value(int(SWt), int(syni)) // softbound for SWt - if Synapses.Value(DSWt, syni) >= 0 { - Synapses.SetMul((mx - swt), DSWt, syni) + if Synapses.Value(int(DSWt), int(syni)) >= 0 { + Synapses.SetMul((mx - swt), int(DSWt), int(syni)) } else { - Synapses.SetMul((swt - mn), DSWt, syni) + Synapses.SetMul((swt - mn), int(DSWt), int(syni)) } - avgDWt += Synapses.Value(DSWt, syni) + avgDWt += Synapses.Value(int(DSWt), int(syni)) } avgDWt /= float32(nCons) avgDWt *= pj.Params.SWts.Adapt.SubMean for _, syi := range syIndexes { syni := pj.SynStIndex + syi - Synapses.SetAdd(lr*(Synapses[DSWt, syni]-avgDWt), SWt, syni) - swt := Synapses.Value(SWt, syni) - Synapses.Set(0, DSWt, syni) - if Synapses.Value(Wt, syni) == 0 { // restore failed wts - wt := pj.Params.SWts.WtValue(swt, Synapses.Value(LWt, syni)) - Synapses.Set(wt, Wt, syni) + Synapses.SetAdd(lr*(Synapses[DSWt, syni]-avgDWt), int(SWt), int(syni)) + swt := Synapses.Value(int(SWt), int(syni)) + Synapses.Set(0, int(DSWt), int(syni)) + if Synapses.Value(int(Wt), int(syni)) == 0 { // restore failed wts + wt := pj.Params.SWts.WtValue(swt, Synapses.Value(int(LWt), int(syni))) + Synapses.Set(wt, int(Wt), int(syni)) } // + pj.Params.SWts.Adapt.RandVar( - Synapses.Set(pj.Params.SWts.LWtFromWts(Synapses[Wt, syni], swt), LWt, syni) - Synapses.Set(pj.Params.SWts.WtValue(swt, Synapses[LWt, syni]), Wt, syni) + Synapses.Set(pj.Params.SWts.LWtFromWts(Synapses[Wt, syni], swt), int(LWt), int(syni)) + 
Synapses.Set(pj.Params.SWts.WtValue(swt, Synapses[LWt, syni]), int(Wt), int(syni)) } } } @@ -200,14 +200,14 @@ func (pj *Path) SynScale(ctx *Context) { syIndexes := pj.RecvSynIndexes(lni) for _, syi := range syIndexes { syni := pj.SynStIndex + syi - lwt := Synapses.Value(LWt, syni) - swt := Synapses.Value(SWt, syni) + lwt := Synapses.Value(int(LWt), int(syni)) + swt := Synapses.Value(int(SWt), int(syni)) if adif >= 0 { // key to have soft bounding on lwt here! - Synapses.SetAdd((1-lwt)*adif*swt, LWt, syni) + Synapses.SetAdd((1-lwt)*adif*swt, int(LWt), int(syni)) } else { - Synapses.SetAdd(lwt*adif*swt, LWt, syni) + Synapses.SetAdd(lwt*adif*swt, int(LWt), int(syni)) } - Synapses.Set(pj.Params.SWts.WtValue(swt, Synapses[LWt, syni]), Wt, syni) + Synapses.Set(pj.Params.SWts.WtValue(swt, Synapses[LWt, syni]), int(Wt), int(syni)) } } } @@ -220,9 +220,9 @@ func (pj *Path) SynFail(ctx *Context) { scon := pj.SendCon[lni] for syi := scon.Start; syi < scon.Start+scon.N; syi++ { syni := pj.SynStIndex + syi - swt := Synapses.Value(SWt, syni) - if Synapses.Value(Wt, syni) == 0 { // restore failed wts - Synapses.Set(pj.Params.SWts.WtValue(swt, Synapses[LWt, syni]), Wt, syni) + swt := Synapses.Value(int(SWt), int(syni)) + if Synapses.Value(int(Wt), int(syni)) == 0 { // restore failed wts + Synapses.Set(pj.Params.SWts.WtValue(swt, Synapses[LWt, syni]), int(Wt), int(syni)) } pj.Params.Com.Fail(ctx, syni, swt) } diff --git a/axon/pathparams.go b/axon/pathparams.go index 1fa46ffd..60c8aff4 100644 --- a/axon/pathparams.go +++ b/axon/pathparams.go @@ -253,22 +253,22 @@ func (pj *PathParams) GatherSpikes(ctx *Context, ly *LayerParams, ni, di uint32, switch pj.Com.GType { case ExcitatoryG: *gSyn = ly.Acts.Dt.GeSynFromRaw(*gSyn, gRaw) - Neurons.SetAdd(gRaw, GeRaw, ni, di) - Neurons.SetAdd(*gSyn, GeSyn, ni, di) + Neurons.SetAdd(gRaw, int(GeRaw), int(ni), int(di)) + Neurons.SetAdd(*gSyn, int(GeSyn), int(ni), int(di)) case InhibitoryG: *gSyn = ly.Acts.Dt.GiSynFromRaw(*gSyn, gRaw) - Neurons.SetAdd(gRaw, GiRaw, ni, di) - Neurons.SetAdd(*gSyn, GiSyn, ni, di) + Neurons.SetAdd(gRaw, int(GiRaw), int(ni), int(di)) + Neurons.SetAdd(*gSyn, int(GiSyn), int(ni), int(di)) case ModulatoryG: *gSyn = ly.Acts.Dt.GeSynFromRaw(*gSyn, gRaw) - Neurons.SetAdd(gRaw, GModRaw, ni, di) - Neurons.SetAdd(*gSyn, GModSyn, ni, di) + Neurons.SetAdd(gRaw, int(GModRaw), int(ni), int(di)) + Neurons.SetAdd(*gSyn, int(GModSyn), int(ni), int(di)) case MaintG: *gSyn = ly.Acts.Dt.GeSynFromRaw(*gSyn, gRaw) - Neurons.SetAdd(gRaw, GMaintRaw, ni, di) + Neurons.SetAdd(gRaw, int(GMaintRaw), int(ni), int(di)) // note: Syn happens via NMDA in Act case ContextG: - Neurons.SetAdd(gRaw, CtxtGeRaw, ni, di) + Neurons.SetAdd(gRaw, int(CtxtGeRaw), int(ni), int(di)) } } @@ -306,22 +306,22 @@ func (pj *PathParams) DWtSyn(ctx *Context, syni, si, ri, di uint32, layPool, sub // SynCa gets the synaptic calcium P (potentiation) and D (depression) // values, using optimized computation. 
func (pj *PathParams) SynCa(ctx *Context, si, ri, di uint32, syCaP, syCaD *float32) { - rb0 := Neurons.Value(SpkBin0, ri, di) - sb0 := Neurons.Value(SpkBin0, si, di) - rb1 := Neurons.Value(SpkBin1, ri, di) - sb1 := Neurons.Value(SpkBin1, si, di) - rb2 := Neurons.Value(SpkBin2, ri, di) - sb2 := Neurons.Value(SpkBin2, si, di) - rb3 := Neurons.Value(SpkBin3, ri, di) - sb3 := Neurons.Value(SpkBin3, si, di) - rb4 := Neurons.Value(SpkBin4, ri, di) - sb4 := Neurons.Value(SpkBin4, si, di) - rb5 := Neurons.Value(SpkBin5, ri, di) - sb5 := Neurons.Value(SpkBin5, si, di) - rb6 := Neurons.Value(SpkBin6, ri, di) - sb6 := Neurons.Value(SpkBin6, si, di) - rb7 := Neurons.Value(SpkBin7, ri, di) - sb7 := Neurons.Value(SpkBin7, si, di) + rb0 := Neurons.Value(int(SpkBin0), int(ri), int(di)) + sb0 := Neurons.Value(int(SpkBin0), int(si), int(di)) + rb1 := Neurons.Value(int(SpkBin1), int(ri), int(di)) + sb1 := Neurons.Value(int(SpkBin1), int(si), int(di)) + rb2 := Neurons.Value(int(SpkBin2), int(ri), int(di)) + sb2 := Neurons.Value(int(SpkBin2), int(si), int(di)) + rb3 := Neurons.Value(int(SpkBin3), int(ri), int(di)) + sb3 := Neurons.Value(int(SpkBin3), int(si), int(di)) + rb4 := Neurons.Value(int(SpkBin4), int(ri), int(di)) + sb4 := Neurons.Value(int(SpkBin4), int(si), int(di)) + rb5 := Neurons.Value(int(SpkBin5), int(ri), int(di)) + sb5 := Neurons.Value(int(SpkBin5), int(si), int(di)) + rb6 := Neurons.Value(int(SpkBin6), int(ri), int(di)) + sb6 := Neurons.Value(int(SpkBin6), int(si), int(di)) + rb7 := Neurons.Value(int(SpkBin7), int(ri), int(di)) + sb7 := Neurons.Value(int(SpkBin7), int(si), int(di)) b0 := 0.1 * (rb0 * sb0) b1 := 0.1 * (rb1 * sb1) @@ -344,16 +344,16 @@ func (pj *PathParams) DWtSynCortex(ctx *Context, syni, si, ri, di uint32, layPoo dtr := syCaD // delta trace, caD reflects entire window if pj.PathType == CTCtxtPath { // layer 6 CT pathway - dtr = Neurons.Value(BurstPrv, si, di) + dtr = Neurons.Value(int(BurstPrv), int(si), int(di)) } // save delta trace for GUI - SynapseTraces.Set(dtr, DTr, syni, di) + SynapseTraces.Set(dtr, int(DTr), int(syni), int(di)) // TrFromCa(prev-multiTrial Integrated Trace, deltaTrace), as a mixing func - tr := pj.Learn.Trace.TrFromCa(SynapseTraces.Value(Tr, syni, di), dtr) + tr := pj.Learn.Trace.TrFromCa(SynapseTraces.Value(int(Tr), int(syni), int(di)), dtr) // save new trace, updated w/ credit assignment (dependent on Tau in the TrFromCa function - SynapseTraces.Set(tr, Tr, syni, di) + SynapseTraces.Set(tr, int(Tr), int(syni), int(di)) // failed con, no learn - if Synapses.Value(Wt, syni) == 0 { + if Synapses.Value(int(Wt), int(syni)) == 0 { return } @@ -362,21 +362,21 @@ func (pj *PathParams) DWtSynCortex(ctx *Context, syni, si, ri, di uint32, layPoo if isTarget { err = syCaP - syCaD // for target layers, syn Ca drives error signal directly } else { - err = tr * (Neurons.Value(NrnCaP, ri, di) - Neurons.Value(NrnCaD, ri, di)) // hiddens: recv NMDA Ca drives error signal w/ trace credit + err = tr * (Neurons.Value(int(NrnCaP), int(ri), int(di)) - Neurons.Value(int(NrnCaD), int(ri), int(di))) // hiddens: recv NMDA Ca drives error signal w/ trace credit } // note: trace ensures that nothing changes for inactive synapses.. // sb immediately -- enters into zero sum. 
// also other types might not use, so need to do this per learning rule - lwt := Synapses.Value(LWt, syni) // linear weight + lwt := Synapses.Value(int(LWt), int(syni)) // linear weight if err > 0 { err *= (1 - lwt) } else { err *= lwt } if pj.PathType == CTCtxtPath { // rn.RLRate IS needed for other pathways, just not the context one - SynapseTraces.Set(pj.Learn.LRate.Eff*err, DiDWt, syni, di) + SynapseTraces.Set(pj.Learn.LRate.Eff*err, int(DiDWt), int(syni), int(di)) } else { - SynapseTraces.Set(Neurons[RLRate, ri, di]*pj.Learn.LRate.Eff*err, DiDWt, syni, di) + SynapseTraces.Set(Neurons[RLRate, ri, di]*pj.Learn.LRate.Eff*err, int(DiDWt), int(syni), int(di)) } } @@ -384,12 +384,12 @@ func (pj *PathParams) DWtSynCortex(ctx *Context, syni, si, ri, di uint32, layPoo // Uses synaptically integrated spiking, computed at the Theta cycle interval. // This is the trace version for hidden units, and uses syn CaP - CaD for targets. func (pj *PathParams) DWtSynHebb(ctx *Context, syni, si, ri, di uint32, layPool, subPool *Pool) { - rNrnCaP := Neurons.Value(NrnCaP, ri, di) - sNrnCap := Neurons.Value(NrnCaP, si, di) - lwt := Synapses.Value(LWt, syni) // linear weight + rNrnCaP := Neurons.Value(int(NrnCaP), int(ri), int(di)) + sNrnCap := Neurons.Value(int(NrnCaP), int(si), int(di)) + lwt := Synapses.Value(int(LWt), int(syni)) // linear weight hebb := rNrnCaP * (pj.Learn.Hebb.Up*sNrnCap*(1-lwt) - pj.Learn.Hebb.Down*(1-sNrnCap)*lwt) // not: Neurons[RLRate, ri, di]* - SynapseTraces.Set(pj.Learn.LRate.Eff*hebb, DiDWt, syni, di) + SynapseTraces.Set(pj.Learn.LRate.Eff*hebb, int(DiDWt), int(syni), int(di)) } // DWtSynHip computes the weight change (learning) at given synapse for cortex + Hip (CPCA Hebb learning). @@ -401,19 +401,19 @@ func (pj *PathParams) DWtSynHip(ctx *Context, syni, si, ri, di uint32, layPool, pj.SynCa(ctx, si, ri, di, &syCaP, &syCaD) dtr := syCaD // delta trace, caD reflects entire window // save delta trace for GUI - SynapseTraces.Set(dtr, DTr, syni, di) + SynapseTraces.Set(dtr, int(DTr), int(syni), int(di)) // TrFromCa(prev-multiTrial Integrated Trace, deltaTrace), as a mixing func - tr := pj.Learn.Trace.TrFromCa(SynapseTraces.Value(Tr, syni, di), dtr) + tr := pj.Learn.Trace.TrFromCa(SynapseTraces.Value(int(Tr), int(syni), int(di)), dtr) // save new trace, updated w/ credit assignment (dependent on Tau in the TrFromCa function - SynapseTraces.Set(tr, Tr, syni, di) + SynapseTraces.Set(tr, int(Tr), int(syni), int(di)) // failed con, no learn - if Synapses.Value(Wt, syni) == 0 { + if Synapses.Value(int(Wt), int(syni)) == 0 { return } // error-driven learning part - rNrnCaP := Neurons.Value(NrnCaP, ri, di) - rNrnCaD := Neurons.Value(NrnCaD, ri, di) + rNrnCaP := Neurons.Value(int(NrnCaP), int(ri), int(di)) + rNrnCaD := Neurons.Value(int(NrnCaD), int(ri), int(di)) var err float32 if isTarget { err = syCaP - syCaD // for target layers, syn Ca drives error signal directly @@ -423,7 +423,7 @@ func (pj *PathParams) DWtSynHip(ctx *Context, syni, si, ri, di uint32, layPool, // note: trace ensures that nothing changes for inactive synapses.. // sb immediately -- enters into zero sum. 
// also other types might not use, so need to do this per learning rule - lwt := Synapses.Value(LWt, syni) // linear weight + lwt := Synapses.Value(int(LWt), int(syni)) // linear weight if err > 0 { err *= (1 - lwt) } else { @@ -431,14 +431,14 @@ func (pj *PathParams) DWtSynHip(ctx *Context, syni, si, ri, di uint32, layPool, } // hebbian-learning part - sNrnCap := Neurons.Value(NrnCaP, si, di) + sNrnCap := Neurons.Value(int(NrnCaP), int(si), int(di)) savg := 0.5 + pj.Hip.SAvgCor*(pj.Hip.SNominal-0.5) savg = 0.5 / math32.Max(pj.Hip.SAvgThr, savg) // keep this Sending Average Correction term within bounds (SAvgThr) hebb := rNrnCaP * (sNrnCap*(savg-lwt) - (1-sNrnCap)*lwt) // setting delta weight (note: impossible to be CTCtxtPath) - dwt := Neurons.Value(RLRate, ri, di) * pj.Learn.LRate.Eff * (pj.Hip.Hebb*hebb + pj.Hip.Err*err) - SynapseTraces.Set(dwt, DiDWt, syni, di) + dwt := Neurons.Value(int(RLRate), int(ri), int(di)) * pj.Learn.LRate.Eff * (pj.Hip.Hebb*hebb + pj.Hip.Err*err) + SynapseTraces.Set(dwt, int(DiDWt), int(syni), int(di)) } // DWtSynBLA computes the weight change (learning) at given synapse for BLAPath type. @@ -449,35 +449,35 @@ func (pj *PathParams) DWtSynBLA(ctx *Context, syni, si, ri, di uint32, layPool, dwt := float32(0) ach := GlbV(ctx, di, GvACh) if GlbV(ctx, di, GvHasRew) > 0 { // learn and reset - ract := Neurons.Value(CaSpkD, ri, di) + ract := Neurons.Value(int(CaSpkD), int(ri), int(di)) if ract < pj.Learn.Trace.LearnThr { ract = 0 } tr := SynCaV(ctx, syni, di, Tr) ustr := pj.BLA.USTrace - tr = ustr*Neurons.Value(Burst, si, di) + (1.0-ustr)*tr - delta := Neurons.Value(CaSpkP, ri, di) - Neurons.Value(SpkPrv, ri, di) + tr = ustr*Neurons.Value(int(Burst), int(si), int(di)) + (1.0-ustr)*tr + delta := Neurons.Value(int(CaSpkP), int(ri), int(di)) - Neurons.Value(int(SpkPrv), int(ri), int(di)) if delta < 0 { // neg delta learns slower in Acq, not Ext delta *= pj.BLA.NegDeltaLRate } dwt = tr * delta * ract - SynapseTraces.Set(0.0, Tr, syni, di) + SynapseTraces.Set(0.0, int(Tr), int(syni), int(di)) } else if ach > pj.BLA.AChThr { // note: the former NonUSLRate parameter is not used -- Trace update Tau replaces it.. 
elegant - dtr := ach * Neurons.Value(Burst, si, di) - SynapseTraces.Set(dtr, DTr, syni, di) + dtr := ach * Neurons.Value(int(Burst), int(si), int(di)) + SynapseTraces.Set(dtr, int(DTr), int(syni), int(di)) tr := pj.Learn.Trace.TrFromCa(SynCaV(ctx, syni, di, Tr), dtr) - SynapseTraces.Set(tr, Tr, syni, di) + SynapseTraces.Set(tr, int(Tr), int(syni), int(di)) } else { - SynapseTraces.Set(0.0, DTr, syni, di) + SynapseTraces.Set(0.0, int(DTr), int(syni), int(di)) } - lwt := Synapses.Value(LWt, syni) + lwt := Synapses.Value(int(LWt), int(syni)) if dwt > 0 { dwt *= (1 - lwt) } else { dwt *= lwt } - SynapseTraces.Set(Neurons[RLRate, ri, di]*pj.Learn.LRate.Eff*dwt, DiDWt, syni, di) + SynapseTraces.Set(Neurons[RLRate, ri, di]*pj.Learn.LRate.Eff*dwt, int(DiDWt), int(syni), int(di)) } // DWtSynRWPred computes the weight change (learning) at given synapse, @@ -489,21 +489,21 @@ func (pj *PathParams) DWtSynRWPred(ctx *Context, syni, si, ri, di uint32, layPoo lr := pj.Learn.LRate.Eff eff_lr := lr if NrnI(ctx, ri, NrnNeurIndex) == 0 { - if Neurons.Value(Ge, ri, di) > Neurons.Value(Act, ri, di) && da > 0 { // clipped at top, saturate up + if Neurons.Value(int(Ge), int(ri), int(di)) > Neurons.Value(int(Act), int(ri), int(di)) && da > 0 { // clipped at top, saturate up da = 0 } - if Neurons.Value(Ge, ri, di) < Neurons.Value(Act, ri, di) && da < 0 { // clipped at bottom, saturate down + if Neurons.Value(int(Ge), int(ri), int(di)) < Neurons.Value(int(Act), int(ri), int(di)) && da < 0 { // clipped at bottom, saturate down da = 0 } if da < 0 { eff_lr *= pj.RLPred.OppSignLRate } } else { - eff_lr = -eff_lr // negative case - if Neurons.Value(Ge, ri, di) > Neurons.Value(Act, ri, di) && da < 0 { // clipped at top, saturate up + eff_lr = -eff_lr // negative case + if Neurons.Value(int(Ge), int(ri), int(di)) > Neurons.Value(int(Act), int(ri), int(di)) && da < 0 { // clipped at top, saturate up da = 0 } - if Neurons.Value(Ge, ri, di) < Neurons.Value(Act, ri, di) && da > 0 { // clipped at bottom, saturate down + if Neurons.Value(int(Ge), int(ri), int(di)) < Neurons.Value(int(Act), int(ri), int(di)) && da > 0 { // clipped at bottom, saturate down da = 0 } if da >= 0 { @@ -511,8 +511,8 @@ func (pj *PathParams) DWtSynRWPred(ctx *Context, syni, si, ri, di uint32, layPoo } } - dwt := da * Neurons.Value(CaSpkP, si, di) // no recv unit activation - SynapseTraces.Set(eff_lr*dwt, DiDWt, syni, di) + dwt := da * Neurons.Value(int(CaSpkP), int(si), int(di)) // no recv unit activation + SynapseTraces.Set(eff_lr*dwt, int(DiDWt), int(syni), int(di)) } // DWtSynTDPred computes the weight change (learning) at given synapse, @@ -535,8 +535,8 @@ func (pj *PathParams) DWtSynTDPred(ctx *Context, syni, si, ri, di uint32, layPoo } } - dwt := da * Neurons.Value(SpkPrv, si, di) // no recv unit activation, prior trial act - SynapseTraces.Set(eff_lr*dwt, DiDWt, syni, di) + dwt := da * Neurons.Value(int(SpkPrv), int(si), int(di)) // no recv unit activation, prior trial act + SynapseTraces.Set(eff_lr*dwt, int(DiDWt), int(syni), int(di)) } // DWtSynVSMatrix computes the weight change (learning) at given synapse, @@ -547,14 +547,14 @@ func (pj *PathParams) DWtSynVSMatrix(ctx *Context, syni, si, ri, di uint32, layP hasRew := GlbV(ctx, di, GvHasRew) > 0 ach := GlbV(ctx, di, GvACh) if !hasRew && ach < 0.1 { - SynapseTraces.Set(0.0, DTr, syni, di) + SynapseTraces.Set(0.0, int(DTr), int(syni), int(di)) return } - rlr := Neurons.Value(RLRate, ri, di) + rlr := Neurons.Value(int(RLRate), int(ri), int(di)) - rplus := Neurons.Value(CaSpkP, ri, di) - rminus := 
Neurons.Value(CaSpkD, ri, di) - sact := Neurons.Value(CaSpkD, si, di) + rplus := Neurons.Value(int(CaSpkP), int(ri), int(di)) + rminus := Neurons.Value(int(CaSpkD), int(ri), int(di)) + sact := Neurons.Value(int(CaSpkD), int(si), int(di)) dtr := ach * (pj.Matrix.Delta * sact * (rplus - rminus)) if rminus > pj.Learn.Trace.LearnThr { // key: prevents learning if < threshold dtr += ach * (pj.Matrix.Credit * sact * rminus) @@ -565,13 +565,13 @@ func (pj *PathParams) DWtSynVSMatrix(ctx *Context, syni, si, ri, di uint32, layP tr += (1 - GlbV(ctx, di, GvGoalMaint)) * dtr } dwt := rlr * pj.Learn.LRate.Eff * tr - SynapseTraces.Set(dwt, DiDWt, syni, di) - SynapseTraces.Set(0.0, Tr, syni, di) - SynapseTraces.Set(0.0, DTr, syni, di) + SynapseTraces.Set(dwt, int(DiDWt), int(syni), int(di)) + SynapseTraces.Set(0.0, int(Tr), int(syni), int(di)) + SynapseTraces.Set(0.0, int(DTr), int(syni), int(di)) } else { dtr *= rlr - SynapseTraces.Set(dtr, DTr, syni, di) - SynapseTraces.SetAdd(dtr, Tr, syni, di) + SynapseTraces.Set(dtr, int(DTr), int(syni), int(di)) + SynapseTraces.SetAdd(dtr, int(Tr), int(syni), int(di)) } } @@ -580,39 +580,39 @@ func (pj *PathParams) DWtSynVSMatrix(ctx *Context, syni, si, ri, di uint32, layP func (pj *PathParams) DWtSynDSMatrix(ctx *Context, syni, si, ri, di uint32, layPool, subPool *Pool) { // note: rn.RLRate already has ACh * DA * (D1 vs. D2 sign reversal) factored in. - rlr := Neurons.Value(RLRate, ri, di) + rlr := Neurons.Value(int(RLRate), int(ri), int(di)) if GlbV(ctx, di, GvHasRew) > 0 { // US time -- use DA and current recv activity tr := SynCaV(ctx, syni, di, Tr) dwt := rlr * pj.Learn.LRate.Eff * tr - SynapseTraces.Set(dwt, DiDWt, syni, di) - SynapseTraces.Set(0.0, Tr, syni, di) - SynapseTraces.Set(0.0, DTr, syni, di) + SynapseTraces.Set(dwt, int(DiDWt), int(syni), int(di)) + SynapseTraces.Set(0.0, int(Tr), int(syni), int(di)) + SynapseTraces.Set(0.0, int(DTr), int(syni), int(di)) } else { - pfmod := pj.Matrix.BasePF + Neurons.Value(GModSyn, ri, di) - rplus := Neurons.Value(CaSpkP, ri, di) - rminus := Neurons.Value(CaSpkD, ri, di) - sact := Neurons.Value(CaSpkD, si, di) + pfmod := pj.Matrix.BasePF + Neurons.Value(int(GModSyn), int(ri), int(di)) + rplus := Neurons.Value(int(CaSpkP), int(ri), int(di)) + rminus := Neurons.Value(int(CaSpkD), int(ri), int(di)) + sact := Neurons.Value(int(CaSpkD), int(si), int(di)) dtr := rlr * (pj.Matrix.Delta * sact * (rplus - rminus)) if rminus > pj.Learn.Trace.LearnThr { // key: prevents learning if < threshold dtr += rlr * (pj.Matrix.Credit * pfmod * sact * rminus) } - SynapseTraces.Set(dtr, DTr, syni, di) - SynapseTraces.SetAdd(dtr, Tr, syni, di) + SynapseTraces.Set(dtr, int(DTr), int(syni), int(di)) + SynapseTraces.SetAdd(dtr, int(Tr), int(syni), int(di)) } } // DWtSynVSPatch computes the weight change (learning) at given synapse, // for the VSPatchPath type. func (pj *PathParams) DWtSynVSPatch(ctx *Context, syni, si, ri, di uint32, layPool, subPool *Pool) { - ract := Neurons.Value(SpkPrv, ri, di) // t-1 + ract := Neurons.Value(int(SpkPrv), int(ri), int(di)) // t-1 if ract < pj.Learn.Trace.LearnThr { ract = 0 } // note: rn.RLRate already has ACh * DA * (D1 vs. D2 sign reversal) factored in. // and also the logic that non-positive DA leads to weight decreases. 
- sact := Neurons.Value(SpkPrv, si, di) // t-1 - dwt := Neurons.Value(RLRate, ri, di) * pj.Learn.LRate.Eff * sact * ract - SynapseTraces.Set(dwt, DiDWt, syni, di) + sact := Neurons.Value(int(SpkPrv), int(si), int(di)) // t-1 + dwt := Neurons.Value(int(RLRate), int(ri), int(di)) * pj.Learn.LRate.Eff * sact * ract + SynapseTraces.Set(dwt, int(DiDWt), int(syni), int(di)) } /////////////////////////////////////////////////// @@ -624,7 +624,7 @@ func (pj *PathParams) DWtFromDiDWtSyn(ctx *Context, syni uint32) { for di := uint32(0); di < ctx.NetIndexes.NData; di++ { dwt += SynCaV(ctx, syni, di, DiDWt) } - Synapses.SetAdd(dwt, DWt, syni) + Synapses.SetAdd(dwt, int(DWt), int(syni)) } // WtFromDWtSyn is the overall entry point for updating weights from weight changes. @@ -645,30 +645,30 @@ func (pj *PathParams) WtFromDWtSyn(ctx *Context, syni uint32) { // WtFromDWtSynCortex updates weights from dwt changes func (pj *PathParams) WtFromDWtSynCortex(ctx *Context, syni uint32) { - dwt := Synapses.Value(DWt, syni) - Synapses.SetAdd(dwt, DSWt, syni) - wt := Synapses.Value(Wt, syni) - lwt := Synapses.Value(LWt, syni) - - pj.SWts.WtFromDWt(&wt, &lwt, dwt, Synapses.Value(SWt, syni)) - Synapses.Set(0, DWt, syni) - Synapses.Set(wt, Wt, syni) - Synapses.Set(lwt, LWt, syni) + dwt := Synapses.Value(int(DWt), int(syni)) + Synapses.SetAdd(dwt, int(DSWt), int(syni)) + wt := Synapses.Value(int(Wt), int(syni)) + lwt := Synapses.Value(int(LWt), int(syni)) + + pj.SWts.WtFromDWt(&wt, &lwt, dwt, Synapses.Value(int(SWt), int(syni))) + Synapses.Set(0, int(DWt), int(syni)) + Synapses.Set(wt, int(Wt), int(syni)) + Synapses.Set(lwt, int(LWt), int(syni)) // pj.Com.Fail(&sy.Wt, sy.SWt) // skipping for now -- not useful actually } // WtFromDWtSynNoLimits -- weight update without limits func (pj *PathParams) WtFromDWtSynNoLimits(ctx *Context, syni uint32) { - dwt := Synapses.Value(DWt, syni) + dwt := Synapses.Value(int(DWt), int(syni)) if dwt == 0 { return } - Synapses.SetAdd(dwt, Wt, syni) - if Synapses.Value(Wt, syni) < 0 { - Synapses.Set(0, Wt, syni) + Synapses.SetAdd(dwt, int(Wt), int(syni)) + if Synapses.Value(int(Wt), int(syni)) < 0 { + Synapses.Set(0, int(Wt), int(syni)) } - Synapses.Set(Synapses[Wt, syni], LWt, syni) - Synapses.Set(0, DWt, syni) + Synapses.Set(Synapses[Wt, syni], int(LWt), int(syni)) + Synapses.Set(0, int(DWt), int(syni)) } //gosl:end pathparams diff --git a/kinase/linear.go b/kinase/linear.go index 43b00016..7e8bab7f 100644 --- a/kinase/linear.go +++ b/kinase/linear.go @@ -10,6 +10,7 @@ import ( "math/rand" "cogentcore.org/core/math32" + "cogentcore.org/core/tensor" "cogentcore.org/core/tensor/stats/glm" "cogentcore.org/core/tensor/table" ) @@ -108,11 +109,11 @@ func (ls *Linear) InitTable() { } nneur := ls.NumBins ls.Data.AddIntColumn("Trial") - ls.Data.AddFloat64TensorColumn("Hz", []int{4}, "Send*Recv*Minus*Plus") - ls.Data.AddFloat64TensorColumn("State", []int{nneur}, "States") - ls.Data.AddFloat64TensorColumn("StdCa", []int{2}, "P,D") - ls.Data.AddFloat64TensorColumn("PredCa", []int{2}, "P,D") - ls.Data.AddFloat64TensorColumn("ErrCa", []int{2}, "P,D") + ls.Data.AddFloat64Column("Hz", 4) + ls.Data.AddFloat64Column("State", nneur) + ls.Data.AddFloat64Column("StdCa", 2) + ls.Data.AddFloat64Column("PredCa", 2) + ls.Data.AddFloat64Column("ErrCa", 2) ls.Data.AddFloat64Column("SSE") // total SSE ls.Data.SetNumRows(ls.TotalTrials) } @@ -236,8 +237,8 @@ func (ls *Linear) Run() { } func (ls *Linear) SetSynState(sy *Synapse, row int) { - ls.Data.SetTensorFloat1D("StdCa", row, 0, float64(sy.CaP)) - 
ls.Data.SetTensorFloat1D("StdCa", row, 1, float64(sy.CaD)) + ls.Data.Column("StdCa").SetFloatRowCell(float64(sy.CaP), row, 0) + ls.Data.Column("StdCa").SetFloatRowCell(float64(sy.CaD), row, 1) } func (ls *Linear) SetBins(sn, rn *Neuron, off, row int) { @@ -245,7 +246,7 @@ func (ls *Linear) SetBins(sn, rn *Neuron, off, row int) { r := rn.SpikeBins[i] bs := (r * s) / 10.0 ls.SpikeBins[i] = bs - ls.Data.SetTensorFloat1D("State", row, off+i, float64(bs)) + ls.Data.Column("State").SetFloatRowCell(float64(bs), row, off+i) } } @@ -253,11 +254,11 @@ func (ls *Linear) SetBins(sn, rn *Neuron, off, row int) { func (ls *Linear) Trial(sendMinusHz, sendPlusHz, recvMinusHz, recvPlusHz float32, ti, row int) { // ls.ErrDWt = (plusHz - minusHz) / 100 - ls.Data.SetFloat("Trial", row, float64(ti)) - ls.Data.SetTensorFloat1D("Hz", row, 0, float64(sendMinusHz)) - ls.Data.SetTensorFloat1D("Hz", row, 1, float64(sendPlusHz)) - ls.Data.SetTensorFloat1D("Hz", row, 2, float64(recvMinusHz)) - ls.Data.SetTensorFloat1D("Hz", row, 3, float64(recvPlusHz)) + ls.Data.Column("Trial").SetFloatRow(float64(ti), row) + ls.Data.Column("Hz").SetFloatRowCell(float64(sendMinusHz), row, 0) + ls.Data.Column("Hz").SetFloatRowCell(float64(sendPlusHz), row, 1) + ls.Data.Column("Hz").SetFloatRowCell(float64(recvMinusHz), row, 2) + ls.Data.Column("Hz").SetFloatRowCell(float64(recvPlusHz), row, 3) minusCycles := ls.NCycles - ls.PlusCycles @@ -297,7 +298,7 @@ func (ls *Linear) Trial(sendMinusHz, sendPlusHz, recvMinusHz, recvPlusHz float32 // Regress runs the linear regression on the data func (ls *Linear) Regress() { r := glm.NewGLM() - err := r.SetTable(table.NewIndexView(&ls.Data), "State", "StdCa", "PredCa", "ErrCa") + err := r.SetTable(&ls.Data, "State", "StdCa", "PredCa", "ErrCa") if err != nil { slog.Error(err.Error()) return @@ -354,5 +355,5 @@ func (ls *Linear) Regress() { fmt.Println(str + "}") } */ - ls.Data.SaveCSV("linear_data.tsv", table.Tab, table.Headers) + ls.Data.SaveCSV("linear_data.tsv", tensor.Tab, table.Headers) }
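The kinase/linear.go hunks carry the same migration into the table API: AddFloat64TensorColumn(name, shape, dimNames) becomes AddFloat64Column(name, cellSizes...), and name-based setters such as SetTensorFloat1D("Hz", row, 0, v) become column-first access, Column("Hz").SetFloatRowCell(v, row, 0). The row/cell addressing itself is unchanged; a minimal self-contained stand-in (the Column type below is illustrative, not the cogentcore table API):

package main

import "fmt"

// Column is an illustrative stand-in for a float64 table column backed by a
// row-major [rows x cells] block, matching the SetFloatRowCell addressing
// used in the hunks above.
type Column struct {
	cells int
	data  []float64
}

func NewColumn(rows, cells int) *Column {
	return &Column{cells: cells, data: make([]float64, rows*cells)}
}

// SetFloatRowCell writes one cell of one row, as Column("Hz").SetFloatRowCell does.
func (c *Column) SetFloatRowCell(val float64, row, cell int) {
	c.data[row*c.cells+cell] = val
}

func (c *Column) FloatRowCell(row, cell int) float64 {
	return c.data[row*c.cells+cell]
}

func main() {
	// mirrors: ls.Data.AddFloat64Column("Hz", 4) with 3 rows allocated
	hz := NewColumn(3, 4)
	// mirrors: ls.Data.Column("Hz").SetFloatRowCell(float64(sendMinusHz), row, 0)
	hz.SetFloatRowCell(25, 0, 0)
	hz.SetFloatRowCell(50, 0, 1)
	fmt.Println(hz.FloatRowCell(0, 0), hz.FloatRowCell(0, 1)) // 25 50
}

The delimiter constant for SaveCSV moves in the same direction, from table.Tab to tensor.Tab, consistent with the new tensor import added at the top of the file.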