From 0345a91a7f399c46edf2d106dcca8d35d73e2cec Mon Sep 17 00:00:00 2001 From: "Randall C. O'Reilly" Date: Sun, 3 Nov 2024 13:41:34 -0800 Subject: [PATCH] major update to context handling: mostly now self-contained within network, which maintains the single canonical value; fixed init of spkbin and improved logic: passing TestInitWeights now. --- axon/act.go | 7 + axon/act.goal | 7 + axon/basic_test.go | 302 ++++++++++++++++++--------------------- axon/basic_test.goal | 302 ++++++++++++++++++--------------------- axon/context.go | 3 +- axon/hip_net.go | 12 +- axon/layer-cpu.go | 5 +- axon/layer-cpu.goal | 5 +- axon/layer.go | 76 ++++------ axon/layer.goal | 77 ++++------ axon/layer_test.go | 12 +- axon/layerbase.go | 35 +++-- axon/layerbase.goal | 38 +++-- axon/layerparams.go | 49 ++----- axon/layerparams.goal | 49 ++----- axon/looper.go | 25 ++-- axon/network.go | 248 +++++++++++++------------------- axon/network.goal | 246 +++++++++++++------------------ axon/networkbase.go | 126 ++++++++++++++-- axon/networkbase.goal | 127 ++++++++++++++-- axon/networkbase_test.go | 11 +- axon/pool.go | 2 - axon/pool.goal | 2 - axon/pool_test.go | 27 ++-- axon/rubicon.go | 192 ++++++++++++------------- axon/rubicon.goal | 190 ++++++++++++------------ 26 files changed, 1063 insertions(+), 1112 deletions(-) diff --git a/axon/act.go b/axon/act.go index 89c787d4..19b195d0 100644 --- a/axon/act.go +++ b/axon/act.go @@ -893,6 +893,9 @@ func (ac *ActParams) DecayState(ctx *Context, ni, di uint32, decay, glong, ahp f Neurons.Set(-1, int(ISIAvg), int(ni), int(di)) Neurons.Set(ac.Init.Act, int(ActInt), int(ni), int(di)) Neurons.Set(0, int(Spiked), int(ni), int(di)) + for i := range 8 { + Neurons.Set(0.0, int(SpkBin0+NeuronVars(i)), int(ni), int(di)) + } if decay > 0 { // no-op for most, but not all.. Neurons.Set(0, int(Spike), int(ni), int(di)) @@ -1035,6 +1038,10 @@ func (ac *ActParams) InitActs(ctx *Context, ni, di uint32) { Neurons.Set(0, int(CtxtGeRaw), int(ni), int(di)) Neurons.Set(0, int(CtxtGeOrig), int(ni), int(di)) + for i := range 8 { + Neurons.Set(0.0, int(SpkBin0+NeuronVars(i)), int(ni), int(di)) + } + ac.InitLongActs(ctx, ni, di) } diff --git a/axon/act.goal b/axon/act.goal index 199cb3b4..ed888cdd 100644 --- a/axon/act.goal +++ b/axon/act.goal @@ -891,6 +891,9 @@ func (ac *ActParams) DecayState(ctx *Context, ni, di uint32, decay, glong, ahp f Neurons[ISIAvg, ni, di] = -1 Neurons[ActInt, ni, di] = ac.Init.Act Neurons[Spiked, ni, di] = 0 + for i := range 8 { + Neurons[SpkBin0+NeuronVars(i), ni, di] = 0.0 + } if decay > 0 { // no-op for most, but not all.. 
Neurons[Spike, ni, di] = 0 @@ -1033,6 +1036,10 @@ func (ac *ActParams) InitActs(ctx *Context, ni, di uint32) { Neurons[CtxtGeRaw, ni, di] = 0 Neurons[CtxtGeOrig, ni, di] = 0 + for i := range 8 { + Neurons[SpkBin0+NeuronVars(i), ni, di] = 0.0 + } + ac.InitLongActs(ctx, ni, di) } diff --git a/axon/basic_test.go b/axon/basic_test.go index ec7f21ff..8114ac1a 100644 --- a/axon/basic_test.go +++ b/axon/basic_test.go @@ -93,7 +93,7 @@ var ParamSets = params.Sets{ func newTestNet(nData int) *Network { testNet := NewNetwork("testNet") testNet.SetRandSeed(42) // critical for ActAvg values - testNet.SetMaxData(ctx, nData) + testNet.SetMaxData(nData) inLay := testNet.AddLayer("Input", InputLayer, 4, 1) hidLay := testNet.AddLayer("Hidden", SuperLayer, 4, 1) @@ -104,15 +104,14 @@ func newTestNet(nData int) *Network { testNet.ConnectLayers(hidLay, outLay, paths.NewOneToOne(), ForwardPath) testNet.ConnectLayers(outLay, hidLay, paths.NewOneToOne(), BackPath) - testNet.Rubicon.SetNUSs(ctx, 4, 3) + testNet.Rubicon.SetNUSs(4, 3) testNet.Rubicon.Defaults() testNet.Build() - testNet.Ctx.NData = uint32(nData) testNet.Defaults() testNet.ApplyParams(ParamSets["Base"], false) // false) // true) // no msg - testNet.InitWeights(ctx) // get GScale here - testNet.NewState(ctx) + testNet.InitWeights() // get GScale here + testNet.NewState(etime.Train) return testNet } @@ -120,7 +119,7 @@ func newTestNet(nData int) *Network { func newTestNetFull(nData int) *Network { testNet := NewNetwork("testNetFull") testNet.SetRandSeed(42) // critical for ActAvg values - testNet.SetMaxData(ctx, nData) + testNet.SetMaxData(nData) inLay := testNet.AddLayer("Input", InputLayer, 4, 1) hidLay := testNet.AddLayer("Hidden", SuperLayer, 4, 1) @@ -132,18 +131,17 @@ func newTestNetFull(nData int) *Network { testNet.ConnectLayers(hidLay, outLay, full, ForwardPath) testNet.ConnectLayers(outLay, hidLay, full, BackPath) - testNet.Build(ctx) - ctx.NData = uint32(nData) + testNet.Build() testNet.Defaults() testNet.ApplyParams(ParamSets["Base"], false) // false) // true) // no msg - testNet.InitWeights(ctx) // get GScale here - testNet.NewState(ctx) + testNet.InitWeights() // get GScale here + testNet.NewState(etime.Train) return testNet } func TestSynValues(t *testing.T) { tol := Tol8 - testNet := newTestNet(ctx, 1) + testNet := newTestNet(1) hidLay := testNet.LayerByName("Hidden") p, err := hidLay.RecvPathBySendName("Input") if err != nil { @@ -205,11 +203,11 @@ func TestSpikeProp(t *testing.T) { pt := net.ConnectLayers(inLay, hidLay, paths.NewOneToOne(), ForwardPath) - net.Build(ctx) + net.Build() net.Defaults() net.ApplyParams(ParamSets["Base"], false) - net.InitExt(ctx) + net.InitExt() pat := tensor.NewFloat32(1, 1) pat.Set(1, 0, 0) @@ -217,19 +215,15 @@ func TestSpikeProp(t *testing.T) { for del := 0; del <= 4; del++ { pt.Params.Com.Delay = uint32(del) pt.Params.Com.MaxDelay = uint32(del) // now need to ensure that >= Delay - net.InitWeights(ctx) // resets Gbuf - net.NewState(ctx) + net.InitWeights() // resets Gbuf + net.NewState(etime.Train) - inLay.ApplyExt(ctx, 0, pat) - - net.NewState(ctx) - ctx.NewState(etime.Train) + inLay.ApplyExt(0, pat) inCyc := 0 hidCyc := 0 for cyc := 0; cyc < 100; cyc++ { net.Cycle() - ctx.CycleInc() // fmt.Println(cyc, Neurons[Ge, hidLay.NeurStIndex, 0], Neurons[GeRaw, hidLay.NeurStIndex, 0]) if Neurons.Value(int(Spike), int(inLay.NeurStIndex), int(0)) > 0 { // fmt.Println("in spike:", cyc) @@ -267,8 +261,9 @@ func StructValues(obj any, vals map[string]float32, key string) { // TestInitWeights tests that 
initializing the weights results in same state func TestInitWeights(t *testing.T) { nData := 1 - testNet := newTestNet(ctx, nData) + testNet := newTestNet(nData) inPats := newInPats() + ctx := testNet.Context() valMapA := make(map[string]float32) valMapB := make(map[string]float32) @@ -279,22 +274,22 @@ func TestInitWeights(t *testing.T) { var vals []float32 valMap := valMapA - for wi := 0; wi < 2; wi++ { + for wi := range 2 { + // fmt.Println("\n########## Pass:", wi) if wi == 1 { valMap = valMapB } testNet.SetRandSeed(42) // critical for ActAvg values - testNet.InitWeights(ctx) - testNet.InitExt(ctx) - for ni := 0; ni < 4; ni++ { - for li := 0; li < 3; li++ { - ly := testNet.Layers[li] - for di := 0; di < nData; di++ { - key := fmt.Sprintf("Layer: %s\tUnit: %d\tDi: %d", ly.Name, ni, di) - for _, vnm := range NeuronVarNames { - ly.UnitValues(&vals, vnm, di) - vkey := key + fmt.Sprintf("\t%s", vnm) - valMap[vkey] = vals[ni] + testNet.InitWeights() + testNet.InitExt() + for li := 0; li < 3; li++ { + ly := testNet.Layers[li] + for di := range nData { + for _, vnm := range NeuronVarNames { + ly.UnitValues(&vals, vnm, di) + for ni := range 4 { + key := fmt.Sprintf("Layer: %s\tUnit: %d\tDi: %d\t%s", ly.Name, ni, di, vnm) + valMap[key] = vals[ni] } } } @@ -309,35 +304,33 @@ func TestInitWeights(t *testing.T) { } } - for pi := 0; pi < 4; pi++ { - ctx.NewState(etime.Train) - testNet.NewState(ctx) + for pi := range 4 { + testNet.NewState(etime.Train) inpat := inPats.SubSpace(pi) - testNet.InitExt(ctx) + testNet.InitExt() for di := 0; di < nData; di++ { - inLay.ApplyExt(ctx, uint32(di), inpat) - outLay.ApplyExt(ctx, uint32(di), inpat) + inLay.ApplyExt(uint32(di), inpat) + outLay.ApplyExt(uint32(di), inpat) } - testNet.ApplyExts(ctx) // key now for GPU + testNet.ApplyExts() // key now for GPU for qtr := 0; qtr < 4; qtr++ { for cyc := 0; cyc < 50; cyc++ { testNet.Cycle() - ctx.CycleInc() } if qtr == 2 { - testNet.MinusPhase(ctx) + testNet.MinusPhase() ctx.NewPhase(false) - testNet.PlusPhaseStart(ctx) + testNet.PlusPhaseStart() } } - testNet.PlusPhase(ctx) - testNet.DWt(ctx) - testNet.WtFromDWt(ctx) + testNet.PlusPhase() + testNet.DWt() + testNet.WtFromDWt() } } - ReportValDiffs(t, Tol8, valMapA, valMapB, "init1", "init2", nil) + ReportValDiffs(t, Tol8, valMapA, valMapB, "init1", "init2") } func TestNetAct(t *testing.T) { @@ -357,8 +350,9 @@ func TestGPUAct(t *testing.T) { // Note: use NetDebugAct for printf debugging of all values -- // "this is only a test" func NetActTest(t *testing.T, tol float32, gpu bool) { - testNet := newTestNet(ctx, 1) - testNet.InitExt(ctx) + testNet := newTestNet(1) + ctx := testNet.Context() + testNet.InitExt() inPats := newInPats() inLay := testNet.LayerByName("Input") @@ -366,7 +360,7 @@ func NetActTest(t *testing.T, tol float32, gpu bool) { outLay := testNet.LayerByName("Output") if gpu { - // testNet.ConfigGPUnoGUI(ctx) + // testNet.ConfigGPUnoGUI() // testNet.GPU.RecFunTimes = true // alt modes // testNet.GPU.CycleByCycle = true // alt modes } @@ -410,27 +404,25 @@ func NetActTest(t *testing.T, tol float32, gpu bool) { cycPerQtr := 50 for pi := 0; pi < 4; pi++ { - testNet.NewState(ctx) - ctx.NewState(etime.Train) + testNet.NewState(etime.Train) inpat := inPats.SubSpace(pi) - testNet.InitExt(ctx) - inLay.ApplyExt(ctx, 0, inpat) - outLay.ApplyExt(ctx, 0, inpat) - testNet.ApplyExts(ctx) // key now for GPU + testNet.InitExt() + inLay.ApplyExt(0, inpat) + outLay.ApplyExt(0, inpat) + testNet.ApplyExts() // key now for GPU for qtr := 0; qtr < 4; qtr++ { for cyc := 0; cyc < 
cycPerQtr; cyc++ { testNet.Cycle() - ctx.CycleInc() // if gpu { // testNet.GPU.SyncNeuronsFromGPU() // } } if qtr == 2 { - testNet.MinusPhase(ctx) + testNet.MinusPhase() ctx.NewPhase(false) - testNet.PlusPhaseStart(ctx) + testNet.PlusPhaseStart() } inLay.UnitValues(&inActs, "Act", 0) @@ -474,7 +466,7 @@ func NetActTest(t *testing.T, tol float32, gpu bool) { CompareFloats(tol, outGis, p1qtr3OutGis, "p1qtr3OutGis", t) } } - testNet.PlusPhase(ctx) + testNet.PlusPhase() } // testNet.GPU.Destroy() @@ -487,7 +479,7 @@ func TestGPUDiffs(t *testing.T) { nonGPUValues := NetDebugAct(t, false, false, 1, false) gpuValues := NetDebugAct(t, false, true, 1, false) // note: this has bad tolerance due to NMDA -- can see that if you raise tol to Tol5 etc - ReportValDiffs(t, Tol4, nonGPUValues, gpuValues, "CPU", "GPU", nil) + ReportValDiffs(t, Tol4, nonGPUValues, gpuValues, "CPU", "GPU") } func TestDebugAct(t *testing.T) { @@ -503,7 +495,7 @@ func TestDebugGPUAct(t *testing.T) { func TestNDataDiffs(t *testing.T) { nd1Values := NetDebugAct(t, false, false, 1, true) nd4Values := NetDebugAct(t, false, false, 4, true) - ReportValDiffs(t, Tol8, nd1Values, nd4Values, "nData = 1", "nData = 4", nil) + ReportValDiffs(t, Tol8, nd1Values, nd4Values, "nData = 1", "nData = 4") } func TestGPUNDataDiffs(t *testing.T) { @@ -512,11 +504,11 @@ func TestGPUNDataDiffs(t *testing.T) { } nd1Values := NetDebugAct(t, false, true, 1, true) nd4Values := NetDebugAct(t, false, true, 4, true) - ReportValDiffs(t, Tol8, nd1Values, nd4Values, "nData = 1", "nData = 4", nil) + ReportValDiffs(t, Tol8, nd1Values, nd4Values, "nData = 1", "nData = 4") } // ReportValDiffs -- reports diffs between a, b values at given tolerance -func ReportValDiffs(t *testing.T, tolerance float32, va, vb map[string]float32, aLabel, bLabel string, exclude []string) { +func ReportValDiffs(t *testing.T, tolerance float32, va, vb map[string]float32, aLabel, bLabel string, exclude ...string) { keys := maps.Keys(va) sort.Strings(keys) nerrs := 0 @@ -552,16 +544,16 @@ func ReportValDiffs(t *testing.T, tolerance float32, va, vb map[string]float32, // and also returns a map of all values and variables that can be used for a more // fine-grained diff test, e.g., see the GPU version. func NetDebugAct(t *testing.T, printValues bool, gpu bool, nData int, initWts bool) map[string]float32 { - testNet := newTestNet(ctx, nData) + testNet := newTestNet(nData) testNet.ApplyParams(ParamSets["FullDecay"], false) - return RunDebugAct(t, ctx, testNet, printValues, gpu, initWts) + return RunDebugAct(t, testNet, printValues, gpu, initWts) } // RunDebugAct runs and prints selected values (if printValues), // and also returns a map of all values and variables that can be used for a more // fine-grained diff test, e.g., see the GPU version. 
-func RunDebugAct(t *testing.T, ctx *Context, testNet *Network, printValues bool, gpu bool, initWts bool) map[string]float32 { - +func RunDebugAct(t *testing.T, testNet *Network, printValues bool, gpu bool, initWts bool) map[string]float32 { + ctx := testNet.Context() nData := int(ctx.NData) valMap := make(map[string]float32) inPats := newInPats() @@ -573,7 +565,7 @@ func RunDebugAct(t *testing.T, ctx *Context, testNet *Network, printValues bool, var vals []float32 if gpu { - // testNet.ConfigGPUnoGUI(ctx) + // testNet.ConfigGPUnoGUI() // testNet.GPU.RecFunTimes = true // testNet.GPU.CycleByCycle = true // key for recording results cycle-by-cycle } @@ -591,27 +583,24 @@ func RunDebugAct(t *testing.T, ctx *Context, testNet *Network, printValues bool, for pi := 0; pi < 4; pi++ { if initWts { testNet.SetRandSeed(42) // critical for ActAvg values - testNet.InitWeights(ctx) - } else { - testNet.NewState(ctx) + testNet.InitWeights() } - ctx.NewState(etime.Train) + testNet.NewState(etime.Train) - testNet.InitExt(ctx) + testNet.InitExt() for di := 0; di < nData; di++ { ppi := (pi + di) % 4 inpat := inPats.SubSpace(ppi) _ = inpat - inLay.ApplyExt(ctx, uint32(di), inpat) - outLay.ApplyExt(ctx, uint32(di), inpat) + inLay.ApplyExt(uint32(di), inpat) + outLay.ApplyExt(uint32(di), inpat) } - testNet.ApplyExts(ctx) // key now for GPU + testNet.ApplyExts() // key now for GPU for qtr := 0; qtr < 4; qtr++ { for cyc := 0; cyc < 50; cyc++ { testNet.Cycle() - ctx.CycleInc() for ni := 0; ni < 4; ni++ { for li := 0; li < 3; li++ { @@ -652,13 +641,13 @@ func RunDebugAct(t *testing.T, ctx *Context, testNet *Network, printValues bool, } } if qtr == 2 { - testNet.MinusPhase(ctx) + testNet.MinusPhase() ctx.NewPhase(false) - testNet.PlusPhaseStart(ctx) + testNet.PlusPhaseStart() } } - testNet.PlusPhase(ctx) + testNet.PlusPhase() pi += nData - 1 } @@ -678,7 +667,8 @@ func TestGPULearn(t *testing.T) { } func NetTestLearn(t *testing.T, tol float32, gpu bool) { - testNet := newTestNet(ctx, 1) + testNet := newTestNet(1) + ctx := testNet.Context() // fmt.Printf("synbanks: %d\n", ctx.NetIndexes.NSynCaBanks) @@ -729,29 +719,27 @@ func NetTestLearn(t *testing.T, tol float32, gpu bool) { testNet.Defaults() testNet.ApplyParams(ParamSets["Base"], false) // always apply base - testNet.InitWeights(ctx) - testNet.InitExt(ctx) + testNet.InitWeights() + testNet.InitExt() if gpu { - // testNet.ConfigGPUnoGUI(ctx) + // testNet.ConfigGPUnoGUI() // testNet.GPU.RecFunTimes = true // alt forms // testNet.GPU.CycleByCycle = true // } for pi := 0; pi < 4; pi++ { - ctx.NewState(etime.Train) - testNet.NewState(ctx) + testNet.NewState(etime.Train) inpat := inPats.SubSpace(pi) - testNet.InitExt(ctx) - inLay.ApplyExt(ctx, 0, inpat) - outLay.ApplyExt(ctx, 0, inpat) - testNet.ApplyExts(ctx) // key now for GPU + testNet.InitExt() + inLay.ApplyExt(0, inpat) + outLay.ApplyExt(0, inpat) + testNet.ApplyExts() // key now for GPU for qtr := 0; qtr < 4; qtr++ { for cyc := 0; cyc < cycPerQtr; cyc++ { testNet.Cycle() - ctx.CycleInc() if gpu { // testNet.GPU.SyncNeuronsFromGPU() } @@ -771,9 +759,9 @@ func NetTestLearn(t *testing.T, tol float32, gpu bool) { } } if qtr == 2 { - testNet.MinusPhase(ctx) + testNet.MinusPhase() ctx.NewPhase(false) - testNet.PlusPhaseStart(ctx) + testNet.PlusPhaseStart() } hidLay.UnitValues(&hidCaP, "NrnCaP", 0) @@ -795,13 +783,13 @@ func NetTestLearn(t *testing.T, tol float32, gpu bool) { } } - testNet.PlusPhase(ctx) + testNet.PlusPhase() if printQtrs { fmt.Printf("=============================\n") } - testNet.DWt(ctx) + 
testNet.DWt() if gpu { // testNet.GPU.SyncSynapsesFromGPU() // testNet.GPU.SyncSynCaFromGPU() @@ -812,7 +800,7 @@ func NetTestLearn(t *testing.T, tol float32, gpu bool) { hiddwt[didx] = hidLay.RecvPaths[0].SynValue("DWt", pi, pi) outdwt[didx] = outLay.RecvPaths[0].SynValue("DWt", pi, pi) - testNet.WtFromDWt(ctx) + testNet.WtFromDWt() if gpu { // testNet.GPU.SyncSynapsesFromGPU() // testNet.GPU.SyncSynCaFromGPU() @@ -847,7 +835,8 @@ func TestGPURLRate(t *testing.T) { } func NetTestRLRate(t *testing.T, tol float32, gpu bool) { - testNet := newTestNet(ctx, 1) + testNet := newTestNet(1) + ctx := testNet.Context() inPats := newInPats() inLay := testNet.LayerByName("Input") hidLay := testNet.LayerByName("Hidden") @@ -901,22 +890,20 @@ func NetTestRLRate(t *testing.T, tol float32, gpu bool) { testNet.Defaults() testNet.ApplyParams(ParamSets["Base"], false) // always apply base hidLay.Params.Learn.RLRate.On.SetBool(true) - testNet.InitWeights(ctx) - testNet.InitExt(ctx) + testNet.InitWeights() + testNet.InitExt() for pi := 0; pi < 4; pi++ { inpat := inPats.SubSpace(pi) - testNet.InitExt(ctx) - inLay.ApplyExt(ctx, 0, inpat) - outLay.ApplyExt(ctx, 0, inpat) - testNet.ApplyExts(ctx) // key now for GPU + testNet.InitExt() + inLay.ApplyExt(0, inpat) + outLay.ApplyExt(0, inpat) + testNet.ApplyExts() // key now for GPU - ctx.NewState(etime.Train) - testNet.NewState(ctx) + testNet.NewState(etime.Train) for qtr := 0; qtr < 4; qtr++ { for cyc := 0; cyc < cycPerQtr; cyc++ { testNet.Cycle() - ctx.CycleInc() // testNet.GPU.SyncNeuronsFromGPU() hidLay.UnitValues(&hidAct, "Act", 0) @@ -934,9 +921,9 @@ func NetTestRLRate(t *testing.T, tol float32, gpu bool) { } } if qtr == 2 { - testNet.MinusPhase(ctx) + testNet.MinusPhase() ctx.NewPhase(false) - testNet.PlusPhaseStart(ctx) + testNet.PlusPhaseStart() } hidLay.UnitValues(&hidCaP, "NrnCaP", 0) @@ -957,7 +944,7 @@ func NetTestRLRate(t *testing.T, tol float32, gpu bool) { fmt.Printf("pat: %v qtr: %v cyc: %v\nhid avgs: %v avgm: %v\nout avgs: %v avgm: %v\n", pi, qtr, ctx.Cycle, hidCaP, hidCaD, outCaP, outCaD) } } - testNet.PlusPhase(ctx) + testNet.PlusPhase() if gpu { // testNet.GPU.SyncNeuronsFromGPU() // RLRate updated after plus } @@ -970,7 +957,7 @@ func NetTestRLRate(t *testing.T, tol float32, gpu bool) { ridx := pi * 4 copy(hidrlrs[ridx:ridx+4], hidRLRate) - testNet.DWt(ctx) + testNet.DWt() if gpu { // testNet.GPU.SyncSynapsesFromGPU() } @@ -980,7 +967,7 @@ func NetTestRLRate(t *testing.T, tol float32, gpu bool) { hiddwt[didx] = hidLay.RecvPaths[0].SynValue("DWt", pi, pi) outdwt[didx] = outLay.RecvPaths[0].SynValue("DWt", pi, pi) - testNet.WtFromDWt(ctx) + testNet.WtFromDWt() if gpu { // testNet.GPU.SyncSynapsesFromGPU() } @@ -1012,25 +999,26 @@ func NetDebugLearn(t *testing.T, printValues bool, gpu bool, maxData, nData int, rand.Seed(1337) if submean { - testNet = newTestNetFull(ctx, maxData) // otherwise no effect + testNet = newTestNetFull(maxData) // otherwise no effect } else { - testNet = newTestNet(ctx, maxData) + testNet = newTestNet(maxData) } testNet.ApplyParams(ParamSets["FullDecay"], false) + ctx := testNet.Context() if submean { testNet.ApplyParams(ParamSets["SubMean"], false) } ctx.NData = uint32(nData) - return RunDebugLearn(t, ctx, testNet, printValues, gpu, initWts, slowAdapt) + return RunDebugLearn(t, testNet, printValues, gpu, initWts, slowAdapt) } // RunDebugLearn prints selected values (if printValues), // and also returns a map of all values and variables that can be used for a more // fine-grained diff test, e.g., see the GPU version. 
-func RunDebugLearn(t *testing.T, ctx *Context, testNet *Network, printValues bool, gpu bool, initWts, slowAdapt bool) map[string]float32 { - +func RunDebugLearn(t *testing.T, testNet *Network, printValues bool, gpu bool, initWts, slowAdapt bool) map[string]float32 { + ctx := testNet.Context() nData := int(ctx.NData) valMap := make(map[string]float32) inPats := newInPats() @@ -1040,7 +1028,7 @@ func RunDebugLearn(t *testing.T, ctx *Context, testNet *Network, printValues boo _, _ = inLay, outLay if gpu { - // testNet.ConfigGPUnoGUI(ctx) + // testNet.ConfigGPUnoGUI() // testNet.GPU.CycleByCycle = true // key for printing results cycle-by-cycle } @@ -1058,43 +1046,40 @@ func RunDebugLearn(t *testing.T, ctx *Context, testNet *Network, printValues boo for pi := 0; pi < 4; pi++ { if initWts { testNet.SetRandSeed(42) // critical for ActAvg values - testNet.InitWeights(ctx) - } else { - testNet.NewState(ctx) + testNet.InitWeights() } - ctx.NewState(etime.Train) + testNet.NewState(etime.Train) - testNet.InitExt(ctx) + testNet.InitExt() for di := 0; di < nData; di++ { ppi := (pi + di) % 4 inpat := inPats.SubSpace(ppi) _ = inpat - inLay.ApplyExt(ctx, uint32(di), inpat) - outLay.ApplyExt(ctx, uint32(di), inpat) + inLay.ApplyExt(uint32(di), inpat) + outLay.ApplyExt(uint32(di), inpat) } - testNet.ApplyExts(ctx) // key now for GPU + testNet.ApplyExts() // key now for GPU for qtr := 0; qtr < 4; qtr++ { for cyc := 0; cyc < 50; cyc++ { testNet.Cycle() - ctx.CycleInc() } if qtr == 2 { - testNet.MinusPhase(ctx) + testNet.MinusPhase() ctx.NewPhase(false) - testNet.PlusPhaseStart(ctx) + testNet.PlusPhaseStart() } } - testNet.PlusPhase(ctx) - testNet.DWt(ctx) + testNet.PlusPhase() + testNet.DWt() if syncAfterWt { - testNet.WtFromDWt(ctx) + testNet.WtFromDWt() if slowAdapt { // testNet.GPU.SyncSynCaFromGPU() // will be sent back and forth - testNet.SlowAdapt(ctx) + testNet.SlowAdapt() } } if gpu { @@ -1151,9 +1136,9 @@ func RunDebugLearn(t *testing.T, ctx *Context, testNet *Network, printValues boo } if !syncAfterWt { - testNet.WtFromDWt(ctx) + testNet.WtFromDWt() if slowAdapt { - testNet.SlowAdapt(ctx) + testNet.SlowAdapt() } if gpu { // testNet.GPU.SyncSynapsesFromGPU() @@ -1176,13 +1161,13 @@ func TestDebugLearn(t *testing.T) { func TestNDataLearn(t *testing.T) { nd1Values := NetDebugLearn(t, false, false, 1, 1, true, false, false) nd4Values := NetDebugLearn(t, false, false, 4, 4, true, false, false) - ReportValDiffs(t, Tol8, nd1Values, nd4Values, "nData = 1", "nData = 4", []string{"DWt", "ActAvg", "DTrgAvg"}) + ReportValDiffs(t, Tol8, nd1Values, nd4Values, "nData = 1", "nData = 4", "DWt", "ActAvg", "DTrgAvg") } func TestNDataMaxDataLearn(t *testing.T) { nd84Values := NetDebugLearn(t, false, false, 8, 4, false, false, false) nd44Values := NetDebugLearn(t, false, false, 4, 4, false, false, false) - ReportValDiffs(t, Tol8, nd84Values, nd44Values, "maxData = 8, nData = 4", "maxData = 4, nData = 4", []string{"DWt", "ActAvg", "DTrgAvg"}) + ReportValDiffs(t, Tol8, nd84Values, nd44Values, "maxData = 8, nData = 4", "maxData = 4, nData = 4", "DWt", "ActAvg", "DTrgAvg") } func TestGPUNDataLearn(t *testing.T) { @@ -1191,7 +1176,7 @@ func TestGPUNDataLearn(t *testing.T) { } nd1Values := NetDebugLearn(t, false, true, 1, 1, true, false, false) nd4Values := NetDebugLearn(t, false, true, 4, 4, true, false, false) - ReportValDiffs(t, Tol8, nd1Values, nd4Values, "nData = 1", "nData = 4", []string{"DWt", "ActAvg", "DTrgAvg"}) + ReportValDiffs(t, Tol8, nd1Values, nd4Values, "nData = 1", "nData = 4", "DWt", "ActAvg", "DTrgAvg") } 
func TestGPUNDataMaxDataLearn(t *testing.T) { @@ -1200,7 +1185,7 @@ func TestGPUNDataMaxDataLearn(t *testing.T) { } nd84Values := NetDebugLearn(t, false, true, 8, 4, false, false, false) nd44Values := NetDebugLearn(t, false, true, 4, 4, false, false, false) - ReportValDiffs(t, Tol8, nd84Values, nd44Values, "maxData = 8, nData = 4", "maxData = 4, nData = 4", []string{"DWt", "ActAvg", "DTrgAvg"}) + ReportValDiffs(t, Tol8, nd84Values, nd44Values, "maxData = 8, nData = 4", "maxData = 4, nData = 4", "DWt", "ActAvg", "DTrgAvg") } func TestGPULearnDiff(t *testing.T) { @@ -1211,7 +1196,7 @@ func TestGPULearnDiff(t *testing.T) { cpuValues := NetDebugLearn(t, false, false, 1, 1, false, false, false) // fmt.Printf("\n#############\nGPU\n") gpuValues := NetDebugLearn(t, false, true, 1, 1, false, false, false) - ReportValDiffs(t, Tol4, cpuValues, gpuValues, "CPU", "GPU", nil) + ReportValDiffs(t, Tol4, cpuValues, gpuValues, "CPU", "GPU") } func TestGPUSubMeanLearn(t *testing.T) { @@ -1223,7 +1208,7 @@ func TestGPUSubMeanLearn(t *testing.T) { // fmt.Printf("\n#############\nGPU\n") gpuValues := NetDebugLearn(t, false, true, 1, 1, false, true, false) // this has bad tolerance, due to GABAB -- similar to NMDA issues - ReportValDiffs(t, Tol3, cpuValues, gpuValues, "CPU", "GPU", nil) + ReportValDiffs(t, Tol3, cpuValues, gpuValues, "CPU", "GPU") } func TestGPUSlowAdaptLearn(t *testing.T) { @@ -1235,16 +1220,16 @@ func TestGPUSlowAdaptLearn(t *testing.T) { // fmt.Printf("\n#############\nGPU\n") gpuValues := NetDebugLearn(t, false, true, 1, 1, false, false, true) // this has bad tolerance, due to GABAB -- similar to NMDA issues - ReportValDiffs(t, Tol3, cpuValues, gpuValues, "CPU", "GPU", nil) + ReportValDiffs(t, Tol3, cpuValues, gpuValues, "CPU", "GPU") } func TestGPUSynCa(t *testing.T) { if os.Getenv("TEST_GPU") != "true" { t.Skip("Set TEST_GPU env var to run GPU tests") } - testNet := newTestNetFull(ctx, 16) + testNet := newTestNetFull(16) _ = testNet - // testNet.ConfigGPUnoGUI(ctx) + // testNet.ConfigGPUnoGUI() // passed := testNet.GPU.TestSynCa() // // if !passed { @@ -1270,15 +1255,16 @@ func TestInhibAct(t *testing.T) { inhibNet.ConnectLayers(hidLay, outLay, one2one, ForwardPath) inhibNet.ConnectLayers(outLay, hidLay, one2one, BackPath) - inhibNet.Build(ctx) + inhibNet.Build() inhibNet.Defaults() inhibNet.ApplyParams(ParamSets["Base"], false) inhibNet.ApplyParams(ParamSets["Base"], false) - inhibNet.InitWeights(ctx) // get GScale - inhibNet.NewState(ctx) + inhibNet.InitWeights() // get GScale + inhibNet.NewState(etime.Train) - inhibNet.InitWeights(ctx) - inhibNet.InitExt(ctx) + inhibNet.InitWeights() + inhibNet.InitExt() + ctx := inhibNet.Context() printCycs := false printQtrs := false @@ -1309,15 +1295,13 @@ func TestInhibAct(t *testing.T) { for pi := 0; pi < 4; pi++ { inpat := inPats.SubSpace(pi) - inLay.ApplyExt(ctx, 0, inpat) - outLay.ApplyExt(ctx, 0, inpat) + inLay.ApplyExt(0, inpat) + outLay.ApplyExt(0, inpat) - inhibNet.NewState(ctx) - ctx.NewState(etime.Train) + inhibNet.NewState(etime.Train) for qtr := 0; qtr < 4; qtr++ { for cyc := 0; cyc < cycPerQtr; cyc++ { inhibNet.Cycle() - ctx.CycleInc() if printCycs { inLay.UnitValues(&inActs, "Act", 0) @@ -1331,9 +1315,9 @@ func TestInhibAct(t *testing.T) { } } if qtr == 2 { - inhibNet.MinusPhase(ctx) + inhibNet.MinusPhase() ctx.NewPhase(false) - inhibNet.PlusPhaseStart(ctx) + inhibNet.PlusPhaseStart() } if printCycs && printQtrs { @@ -1373,7 +1357,7 @@ func TestInhibAct(t *testing.T) { CompareFloats(tol, outGis, qtr3OutGis, "qtr3OutGis", t) } } - 
inhibNet.PlusPhase(ctx) + inhibNet.PlusPhase() if printQtrs { fmt.Printf("=============================\n") @@ -1397,7 +1381,7 @@ func saveToFile(net *Network, t *testing.T) { func TestSendGatherIndexes(t *testing.T) { nData := uint32(3) - net := newTestNet(ctx, int(nData)) + net := newTestNet(int(nData)) maxDel := net.NetIxs().MaxDelay + 1 maxCyc := int32(2 * maxDel) @@ -1563,7 +1547,7 @@ func TestRubiconGiveUp(t *testing.T) { // // // fmt.Printf("Wts Mean: %g\t Var: %g\t SPct: %g\n", mean, vr, spct) // for i := 0; i < nsamp; i++ { -// pj.SWts.InitWeightsSyn(ctx, &nt.Rand, sy, mean, spct) +// pj.SWts.InitWeightsSyn(&nt.Rand, sy, mean, spct) // dt.SetFloat("Wt", i, float64(sy.Wt)) // dt.SetFloat("LWt", i, float64(sy.LWt)) // dt.SetFloat("SWt", i, float64(sy.SWt)) diff --git a/axon/basic_test.goal b/axon/basic_test.goal index 55e3da44..faed9759 100644 --- a/axon/basic_test.goal +++ b/axon/basic_test.goal @@ -91,7 +91,7 @@ var ParamSets = params.Sets{ func newTestNet(nData int) *Network { testNet := NewNetwork("testNet") testNet.SetRandSeed(42) // critical for ActAvg values - testNet.SetMaxData(ctx, nData) + testNet.SetMaxData(nData) inLay := testNet.AddLayer("Input", InputLayer, 4, 1) hidLay := testNet.AddLayer("Hidden", SuperLayer, 4, 1) @@ -102,15 +102,14 @@ func newTestNet(nData int) *Network { testNet.ConnectLayers(hidLay, outLay, paths.NewOneToOne(), ForwardPath) testNet.ConnectLayers(outLay, hidLay, paths.NewOneToOne(), BackPath) - testNet.Rubicon.SetNUSs(ctx, 4, 3) + testNet.Rubicon.SetNUSs(4, 3) testNet.Rubicon.Defaults() testNet.Build() - testNet.Ctx.NData = uint32(nData) testNet.Defaults() testNet.ApplyParams(ParamSets["Base"], false) // false) // true) // no msg - testNet.InitWeights(ctx) // get GScale here - testNet.NewState(ctx) + testNet.InitWeights() // get GScale here + testNet.NewState(etime.Train) return testNet } @@ -118,7 +117,7 @@ func newTestNet(nData int) *Network { func newTestNetFull(nData int) *Network { testNet := NewNetwork("testNetFull") testNet.SetRandSeed(42) // critical for ActAvg values - testNet.SetMaxData(ctx, nData) + testNet.SetMaxData(nData) inLay := testNet.AddLayer("Input", InputLayer, 4, 1) hidLay := testNet.AddLayer("Hidden", SuperLayer, 4, 1) @@ -130,18 +129,17 @@ func newTestNetFull(nData int) *Network { testNet.ConnectLayers(hidLay, outLay, full, ForwardPath) testNet.ConnectLayers(outLay, hidLay, full, BackPath) - testNet.Build(ctx) - ctx.NData = uint32(nData) + testNet.Build() testNet.Defaults() testNet.ApplyParams(ParamSets["Base"], false) // false) // true) // no msg - testNet.InitWeights(ctx) // get GScale here - testNet.NewState(ctx) + testNet.InitWeights() // get GScale here + testNet.NewState(etime.Train) return testNet } func TestSynValues(t *testing.T) { tol := Tol8 - testNet := newTestNet(ctx, 1) + testNet := newTestNet(1) hidLay := testNet.LayerByName("Hidden") p, err := hidLay.RecvPathBySendName("Input") if err != nil { @@ -203,11 +201,11 @@ func TestSpikeProp(t *testing.T) { pt := net.ConnectLayers(inLay, hidLay, paths.NewOneToOne(), ForwardPath) - net.Build(ctx) + net.Build() net.Defaults() net.ApplyParams(ParamSets["Base"], false) - net.InitExt(ctx) + net.InitExt() pat := tensor.NewFloat32(1, 1) pat.Set(1, 0, 0) @@ -215,19 +213,15 @@ func TestSpikeProp(t *testing.T) { for del := 0; del <= 4; del++ { pt.Params.Com.Delay = uint32(del) pt.Params.Com.MaxDelay = uint32(del) // now need to ensure that >= Delay - net.InitWeights(ctx) // resets Gbuf - net.NewState(ctx) + net.InitWeights() // resets Gbuf + net.NewState(etime.Train) - 
inLay.ApplyExt(ctx, 0, pat) - - net.NewState(ctx) - ctx.NewState(etime.Train) + inLay.ApplyExt(0, pat) inCyc := 0 hidCyc := 0 for cyc := 0; cyc < 100; cyc++ { net.Cycle() - ctx.CycleInc() // fmt.Println(cyc, Neurons[Ge, hidLay.NeurStIndex, 0], Neurons[GeRaw, hidLay.NeurStIndex, 0]) if Neurons[Spike, inLay.NeurStIndex, 0] > 0 { // fmt.Println("in spike:", cyc) @@ -265,8 +259,9 @@ func StructValues(obj any, vals map[string]float32, key string) { // TestInitWeights tests that initializing the weights results in same state func TestInitWeights(t *testing.T) { nData := 1 - testNet := newTestNet(ctx, nData) + testNet := newTestNet(nData) inPats := newInPats() + ctx := testNet.Context() valMapA := make(map[string]float32) valMapB := make(map[string]float32) @@ -277,22 +272,22 @@ func TestInitWeights(t *testing.T) { var vals []float32 valMap := valMapA - for wi := 0; wi < 2; wi++ { + for wi := range 2 { + // fmt.Println("\n########## Pass:", wi) if wi == 1 { valMap = valMapB } testNet.SetRandSeed(42) // critical for ActAvg values - testNet.InitWeights(ctx) - testNet.InitExt(ctx) - for ni := 0; ni < 4; ni++ { - for li := 0; li < 3; li++ { - ly := testNet.Layers[li] - for di := 0; di < nData; di++ { - key := fmt.Sprintf("Layer: %s\tUnit: %d\tDi: %d", ly.Name, ni, di) - for _, vnm := range NeuronVarNames { - ly.UnitValues(&vals, vnm, di) - vkey := key + fmt.Sprintf("\t%s", vnm) - valMap[vkey] = vals[ni] + testNet.InitWeights() + testNet.InitExt() + for li := 0; li < 3; li++ { + ly := testNet.Layers[li] + for di := range nData { + for _, vnm := range NeuronVarNames { + ly.UnitValues(&vals, vnm, di) + for ni := range 4 { + key := fmt.Sprintf("Layer: %s\tUnit: %d\tDi: %d\t%s", ly.Name, ni, di, vnm) + valMap[key] = vals[ni] } } } @@ -307,35 +302,33 @@ func TestInitWeights(t *testing.T) { } } - for pi := 0; pi < 4; pi++ { - ctx.NewState(etime.Train) - testNet.NewState(ctx) + for pi := range 4 { + testNet.NewState(etime.Train) inpat := inPats.SubSpace(pi) - testNet.InitExt(ctx) + testNet.InitExt() for di := 0; di < nData; di++ { - inLay.ApplyExt(ctx, uint32(di), inpat) - outLay.ApplyExt(ctx, uint32(di), inpat) + inLay.ApplyExt(uint32(di), inpat) + outLay.ApplyExt(uint32(di), inpat) } - testNet.ApplyExts(ctx) // key now for GPU + testNet.ApplyExts() // key now for GPU for qtr := 0; qtr < 4; qtr++ { for cyc := 0; cyc < 50; cyc++ { testNet.Cycle() - ctx.CycleInc() } if qtr == 2 { - testNet.MinusPhase(ctx) + testNet.MinusPhase() ctx.NewPhase(false) - testNet.PlusPhaseStart(ctx) + testNet.PlusPhaseStart() } } - testNet.PlusPhase(ctx) - testNet.DWt(ctx) - testNet.WtFromDWt(ctx) + testNet.PlusPhase() + testNet.DWt() + testNet.WtFromDWt() } } - ReportValDiffs(t, Tol8, valMapA, valMapB, "init1", "init2", nil) + ReportValDiffs(t, Tol8, valMapA, valMapB, "init1", "init2") } func TestNetAct(t *testing.T) { @@ -355,8 +348,9 @@ func TestGPUAct(t *testing.T) { // Note: use NetDebugAct for printf debugging of all values -- // "this is only a test" func NetActTest(t *testing.T, tol float32, gpu bool) { - testNet := newTestNet(ctx, 1) - testNet.InitExt(ctx) + testNet := newTestNet(1) + ctx := testNet.Context() + testNet.InitExt() inPats := newInPats() inLay := testNet.LayerByName("Input") @@ -364,7 +358,7 @@ func NetActTest(t *testing.T, tol float32, gpu bool) { outLay := testNet.LayerByName("Output") if gpu { - // testNet.ConfigGPUnoGUI(ctx) + // testNet.ConfigGPUnoGUI() // testNet.GPU.RecFunTimes = true // alt modes // testNet.GPU.CycleByCycle = true // alt modes } @@ -408,27 +402,25 @@ func NetActTest(t *testing.T, tol 
float32, gpu bool) { cycPerQtr := 50 for pi := 0; pi < 4; pi++ { - testNet.NewState(ctx) - ctx.NewState(etime.Train) + testNet.NewState(etime.Train) inpat := inPats.SubSpace(pi) - testNet.InitExt(ctx) - inLay.ApplyExt(ctx, 0, inpat) - outLay.ApplyExt(ctx, 0, inpat) - testNet.ApplyExts(ctx) // key now for GPU + testNet.InitExt() + inLay.ApplyExt(0, inpat) + outLay.ApplyExt(0, inpat) + testNet.ApplyExts() // key now for GPU for qtr := 0; qtr < 4; qtr++ { for cyc := 0; cyc < cycPerQtr; cyc++ { testNet.Cycle() - ctx.CycleInc() // if gpu { // testNet.GPU.SyncNeuronsFromGPU() // } } if qtr == 2 { - testNet.MinusPhase(ctx) + testNet.MinusPhase() ctx.NewPhase(false) - testNet.PlusPhaseStart(ctx) + testNet.PlusPhaseStart() } inLay.UnitValues(&inActs, "Act", 0) @@ -472,7 +464,7 @@ func NetActTest(t *testing.T, tol float32, gpu bool) { CompareFloats(tol, outGis, p1qtr3OutGis, "p1qtr3OutGis", t) } } - testNet.PlusPhase(ctx) + testNet.PlusPhase() } // testNet.GPU.Destroy() @@ -485,7 +477,7 @@ func TestGPUDiffs(t *testing.T) { nonGPUValues := NetDebugAct(t, false, false, 1, false) gpuValues := NetDebugAct(t, false, true, 1, false) // note: this has bad tolerance due to NMDA -- can see that if you raise tol to Tol5 etc - ReportValDiffs(t, Tol4, nonGPUValues, gpuValues, "CPU", "GPU", nil) + ReportValDiffs(t, Tol4, nonGPUValues, gpuValues, "CPU", "GPU") } func TestDebugAct(t *testing.T) { @@ -501,7 +493,7 @@ func TestDebugGPUAct(t *testing.T) { func TestNDataDiffs(t *testing.T) { nd1Values := NetDebugAct(t, false, false, 1, true) nd4Values := NetDebugAct(t, false, false, 4, true) - ReportValDiffs(t, Tol8, nd1Values, nd4Values, "nData = 1", "nData = 4", nil) + ReportValDiffs(t, Tol8, nd1Values, nd4Values, "nData = 1", "nData = 4") } func TestGPUNDataDiffs(t *testing.T) { @@ -510,11 +502,11 @@ func TestGPUNDataDiffs(t *testing.T) { } nd1Values := NetDebugAct(t, false, true, 1, true) nd4Values := NetDebugAct(t, false, true, 4, true) - ReportValDiffs(t, Tol8, nd1Values, nd4Values, "nData = 1", "nData = 4", nil) + ReportValDiffs(t, Tol8, nd1Values, nd4Values, "nData = 1", "nData = 4") } // ReportValDiffs -- reports diffs between a, b values at given tolerance -func ReportValDiffs(t *testing.T, tolerance float32, va, vb map[string]float32, aLabel, bLabel string, exclude []string) { +func ReportValDiffs(t *testing.T, tolerance float32, va, vb map[string]float32, aLabel, bLabel string, exclude ...string) { keys := maps.Keys(va) sort.Strings(keys) nerrs := 0 @@ -550,16 +542,16 @@ func ReportValDiffs(t *testing.T, tolerance float32, va, vb map[string]float32, // and also returns a map of all values and variables that can be used for a more // fine-grained diff test, e.g., see the GPU version. func NetDebugAct(t *testing.T, printValues bool, gpu bool, nData int, initWts bool) map[string]float32 { - testNet := newTestNet(ctx, nData) + testNet := newTestNet(nData) testNet.ApplyParams(ParamSets["FullDecay"], false) - return RunDebugAct(t, ctx, testNet, printValues, gpu, initWts) + return RunDebugAct(t, testNet, printValues, gpu, initWts) } // RunDebugAct runs and prints selected values (if printValues), // and also returns a map of all values and variables that can be used for a more // fine-grained diff test, e.g., see the GPU version. 
-func RunDebugAct(t *testing.T, ctx *Context, testNet *Network, printValues bool, gpu bool, initWts bool) map[string]float32 { - +func RunDebugAct(t *testing.T, testNet *Network, printValues bool, gpu bool, initWts bool) map[string]float32 { + ctx := testNet.Context() nData := int(ctx.NData) valMap := make(map[string]float32) inPats := newInPats() @@ -571,7 +563,7 @@ func RunDebugAct(t *testing.T, ctx *Context, testNet *Network, printValues bool, var vals []float32 if gpu { - // testNet.ConfigGPUnoGUI(ctx) + // testNet.ConfigGPUnoGUI() // testNet.GPU.RecFunTimes = true // testNet.GPU.CycleByCycle = true // key for recording results cycle-by-cycle } @@ -589,27 +581,24 @@ func RunDebugAct(t *testing.T, ctx *Context, testNet *Network, printValues bool, for pi := 0; pi < 4; pi++ { if initWts { testNet.SetRandSeed(42) // critical for ActAvg values - testNet.InitWeights(ctx) - } else { - testNet.NewState(ctx) + testNet.InitWeights() } - ctx.NewState(etime.Train) + testNet.NewState(etime.Train) - testNet.InitExt(ctx) + testNet.InitExt() for di := 0; di < nData; di++ { ppi := (pi + di) % 4 inpat := inPats.SubSpace(ppi) _ = inpat - inLay.ApplyExt(ctx, uint32(di), inpat) - outLay.ApplyExt(ctx, uint32(di), inpat) + inLay.ApplyExt(uint32(di), inpat) + outLay.ApplyExt(uint32(di), inpat) } - testNet.ApplyExts(ctx) // key now for GPU + testNet.ApplyExts() // key now for GPU for qtr := 0; qtr < 4; qtr++ { for cyc := 0; cyc < 50; cyc++ { testNet.Cycle() - ctx.CycleInc() for ni := 0; ni < 4; ni++ { for li := 0; li < 3; li++ { @@ -650,13 +639,13 @@ func RunDebugAct(t *testing.T, ctx *Context, testNet *Network, printValues bool, } } if qtr == 2 { - testNet.MinusPhase(ctx) + testNet.MinusPhase() ctx.NewPhase(false) - testNet.PlusPhaseStart(ctx) + testNet.PlusPhaseStart() } } - testNet.PlusPhase(ctx) + testNet.PlusPhase() pi += nData - 1 } @@ -676,7 +665,8 @@ func TestGPULearn(t *testing.T) { } func NetTestLearn(t *testing.T, tol float32, gpu bool) { - testNet := newTestNet(ctx, 1) + testNet := newTestNet(1) + ctx := testNet.Context() // fmt.Printf("synbanks: %d\n", ctx.NetIndexes.NSynCaBanks) @@ -727,29 +717,27 @@ func NetTestLearn(t *testing.T, tol float32, gpu bool) { testNet.Defaults() testNet.ApplyParams(ParamSets["Base"], false) // always apply base - testNet.InitWeights(ctx) - testNet.InitExt(ctx) + testNet.InitWeights() + testNet.InitExt() if gpu { - // testNet.ConfigGPUnoGUI(ctx) + // testNet.ConfigGPUnoGUI() // testNet.GPU.RecFunTimes = true // alt forms // testNet.GPU.CycleByCycle = true // } for pi := 0; pi < 4; pi++ { - ctx.NewState(etime.Train) - testNet.NewState(ctx) + testNet.NewState(etime.Train) inpat := inPats.SubSpace(pi) - testNet.InitExt(ctx) - inLay.ApplyExt(ctx, 0, inpat) - outLay.ApplyExt(ctx, 0, inpat) - testNet.ApplyExts(ctx) // key now for GPU + testNet.InitExt() + inLay.ApplyExt(0, inpat) + outLay.ApplyExt(0, inpat) + testNet.ApplyExts() // key now for GPU for qtr := 0; qtr < 4; qtr++ { for cyc := 0; cyc < cycPerQtr; cyc++ { testNet.Cycle() - ctx.CycleInc() if gpu { // testNet.GPU.SyncNeuronsFromGPU() } @@ -769,9 +757,9 @@ func NetTestLearn(t *testing.T, tol float32, gpu bool) { } } if qtr == 2 { - testNet.MinusPhase(ctx) + testNet.MinusPhase() ctx.NewPhase(false) - testNet.PlusPhaseStart(ctx) + testNet.PlusPhaseStart() } hidLay.UnitValues(&hidCaP, "NrnCaP", 0) @@ -793,13 +781,13 @@ func NetTestLearn(t *testing.T, tol float32, gpu bool) { } } - testNet.PlusPhase(ctx) + testNet.PlusPhase() if printQtrs { fmt.Printf("=============================\n") } - testNet.DWt(ctx) + 
testNet.DWt() if gpu { // testNet.GPU.SyncSynapsesFromGPU() // testNet.GPU.SyncSynCaFromGPU() @@ -810,7 +798,7 @@ func NetTestLearn(t *testing.T, tol float32, gpu bool) { hiddwt[didx] = hidLay.RecvPaths[0].SynValue("DWt", pi, pi) outdwt[didx] = outLay.RecvPaths[0].SynValue("DWt", pi, pi) - testNet.WtFromDWt(ctx) + testNet.WtFromDWt() if gpu { // testNet.GPU.SyncSynapsesFromGPU() // testNet.GPU.SyncSynCaFromGPU() @@ -845,7 +833,8 @@ func TestGPURLRate(t *testing.T) { } func NetTestRLRate(t *testing.T, tol float32, gpu bool) { - testNet := newTestNet(ctx, 1) + testNet := newTestNet(1) + ctx := testNet.Context() inPats := newInPats() inLay := testNet.LayerByName("Input") hidLay := testNet.LayerByName("Hidden") @@ -899,22 +888,20 @@ func NetTestRLRate(t *testing.T, tol float32, gpu bool) { testNet.Defaults() testNet.ApplyParams(ParamSets["Base"], false) // always apply base hidLay.Params.Learn.RLRate.On.SetBool(true) - testNet.InitWeights(ctx) - testNet.InitExt(ctx) + testNet.InitWeights() + testNet.InitExt() for pi := 0; pi < 4; pi++ { inpat := inPats.SubSpace(pi) - testNet.InitExt(ctx) - inLay.ApplyExt(ctx, 0, inpat) - outLay.ApplyExt(ctx, 0, inpat) - testNet.ApplyExts(ctx) // key now for GPU + testNet.InitExt() + inLay.ApplyExt(0, inpat) + outLay.ApplyExt(0, inpat) + testNet.ApplyExts() // key now for GPU - ctx.NewState(etime.Train) - testNet.NewState(ctx) + testNet.NewState(etime.Train) for qtr := 0; qtr < 4; qtr++ { for cyc := 0; cyc < cycPerQtr; cyc++ { testNet.Cycle() - ctx.CycleInc() // testNet.GPU.SyncNeuronsFromGPU() hidLay.UnitValues(&hidAct, "Act", 0) @@ -932,9 +919,9 @@ func NetTestRLRate(t *testing.T, tol float32, gpu bool) { } } if qtr == 2 { - testNet.MinusPhase(ctx) + testNet.MinusPhase() ctx.NewPhase(false) - testNet.PlusPhaseStart(ctx) + testNet.PlusPhaseStart() } hidLay.UnitValues(&hidCaP, "NrnCaP", 0) @@ -955,7 +942,7 @@ func NetTestRLRate(t *testing.T, tol float32, gpu bool) { fmt.Printf("pat: %v qtr: %v cyc: %v\nhid avgs: %v avgm: %v\nout avgs: %v avgm: %v\n", pi, qtr, ctx.Cycle, hidCaP, hidCaD, outCaP, outCaD) } } - testNet.PlusPhase(ctx) + testNet.PlusPhase() if gpu { // testNet.GPU.SyncNeuronsFromGPU() // RLRate updated after plus } @@ -968,7 +955,7 @@ func NetTestRLRate(t *testing.T, tol float32, gpu bool) { ridx := pi * 4 copy(hidrlrs[ridx:ridx+4], hidRLRate) - testNet.DWt(ctx) + testNet.DWt() if gpu { // testNet.GPU.SyncSynapsesFromGPU() } @@ -978,7 +965,7 @@ func NetTestRLRate(t *testing.T, tol float32, gpu bool) { hiddwt[didx] = hidLay.RecvPaths[0].SynValue("DWt", pi, pi) outdwt[didx] = outLay.RecvPaths[0].SynValue("DWt", pi, pi) - testNet.WtFromDWt(ctx) + testNet.WtFromDWt() if gpu { // testNet.GPU.SyncSynapsesFromGPU() } @@ -1010,25 +997,26 @@ func NetDebugLearn(t *testing.T, printValues bool, gpu bool, maxData, nData int, rand.Seed(1337) if submean { - testNet = newTestNetFull(ctx, maxData) // otherwise no effect + testNet = newTestNetFull(maxData) // otherwise no effect } else { - testNet = newTestNet(ctx, maxData) + testNet = newTestNet(maxData) } testNet.ApplyParams(ParamSets["FullDecay"], false) + ctx := testNet.Context() if submean { testNet.ApplyParams(ParamSets["SubMean"], false) } ctx.NData = uint32(nData) - return RunDebugLearn(t, ctx, testNet, printValues, gpu, initWts, slowAdapt) + return RunDebugLearn(t, testNet, printValues, gpu, initWts, slowAdapt) } // RunDebugLearn prints selected values (if printValues), // and also returns a map of all values and variables that can be used for a more // fine-grained diff test, e.g., see the GPU version. 
-func RunDebugLearn(t *testing.T, ctx *Context, testNet *Network, printValues bool, gpu bool, initWts, slowAdapt bool) map[string]float32 { - +func RunDebugLearn(t *testing.T, testNet *Network, printValues bool, gpu bool, initWts, slowAdapt bool) map[string]float32 { + ctx := testNet.Context() nData := int(ctx.NData) valMap := make(map[string]float32) inPats := newInPats() @@ -1038,7 +1026,7 @@ func RunDebugLearn(t *testing.T, ctx *Context, testNet *Network, printValues boo _, _ = inLay, outLay if gpu { - // testNet.ConfigGPUnoGUI(ctx) + // testNet.ConfigGPUnoGUI() // testNet.GPU.CycleByCycle = true // key for printing results cycle-by-cycle } @@ -1056,43 +1044,40 @@ func RunDebugLearn(t *testing.T, ctx *Context, testNet *Network, printValues boo for pi := 0; pi < 4; pi++ { if initWts { testNet.SetRandSeed(42) // critical for ActAvg values - testNet.InitWeights(ctx) - } else { - testNet.NewState(ctx) + testNet.InitWeights() } - ctx.NewState(etime.Train) + testNet.NewState(etime.Train) - testNet.InitExt(ctx) + testNet.InitExt() for di := 0; di < nData; di++ { ppi := (pi + di) % 4 inpat := inPats.SubSpace(ppi) _ = inpat - inLay.ApplyExt(ctx, uint32(di), inpat) - outLay.ApplyExt(ctx, uint32(di), inpat) + inLay.ApplyExt(uint32(di), inpat) + outLay.ApplyExt(uint32(di), inpat) } - testNet.ApplyExts(ctx) // key now for GPU + testNet.ApplyExts() // key now for GPU for qtr := 0; qtr < 4; qtr++ { for cyc := 0; cyc < 50; cyc++ { testNet.Cycle() - ctx.CycleInc() } if qtr == 2 { - testNet.MinusPhase(ctx) + testNet.MinusPhase() ctx.NewPhase(false) - testNet.PlusPhaseStart(ctx) + testNet.PlusPhaseStart() } } - testNet.PlusPhase(ctx) - testNet.DWt(ctx) + testNet.PlusPhase() + testNet.DWt() if syncAfterWt { - testNet.WtFromDWt(ctx) + testNet.WtFromDWt() if slowAdapt { // testNet.GPU.SyncSynCaFromGPU() // will be sent back and forth - testNet.SlowAdapt(ctx) + testNet.SlowAdapt() } } if gpu { @@ -1149,9 +1134,9 @@ func RunDebugLearn(t *testing.T, ctx *Context, testNet *Network, printValues boo } if !syncAfterWt { - testNet.WtFromDWt(ctx) + testNet.WtFromDWt() if slowAdapt { - testNet.SlowAdapt(ctx) + testNet.SlowAdapt() } if gpu { // testNet.GPU.SyncSynapsesFromGPU() @@ -1174,13 +1159,13 @@ func TestDebugLearn(t *testing.T) { func TestNDataLearn(t *testing.T) { nd1Values := NetDebugLearn(t, false, false, 1, 1, true, false, false) nd4Values := NetDebugLearn(t, false, false, 4, 4, true, false, false) - ReportValDiffs(t, Tol8, nd1Values, nd4Values, "nData = 1", "nData = 4", []string{"DWt", "ActAvg", "DTrgAvg"}) + ReportValDiffs(t, Tol8, nd1Values, nd4Values, "nData = 1", "nData = 4", "DWt", "ActAvg", "DTrgAvg") } func TestNDataMaxDataLearn(t *testing.T) { nd84Values := NetDebugLearn(t, false, false, 8, 4, false, false, false) nd44Values := NetDebugLearn(t, false, false, 4, 4, false, false, false) - ReportValDiffs(t, Tol8, nd84Values, nd44Values, "maxData = 8, nData = 4", "maxData = 4, nData = 4", []string{"DWt", "ActAvg", "DTrgAvg"}) + ReportValDiffs(t, Tol8, nd84Values, nd44Values, "maxData = 8, nData = 4", "maxData = 4, nData = 4", "DWt", "ActAvg", "DTrgAvg") } func TestGPUNDataLearn(t *testing.T) { @@ -1189,7 +1174,7 @@ func TestGPUNDataLearn(t *testing.T) { } nd1Values := NetDebugLearn(t, false, true, 1, 1, true, false, false) nd4Values := NetDebugLearn(t, false, true, 4, 4, true, false, false) - ReportValDiffs(t, Tol8, nd1Values, nd4Values, "nData = 1", "nData = 4", []string{"DWt", "ActAvg", "DTrgAvg"}) + ReportValDiffs(t, Tol8, nd1Values, nd4Values, "nData = 1", "nData = 4", "DWt", "ActAvg", "DTrgAvg") } 
func TestGPUNDataMaxDataLearn(t *testing.T) { @@ -1198,7 +1183,7 @@ func TestGPUNDataMaxDataLearn(t *testing.T) { } nd84Values := NetDebugLearn(t, false, true, 8, 4, false, false, false) nd44Values := NetDebugLearn(t, false, true, 4, 4, false, false, false) - ReportValDiffs(t, Tol8, nd84Values, nd44Values, "maxData = 8, nData = 4", "maxData = 4, nData = 4", []string{"DWt", "ActAvg", "DTrgAvg"}) + ReportValDiffs(t, Tol8, nd84Values, nd44Values, "maxData = 8, nData = 4", "maxData = 4, nData = 4", "DWt", "ActAvg", "DTrgAvg") } func TestGPULearnDiff(t *testing.T) { @@ -1209,7 +1194,7 @@ func TestGPULearnDiff(t *testing.T) { cpuValues := NetDebugLearn(t, false, false, 1, 1, false, false, false) // fmt.Printf("\n#############\nGPU\n") gpuValues := NetDebugLearn(t, false, true, 1, 1, false, false, false) - ReportValDiffs(t, Tol4, cpuValues, gpuValues, "CPU", "GPU", nil) + ReportValDiffs(t, Tol4, cpuValues, gpuValues, "CPU", "GPU") } func TestGPUSubMeanLearn(t *testing.T) { @@ -1221,7 +1206,7 @@ func TestGPUSubMeanLearn(t *testing.T) { // fmt.Printf("\n#############\nGPU\n") gpuValues := NetDebugLearn(t, false, true, 1, 1, false, true, false) // this has bad tolerance, due to GABAB -- similar to NMDA issues - ReportValDiffs(t, Tol3, cpuValues, gpuValues, "CPU", "GPU", nil) + ReportValDiffs(t, Tol3, cpuValues, gpuValues, "CPU", "GPU") } func TestGPUSlowAdaptLearn(t *testing.T) { @@ -1233,16 +1218,16 @@ func TestGPUSlowAdaptLearn(t *testing.T) { // fmt.Printf("\n#############\nGPU\n") gpuValues := NetDebugLearn(t, false, true, 1, 1, false, false, true) // this has bad tolerance, due to GABAB -- similar to NMDA issues - ReportValDiffs(t, Tol3, cpuValues, gpuValues, "CPU", "GPU", nil) + ReportValDiffs(t, Tol3, cpuValues, gpuValues, "CPU", "GPU") } func TestGPUSynCa(t *testing.T) { if os.Getenv("TEST_GPU") != "true" { t.Skip("Set TEST_GPU env var to run GPU tests") } - testNet := newTestNetFull(ctx, 16) + testNet := newTestNetFull(16) _ = testNet - // testNet.ConfigGPUnoGUI(ctx) + // testNet.ConfigGPUnoGUI() // passed := testNet.GPU.TestSynCa() // if !passed { // t.Errorf("GPU SynCa write failed\n") @@ -1267,15 +1252,16 @@ func TestInhibAct(t *testing.T) { inhibNet.ConnectLayers(hidLay, outLay, one2one, ForwardPath) inhibNet.ConnectLayers(outLay, hidLay, one2one, BackPath) - inhibNet.Build(ctx) + inhibNet.Build() inhibNet.Defaults() inhibNet.ApplyParams(ParamSets["Base"], false) inhibNet.ApplyParams(ParamSets["Base"], false) - inhibNet.InitWeights(ctx) // get GScale - inhibNet.NewState(ctx) + inhibNet.InitWeights() // get GScale + inhibNet.NewState(etime.Train) - inhibNet.InitWeights(ctx) - inhibNet.InitExt(ctx) + inhibNet.InitWeights() + inhibNet.InitExt() + ctx := inhibNet.Context() printCycs := false printQtrs := false @@ -1306,15 +1292,13 @@ func TestInhibAct(t *testing.T) { for pi := 0; pi < 4; pi++ { inpat := inPats.SubSpace(pi) - inLay.ApplyExt(ctx, 0, inpat) - outLay.ApplyExt(ctx, 0, inpat) + inLay.ApplyExt(0, inpat) + outLay.ApplyExt(0, inpat) - inhibNet.NewState(ctx) - ctx.NewState(etime.Train) + inhibNet.NewState(etime.Train) for qtr := 0; qtr < 4; qtr++ { for cyc := 0; cyc < cycPerQtr; cyc++ { inhibNet.Cycle() - ctx.CycleInc() if printCycs { inLay.UnitValues(&inActs, "Act", 0) @@ -1328,9 +1312,9 @@ func TestInhibAct(t *testing.T) { } } if qtr == 2 { - inhibNet.MinusPhase(ctx) + inhibNet.MinusPhase() ctx.NewPhase(false) - inhibNet.PlusPhaseStart(ctx) + inhibNet.PlusPhaseStart() } if printCycs && printQtrs { @@ -1370,7 +1354,7 @@ func TestInhibAct(t *testing.T) { CompareFloats(tol, outGis, 
qtr3OutGis, "qtr3OutGis", t) } } - inhibNet.PlusPhase(ctx) + inhibNet.PlusPhase() if printQtrs { fmt.Printf("=============================\n") @@ -1394,7 +1378,7 @@ func saveToFile(net *Network, t *testing.T) { func TestSendGatherIndexes(t *testing.T) { nData := uint32(3) - net := newTestNet(ctx, int(nData)) + net := newTestNet(int(nData)) maxDel := net.NetIxs().MaxDelay + 1 maxCyc := int32(2 * maxDel) @@ -1549,7 +1533,7 @@ func TestRubiconGiveUp(t *testing.T) { // // // fmt.Printf("Wts Mean: %g\t Var: %g\t SPct: %g\n", mean, vr, spct) // for i := 0; i < nsamp; i++ { -// pj.SWts.InitWeightsSyn(ctx, &nt.Rand, sy, mean, spct) +// pj.SWts.InitWeightsSyn(&nt.Rand, sy, mean, spct) // dt.SetFloat("Wt", i, float64(sy.Wt)) // dt.SetFloat("LWt", i, float64(sy.LWt)) // dt.SetFloat("SWt", i, float64(sy.SWt)) diff --git a/axon/context.go b/axon/context.go index 7855c137..e32b563f 100644 --- a/axon/context.go +++ b/axon/context.go @@ -17,6 +17,7 @@ import ( // It is passed around to all relevant computational functions, // and is updated on the CPU and synced to the GPU after every cycle. // It contains timing, Testing vs. Training mode, random number context, etc. +// There is one canonical instance on the network as Ctx type Context struct { // number of data parallel items to process currently. @@ -154,7 +155,7 @@ func (ctx *Context) Reset() { ctx.Defaults() } // ctx.RandCtr.Reset() - GlobalsReset(ctx) + GlobalsReset() } // NewContext returns a new Time struct with default parameters diff --git a/axon/hip_net.go b/axon/hip_net.go index 53e87ef8..109ebfb4 100644 --- a/axon/hip_net.go +++ b/axon/hip_net.go @@ -231,7 +231,7 @@ func (net *Network) ConfigLoopsHip(ctx *Context, man *looper.Manager, hip *HipCo ca3FromDg.Params.PathScale.Rel = dgPjScale * (1 - hip.MossyDelta) // turn off DG input to CA3 in first quarter - net.InitGScale(ctx) // update computed scaling factors + net.InitGScale() // update computed scaling factors // net.GPU.SyncParamsToGPU() // todo: }) beta1 := cyc.EventByName("Beta1") @@ -241,7 +241,7 @@ func (net *Network) ConfigLoopsHip(ctx *Context, man *looper.Manager, hip *HipCo if ctx.Mode == etime.Test { ca3FromDg.Params.PathScale.Rel = dgPjScale * (1 - hip.MossyDeltaTest) } - net.InitGScale(ctx) // update computed scaling factors + net.InitGScale() // update computed scaling factors // net.GPU.SyncParamsToGPU() // TODO: }) plus := cyc.EventByName("PlusPhase") @@ -260,19 +260,19 @@ func (net *Network) ConfigLoopsHip(ctx *Context, man *looper.Manager, hip *HipCo // if hip.EC5ClampThr > 0 { // stats.Binarize(tmpValues, tensor.NewFloat64Scalar(hip.EC5ClampThr)) // } - ec5.ApplyExt1D32(ctx, di, tmpValues) + ec5.ApplyExt1D32(di, tmpValues) } } } - net.InitGScale(ctx) // update computed scaling factors + net.InitGScale() // update computed scaling factors // net.GPU.SyncParamsToGPU() - net.ApplyExts(ctx) // essential for GPU + net.ApplyExts() // essential for GPU }) trl := stack.Loops[etime.Trial] trl.OnEnd.Prepend("HipPlusPhase:End", func() { ca1FromCa3.Params.PathScale.Rel = hip.ThetaHigh - net.InitGScale(ctx) // update computed scaling factors + net.InitGScale() // update computed scaling factors // net.GPU.SyncParamsToGPU() }) } diff --git a/axon/layer-cpu.go b/axon/layer-cpu.go index b9386779..c39a23f1 100644 --- a/axon/layer-cpu.go +++ b/axon/layer-cpu.go @@ -22,7 +22,7 @@ import ( //////// Phase-level -// todo: all of this could be moved to layer params: +// todo: all of this should be moved to layer params: // NewState handles all initialization at start of new input pattern. 
// Does NOT call InitGScale() @@ -349,8 +349,7 @@ func (ly *Layer) PhaseDiffFromActs(ctx *Context) { } } -////////////////////////////////////////////////////////////////////////////////////// -// Learning +//////// Learning // DTrgSubMean subtracts the mean from DTrgAvg values // Called by TrgAvgFromD diff --git a/axon/layer-cpu.goal b/axon/layer-cpu.goal index 3c7b42dd..4e467688 100644 --- a/axon/layer-cpu.goal +++ b/axon/layer-cpu.goal @@ -20,7 +20,7 @@ import ( //////// Phase-level -// todo: all of this could be moved to layer params: +// todo: all of this should be moved to layer params: // NewState handles all initialization at start of new input pattern. // Does NOT call InitGScale() @@ -347,8 +347,7 @@ func (ly *Layer) PhaseDiffFromActs(ctx *Context) { } } -////////////////////////////////////////////////////////////////////////////////////// -// Learning +//////// Learning // DTrgSubMean subtracts the mean from DTrgAvg values // Called by TrgAvgFromD diff --git a/axon/layer.go b/axon/layer.go index e3b0b9c2..71ea7650 100644 --- a/axon/layer.go +++ b/axon/layer.go @@ -189,8 +189,7 @@ func JsonToParams(b []byte) string { // note: all basic computation can be performed on layer-level and path level -////////////////////////////////////////////////////////////////////////////////////// -// Init methods +//////// Init methods // InitWeights initializes the weight values in the network, i.e., resetting learning // Also calls InitActs @@ -396,12 +395,11 @@ func (ly *Layer) InitWtSym(ctx *Context) { } } -////////////////////////////////////////////////////////////////////////////////////// -// ApplyExt +//////// ApplyExt // InitExt initializes external input state. // Should be called prior to ApplyExt on all layers receiving Ext input. -func (ly *Layer) InitExt(ctx *Context) { +func (ly *Layer) InitExt() { if !ly.Type.IsExt() { return } @@ -412,7 +410,7 @@ func (ly *Layer) InitExt(ctx *Context) { continue } for di := uint32(0); di < ly.MaxData; di++ { - ly.Params.InitExt(ctx, ni, di) + ly.Params.InitExt(ni, di) Exts.Set(-1, int(ly.Params.Indexes.ExtsSt+lni), int(di)) // missing by default } } @@ -430,23 +428,23 @@ func (ly *Layer) InitExt(ctx *Context) { // otherwise it goes in Ext. // Also sets the Exts values on layer, which are used for the GPU version, // which requires calling the network ApplyExts() method -- is a no-op for CPU. -func (ly *Layer) ApplyExt(ctx *Context, di uint32, ext tensor.Tensor) { +func (ly *Layer) ApplyExt(di uint32, ext tensor.Tensor) { switch { case ext.NumDims() == 2 && ly.Shape.NumDims() == 4: // special case - ly.ApplyExt2Dto4D(ctx, di, ext) + ly.ApplyExt2Dto4D(di, ext) case ext.NumDims() != ly.Shape.NumDims() || !(ext.NumDims() == 2 || ext.NumDims() == 4): - ly.ApplyExt1DTsr(ctx, di, ext) + ly.ApplyExt1DTsr(di, ext) case ext.NumDims() == 2: - ly.ApplyExt2D(ctx, di, ext) + ly.ApplyExt2D(di, ext) case ext.NumDims() == 4: - ly.ApplyExt4D(ctx, di, ext) + ly.ApplyExt4D(di, ext) } } // ApplyExtVal applies given external value to given neuron // using clearMask, setMask, and toTarg from ApplyExtFlags. // Also saves Val in Exts for potential use by GPU. 
-func (ly *Layer) ApplyExtValue(ctx *Context, lni, di uint32, val float32, clearMask, setMask NeuronFlags, toTarg bool) { +func (ly *Layer) ApplyExtValue(lni, di uint32, val float32, clearMask, setMask NeuronFlags, toTarg bool) { ni := ly.NeurStIndex + lni if NrnIsOff(ni) { return @@ -472,7 +470,7 @@ func (ly *Layer) ApplyExtFlags() (clearMask, setMask NeuronFlags, toTarg bool) { } // ApplyExt2D applies 2D tensor external input -func (ly *Layer) ApplyExt2D(ctx *Context, di uint32, ext tensor.Tensor) { +func (ly *Layer) ApplyExt2D(di uint32, ext tensor.Tensor) { clearMask, setMask, toTarg := ly.ApplyExtFlags() ymx := min(ext.DimSize(0), ly.Shape.DimSize(0)) xmx := min(ext.DimSize(1), ly.Shape.DimSize(1)) @@ -481,13 +479,13 @@ func (ly *Layer) ApplyExt2D(ctx *Context, di uint32, ext tensor.Tensor) { idx := []int{y, x} val := float32(ext.Float(idx...)) lni := uint32(ly.Shape.IndexTo1D(idx...)) - ly.ApplyExtValue(ctx, lni, di, val, clearMask, setMask, toTarg) + ly.ApplyExtValue(lni, di, val, clearMask, setMask, toTarg) } } } // ApplyExt2Dto4D applies 2D tensor external input to a 4D layer -func (ly *Layer) ApplyExt2Dto4D(ctx *Context, di uint32, ext tensor.Tensor) { +func (ly *Layer) ApplyExt2Dto4D(di uint32, ext tensor.Tensor) { clearMask, setMask, toTarg := ly.ApplyExtFlags() lNy, lNx, _, _ := tensor.Projection2DShape(&ly.Shape, false) @@ -498,13 +496,13 @@ func (ly *Layer) ApplyExt2Dto4D(ctx *Context, di uint32, ext tensor.Tensor) { idx := []int{y, x} val := float32(ext.Float(idx...)) lni := uint32(tensor.Projection2DIndex(&ly.Shape, false, y, x)) - ly.ApplyExtValue(ctx, lni, di, val, clearMask, setMask, toTarg) + ly.ApplyExtValue(lni, di, val, clearMask, setMask, toTarg) } } } // ApplyExt4D applies 4D tensor external input -func (ly *Layer) ApplyExt4D(ctx *Context, di uint32, ext tensor.Tensor) { +func (ly *Layer) ApplyExt4D(di uint32, ext tensor.Tensor) { clearMask, setMask, toTarg := ly.ApplyExtFlags() ypmx := min(ext.DimSize(0), ly.Shape.DimSize(0)) xpmx := min(ext.DimSize(1), ly.Shape.DimSize(1)) @@ -517,7 +515,7 @@ func (ly *Layer) ApplyExt4D(ctx *Context, di uint32, ext tensor.Tensor) { idx := []int{yp, xp, yn, xn} val := float32(ext.Float(idx...)) lni := uint32(ly.Shape.IndexTo1D(idx...)) - ly.ApplyExtValue(ctx, lni, di, val, clearMask, setMask, toTarg) + ly.ApplyExtValue(lni, di, val, clearMask, setMask, toTarg) } } } @@ -527,36 +525,36 @@ func (ly *Layer) ApplyExt4D(ctx *Context, di uint32, ext tensor.Tensor) { // ApplyExt1DTsr applies external input using 1D flat interface into tensor. 
// If the layer is a Target or Compare layer type, then it goes in Target // otherwise it goes in Ext -func (ly *Layer) ApplyExt1DTsr(ctx *Context, di uint32, ext tensor.Tensor) { +func (ly *Layer) ApplyExt1DTsr(di uint32, ext tensor.Tensor) { clearMask, setMask, toTarg := ly.ApplyExtFlags() mx := uint32(min(ext.Len(), int(ly.NNeurons))) for lni := uint32(0); lni < mx; lni++ { val := float32(ext.Float1D(int(lni))) - ly.ApplyExtValue(ctx, lni, di, val, clearMask, setMask, toTarg) + ly.ApplyExtValue(lni, di, val, clearMask, setMask, toTarg) } } // ApplyExt1D applies external input in the form of a flat 1-dimensional slice of floats // If the layer is a Target or Compare layer type, then it goes in Target // otherwise it goes in Ext -func (ly *Layer) ApplyExt1D(ctx *Context, di uint32, ext []float64) { +func (ly *Layer) ApplyExt1D(di uint32, ext []float64) { clearMask, setMask, toTarg := ly.ApplyExtFlags() mx := uint32(min(len(ext), int(ly.NNeurons))) for lni := uint32(0); lni < mx; lni++ { val := float32(ext[lni]) - ly.ApplyExtValue(ctx, lni, di, val, clearMask, setMask, toTarg) + ly.ApplyExtValue(lni, di, val, clearMask, setMask, toTarg) } } // ApplyExt1D32 applies external input in the form of a flat 1-dimensional slice of float32s. // If the layer is a Target or Compare layer type, then it goes in Target // otherwise it goes in Ext -func (ly *Layer) ApplyExt1D32(ctx *Context, di uint32, ext []float32) { +func (ly *Layer) ApplyExt1D32(di uint32, ext []float32) { clearMask, setMask, toTarg := ly.ApplyExtFlags() mx := uint32(min(len(ext), int(ly.NNeurons))) for lni := uint32(0); lni < mx; lni++ { val := ext[lni] - ly.ApplyExtValue(ctx, lni, di, val, clearMask, setMask, toTarg) + ly.ApplyExtValue(lni, di, val, clearMask, setMask, toTarg) } } @@ -578,8 +576,7 @@ func (ly *Layer) UpdateExtFlags(ctx *Context) { } } -////////////////////////////////////////////////////////////////////////////////////// -// InitGScale +//////// InitGScale // InitGScale computes the initial scaling factor for synaptic input conductances G, // stored in GScale.Scale, based on sending layer initial activation. @@ -656,28 +653,7 @@ func (ly *Layer) InitGScale(ctx *Context) { } } -////////////////////////////////////////////////////////////////////////////////////// -// Threading / Reports - -// CostEst returns the estimated computational cost associated with this layer, -// separated by neuron-level and synapse-level, in arbitrary units where -// cost per synapse is 1. Neuron-level computation is more expensive but -// there are typically many fewer neurons, so in larger networks, synaptic -// costs tend to dominate. Neuron cost is estimated from TimerReport output -// for large networks. 
-func (ly *Layer) CostEst() (neur, syn, tot int) { - perNeur := 300 // cost per neuron, relative to synapse which is 1 - neur = int(ly.NNeurons) * perNeur - syn = 0 - for _, pt := range ly.SendPaths { - syn += int(pt.NSyns) - } - tot = neur + syn - return -} - -////////////////////////////////////////////////////////////////////////////////////// -// Stats +//////// Stats // note: use float64 for stats as that is best for logging @@ -816,8 +792,7 @@ func (ly *Layer) TestValues(ctrKey string, vals map[string]float32) { } } -////////////////////////////////////////////////////////////////////////////////////// -// Lesion +//////// Lesion // UnLesionNeurons unlesions (clears the Off flag) for all neurons in the layer func (ly *Layer) UnLesionNeurons() { //types:add @@ -858,6 +833,7 @@ func (ly *Layer) LesionNeurons(prop float32) int { //types:add return nl } +// MakeToolbar is the standard core GUI toolbar for the layer when edited. func (ly *Layer) MakeToolbar(p *tree.Plan) { tree.Add(p, func(w *core.FuncButton) { w.SetFunc(ly.Defaults).SetIcon(icons.Reset) diff --git a/axon/layer.goal b/axon/layer.goal index f368c914..35e930a8 100644 --- a/axon/layer.goal +++ b/axon/layer.goal @@ -187,8 +187,7 @@ func JsonToParams(b []byte) string { // note: all basic computation can be performed on layer-level and path level -////////////////////////////////////////////////////////////////////////////////////// -// Init methods +//////// Init methods // InitWeights initializes the weight values in the network, i.e., resetting learning // Also calls InitActs @@ -394,12 +393,11 @@ func (ly *Layer) InitWtSym(ctx *Context) { } } -////////////////////////////////////////////////////////////////////////////////////// -// ApplyExt +//////// ApplyExt // InitExt initializes external input state. // Should be called prior to ApplyExt on all layers receiving Ext input. -func (ly *Layer) InitExt(ctx *Context) { +func (ly *Layer) InitExt() { if !ly.Type.IsExt() { return } @@ -410,7 +408,7 @@ func (ly *Layer) InitExt(ctx *Context) { continue } for di := uint32(0); di < ly.MaxData; di++ { - ly.Params.InitExt(ctx, ni, di) + ly.Params.InitExt(ni, di) Exts[ly.Params.Indexes.ExtsSt + lni, di] = -1 // missing by default } } @@ -428,23 +426,23 @@ func (ly *Layer) InitExt(ctx *Context) { // otherwise it goes in Ext. // Also sets the Exts values on layer, which are used for the GPU version, // which requires calling the network ApplyExts() method -- is a no-op for CPU. -func (ly *Layer) ApplyExt(ctx *Context, di uint32, ext tensor.Tensor) { +func (ly *Layer) ApplyExt(di uint32, ext tensor.Tensor) { switch { case ext.NumDims() == 2 && ly.Shape.NumDims() == 4: // special case - ly.ApplyExt2Dto4D(ctx, di, ext) + ly.ApplyExt2Dto4D(di, ext) case ext.NumDims() != ly.Shape.NumDims() || !(ext.NumDims() == 2 || ext.NumDims() == 4): - ly.ApplyExt1DTsr(ctx, di, ext) + ly.ApplyExt1DTsr(di, ext) case ext.NumDims() == 2: - ly.ApplyExt2D(ctx, di, ext) + ly.ApplyExt2D(di, ext) case ext.NumDims() == 4: - ly.ApplyExt4D(ctx, di, ext) + ly.ApplyExt4D(di, ext) } } // ApplyExtVal applies given external value to given neuron // using clearMask, setMask, and toTarg from ApplyExtFlags. // Also saves Val in Exts for potential use by GPU. 
-func (ly *Layer) ApplyExtValue(ctx *Context, lni, di uint32, val float32, clearMask, setMask NeuronFlags, toTarg bool) { +func (ly *Layer) ApplyExtValue(lni, di uint32, val float32, clearMask, setMask NeuronFlags, toTarg bool) { ni := ly.NeurStIndex + lni if NrnIsOff(ni) { return @@ -470,7 +468,7 @@ func (ly *Layer) ApplyExtFlags() (clearMask, setMask NeuronFlags, toTarg bool) { } // ApplyExt2D applies 2D tensor external input -func (ly *Layer) ApplyExt2D(ctx *Context, di uint32, ext tensor.Tensor) { +func (ly *Layer) ApplyExt2D(di uint32, ext tensor.Tensor) { clearMask, setMask, toTarg := ly.ApplyExtFlags() ymx := min(ext.DimSize(0), ly.Shape.DimSize(0)) xmx := min(ext.DimSize(1), ly.Shape.DimSize(1)) @@ -479,13 +477,13 @@ func (ly *Layer) ApplyExt2D(ctx *Context, di uint32, ext tensor.Tensor) { idx := []int{y, x} val := float32(ext.Float(idx...)) lni := uint32(ly.Shape.IndexTo1D(idx...)) - ly.ApplyExtValue(ctx, lni, di, val, clearMask, setMask, toTarg) + ly.ApplyExtValue(lni, di, val, clearMask, setMask, toTarg) } } } // ApplyExt2Dto4D applies 2D tensor external input to a 4D layer -func (ly *Layer) ApplyExt2Dto4D(ctx *Context, di uint32, ext tensor.Tensor) { +func (ly *Layer) ApplyExt2Dto4D(di uint32, ext tensor.Tensor) { clearMask, setMask, toTarg := ly.ApplyExtFlags() lNy, lNx, _, _ := tensor.Projection2DShape(&ly.Shape, false) @@ -496,13 +494,13 @@ func (ly *Layer) ApplyExt2Dto4D(ctx *Context, di uint32, ext tensor.Tensor) { idx := []int{y, x} val := float32(ext.Float(idx...)) lni := uint32(tensor.Projection2DIndex(&ly.Shape, false, y, x)) - ly.ApplyExtValue(ctx, lni, di, val, clearMask, setMask, toTarg) + ly.ApplyExtValue(lni, di, val, clearMask, setMask, toTarg) } } } // ApplyExt4D applies 4D tensor external input -func (ly *Layer) ApplyExt4D(ctx *Context, di uint32, ext tensor.Tensor) { +func (ly *Layer) ApplyExt4D(di uint32, ext tensor.Tensor) { clearMask, setMask, toTarg := ly.ApplyExtFlags() ypmx := min(ext.DimSize(0), ly.Shape.DimSize(0)) xpmx := min(ext.DimSize(1), ly.Shape.DimSize(1)) @@ -515,7 +513,7 @@ func (ly *Layer) ApplyExt4D(ctx *Context, di uint32, ext tensor.Tensor) { idx := []int{yp, xp, yn, xn} val := float32(ext.Float(idx...)) lni := uint32(ly.Shape.IndexTo1D(idx...)) - ly.ApplyExtValue(ctx, lni, di, val, clearMask, setMask, toTarg) + ly.ApplyExtValue(lni, di, val, clearMask, setMask, toTarg) } } } @@ -525,36 +523,36 @@ func (ly *Layer) ApplyExt4D(ctx *Context, di uint32, ext tensor.Tensor) { // ApplyExt1DTsr applies external input using 1D flat interface into tensor. 
// If the layer is a Target or Compare layer type, then it goes in Target // otherwise it goes in Ext -func (ly *Layer) ApplyExt1DTsr(ctx *Context, di uint32, ext tensor.Tensor) { +func (ly *Layer) ApplyExt1DTsr(di uint32, ext tensor.Tensor) { clearMask, setMask, toTarg := ly.ApplyExtFlags() mx := uint32(min(ext.Len(), int(ly.NNeurons))) for lni := uint32(0); lni < mx; lni++ { val := float32(ext.Float1D(int(lni))) - ly.ApplyExtValue(ctx, lni, di, val, clearMask, setMask, toTarg) + ly.ApplyExtValue(lni, di, val, clearMask, setMask, toTarg) } } // ApplyExt1D applies external input in the form of a flat 1-dimensional slice of floats // If the layer is a Target or Compare layer type, then it goes in Target // otherwise it goes in Ext -func (ly *Layer) ApplyExt1D(ctx *Context, di uint32, ext []float64) { +func (ly *Layer) ApplyExt1D(di uint32, ext []float64) { clearMask, setMask, toTarg := ly.ApplyExtFlags() mx := uint32(min(len(ext), int(ly.NNeurons))) for lni := uint32(0); lni < mx; lni++ { val := float32(ext[lni]) - ly.ApplyExtValue(ctx, lni, di, val, clearMask, setMask, toTarg) + ly.ApplyExtValue(lni, di, val, clearMask, setMask, toTarg) } } // ApplyExt1D32 applies external input in the form of a flat 1-dimensional slice of float32s. // If the layer is a Target or Compare layer type, then it goes in Target // otherwise it goes in Ext -func (ly *Layer) ApplyExt1D32(ctx *Context, di uint32, ext []float32) { +func (ly *Layer) ApplyExt1D32(di uint32, ext []float32) { clearMask, setMask, toTarg := ly.ApplyExtFlags() mx := uint32(min(len(ext), int(ly.NNeurons))) for lni := uint32(0); lni < mx; lni++ { val := ext[lni] - ly.ApplyExtValue(ctx, lni, di, val, clearMask, setMask, toTarg) + ly.ApplyExtValue(lni, di, val, clearMask, setMask, toTarg) } } @@ -576,8 +574,7 @@ func (ly *Layer) UpdateExtFlags(ctx *Context) { } } -////////////////////////////////////////////////////////////////////////////////////// -// InitGScale +//////// InitGScale // InitGScale computes the initial scaling factor for synaptic input conductances G, // stored in GScale.Scale, based on sending layer initial activation. @@ -653,28 +650,7 @@ func (ly *Layer) InitGScale(ctx *Context) { } } -////////////////////////////////////////////////////////////////////////////////////// -// Threading / Reports - -// CostEst returns the estimated computational cost associated with this layer, -// separated by neuron-level and synapse-level, in arbitrary units where -// cost per synapse is 1. Neuron-level computation is more expensive but -// there are typically many fewer neurons, so in larger networks, synaptic -// costs tend to dominate. Neuron cost is estimated from TimerReport output -// for large networks. 
-func (ly *Layer) CostEst() (neur, syn, tot int) { - perNeur := 300 // cost per neuron, relative to synapse which is 1 - neur = int(ly.NNeurons) * perNeur - syn = 0 - for _, pt := range ly.SendPaths { - syn += int(pt.NSyns) - } - tot = neur + syn - return -} - -////////////////////////////////////////////////////////////////////////////////////// -// Stats +//////// Stats // note: use float64 for stats as that is best for logging @@ -813,8 +789,7 @@ func (ly *Layer) TestValues(ctrKey string, vals map[string]float32) { } } -////////////////////////////////////////////////////////////////////////////////////// -// Lesion +//////// Lesion // UnLesionNeurons unlesions (clears the Off flag) for all neurons in the layer func (ly *Layer) UnLesionNeurons() { //types:add @@ -855,6 +830,7 @@ func (ly *Layer) LesionNeurons(prop float32) int { //types:add return nl } +// MakeToolbar is the standard core GUI toolbar for the layer when edited. func (ly *Layer) MakeToolbar(p *tree.Plan) { tree.Add(p, func(w *core.FuncButton) { w.SetFunc(ly.Defaults).SetIcon(icons.Reset) @@ -873,3 +849,4 @@ func (ly *Layer) MakeToolbar(p *tree.Plan) { w.SetFunc(ly.UnLesionNeurons).SetIcon(icons.Cut) }) } + diff --git a/axon/layer_test.go b/axon/layer_test.go index 511d8d50..d0f6f161 100644 --- a/axon/layer_test.go +++ b/axon/layer_test.go @@ -17,9 +17,7 @@ func TestLayer(t *testing.T) { inputLayer := net.AddLayer("Input", InputLayer, shape...) hiddenLayer := net.AddLayer("Hidden", SuperLayer, shape...) outputLayer := net.AddLayer("Output", TargetLayer, shape...) - - ctx := NewContext() - assert.NoError(t, net.Build(ctx)) + assert.NoError(t, net.Build()) assert.True(t, inputLayer.Params.IsInput()) assert.False(t, outputLayer.Params.IsInput()) @@ -174,9 +172,9 @@ func createNetwork(ctx *Context, shape []int, t *testing.T) *Network { full := paths.NewFull() net.ConnectLayers(inputLayer, hiddenLayer, full, ForwardPath) net.BidirConnectLayers(hiddenLayer, outputLayer, full) - assert.NoError(t, net.Build(ctx)) + assert.NoError(t, net.Build()) net.Defaults() - net.InitWeights(ctx) + net.InitWeights() return net } @@ -193,9 +191,7 @@ func TestLayerBase_IsOff(t *testing.T) { in2ToHid := net.ConnectLayers(inputLayer2, hiddenLayer, full, ForwardPath) hidToOut, outToHid := net.BidirConnectLayers(hiddenLayer, outputLayer, full) - ctx := NewContext() - - assert.NoError(t, net.Build(ctx)) + assert.NoError(t, net.Build()) net.Defaults() assert.False(t, inputLayer.Off) diff --git a/axon/layerbase.go b/axon/layerbase.go index c6730762..f647c27b 100644 --- a/axon/layerbase.go +++ b/axon/layerbase.go @@ -28,37 +28,40 @@ import ( type Layer struct { emer.LayerBase - // layer parameters. + // Params are layer parameters (pointer to item in Network.LayerParams). Params *LayerParams // our parent network, in case we need to use it to find // other layers etc; set when added by network. Network *Network `copier:"-" json:"-" xml:"-" display:"-"` - // type of layer. + // Type is the type of layer, which drives specialized computation as needed. Type LayerTypes - // number of neurons in the layer. + // NNeurons is the number of neurons in the layer. NNeurons uint32 `display:"-"` - // starting index of neurons for this layer within the global Network list. + // NeurStIndex is the starting index of neurons for this layer within + // the global Network list. NeurStIndex uint32 `display:"-" inactive:"-"` - // number of pools based on layer shape; at least 1 for layer pool + 4D subpools. 
+ // NPools is the number of inhibitory pools based on layer shape, + // with the first one representing the entire set of neurons in the layer, + // and 4D shaped layers have sub-pools after that. NPools uint32 `display:"-"` - // maximum amount of input data that can be processed in parallel - // in one pass of the network. + // MaxData is the maximum amount of input data that can be processed in + // parallel in one pass of the network (copied from [NetworkIndexes]). // Neuron, Pool, Values storage is allocated to hold this amount. MaxData uint32 `display:"-"` - // list of receiving pathways into this layer from other layers + // RecvPaths is the list of receiving pathways into this layer from other layers. RecvPaths []*Path - // list of sending pathways from this layer to other layers + // SendPaths is the list of sending pathways from this layer to other layers. SendPaths []*Path - // configuration data set when the network is configured, + // BuildConfig has configuration data set when the network is configured, // that is used during the network Build() process via PostBuild method, // after all the structure of the network has been fully constructed. // In particular, the Params is nil until Build, so setting anything @@ -68,8 +71,8 @@ type Layer struct { // algorithm structural parameters set during ConfigNet() methods. BuildConfig map[string]string `table:"-"` - // default parameters that are applied prior to user-set parameters. - // These are useful for specific layer functionality in specialized + // DefaultParams are default parameters that are applied prior to user-set + // parameters. These are useful for specific layer functionality in specialized // brain areas (e.g., Rubicon, BG etc) not associated with a layer type, // which otherwise is used to hard-code initial default parameters. // Typically just set to a literal map. @@ -99,11 +102,6 @@ func (ly *Layer) SetOff(off bool) { } } -// SubPool returns subpool index for given neuron, at data index -// func (ly *Layer) SubPool(ni, di uint32) uint32 { -// return pi := NeuronIxs[NrnSubPool, ni] -// } - // RecipToSendPath finds the reciprocal pathway to // the given sending pathway within the ly layer. // i.e., where ly is instead the *receiving* layer from same other layer B @@ -475,8 +473,7 @@ func (ly *Layer) VarRange(varNm string) (min, max float32, err error) { return } -//////////////////////////////////////////// -// Weights +//////// Weights // WriteWeightsJSON writes the weights from this layer from the receiver-side perspective // in a JSON text format. We build in the indentation logic to make it much faster and diff --git a/axon/layerbase.goal b/axon/layerbase.goal index 060595cb..a7c5452f 100644 --- a/axon/layerbase.goal +++ b/axon/layerbase.goal @@ -26,48 +26,51 @@ import ( type Layer struct { emer.LayerBase - // layer parameters. + // Params are layer parameters (pointer to item in Network.LayerParams). Params *LayerParams // our parent network, in case we need to use it to find // other layers etc; set when added by network. Network *Network `copier:"-" json:"-" xml:"-" display:"-"` - // type of layer. + // Type is the type of layer, which drives specialized computation as needed. Type LayerTypes - // number of neurons in the layer. + // NNeurons is the number of neurons in the layer. NNeurons uint32 `display:"-"` - // starting index of neurons for this layer within the global Network list. + // NeurStIndex is the starting index of neurons for this layer within + // the global Network list. 
NeurStIndex uint32 `display:"-" inactive:"-"` - // number of pools based on layer shape; at least 1 for layer pool + 4D subpools. + // NPools is the number of inhibitory pools based on layer shape, + // with the first one representing the entire set of neurons in the layer, + // and 4D shaped layers have sub-pools after that. NPools uint32 `display:"-"` - // maximum amount of input data that can be processed in parallel - // in one pass of the network. + // MaxData is the maximum amount of input data that can be processed in + // parallel in one pass of the network (copied from [NetworkIndexes]). // Neuron, Pool, Values storage is allocated to hold this amount. MaxData uint32 `display:"-"` - // list of receiving pathways into this layer from other layers + // RecvPaths is the list of receiving pathways into this layer from other layers. RecvPaths []*Path - // list of sending pathways from this layer to other layers + // SendPaths is the list of sending pathways from this layer to other layers. SendPaths []*Path - // configuration data set when the network is configured, + // BuildConfig has configuration data set when the network is configured, // that is used during the network Build() process via PostBuild method, // after all the structure of the network has been fully constructed. // In particular, the Params is nil until Build, so setting anything // specific in there (e.g., an index to another layer) must be done - //as a second pass. Note that Params are all applied after Build + // as a second pass. Note that Params are all applied after Build // and can set user-modifiable params, so this is for more special // algorithm structural parameters set during ConfigNet() methods. BuildConfig map[string]string `table:"-"` - // default parameters that are applied prior to user-set parameters. - // These are useful for specific layer functionality in specialized + // DefaultParams are default parameters that are applied prior to user-set + // parameters. These are useful for specific layer functionality in specialized // brain areas (e.g., Rubicon, BG etc) not associated with a layer type, // which otherwise is used to hard-code initial default parameters. // Typically just set to a literal map. @@ -97,11 +100,6 @@ func (ly *Layer) SetOff(off bool) { } } -// SubPool returns subpool index for given neuron, at data index -// func (ly *Layer) SubPool(ni, di uint32) uint32 { -// return pi := NeuronIxs[NrnSubPool, ni] -// } - // RecipToSendPath finds the reciprocal pathway to // the given sending pathway within the ly layer. // i.e., where ly is instead the *receiving* layer from same other layer B @@ -473,8 +471,7 @@ func (ly *Layer) VarRange(varNm string) (min, max float32, err error) { return } -//////////////////////////////////////////// -// Weights +//////// Weights // WriteWeightsJSON writes the weights from this layer from the receiver-side perspective // in a JSON text format. 
We build in the indentation logic to make it much faster and @@ -561,3 +558,4 @@ func (ly *Layer) SetWeights(lw *weights.Layer) error { ly.AvgDifFromTrgAvg(ctx) // update AvgPct based on loaded ActAvg values return err } + diff --git a/axon/layerparams.go b/axon/layerparams.go index 98fd9a35..1a25b821 100644 --- a/axon/layerparams.go +++ b/axon/layerparams.go @@ -8,7 +8,6 @@ package axon import ( "encoding/json" - "fmt" "cogentcore.org/core/math32" "github.com/emer/axon/v2/fsfffb" @@ -315,7 +314,7 @@ func (ly *LayerParams) ApplyExtFlags(clearMask, setMask *NeuronFlags, toTarg *bo } // InitExt initializes external input state for given neuron -func (ly *LayerParams) InitExt(ctx *Context, ni, di uint32) { +func (ly *LayerParams) InitExt(ni, di uint32) { Neurons.Set(0.0, int(Ext), int(ni), int(di)) Neurons.Set(0.0, int(Target), int(ni), int(di)) NrnClearFlag(ni, di, NeuronHasExt|NeuronHasTarg|NeuronHasCmpr) @@ -325,7 +324,7 @@ func (ly *LayerParams) InitExt(ctx *Context, ni, di uint32) { // setting flags based on type of layer. // Should only be called on Input, Target, Compare layers. // Negative values are not valid, and will be interpreted as missing inputs. -func (ly *LayerParams) ApplyExtValue(ctx *Context, ni, di uint32, val float32) { +func (ly *LayerParams) ApplyExtValue(ni, di uint32, val float32) { if val < 0 { return } @@ -341,12 +340,12 @@ func (ly *LayerParams) ApplyExtValue(ctx *Context, ni, di uint32, val float32) { NrnSetFlag(ni, di, setMask) } -func (ly *LayerParams) ApplyExtsNeuron(ctx *Context, ni, di uint32) { +func (ly *LayerParams) ApplyExtsNeuron(ni, di uint32) { lni := ni - ly.Indexes.NeurSt // layer-based - ly.InitExt(ctx, ni, di) + ly.InitExt(ni, di) if IsExtLayerType(ly.Type) { ei := ly.Indexes.ExtsSt + lni - ly.ApplyExtValue(ctx, ni, di, Exts.Value(int(ei), int(di))) + ly.ApplyExtValue(ni, di, Exts.Value(int(ei), int(di))) } } @@ -627,7 +626,7 @@ func (ly *LayerParams) SpecialPreGs(ctx *Context, pi, ni, di uint32, drvGe float Neurons.Set(geRaw, int(GeRaw), int(ni), int(di)) Neurons.Set(ly.Acts.Dt.GeSynFromRawSteady(geRaw), int(GeSyn), int(ni), int(di)) case USLayer: - us := RubiconUSStimValue(ctx, di, pi0, ly.Learn.NeuroMod.Valence) + us := RubiconUSStimValue(di, pi0, ly.Learn.NeuroMod.Valence) geRaw := us if us > 0 { geRaw = ly.Acts.PopCode.EncodeGe(pni, pnn, us) @@ -801,27 +800,11 @@ func (ly *LayerParams) SpikeFromG(ctx *Context, lpi, ni, di uint32) { Neurons.Set(spkmax, int(SpkMax), int(ni), int(di)) } } - spksper := ctx.ThetaCycles / 8 - bin := ctx.Cycle / spksper spk := Neurons.Value(int(Spike), int(ni), int(di)) - fmt.Println(ctx.Cycle, bin) - switch bin { - case 0: - Neurons.SetAdd(spk, int(SpkBin0), int(ni), int(di)) - case 1: - Neurons.SetAdd(spk, int(SpkBin1), int(ni), int(di)) - case 2: - Neurons.SetAdd(spk, int(SpkBin2), int(ni), int(di)) - case 3: - Neurons.SetAdd(spk, int(SpkBin3), int(ni), int(di)) - case 4: - Neurons.SetAdd(spk, int(SpkBin4), int(ni), int(di)) - case 5: - Neurons.SetAdd(spk, int(SpkBin5), int(ni), int(di)) - case 6: - Neurons.SetAdd(spk, int(SpkBin6), int(ni), int(di)) - default: - Neurons.SetAdd(spk, int(SpkBin7), int(ni), int(di)) + if spk > 0 { + spksper := ctx.ThetaCycles / 8 + bin := min(ctx.Cycle/spksper, 7) + Neurons.SetAdd(spk, int(SpkBin0+NeuronVars(bin)), int(ni), int(di)) } } @@ -909,7 +892,7 @@ func (ly *LayerParams) PostSpikeSpecial(ctx *Context, lpi, pi, ni, di uint32) { } Neurons.Set(act, int(Act), int(ni), int(di)) case USLayer: - us := RubiconUSStimValue(ctx, di, pi0, ly.Learn.NeuroMod.Valence) + us := 
RubiconUSStimValue(di, pi0, ly.Learn.NeuroMod.Valence) act := us if us > 0 { act = ly.Acts.PopCode.EncodeValue(pni, pnn, us) @@ -1164,16 +1147,6 @@ func (ly *LayerParams) NewStateNeuron(ctx *Context, ni, di uint32) { Neurons.Set(Neurons.Value(int(CaSpkD), int(ni), int(di)), int(SpkPrv), int(ni), int(di)) Neurons.Set(0.0, int(SpkMax), int(ni), int(di)) Neurons.Set(0.0, int(SpkMaxCa), int(ni), int(di)) - - Neurons.Set(0.0, int(SpkBin0), int(ni), int(di)) - Neurons.Set(0.0, int(SpkBin1), int(ni), int(di)) - Neurons.Set(0.0, int(SpkBin2), int(ni), int(di)) - Neurons.Set(0.0, int(SpkBin3), int(ni), int(di)) - Neurons.Set(0.0, int(SpkBin4), int(ni), int(di)) - Neurons.Set(0.0, int(SpkBin5), int(ni), int(di)) - Neurons.Set(0.0, int(SpkBin6), int(ni), int(di)) - Neurons.Set(0.0, int(SpkBin7), int(ni), int(di)) - ly.Acts.DecayState(ctx, ni, di, ly.Acts.Decay.Act, ly.Acts.Decay.Glong, ly.Acts.Decay.AHP) // Note: synapse-level Ca decay happens in DWt ly.Acts.KNaNewState(ctx, ni, di) diff --git a/axon/layerparams.goal b/axon/layerparams.goal index 1eb0e44a..47b7ac96 100644 --- a/axon/layerparams.goal +++ b/axon/layerparams.goal @@ -5,7 +5,6 @@ package axon import ( - "fmt" "encoding/json" "cogentcore.org/core/math32" @@ -313,7 +312,7 @@ func (ly *LayerParams) ApplyExtFlags(clearMask, setMask *NeuronFlags, toTarg *bo } // InitExt initializes external input state for given neuron -func (ly *LayerParams) InitExt(ctx *Context, ni, di uint32) { +func (ly *LayerParams) InitExt(ni, di uint32) { Neurons[Ext, ni, di] = 0.0 Neurons[Target, ni, di] = 0.0 NrnClearFlag(ni, di, NeuronHasExt|NeuronHasTarg|NeuronHasCmpr) @@ -323,7 +322,7 @@ func (ly *LayerParams) InitExt(ctx *Context, ni, di uint32) { // setting flags based on type of layer. // Should only be called on Input, Target, Compare layers. // Negative values are not valid, and will be interpreted as missing inputs. 
-func (ly *LayerParams) ApplyExtValue(ctx *Context, ni, di uint32, val float32) { +func (ly *LayerParams) ApplyExtValue(ni, di uint32, val float32) { if val < 0 { return } @@ -339,12 +338,12 @@ func (ly *LayerParams) ApplyExtValue(ctx *Context, ni, di uint32, val float32) { NrnSetFlag(ni, di, setMask) } -func (ly *LayerParams) ApplyExtsNeuron(ctx *Context, ni, di uint32) { +func (ly *LayerParams) ApplyExtsNeuron(ni, di uint32) { lni := ni - ly.Indexes.NeurSt // layer-based - ly.InitExt(ctx, ni, di) + ly.InitExt(ni, di) if IsExtLayerType(ly.Type) { ei := ly.Indexes.ExtsSt + lni - ly.ApplyExtValue(ctx, ni, di, Exts[ei, di]) + ly.ApplyExtValue(ni, di, Exts[ei, di]) } } @@ -625,7 +624,7 @@ func (ly *LayerParams) SpecialPreGs(ctx *Context, pi, ni, di uint32, drvGe float Neurons[GeRaw, ni, di] = geRaw Neurons[GeSyn, ni, di] = ly.Acts.Dt.GeSynFromRawSteady(geRaw) case USLayer: - us := RubiconUSStimValue(ctx, di, pi0, ly.Learn.NeuroMod.Valence) + us := RubiconUSStimValue(di, pi0, ly.Learn.NeuroMod.Valence) geRaw := us if us > 0 { geRaw = ly.Acts.PopCode.EncodeGe(pni, pnn, us) @@ -799,27 +798,11 @@ func (ly *LayerParams) SpikeFromG(ctx *Context, lpi, ni, di uint32) { Neurons[SpkMax, ni, di] = spkmax } } - spksper := ctx.ThetaCycles / 8 - bin := ctx.Cycle / spksper spk := Neurons[Spike, ni, di] - fmt.Println(ctx.Cycle, bin) - switch bin { - case 0: - Neurons[SpkBin0, ni, di] += spk - case 1: - Neurons[SpkBin1, ni, di] += spk - case 2: - Neurons[SpkBin2, ni, di] += spk - case 3: - Neurons[SpkBin3, ni, di] += spk - case 4: - Neurons[SpkBin4, ni, di] += spk - case 5: - Neurons[SpkBin5, ni, di] += spk - case 6: - Neurons[SpkBin6, ni, di] += spk - default: - Neurons[SpkBin7, ni, di] += spk + if spk > 0 { + spksper := ctx.ThetaCycles / 8 + bin := min(ctx.Cycle / spksper, 7) + Neurons[SpkBin0 + NeuronVars(bin), ni, di] += spk } } @@ -907,7 +890,7 @@ func (ly *LayerParams) PostSpikeSpecial(ctx *Context, lpi, pi, ni, di uint32) { } Neurons[Act, ni, di] = act case USLayer: - us := RubiconUSStimValue(ctx, di, pi0, ly.Learn.NeuroMod.Valence) + us := RubiconUSStimValue(di, pi0, ly.Learn.NeuroMod.Valence) act := us if us > 0 { act = ly.Acts.PopCode.EncodeValue(pni, pnn, us) @@ -1163,16 +1146,6 @@ func (ly *LayerParams) NewStateNeuron(ctx *Context, ni, di uint32) { Neurons[SpkPrv, ni, di] = Neurons[CaSpkD, ni, di] Neurons[SpkMax, ni, di] = 0.0 Neurons[SpkMaxCa, ni, di] = 0.0 - - Neurons[SpkBin0, ni, di] = 0.0 - Neurons[SpkBin1, ni, di] = 0.0 - Neurons[SpkBin2, ni, di] = 0.0 - Neurons[SpkBin3, ni, di] = 0.0 - Neurons[SpkBin4, ni, di] = 0.0 - Neurons[SpkBin5, ni, di] = 0.0 - Neurons[SpkBin6, ni, di] = 0.0 - Neurons[SpkBin7, ni, di] = 0.0 - ly.Acts.DecayState(ctx, ni, di, ly.Acts.Decay.Act, ly.Acts.Decay.Glong, ly.Acts.Decay.AHP) // Note: synapse-level Ca decay happens in DWt ly.Acts.KNaNewState(ctx, ni, di) diff --git a/axon/looper.go b/axon/looper.go index 8b4f63ed..54564325 100644 --- a/axon/looper.go +++ b/axon/looper.go @@ -17,24 +17,26 @@ import ( // and plusEnd is end of plus phase, typically 199 // resets the state at start of trial. 
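(The SpikeFromG change shown just above, in both layerparams.go and layerparams.goal, replaces the eight-way switch with indexed arithmetic on SpkBin0, guards accumulation with `if spk > 0`, and drops the stray debug fmt.Println. As a sanity check on the clamped bin index, here is a minimal standalone Go sketch; spikeBin is a hypothetical helper that is not part of the patch, and it assumes Go 1.21+ for the builtin min.)

package main

import "fmt"

// spikeBin mirrors the binning arithmetic now used in SpikeFromG:
// the theta window is split into 8 equal bins, and any remainder
// cycles are clamped into the last bin (index 7).
func spikeBin(cycle, thetaCycles int32) int32 {
	spksper := thetaCycles / 8
	return min(cycle/spksper, 7)
}

func main() {
	// For a 200-cycle theta window, each bin covers 25 cycles.
	for _, cyc := range []int32{0, 24, 25, 199} {
		fmt.Printf("cycle %3d -> bin %d\n", cyc, spikeBin(cyc, 200))
	}
	// prints bins 0, 0, 1, 7
}

(Only cycles on which the neuron actually spiked contribute to a bin, since the accumulation is now inside the `if spk > 0` guard.)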
// Can pass a trial-level time scale to use instead of the default etime.Trial -func LooperStdPhases(man *looper.Manager, ctx *Context, net *Network, plusStart, plusEnd int, trial ...etime.Times) { +func LooperStdPhases(man *looper.Manager, net *Network, plusStart, plusEnd int, trial ...etime.Times) { trl := etime.Trial if len(trial) > 0 { trl = trial[0] } minusPhase := &looper.Event{Name: "MinusPhase", AtCounter: 0} minusPhase.OnEvent.Add("MinusPhase:Start", func() { + ctx := net.Context() ctx.PlusPhase.SetBool(false) ctx.NewPhase(false) }) - beta1 := looper.NewEvent("Beta1", 50, func() { net.SpkSt1(ctx) }) - beta2 := looper.NewEvent("Beta2", 100, func() { net.SpkSt2(ctx) }) + beta1 := looper.NewEvent("Beta1", 50, func() { net.SpkSt1() }) + beta2 := looper.NewEvent("Beta2", 100, func() { net.SpkSt2() }) plusPhase := &looper.Event{Name: "PlusPhase", AtCounter: plusStart} - plusPhase.OnEvent.Add("MinusPhase:End", func() { net.MinusPhase(ctx) }) + plusPhase.OnEvent.Add("MinusPhase:End", func() { net.MinusPhase() }) plusPhase.OnEvent.Add("PlusPhase:Start", func() { + ctx := net.Context() ctx.PlusPhase.SetBool(true) ctx.NewPhase(true) - net.PlusPhaseStart(ctx) + net.PlusPhaseStart() }) man.AddEventAllModes(etime.Cycle, minusPhase, beta1, beta2, plusPhase) @@ -42,11 +44,10 @@ func LooperStdPhases(man *looper.Manager, ctx *Context, net *Network, plusStart, for m, _ := range man.Stacks { stack := man.Stacks[m] stack.Loops[trl].OnStart.Add("NewState", func() { - net.NewState(ctx) - ctx.NewState(m) + net.NewState(m) }) stack.Loops[trl].OnEnd.Add("PlusPhase:End", func() { - net.PlusPhase(ctx) + net.PlusPhase() }) } } @@ -54,7 +55,7 @@ func LooperStdPhases(man *looper.Manager, ctx *Context, net *Network, plusStart, // LooperSimCycleAndLearn adds Cycle and DWt, WtFromDWt functions to looper // for given network, ctx, and netview update manager // Can pass a trial-level time scale to use instead of the default etime.Trial -func LooperSimCycleAndLearn(man *looper.Manager, net *Network, ctx *Context, viewupdt *netview.ViewUpdate, trial ...etime.Times) { +func LooperSimCycleAndLearn(man *looper.Manager, net *Network, viewupdt *netview.ViewUpdate, trial ...etime.Times) { trl := etime.Trial if len(trial) > 0 { trl = trial[0] @@ -73,20 +74,19 @@ func LooperSimCycleAndLearn(man *looper.Manager, net *Network, ctx *Context, vie // } // } net.Cycle() - ctx.CycleInc() }) } ttrl := man.GetLoop(etime.Train, trl) if ttrl != nil { ttrl.OnEnd.Add("UpdateWeights", func() { - net.DWt(ctx) + net.DWt() if viewupdt.IsViewingSynapse() { //TODO: // net.GPU.SyncSynapsesFromGPU() // net.GPU.SyncSynCaFromGPU() // note: only time we call this viewupdt.RecordSyns() // note: critical to update weights here so DWt is visible } - net.WtFromDWt(ctx) + net.WtFromDWt() }) } @@ -94,6 +94,7 @@ func LooperSimCycleAndLearn(man *looper.Manager, net *Network, ctx *Context, vie for m, loops := range man.Stacks { for _, loop := range loops.Loops { loop.OnStart.Add("SetCtxMode", func() { + ctx := net.Context() ctx.Mode = m }) } diff --git a/axon/network.go b/axon/network.go index 4f1302f0..5f51bb48 100644 --- a/axon/network.go +++ b/axon/network.go @@ -7,14 +7,8 @@ package axon import ( - "fmt" - "strings" - - "cogentcore.org/core/base/datasize" - "cogentcore.org/core/core" - "cogentcore.org/core/icons" "cogentcore.org/core/tensor" - "cogentcore.org/core/tree" + "github.com/emer/emergent/v2/etime" "github.com/emer/emergent/v2/paths" ) @@ -24,7 +18,7 @@ import ( // to compute one complete algorithmic alpha cycle update. 
// GlobalsReset resets all global values to 0, for all NData -func GlobalsReset(ctx *Context) { +func GlobalsReset() { nix := GetNetworkIxs(0) for di := uint32(0); di < nix.MaxData; di++ { for vg := GvRew; vg < GlobalScalarVarsN; vg++ { @@ -42,12 +36,14 @@ func GlobalsReset(ctx *Context) { // This is called *before* applying external input data and operates across // all data parallel values. The current Context.NData should be set // properly prior to calling this and subsequent Cycle methods. -func (nt *Network) NewState(ctx *Context) { +func (nt *Network) NewState(mode etime.Modes) { // if nt.GPU.On { // todo: this has a bug in neuron-level access in updating SpkPrv // // nt.GPU.RunNewState() // return // } + ctx := nt.Context() + ctx.NewState(mode) for _, ly := range nt.Layers { if ly.Off { continue @@ -62,8 +58,8 @@ func (nt *Network) NewState(ctx *Context) { // Cycle runs one cycle of activation updating using threading methods. func (nt *Network) Cycle() { // todo: chunks of 10 cycles - nix := GetNetworkIxs(0) - ctx := GetCtx(0) + nix := nt.NetIxs() + ctx := nt.Context() nd := int(nix.NNeurons * ctx.NData) ld := int(nix.NLayers * ctx.NData) pd := int(nix.NPools * ctx.NData) @@ -102,14 +98,14 @@ func (nt *Network) Cycle() { // InitExt initializes external input state. // Call prior to applying external inputs to layers. -func (nt *Network) InitExt(ctx *Context) { +func (nt *Network) InitExt() { // note: important to do this for GPU // to ensure partial inputs work the same way on CPU and GPU. for _, ly := range nt.Layers { if ly.Off { continue } - ly.InitExt(ctx) + ly.InitExt() } } @@ -117,29 +113,32 @@ func (nt *Network) InitExt(ctx *Context) { // that were set in prior layer-specific ApplyExt calls. // This does nothing on the CPU, but is critical for the GPU, // and should be added to all sims where GPU will be used. -func (nt *Network) ApplyExts(ctx *Context) { +func (nt *Network) ApplyExts() { if !UseGPU { return } - nix := GetNetworkIxs(0) + nix := nt.NetIxs() + ctx := nt.Context() nd := int(nix.NNeurons * ctx.NData) RunApplyExtsNeuron(nd) } // MinusPhase does updating after end of minus phase. -func (nt *Network) MinusPhase(ctx *Context) { - nix := GetNetworkIxs(0) +func (nt *Network) MinusPhase() { + nix := nt.NetIxs() + ctx := nt.Context() nd := int(nix.NNeurons * ctx.NData) pd := int(nix.NPools * ctx.NData) RunMinusPhasePool(pd) RunMinusPhaseNeuron(nd) - nt.MinusPhasePost(ctx) + nt.MinusPhasePost() // todo: // nt.GPU.SyncStateToGPU() } // MinusPhasePost does special CPU post processing. -func (nt *Network) MinusPhasePost(ctx *Context) { +func (nt *Network) MinusPhasePost() { + ctx := nt.Context() for _, ly := range nt.Layers { if ly.Off { continue @@ -150,26 +149,29 @@ func (nt *Network) MinusPhasePost(ctx *Context) { // PlusPhaseStart does updating at the start of the plus phase: // applies Target inputs as External inputs. 
-func (nt *Network) PlusPhaseStart(ctx *Context) { - nix := GetNetworkIxs(0) +func (nt *Network) PlusPhaseStart() { + nix := nt.NetIxs() + ctx := nt.Context() nd := int(nix.NNeurons * ctx.NData) RunPlusPhaseStartNeuron(nd) } // PlusPhase does updating after end of plus phase -func (nt *Network) PlusPhase(ctx *Context) { - nix := GetNetworkIxs(0) +func (nt *Network) PlusPhase() { + nix := nt.NetIxs() + ctx := nt.Context() nd := int(nix.NNeurons * ctx.NData) pd := int(nix.NPools * ctx.NData) RunPlusPhasePool(pd) RunPlusPhaseNeuron(nd) - nt.PlusPhasePost(ctx) + nt.PlusPhasePost() // todo: // nt.GPU.SyncStateToGPU() } // PlusPhasePost happens on the CPU always. -func (nt *Network) PlusPhasePost(ctx *Context) { +func (nt *Network) PlusPhasePost() { + ctx := nt.Context() for _, ly := range nt.Layers { if ly.Off { continue @@ -181,7 +183,8 @@ func (nt *Network) PlusPhasePost(ctx *Context) { // TargToExt sets external input Ext from target values Target // This is done at end of MinusPhase to allow targets to drive activity in plus phase. // This can be called separately to simulate alpha cycles within theta cycles, for example. -func (nt *Network) TargToExt(ctx *Context) { +func (nt *Network) TargToExt() { + ctx := nt.Context() for _, ly := range nt.Layers { if ly.Off { continue @@ -192,7 +195,8 @@ func (nt *Network) TargToExt(ctx *Context) { // ClearTargExt clears external inputs Ext that were set from target values Target. // This can be called to simulate alpha cycles within theta cycles, for example. -func (nt *Network) ClearTargExt(ctx *Context) { +func (nt *Network) ClearTargExt() { + ctx := nt.Context() for _, ly := range nt.Layers { if ly.Off { continue @@ -202,7 +206,8 @@ func (nt *Network) ClearTargExt(ctx *Context) { } // SpkSt1 saves current acts into SpkSt1 (using CaSpkP) -func (nt *Network) SpkSt1(ctx *Context) { +func (nt *Network) SpkSt1() { + ctx := nt.Context() for _, ly := range nt.Layers { if ly.Off { continue @@ -212,7 +217,8 @@ func (nt *Network) SpkSt1(ctx *Context) { } // SpkSt2 saves current acts into SpkSt2 (using CaSpkP) -func (nt *Network) SpkSt2(ctx *Context) { +func (nt *Network) SpkSt2() { + ctx := nt.Context() for _, ly := range nt.Layers { if ly.Off { continue @@ -224,8 +230,9 @@ func (nt *Network) SpkSt2(ctx *Context) { //////// Learn methods // DWt computes the weight change (learning) based on current running-average activation values -func (nt *Network) DWt(ctx *Context) { - nix := GetNetworkIxs(0) +func (nt *Network) DWt() { + nix := nt.NetIxs() + ctx := nt.Context() sd := int(nix.NSyns * ctx.NData) RunDWtSyn(sd) RunDWtFromDiSyn(int(nix.NSyns)) @@ -233,13 +240,40 @@ func (nt *Network) DWt(ctx *Context) { // WtFromDWt updates the weights from delta-weight changes. 
// Also does ctx.SlowInc() and calls SlowAdapt at SlowInterval -func (nt *Network) WtFromDWt(ctx *Context) { - nix := GetNetworkIxs(0) +func (nt *Network) WtFromDWt() { + nix := nt.NetIxs() + ctx := nt.Context() RunDWtSubMeanPath(int(nix.NPaths)) RunWtFromDWtSyn(int(nix.NSyns)) if ctx.SlowInc() { - nt.SlowAdapt(ctx) + nt.SlowAdapt() + } +} + +// SlowAdapt is the layer-level slow adaptation functions: Synaptic scaling, +// and adapting inhibition +func (nt *Network) SlowAdapt() { + // note: for now doing all this slow stuff CPU-side + // These Sync calls always check if GPU is On + // nt.GPU.SyncAllFromGPU() // todo: + + // todo: convert this to GPU mode + + ctx := nt.Context() + for _, ly := range nt.Layers { + if ly.Off { + continue + } + ly.SlowAdapt(ctx) + } + for _, pt := range nt.Paths { + pt.SlowAdapt(ctx) } + // nt.LayerMapSeq(func(ly *Layer) { ly.SlowAdapt(ctx) }, "SlowAdapt") + // nt.PathMapSeq(func(pj *Path) { pj.SlowAdapt(ctx) }, "SlowAdapt") + + // nt.GPU.SyncAllToGPU() + // nt.GPU.SyncSynCaToGPU() // was cleared } //gosl:start @@ -326,7 +360,7 @@ func ApplyExtsNeuron(i uint32) { //gosl:kernel di := ctx.DataIndex(i) ni := ctx.ItemIndex(i) li := NeuronIxs.Value(int(NrnLayIndex), int(ni)) - Layers[li].ApplyExtsNeuron(ctx, ni, di) + Layers[li].ApplyExtsNeuron(ni, di) } // MinusPhasePool is the kernel over Pools * Data to @@ -416,28 +450,14 @@ func WtFromDWtSyn(syni uint32) { //gosl:kernel //gosl:end -// SlowAdapt is the layer-level slow adaptation functions: Synaptic scaling, -// and adapting inhibition -func (nt *Network) SlowAdapt(ctx *Context) { - // note: for now doing all this slow stuff CPU-side - // These Sync calls always check if GPU is On - // nt.GPU.SyncAllFromGPU() - - // nt.LayerMapSeq(func(ly *Layer) { ly.SlowAdapt(ctx) }, "SlowAdapt") - // nt.PathMapSeq(func(pj *Path) { pj.SlowAdapt(ctx) }, "SlowAdapt") - - // nt.GPU.SyncAllToGPU() - // nt.GPU.SyncSynCaToGPU() // was cleared -} - -////////////////////////////////////////////////////////////////////////////////////// -// Init methods +//////// Init methods // InitWeights initializes synaptic weights and all other associated long-term state variables // including running-average state values (e.g., layer running average activations etc) -func (nt *Network) InitWeights(ctx *Context) { //types:add +func (nt *Network) InitWeights() { //types:add + ctx := nt.Context() for di := uint32(0); di < ctx.NData; di++ { - nt.Rubicon.Reset(ctx, di) + nt.Rubicon.Reset(di) } nt.BuildPathGBuf() ctx.SlowCtr = 0 @@ -500,7 +520,8 @@ func (nt *Network) InitTopoSWts() { // InitGScale computes the initial scaling factor for synaptic input conductances G, // stored in GScale.Scale, based on sending layer initial activation. -func (nt *Network) InitGScale(ctx *Context) { +func (nt *Network) InitGScale() { + ctx := nt.Context() for _, ly := range nt.Layers { if ly.Off { continue @@ -514,7 +535,9 @@ func (nt *Network) InitGScale(ctx *Context) { // glong = separate decay factor for long-timescale conductances (g) // This is called automatically in NewState, but is avail // here for ad-hoc decay cases. 
-func (nt *Network) DecayState(ctx *Context, decay, glong, ahp float32) { +func (nt *Network) DecayState(decay, glong, ahp float32) { + ctx := nt.Context() + // todo: move to gpu // nt.GPU.SyncStateFromGPU() // note: because we have to sync back, we need to sync from first to be current for _, ly := range nt.Layers { if ly.Off { @@ -530,15 +553,15 @@ func (nt *Network) DecayState(ctx *Context, decay, glong, ahp float32) { // DecayStateByType decays activation state for given layer types // by given proportion e.g., 1 = decay completely, and 0 = decay not at all. // glong = separate decay factor for long-timescale conductances (g) -func (nt *Network) DecayStateByType(ctx *Context, decay, glong, ahp float32, types ...LayerTypes) { - nt.DecayStateLayers(ctx, decay, glong, ahp, nt.LayersByType(types...)...) +func (nt *Network) DecayStateByType(decay, glong, ahp float32, types ...LayerTypes) { + nt.DecayStateLayers(decay, glong, ahp, nt.LayersByType(types...)...) } // DecayStateByClass decays activation state for given class name(s) // by given proportion e.g., 1 = decay completely, and 0 = decay not at all. // glong = separate decay factor for long-timescale conductances (g) -func (nt *Network) DecayStateByClass(ctx *Context, decay, glong, ahp float32, classes ...string) { - nt.DecayStateLayers(ctx, decay, glong, ahp, nt.LayersByClass(classes...)...) +func (nt *Network) DecayStateByClass(decay, glong, ahp float32, classes ...string) { + nt.DecayStateLayers(decay, glong, ahp, nt.LayersByClass(classes...)...) } // DecayStateLayers decays activation state for given layers @@ -547,7 +570,9 @@ func (nt *Network) DecayStateByClass(ctx *Context, decay, glong, ahp float32, cl // If this is not being called at the start, around NewState call, // then you should also call: nt.GPU.SyncGBufToGPU() // to zero the GBuf values which otherwise will persist spikes in flight. -func (nt *Network) DecayStateLayers(ctx *Context, decay, glong, ahp float32, layers ...string) { +func (nt *Network) DecayStateLayers(decay, glong, ahp float32, layers ...string) { + ctx := nt.Context() + // todo: move to gpu // nt.GPU.SyncStateFromGPU() // note: because we have to sync back, we need to sync from first to be current for _, lynm := range layers { ly := nt.LayerByName(lynm) @@ -562,21 +587,24 @@ func (nt *Network) DecayStateLayers(ctx *Context, decay, glong, ahp float32, lay } // InitActs fully initializes activation state -- not automatically called -func (nt *Network) InitActs(ctx *Context) { //types:add +func (nt *Network) InitActs() { //types:add + // todo: move to gpu + ctx := nt.Context() for _, ly := range nt.Layers { if ly.Off { continue } ly.InitActs(ctx) } - // nt.GPU.SyncStateToGPU() + // nt.GPU.SyncStateToGPU() // todo: // nt.GPU.SyncGBufToGPU() // zeros everyone } // UpdateExtFlags updates the neuron flags for external input based on current // layer Type field -- call this if the Type has changed since the last // ApplyExt* method call. -func (nt *Network) UpdateExtFlags(ctx *Context) { +func (nt *Network) UpdateExtFlags() { + ctx := nt.Context() for _, ly := range nt.Layers { if ly.Off { continue @@ -586,7 +614,7 @@ func (nt *Network) UpdateExtFlags(ctx *Context) { } // SynFail updates synaptic failure -func (nt *Network) SynFail(ctx *Context) { +func (nt *Network) SynFail() { // todo: // nt.PathMapSeq(func(pj *Path) { pj.SynFail(ctx) }, "SynFail") } @@ -644,7 +672,7 @@ func (nt *Network) LayersSetOff(off bool) { // UnLesionNeurons unlesions neurons in all layers in the network. 
// Provides a clean starting point for subsequent lesion experiments. -func (nt *Network) UnLesionNeurons(ctx *Context) { +func (nt *Network) UnLesionNeurons() { for _, ly := range nt.Layers { // if ly.Off { // keep all sync'd // @@ -654,8 +682,7 @@ func (nt *Network) UnLesionNeurons(ctx *Context) { } } -////////////////////////////////////////////////////////////////////////////////////// -// Methods used in MPI computation, which don't depend on MPI specifically +//////// Methods used in MPI computation, which don't depend on MPI specifically // CollectDWts writes all of the synaptic DWt values to given dwts slice // which is pre-allocated to given nwts size if dwts is nil, @@ -663,7 +690,7 @@ func (nt *Network) UnLesionNeurons(ctx *Context) { // dwts can be passed next time around. // Used for MPI sharing of weight changes across processors. // This calls SyncSynapsesFromGPU() (nop if not GPU) first. -func (nt *Network) CollectDWts(ctx *Context, dwts *[]float32) bool { +func (nt *Network) CollectDWts(dwts *[]float32) bool { // nt.GPU.SyncSynapsesFromGPU() idx := 0 made := false @@ -723,7 +750,7 @@ func (nt *Network) CollectDWts(ctx *Context, dwts *[]float32) bool { // navg is the number of processors aggregated in these dwts -- some variables need to be // averaged instead of summed (e.g., ActAvg) // This calls SyncSynapsesToGPU() (nop if not GPU) after. -func (nt *Network) SetDWts(ctx *Context, dwts []float32, navg int) { +func (nt *Network) SetDWts(dwts []float32, navg int) { idx := 0 davg := 1 / float32(navg) for li, ly := range nt.Layers { @@ -762,86 +789,3 @@ func (nt *Network) SetDWts(ctx *Context, dwts []float32, navg int) { } // nt.GPU.SyncSynapsesToGPU() // gpu will use dwts to update } - -////////////////////////////////////////////////////////////////////////////////////// -// Misc Reports / Threading Allocation - -// SizeReport returns a string reporting the size of each layer and pathway -// in the network, and total memory footprint. -// If detail flag is true, details per layer, pathway is included. -func (nt *Network) SizeReport(detail bool) string { - var b strings.Builder - - varBytes := 4 - synVarBytes := 4 - nix := nt.NetIxs() - maxData := int(nix.MaxData) - memNeuron := int(NeuronVarsN)*maxData*varBytes + int(NeuronAvgVarsN)*varBytes + int(NeuronIndexVarsN)*varBytes - memSynapse := int(SynapseVarsN)*varBytes + int(SynapseTraceVarsN)*maxData*varBytes + int(SynapseIndexVarsN)*varBytes - - globalProjIndexes := 0 - - for _, ly := range nt.Layers { - if detail { - nn := int(ly.NNeurons) - // Sizeof returns size of struct in bytes - nrnMem := nn * memNeuron - fmt.Fprintf(&b, "%14s:\t Neurons: %d\t NeurMem: %v \t Sends To:\n", ly.Name, nn, - (datasize.Size)(nrnMem).String()) - } - for _, pj := range ly.SendPaths { - // We only calculate the size of the important parts of the proj struct: - // 1. Synapse slice (consists of Synapse struct) - // 2. 
RecvConIndex + RecvSynIndex + SendConIndex (consists of int32 indices = 4B) - // - // Everything else (like eg the GBuf) is not included in the size calculation, as their size - // doesn't grow quadratically with the number of neurons, and hence pales when compared to the synapses - // It's also useful to run a -memprofile=mem.prof to validate actual memory usage - projMemIndexes := len(pj.RecvConIndex)*varBytes + len(pj.RecvSynIndex)*varBytes + len(pj.SendConIndex)*varBytes - globalProjIndexes += projMemIndexes - if detail { - nSyn := int(pj.NSyns) - synMem := nSyn*memSynapse + projMemIndexes - fmt.Fprintf(&b, "\t%14s:\t Syns: %d\t SynnMem: %v\n", pj.Recv.Name, - nSyn, (datasize.Size)(synMem).String()) - } - } - } - - nrnMem := (nt.Neurons.Len() + nt.NeuronAvgs.Len() + nt.NeuronIxs.Len()) * varBytes - synIndexMem := nt.SynapseIxs.Len() * varBytes - synWtMem := nt.Synapses.Len() * synVarBytes - synCaMem := nt.SynapseTraces.Len() * synVarBytes - - fmt.Fprintf(&b, "\n\n%14s:\t Neurons: %d\t NeurMem: %v \t Syns: %d \t SynIndexes: %v \t SynWts: %v \t SynCa: %v\n", - nt.Name, nix.NNeurons, (datasize.Size)(nrnMem).String(), nix.NSyns, - (datasize.Size)(synIndexMem).String(), (datasize.Size)(synWtMem).String(), (datasize.Size)(synCaMem).String()) - return b.String() -} - -func (nt *Network) MakeToolbar(p *tree.Plan) { - tree.Add(p, func(w *core.FuncButton) { - w.SetFunc(nt.ShowAllGlobals).SetText("Global Vars").SetIcon(icons.Info) - }) - tree.Add(p, func(w *core.FuncButton) { - w.SetFunc(nt.SaveWeightsJSON). - SetText("Save Weights").SetIcon(icons.Save) - w.Args[0].SetTag(`extension:".wts,.wts.gz"`) - }) - tree.Add(p, func(w *core.FuncButton) { - w.SetFunc(nt.OpenWeightsJSON).SetText("Open Weights").SetIcon(icons.Open) - w.Args[0].SetTag(`extension:".wts,.wts.gz"`) - }) - - tree.Add(p, func(w *core.Separator) {}) - - tree.Add(p, func(w *core.FuncButton) { - w.SetFunc(nt.Build).SetIcon(icons.Reset) - }) - tree.Add(p, func(w *core.FuncButton) { - w.SetFunc(nt.InitWeights).SetIcon(icons.Reset) - }) - tree.Add(p, func(w *core.FuncButton) { - w.SetFunc(nt.InitActs).SetIcon(icons.Reset) - }) -} diff --git a/axon/network.goal b/axon/network.goal index 4fd17661..faeba58d 100644 --- a/axon/network.goal +++ b/axon/network.goal @@ -5,14 +5,8 @@ package axon import ( - "fmt" - "strings" - - "cogentcore.org/core/base/datasize" - "cogentcore.org/core/core" - "cogentcore.org/core/icons" "cogentcore.org/core/tensor" - "cogentcore.org/core/tree" + "github.com/emer/emergent/v2/etime" "github.com/emer/emergent/v2/paths" ) @@ -23,7 +17,7 @@ import ( // to compute one complete algorithmic alpha cycle update. // GlobalsReset resets all global values to 0, for all NData -func GlobalsReset(ctx *Context) { +func GlobalsReset() { nix := GetNetworkIxs(0) for di := uint32(0); di < nix.MaxData; di++ { for vg := GvRew; vg < GlobalScalarVarsN; vg++ { @@ -41,11 +35,13 @@ func GlobalsReset(ctx *Context) { // This is called *before* applying external input data and operates across // all data parallel values. The current Context.NData should be set // properly prior to calling this and subsequent Cycle methods. 
-func (nt *Network) NewState(ctx *Context) { +func (nt *Network) NewState(mode etime.Modes) { // if nt.GPU.On { // todo: this has a bug in neuron-level access in updating SpkPrv // nt.GPU.RunNewState() // return // } + ctx := nt.Context() + ctx.NewState(mode) for _, ly := range nt.Layers { if ly.Off { continue @@ -60,8 +56,8 @@ func (nt *Network) NewState(ctx *Context) { // Cycle runs one cycle of activation updating using threading methods. func (nt *Network) Cycle() { // todo: chunks of 10 cycles - nix := GetNetworkIxs(0) - ctx := GetCtx(0) + nix := nt.NetIxs() + ctx := nt.Context() nd := int(nix.NNeurons * ctx.NData) ld := int(nix.NLayers * ctx.NData) pd := int(nix.NPools * ctx.NData) @@ -96,14 +92,14 @@ func (nt *Network) Cycle() { // InitExt initializes external input state. // Call prior to applying external inputs to layers. -func (nt *Network) InitExt(ctx *Context) { +func (nt *Network) InitExt() { // note: important to do this for GPU // to ensure partial inputs work the same way on CPU and GPU. for _, ly := range nt.Layers { if ly.Off { continue } - ly.InitExt(ctx) + ly.InitExt() } } @@ -111,29 +107,32 @@ func (nt *Network) InitExt(ctx *Context) { // that were set in prior layer-specific ApplyExt calls. // This does nothing on the CPU, but is critical for the GPU, // and should be added to all sims where GPU will be used. -func (nt *Network) ApplyExts(ctx *Context) { +func (nt *Network) ApplyExts() { if !UseGPU { return } - nix := GetNetworkIxs(0) + nix := nt.NetIxs() + ctx := nt.Context() nd := int(nix.NNeurons * ctx.NData) RunApplyExtsNeuron(nd) } // MinusPhase does updating after end of minus phase. -func (nt *Network) MinusPhase(ctx *Context) { - nix := GetNetworkIxs(0) +func (nt *Network) MinusPhase() { + nix := nt.NetIxs() + ctx := nt.Context() nd := int(nix.NNeurons * ctx.NData) pd := int(nix.NPools * ctx.NData) RunMinusPhasePool(pd) RunMinusPhaseNeuron(nd) - nt.MinusPhasePost(ctx) + nt.MinusPhasePost() // todo: // nt.GPU.SyncStateToGPU() } // MinusPhasePost does special CPU post processing. -func (nt *Network) MinusPhasePost(ctx *Context) { +func (nt *Network) MinusPhasePost() { + ctx := nt.Context() for _, ly := range nt.Layers { if ly.Off { continue @@ -144,26 +143,29 @@ func (nt *Network) MinusPhasePost(ctx *Context) { // PlusPhaseStart does updating at the start of the plus phase: // applies Target inputs as External inputs. -func (nt *Network) PlusPhaseStart(ctx *Context) { - nix := GetNetworkIxs(0) +func (nt *Network) PlusPhaseStart() { + nix := nt.NetIxs() + ctx := nt.Context() nd := int(nix.NNeurons * ctx.NData) RunPlusPhaseStartNeuron(nd) } // PlusPhase does updating after end of plus phase -func (nt *Network) PlusPhase(ctx *Context) { - nix := GetNetworkIxs(0) +func (nt *Network) PlusPhase() { + nix := nt.NetIxs() + ctx := nt.Context() nd := int(nix.NNeurons * ctx.NData) pd := int(nix.NPools * ctx.NData) RunPlusPhasePool(pd) RunPlusPhaseNeuron(nd) - nt.PlusPhasePost(ctx) + nt.PlusPhasePost() // todo: // nt.GPU.SyncStateToGPU() } // PlusPhasePost happens on the CPU always. -func (nt *Network) PlusPhasePost(ctx *Context) { +func (nt *Network) PlusPhasePost() { + ctx := nt.Context() for _, ly := range nt.Layers { if ly.Off { continue @@ -175,7 +177,8 @@ func (nt *Network) PlusPhasePost(ctx *Context) { // TargToExt sets external input Ext from target values Target // This is done at end of MinusPhase to allow targets to drive activity in plus phase. // This can be called separately to simulate alpha cycles within theta cycles, for example. 
-func (nt *Network) TargToExt(ctx *Context) { +func (nt *Network) TargToExt() { + ctx := nt.Context() for _, ly := range nt.Layers { if ly.Off { continue @@ -186,7 +189,8 @@ func (nt *Network) TargToExt(ctx *Context) { // ClearTargExt clears external inputs Ext that were set from target values Target. // This can be called to simulate alpha cycles within theta cycles, for example. -func (nt *Network) ClearTargExt(ctx *Context) { +func (nt *Network) ClearTargExt() { + ctx := nt.Context() for _, ly := range nt.Layers { if ly.Off { continue @@ -196,7 +200,8 @@ func (nt *Network) ClearTargExt(ctx *Context) { } // SpkSt1 saves current acts into SpkSt1 (using CaSpkP) -func (nt *Network) SpkSt1(ctx *Context) { +func (nt *Network) SpkSt1() { + ctx := nt.Context() for _, ly := range nt.Layers { if ly.Off { continue @@ -206,7 +211,8 @@ func (nt *Network) SpkSt1(ctx *Context) { } // SpkSt2 saves current acts into SpkSt2 (using CaSpkP) -func (nt *Network) SpkSt2(ctx *Context) { +func (nt *Network) SpkSt2() { + ctx := nt.Context() for _, ly := range nt.Layers { if ly.Off { continue @@ -218,8 +224,9 @@ func (nt *Network) SpkSt2(ctx *Context) { //////// Learn methods // DWt computes the weight change (learning) based on current running-average activation values -func (nt *Network) DWt(ctx *Context) { - nix := GetNetworkIxs(0) +func (nt *Network) DWt() { + nix := nt.NetIxs() + ctx := nt.Context() sd := int(nix.NSyns * ctx.NData) RunDWtSyn(sd) RunDWtFromDiSyn(int(nix.NSyns)) @@ -227,15 +234,43 @@ func (nt *Network) DWt(ctx *Context) { // WtFromDWt updates the weights from delta-weight changes. // Also does ctx.SlowInc() and calls SlowAdapt at SlowInterval -func (nt *Network) WtFromDWt(ctx *Context) { - nix := GetNetworkIxs(0) +func (nt *Network) WtFromDWt() { + nix := nt.NetIxs() + ctx := nt.Context() RunDWtSubMeanPath(int(nix.NPaths)) RunWtFromDWtSyn(int(nix.NSyns)) if ctx.SlowInc() { - nt.SlowAdapt(ctx) + nt.SlowAdapt() + } +} + +// SlowAdapt is the layer-level slow adaptation functions: Synaptic scaling, +// and adapting inhibition +func (nt *Network) SlowAdapt() { + // note: for now doing all this slow stuff CPU-side + // These Sync calls always check if GPU is On + // nt.GPU.SyncAllFromGPU() // todo: + + // todo: convert this to GPU mode + + ctx := nt.Context() + for _, ly := range nt.Layers { + if ly.Off { + continue + } + ly.SlowAdapt(ctx) + } + for _, pt := range nt.Paths { + pt.SlowAdapt(ctx) } + // nt.LayerMapSeq(func(ly *Layer) { ly.SlowAdapt(ctx) }, "SlowAdapt") + // nt.PathMapSeq(func(pj *Path) { pj.SlowAdapt(ctx) }, "SlowAdapt") + + // nt.GPU.SyncAllToGPU() + // nt.GPU.SyncSynCaToGPU() // was cleared } + //gosl:start //////// Kernels for all parallel CPU / GPU compute are here: @@ -320,7 +355,7 @@ func ApplyExtsNeuron(i uint32) { //gosl:kernel di := ctx.DataIndex(i) ni := ctx.ItemIndex(i) li := NeuronIxs[NrnLayIndex, ni] - Layers[li].ApplyExtsNeuron(ctx, ni, di) + Layers[li].ApplyExtsNeuron(ni, di) } // MinusPhasePool is the kernel over Pools * Data to @@ -411,28 +446,14 @@ func WtFromDWtSyn(syni uint32) { //gosl:kernel //gosl:end -// SlowAdapt is the layer-level slow adaptation functions: Synaptic scaling, -// and adapting inhibition -func (nt *Network) SlowAdapt(ctx *Context) { - // note: for now doing all this slow stuff CPU-side - // These Sync calls always check if GPU is On - // nt.GPU.SyncAllFromGPU() - - // nt.LayerMapSeq(func(ly *Layer) { ly.SlowAdapt(ctx) }, "SlowAdapt") - // nt.PathMapSeq(func(pj *Path) { pj.SlowAdapt(ctx) }, "SlowAdapt") - - // nt.GPU.SyncAllToGPU() - // 
nt.GPU.SyncSynCaToGPU() // was cleared -} - -////////////////////////////////////////////////////////////////////////////////////// -// Init methods +//////// Init methods // InitWeights initializes synaptic weights and all other associated long-term state variables // including running-average state values (e.g., layer running average activations etc) -func (nt *Network) InitWeights(ctx *Context) { //types:add +func (nt *Network) InitWeights() { //types:add + ctx := nt.Context() for di := uint32(0); di < ctx.NData; di++ { - nt.Rubicon.Reset(ctx, di) + nt.Rubicon.Reset(di) } nt.BuildPathGBuf() ctx.SlowCtr = 0 @@ -495,7 +516,8 @@ func (nt *Network) InitTopoSWts() { // InitGScale computes the initial scaling factor for synaptic input conductances G, // stored in GScale.Scale, based on sending layer initial activation. -func (nt *Network) InitGScale(ctx *Context) { +func (nt *Network) InitGScale() { + ctx := nt.Context() for _, ly := range nt.Layers { if ly.Off { continue @@ -509,7 +531,9 @@ func (nt *Network) InitGScale(ctx *Context) { // glong = separate decay factor for long-timescale conductances (g) // This is called automatically in NewState, but is avail // here for ad-hoc decay cases. -func (nt *Network) DecayState(ctx *Context, decay, glong, ahp float32) { +func (nt *Network) DecayState(decay, glong, ahp float32) { + ctx := nt.Context() + // todo: move to gpu // nt.GPU.SyncStateFromGPU() // note: because we have to sync back, we need to sync from first to be current for _, ly := range nt.Layers { if ly.Off { @@ -525,15 +549,15 @@ func (nt *Network) DecayState(ctx *Context, decay, glong, ahp float32) { // DecayStateByType decays activation state for given layer types // by given proportion e.g., 1 = decay completely, and 0 = decay not at all. // glong = separate decay factor for long-timescale conductances (g) -func (nt *Network) DecayStateByType(ctx *Context, decay, glong, ahp float32, types ...LayerTypes) { - nt.DecayStateLayers(ctx, decay, glong, ahp, nt.LayersByType(types...)...) +func (nt *Network) DecayStateByType(decay, glong, ahp float32, types ...LayerTypes) { + nt.DecayStateLayers(decay, glong, ahp, nt.LayersByType(types...)...) } // DecayStateByClass decays activation state for given class name(s) // by given proportion e.g., 1 = decay completely, and 0 = decay not at all. // glong = separate decay factor for long-timescale conductances (g) -func (nt *Network) DecayStateByClass(ctx *Context, decay, glong, ahp float32, classes ...string) { - nt.DecayStateLayers(ctx, decay, glong, ahp, nt.LayersByClass(classes...)...) +func (nt *Network) DecayStateByClass(decay, glong, ahp float32, classes ...string) { + nt.DecayStateLayers(decay, glong, ahp, nt.LayersByClass(classes...)...) } // DecayStateLayers decays activation state for given layers @@ -542,7 +566,9 @@ func (nt *Network) DecayStateByClass(ctx *Context, decay, glong, ahp float32, cl // If this is not being called at the start, around NewState call, // then you should also call: nt.GPU.SyncGBufToGPU() // to zero the GBuf values which otherwise will persist spikes in flight. 
-func (nt *Network) DecayStateLayers(ctx *Context, decay, glong, ahp float32, layers ...string) { +func (nt *Network) DecayStateLayers(decay, glong, ahp float32, layers ...string) { + ctx := nt.Context() + // todo: move to gpu // nt.GPU.SyncStateFromGPU() // note: because we have to sync back, we need to sync from first to be current for _, lynm := range layers { ly := nt.LayerByName(lynm) @@ -557,21 +583,24 @@ func (nt *Network) DecayStateLayers(ctx *Context, decay, glong, ahp float32, lay } // InitActs fully initializes activation state -- not automatically called -func (nt *Network) InitActs(ctx *Context) { //types:add +func (nt *Network) InitActs() { //types:add + // todo: move to gpu + ctx := nt.Context() for _, ly := range nt.Layers { if ly.Off { continue } ly.InitActs(ctx) } - // nt.GPU.SyncStateToGPU() + // nt.GPU.SyncStateToGPU() // todo: // nt.GPU.SyncGBufToGPU() // zeros everyone } // UpdateExtFlags updates the neuron flags for external input based on current // layer Type field -- call this if the Type has changed since the last // ApplyExt* method call. -func (nt *Network) UpdateExtFlags(ctx *Context) { +func (nt *Network) UpdateExtFlags() { + ctx := nt.Context() for _, ly := range nt.Layers { if ly.Off { continue @@ -581,7 +610,7 @@ func (nt *Network) UpdateExtFlags(ctx *Context) { } // SynFail updates synaptic failure -func (nt *Network) SynFail(ctx *Context) { +func (nt *Network) SynFail() { // todo: // nt.PathMapSeq(func(pj *Path) { pj.SynFail(ctx) }, "SynFail") } @@ -636,7 +665,7 @@ func (nt *Network) LayersSetOff(off bool) { // UnLesionNeurons unlesions neurons in all layers in the network. // Provides a clean starting point for subsequent lesion experiments. -func (nt *Network) UnLesionNeurons(ctx *Context) { +func (nt *Network) UnLesionNeurons() { for _, ly := range nt.Layers { // if ly.Off { // keep all sync'd // continue @@ -645,8 +674,7 @@ func (nt *Network) UnLesionNeurons(ctx *Context) { } } -////////////////////////////////////////////////////////////////////////////////////// -// Methods used in MPI computation, which don't depend on MPI specifically +//////// Methods used in MPI computation, which don't depend on MPI specifically // CollectDWts writes all of the synaptic DWt values to given dwts slice // which is pre-allocated to given nwts size if dwts is nil, @@ -654,7 +682,7 @@ func (nt *Network) UnLesionNeurons(ctx *Context) { // dwts can be passed next time around. // Used for MPI sharing of weight changes across processors. // This calls SyncSynapsesFromGPU() (nop if not GPU) first. -func (nt *Network) CollectDWts(ctx *Context, dwts *[]float32) bool { +func (nt *Network) CollectDWts(dwts *[]float32) bool { // nt.GPU.SyncSynapsesFromGPU() idx := 0 made := false @@ -714,7 +742,7 @@ func (nt *Network) CollectDWts(ctx *Context, dwts *[]float32) bool { // navg is the number of processors aggregated in these dwts -- some variables need to be // averaged instead of summed (e.g., ActAvg) // This calls SyncSynapsesToGPU() (nop if not GPU) after. 
-func (nt *Network) SetDWts(ctx *Context, dwts []float32, navg int) { +func (nt *Network) SetDWts(dwts []float32, navg int) { idx := 0 davg := 1 / float32(navg) for li, ly := range nt.Layers { @@ -754,84 +782,4 @@ func (nt *Network) SetDWts(ctx *Context, dwts []float32, navg int) { // nt.GPU.SyncSynapsesToGPU() // gpu will use dwts to update } -////////////////////////////////////////////////////////////////////////////////////// -// Misc Reports / Threading Allocation -// SizeReport returns a string reporting the size of each layer and pathway -// in the network, and total memory footprint. -// If detail flag is true, details per layer, pathway is included. -func (nt *Network) SizeReport(detail bool) string { - var b strings.Builder - - varBytes := 4 - synVarBytes := 4 - nix := nt.NetIxs() - maxData := int(nix.MaxData) - memNeuron := int(NeuronVarsN)*maxData*varBytes + int(NeuronAvgVarsN)*varBytes + int(NeuronIndexVarsN)*varBytes - memSynapse := int(SynapseVarsN)*varBytes + int(SynapseTraceVarsN)*maxData*varBytes + int(SynapseIndexVarsN)*varBytes - - globalProjIndexes := 0 - - for _, ly := range nt.Layers { - if detail { - nn := int(ly.NNeurons) - // Sizeof returns size of struct in bytes - nrnMem := nn * memNeuron - fmt.Fprintf(&b, "%14s:\t Neurons: %d\t NeurMem: %v \t Sends To:\n", ly.Name, nn, - (datasize.Size)(nrnMem).String()) - } - for _, pj := range ly.SendPaths { - // We only calculate the size of the important parts of the proj struct: - // 1. Synapse slice (consists of Synapse struct) - // 2. RecvConIndex + RecvSynIndex + SendConIndex (consists of int32 indices = 4B) - // Everything else (like eg the GBuf) is not included in the size calculation, as their size - // doesn't grow quadratically with the number of neurons, and hence pales when compared to the synapses - // It's also useful to run a -memprofile=mem.prof to validate actual memory usage - projMemIndexes := len(pj.RecvConIndex)*varBytes + len(pj.RecvSynIndex)*varBytes + len(pj.SendConIndex)*varBytes - globalProjIndexes += projMemIndexes - if detail { - nSyn := int(pj.NSyns) - synMem := nSyn*memSynapse + projMemIndexes - fmt.Fprintf(&b, "\t%14s:\t Syns: %d\t SynnMem: %v\n", pj.Recv.Name, - nSyn, (datasize.Size)(synMem).String()) - } - } - } - - nrnMem := (nt.Neurons.Len() + nt.NeuronAvgs.Len() + nt.NeuronIxs.Len()) * varBytes - synIndexMem := nt.SynapseIxs.Len() * varBytes - synWtMem := nt.Synapses.Len() * synVarBytes - synCaMem := nt.SynapseTraces.Len() * synVarBytes - - fmt.Fprintf(&b, "\n\n%14s:\t Neurons: %d\t NeurMem: %v \t Syns: %d \t SynIndexes: %v \t SynWts: %v \t SynCa: %v\n", - nt.Name, nix.NNeurons, (datasize.Size)(nrnMem).String(), nix.NSyns, - (datasize.Size)(synIndexMem).String(), (datasize.Size)(synWtMem).String(), (datasize.Size)(synCaMem).String()) - return b.String() -} - -func (nt *Network) MakeToolbar(p *tree.Plan) { - tree.Add(p, func(w *core.FuncButton) { - w.SetFunc(nt.ShowAllGlobals).SetText("Global Vars").SetIcon(icons.Info) - }) - tree.Add(p, func(w *core.FuncButton) { - w.SetFunc(nt.SaveWeightsJSON). 
- SetText("Save Weights").SetIcon(icons.Save) - w.Args[0].SetTag(`extension:".wts,.wts.gz"`) - }) - tree.Add(p, func(w *core.FuncButton) { - w.SetFunc(nt.OpenWeightsJSON).SetText("Open Weights").SetIcon(icons.Open) - w.Args[0].SetTag(`extension:".wts,.wts.gz"`) - }) - - tree.Add(p, func(w *core.Separator) {}) - - tree.Add(p, func(w *core.FuncButton) { - w.SetFunc(nt.Build).SetIcon(icons.Reset) - }) - tree.Add(p, func(w *core.FuncButton) { - w.SetFunc(nt.InitWeights).SetIcon(icons.Reset) - }) - tree.Add(p, func(w *core.FuncButton) { - w.SetFunc(nt.InitActs).SetIcon(icons.Reset) - }) -} diff --git a/axon/networkbase.go b/axon/networkbase.go index a25edcdc..dff92e02 100644 --- a/axon/networkbase.go +++ b/axon/networkbase.go @@ -20,14 +20,18 @@ import ( "math" "os" "path/filepath" + "strings" "time" + "cogentcore.org/core/base/datasize" "cogentcore.org/core/base/slicesx" "cogentcore.org/core/base/timer" "cogentcore.org/core/core" "cogentcore.org/core/goal/gosl/sltensor" + "cogentcore.org/core/icons" "cogentcore.org/core/tensor" "cogentcore.org/core/texteditor" + "cogentcore.org/core/tree" "github.com/emer/emergent/v2/econfig" "github.com/emer/emergent/v2/emer" "github.com/emer/emergent/v2/params" @@ -89,7 +93,12 @@ type NetworkIndexes struct { //gosl:end -// axon.Network implements the Axon spiking model. +// Network implements the Axon spiking model. +// Most of the fields are copied to the global vars, needed for GPU, +// via the SetAsCurrent method, and must be slices or tensors so that +// there is one canonical underlying instance of all such data. +// There are also Layer and Path lists that are used to scaffold the +// building and display of the network, but contain no data. type Network struct { emer.NetworkBase @@ -118,7 +127,6 @@ type Network struct { // timers for each major function (step of processing). FunTimes map[string]*timer.Time `display:"-"` - //// Global state below //////// Params // LayParams are all the layer parameters. [NLayers] @@ -162,7 +170,11 @@ type Network struct { //////// Neuron State - // Ctx is the current context state (one). + // note: A slice is needed even for single elements so that global vars and network + // point to the same underlying instance. + + // Ctx is the context state (one). Other copies of Context can be maintained + // and [SetContext] to update this one, but this instance is the canonical one. Ctx []Context `display:"-"` // Neurons are all the neuron state variables. @@ -239,10 +251,22 @@ type Network struct { SynapseTraces2 tensor.Float32 `display:"-"` } -// Get the network context state +// Context gets the network context state. func (nt *Network) Context() *Context { return &nt.Ctx[0] } func (nt *Network) NetIxs() *NetworkIndexes { return &nt.NetworkIxs[0] } +// SetContext sets the values of the network context, which is the canonical instance. +func (nt *Network) SetContext(ctx *Context) { nt.Ctx[0] = *ctx } + +// SetNData sets the NData in [Context] to given value. +func (nt *Network) SetNData(nData int) { nt.Context().NData = uint32(nData) } + +// SetMaxData sets the MaxData and current NData to the same value. 
+func (nt *Network) SetMaxData(maxData int) { + nt.NetIxs().MaxData = uint32(maxData) + nt.SetNData(maxData) +} + // emer.Network interface methods: func (nt *Network) NumLayers() int { return len(nt.Layers) } func (nt *Network) EmerLayer(idx int) emer.Layer { return nt.Layers[idx] } @@ -252,6 +276,7 @@ func (nt *Network) NParallelData() int { return int(nt.Context().NData func (nt *Network) Init() { nt.NetworkIxs = make([]NetworkIndexes, 1) nt.Ctx = make([]Context, 1) + nt.Context().Defaults() nt.NetIxs().MaxData = 1 NetworkIxs = nt.NetworkIxs // may reference things before build } @@ -654,23 +679,15 @@ func (nt *Network) LateralConnectLayerPath(lay *Layer, pat paths.Pattern, pt *Pa return pt } -// SetMaxData sets the MaxData and current NData for both the Network and the Context -func (nt *Network) SetMaxData(simCtx *Context, maxData int) { - nt.NetIxs().MaxData = uint32(maxData) - simCtx.NData = uint32(maxData) -} - // Build constructs the layer and pathway state based on the layer shapes // and patterns of interconnectivity. -func (nt *Network) Build(simCtx *Context) error { //types:add +func (nt *Network) Build() error { //types:add nix := nt.NetIxs() nt.MakeLayerMaps() if nt.Rubicon.NPosUSs == 0 { - nt.Rubicon.SetNUSs(simCtx, 1, 1) + nt.Rubicon.SetNUSs(1, 1) } nt.Rubicon.Update() - ctx := nt.Context() - *ctx = *simCtx nt.FunTimes = make(map[string]*timer.Time) maxData := int(nix.MaxData) var errs []error @@ -698,7 +715,6 @@ func (nt *Network) Build(simCtx *Context) error { //types:add nix.RubiconNPosUSs = nt.Rubicon.NPosUSs nix.RubiconNNegUSs = nt.Rubicon.NNegUSs - fmt.Println("totPools", totPools) nt.LayParams = make([]LayerParams, nLayers) sltensor.SetShapeSizes(&nt.LayerStates, int(LayerVarsN), nLayers, maxData) sltensor.SetShapeSizes(&nt.Pools, int(PoolVarsN), totPools, maxData) @@ -1031,6 +1047,59 @@ func (nt *Network) CheckSameSize(on *Network) error { return nil } +// SizeReport returns a string reporting the size of each layer and pathway +// in the network, and total memory footprint. +// If detail flag is true, details per layer, pathway is included. +func (nt *Network) SizeReport(detail bool) string { + var b strings.Builder + + varBytes := 4 + synVarBytes := 4 + nix := nt.NetIxs() + maxData := int(nix.MaxData) + memNeuron := int(NeuronVarsN)*maxData*varBytes + int(NeuronAvgVarsN)*varBytes + int(NeuronIndexVarsN)*varBytes + memSynapse := int(SynapseVarsN)*varBytes + int(SynapseTraceVarsN)*maxData*varBytes + int(SynapseIndexVarsN)*varBytes + + globalProjIndexes := 0 + + for _, ly := range nt.Layers { + if detail { + nn := int(ly.NNeurons) + // Sizeof returns size of struct in bytes + nrnMem := nn * memNeuron + fmt.Fprintf(&b, "%14s:\t Neurons: %d\t NeurMem: %v \t Sends To:\n", ly.Name, nn, + (datasize.Size)(nrnMem).String()) + } + for _, pj := range ly.SendPaths { + // We only calculate the size of the important parts of the proj struct: + // 1. Synapse slice (consists of Synapse struct) + // 2. 
RecvConIndex + RecvSynIndex + SendConIndex (consists of int32 indices = 4B) + // + // Everything else (like eg the GBuf) is not included in the size calculation, as their size + // doesn't grow quadratically with the number of neurons, and hence pales when compared to the synapses + // It's also useful to run a -memprofile=mem.prof to validate actual memory usage + projMemIndexes := len(pj.RecvConIndex)*varBytes + len(pj.RecvSynIndex)*varBytes + len(pj.SendConIndex)*varBytes + globalProjIndexes += projMemIndexes + if detail { + nSyn := int(pj.NSyns) + synMem := nSyn*memSynapse + projMemIndexes + fmt.Fprintf(&b, "\t%14s:\t Syns: %d\t SynnMem: %v\n", pj.Recv.Name, + nSyn, (datasize.Size)(synMem).String()) + } + } + } + + nrnMem := (nt.Neurons.Len() + nt.NeuronAvgs.Len() + nt.NeuronIxs.Len()) * varBytes + synIndexMem := nt.SynapseIxs.Len() * varBytes + synWtMem := nt.Synapses.Len() * synVarBytes + synCaMem := nt.SynapseTraces.Len() * synVarBytes + + fmt.Fprintf(&b, "\n\n%14s:\t Neurons: %d\t NeurMem: %v \t Syns: %d \t SynIndexes: %v \t SynWts: %v \t SynCa: %v\n", + nt.Name, nix.NNeurons, (datasize.Size)(nrnMem).String(), nix.NSyns, + (datasize.Size)(synIndexMem).String(), (datasize.Size)(synWtMem).String(), (datasize.Size)(synCaMem).String()) + return b.String() +} + // CopyStateFrom copies entire network state from other network. // Other network must have identical configuration, as this just // does a literal copy of the state values. This is checked @@ -1116,3 +1185,30 @@ func (nt *Network) DiffFrom(ctx *Context, on *Network, maxDiff int) string { } return diffs } + +func (nt *Network) MakeToolbar(p *tree.Plan) { + tree.Add(p, func(w *core.FuncButton) { + w.SetFunc(nt.ShowAllGlobals).SetText("Global Vars").SetIcon(icons.Info) + }) + tree.Add(p, func(w *core.FuncButton) { + w.SetFunc(nt.SaveWeightsJSON). + SetText("Save Weights").SetIcon(icons.Save) + w.Args[0].SetTag(`extension:".wts,.wts.gz"`) + }) + tree.Add(p, func(w *core.FuncButton) { + w.SetFunc(nt.OpenWeightsJSON).SetText("Open Weights").SetIcon(icons.Open) + w.Args[0].SetTag(`extension:".wts,.wts.gz"`) + }) + + tree.Add(p, func(w *core.Separator) {}) + + tree.Add(p, func(w *core.FuncButton) { + w.SetFunc(nt.Build).SetIcon(icons.Reset) + }) + tree.Add(p, func(w *core.FuncButton) { + w.SetFunc(nt.InitWeights).SetIcon(icons.Reset) + }) + tree.Add(p, func(w *core.FuncButton) { + w.SetFunc(nt.InitActs).SetIcon(icons.Reset) + }) +} diff --git a/axon/networkbase.goal b/axon/networkbase.goal index 8e9a2afa..d141984d 100644 --- a/axon/networkbase.goal +++ b/axon/networkbase.goal @@ -18,14 +18,18 @@ import ( "math" "os" "path/filepath" + "strings" "time" + "cogentcore.org/core/base/datasize" "cogentcore.org/core/base/slicesx" "cogentcore.org/core/base/timer" "cogentcore.org/core/core" "cogentcore.org/core/goal/gosl/sltensor" + "cogentcore.org/core/icons" "cogentcore.org/core/tensor" "cogentcore.org/core/texteditor" + "cogentcore.org/core/tree" "github.com/emer/emergent/v2/econfig" "github.com/emer/emergent/v2/emer" "github.com/emer/emergent/v2/params" @@ -87,7 +91,12 @@ type NetworkIndexes struct { //gosl:end -// axon.Network implements the Axon spiking model. +// Network implements the Axon spiking model. +// Most of the fields are copied to the global vars, needed for GPU, +// via the SetAsCurrent method, and must be slices or tensors so that +// there is one canonical underlying instance of all such data. +// There are also Layer and Path lists that are used to scaffold the +// building and display of the network, but contain no data. 
type Network struct { emer.NetworkBase @@ -116,7 +125,6 @@ type Network struct { // timers for each major function (step of processing). FunTimes map[string]*timer.Time `display:"-"` - //// Global state below //////// Params // LayParams are all the layer parameters. [NLayers] @@ -160,7 +168,11 @@ type Network struct { //////// Neuron State - // Ctx is the current context state (one). + // note: A slice is needed even for single elements so that global vars and network + // point to the same underlying instance. + + // Ctx is the context state (one). Other copies of Context can be maintained + // and [SetContext] to update this one, but this instance is the canonical one. Ctx []Context `display:"-"` // Neurons are all the neuron state variables. @@ -236,10 +248,22 @@ type Network struct { SynapseTraces2 tensor.Float32 `display:"-"` } -// Get the network context state +// Context gets the network context state. func (nt *Network) Context() *Context { return &nt.Ctx[0] } func (nt *Network) NetIxs() *NetworkIndexes { return &nt.NetworkIxs[0] } +// SetContext sets the values of the network context, which is the canonical instance. +func (nt *Network) SetContext(ctx *Context) { nt.Ctx[0] = *ctx } + +// SetNData sets the NData in [Context] to given value. +func (nt *Network) SetNData(nData int) { nt.Context().NData = uint32(nData) } + +// SetMaxData sets the MaxData and current NData to the same value. +func (nt *Network) SetMaxData(maxData int) { + nt.NetIxs().MaxData = uint32(maxData) + nt.SetNData(maxData) +} + // emer.Network interface methods: func (nt *Network) NumLayers() int { return len(nt.Layers) } func (nt *Network) EmerLayer(idx int) emer.Layer { return nt.Layers[idx] } @@ -249,6 +273,7 @@ func (nt *Network) NParallelData() int { return int(nt.Context().NData func (nt *Network) Init() { nt.NetworkIxs = make([]NetworkIndexes, 1) nt.Ctx = make([]Context, 1) + nt.Context().Defaults() nt.NetIxs().MaxData = 1 NetworkIxs = nt.NetworkIxs // may reference things before build } @@ -651,23 +676,15 @@ func (nt *Network) LateralConnectLayerPath(lay *Layer, pat paths.Pattern, pt *Pa return pt } -// SetMaxData sets the MaxData and current NData for both the Network and the Context -func (nt *Network) SetMaxData(simCtx *Context, maxData int) { - nt.NetIxs().MaxData = uint32(maxData) - simCtx.NData = uint32(maxData) -} - // Build constructs the layer and pathway state based on the layer shapes // and patterns of interconnectivity. -func (nt *Network) Build(simCtx *Context) error { //types:add +func (nt *Network) Build() error { //types:add nix := nt.NetIxs() nt.MakeLayerMaps() if nt.Rubicon.NPosUSs == 0 { - nt.Rubicon.SetNUSs(simCtx, 1, 1) + nt.Rubicon.SetNUSs(1, 1) } nt.Rubicon.Update() - ctx := nt.Context() - *ctx = *simCtx nt.FunTimes = make(map[string]*timer.Time) maxData := int(nix.MaxData) var errs []error @@ -695,7 +712,6 @@ func (nt *Network) Build(simCtx *Context) error { //types:add nix.RubiconNPosUSs = nt.Rubicon.NPosUSs nix.RubiconNNegUSs = nt.Rubicon.NNegUSs - fmt.Println("totPools", totPools) nt.LayParams = make([]LayerParams, nLayers) sltensor.SetShapeSizes(&nt.LayerStates, int(LayerVarsN), nLayers, maxData) sltensor.SetShapeSizes(&nt.Pools, int(PoolVarsN), totPools, maxData) @@ -1028,6 +1044,58 @@ func (nt *Network) CheckSameSize(on *Network) error { return nil } +// SizeReport returns a string reporting the size of each layer and pathway +// in the network, and total memory footprint. +// If detail flag is true, details per layer, pathway is included. 
+func (nt *Network) SizeReport(detail bool) string { + var b strings.Builder + + varBytes := 4 + synVarBytes := 4 + nix := nt.NetIxs() + maxData := int(nix.MaxData) + memNeuron := int(NeuronVarsN)*maxData*varBytes + int(NeuronAvgVarsN)*varBytes + int(NeuronIndexVarsN)*varBytes + memSynapse := int(SynapseVarsN)*varBytes + int(SynapseTraceVarsN)*maxData*varBytes + int(SynapseIndexVarsN)*varBytes + + globalProjIndexes := 0 + + for _, ly := range nt.Layers { + if detail { + nn := int(ly.NNeurons) + // Sizeof returns size of struct in bytes + nrnMem := nn * memNeuron + fmt.Fprintf(&b, "%14s:\t Neurons: %d\t NeurMem: %v \t Sends To:\n", ly.Name, nn, + (datasize.Size)(nrnMem).String()) + } + for _, pj := range ly.SendPaths { + // We only calculate the size of the important parts of the proj struct: + // 1. Synapse slice (consists of Synapse struct) + // 2. RecvConIndex + RecvSynIndex + SendConIndex (consists of int32 indices = 4B) + // Everything else (like eg the GBuf) is not included in the size calculation, as their size + // doesn't grow quadratically with the number of neurons, and hence pales when compared to the synapses + // It's also useful to run a -memprofile=mem.prof to validate actual memory usage + projMemIndexes := len(pj.RecvConIndex)*varBytes + len(pj.RecvSynIndex)*varBytes + len(pj.SendConIndex)*varBytes + globalProjIndexes += projMemIndexes + if detail { + nSyn := int(pj.NSyns) + synMem := nSyn*memSynapse + projMemIndexes + fmt.Fprintf(&b, "\t%14s:\t Syns: %d\t SynnMem: %v\n", pj.Recv.Name, + nSyn, (datasize.Size)(synMem).String()) + } + } + } + + nrnMem := (nt.Neurons.Len() + nt.NeuronAvgs.Len() + nt.NeuronIxs.Len()) * varBytes + synIndexMem := nt.SynapseIxs.Len() * varBytes + synWtMem := nt.Synapses.Len() * synVarBytes + synCaMem := nt.SynapseTraces.Len() * synVarBytes + + fmt.Fprintf(&b, "\n\n%14s:\t Neurons: %d\t NeurMem: %v \t Syns: %d \t SynIndexes: %v \t SynWts: %v \t SynCa: %v\n", + nt.Name, nix.NNeurons, (datasize.Size)(nrnMem).String(), nix.NSyns, + (datasize.Size)(synIndexMem).String(), (datasize.Size)(synWtMem).String(), (datasize.Size)(synCaMem).String()) + return b.String() +} + // CopyStateFrom copies entire network state from other network. // Other network must have identical configuration, as this just // does a literal copy of the state values. This is checked @@ -1113,3 +1181,32 @@ func (nt *Network) DiffFrom(ctx *Context, on *Network, maxDiff int) string { } return diffs } + +func (nt *Network) MakeToolbar(p *tree.Plan) { + tree.Add(p, func(w *core.FuncButton) { + w.SetFunc(nt.ShowAllGlobals).SetText("Global Vars").SetIcon(icons.Info) + }) + tree.Add(p, func(w *core.FuncButton) { + w.SetFunc(nt.SaveWeightsJSON). 
+ SetText("Save Weights").SetIcon(icons.Save) + w.Args[0].SetTag(`extension:".wts,.wts.gz"`) + }) + tree.Add(p, func(w *core.FuncButton) { + w.SetFunc(nt.OpenWeightsJSON).SetText("Open Weights").SetIcon(icons.Open) + w.Args[0].SetTag(`extension:".wts,.wts.gz"`) + }) + + tree.Add(p, func(w *core.Separator) {}) + + tree.Add(p, func(w *core.FuncButton) { + w.SetFunc(nt.Build).SetIcon(icons.Reset) + }) + tree.Add(p, func(w *core.FuncButton) { + w.SetFunc(nt.InitWeights).SetIcon(icons.Reset) + }) + tree.Add(p, func(w *core.FuncButton) { + w.SetFunc(nt.InitActs).SetIcon(icons.Reset) + }) +} + + diff --git a/axon/networkbase_test.go b/axon/networkbase_test.go index 7d4008c0..019040f8 100644 --- a/axon/networkbase_test.go +++ b/axon/networkbase_test.go @@ -31,10 +31,10 @@ func TestDefaults(t *testing.T) { net.ConnectLayers(input, hidden, full, ForwardPath) net.BidirConnectLayers(hidden, output, full) - ctx := NewContext() - assert.Nil(t, net.Build(ctx)) + assert.Nil(t, net.Build()) net.Defaults() - net.InitWeights(ctx) + net.InitWeights() + ctx := net.Context() assert.Equal(t, 100, int(ctx.SlowInterval)) assert.Equal(t, 0, int(ctx.SlowCtr)) @@ -227,8 +227,7 @@ var stdWeights = `{ func TestSaveWeights(t *testing.T) { var b bytes.Buffer - ctx := NewContext() - testNet := newTestNet(ctx, 1) + testNet := newTestNet(1) err := testNet.WriteWeightsJSON(&b) if err != nil { t.Error(err.Error()) @@ -236,7 +235,7 @@ func TestSaveWeights(t *testing.T) { assert.Equal(t, stdWeights, string(b.Bytes())) // fmt.Println(string(b.Bytes())) - loadNet := newTestNet(ctx, 1) + loadNet := newTestNet(1) err = loadNet.ReadWeightsJSON(&b) if err != nil { t.Error(err.Error()) diff --git a/axon/pool.go b/axon/pool.go index 01b0b619..f8c0a9da 100644 --- a/axon/pool.go +++ b/axon/pool.go @@ -9,7 +9,6 @@ package axon import ( "cogentcore.org/core/base/atomicx" "cogentcore.org/core/math32" - "fmt" "github.com/emer/axon/v2/fsfffb" "log" "sync/atomic" @@ -281,7 +280,6 @@ func PoolPoolGi(ctx *Context, pi, di uint32) { return } li := PoolsInt.Value(int(PoolLayerIdx), int(pi), int(di)) - fmt.Println(li, pi, di) PoolAvgMaxCalc(pi, di) PoolInhibIntToRaw(pi, di) ly := GetLayers(uint32(li)) diff --git a/axon/pool.goal b/axon/pool.goal index c3833ea2..115a54a4 100644 --- a/axon/pool.goal +++ b/axon/pool.goal @@ -5,7 +5,6 @@ package axon import ( - "fmt" "log" "sync/atomic" "cogentcore.org/core/base/atomicx" @@ -279,7 +278,6 @@ func PoolPoolGi(ctx *Context, pi, di uint32) { return } li := PoolsInt[PoolLayerIdx, pi, di] - fmt.Println(li, pi, di) PoolAvgMaxCalc(pi, di) PoolInhibIntToRaw(pi, di) ly := GetLayers(uint32(li)) diff --git a/axon/pool_test.go b/axon/pool_test.go index 897fe92c..ee5e6ae8 100644 --- a/axon/pool_test.go +++ b/axon/pool_test.go @@ -8,6 +8,7 @@ import ( "os" "testing" + "github.com/emer/emergent/v2/etime" "github.com/emer/emergent/v2/params" "github.com/emer/emergent/v2/paths" ) @@ -99,10 +100,10 @@ var PoolParamSets = params.Sets{ }, } -func newPoolTestNet(ctx *Context, nData int) *Network { +func newPoolTestNet(nData int) *Network { testNet := NewNetwork("testNet") testNet.SetRandSeed(42) // critical for ActAvg values - testNet.SetMaxData(ctx, nData) + testNet.SetMaxData(nData) inLay := testNet.AddLayer4D("Input", InputLayer, 4, 1, 1, 4) hidLay := testNet.AddLayer4D("Hidden", SuperLayer, 4, 1, 1, 4) // note: tried with up to 400 -- no diff @@ -113,12 +114,11 @@ func newPoolTestNet(ctx *Context, nData int) *Network { testNet.ConnectLayers(hidLay, outLay, paths.NewOneToOne(), ForwardPath) testNet.ConnectLayers(outLay, 
hidLay, paths.NewOneToOne(), BackPath) - testNet.Build(ctx) - ctx.NData = uint32(nData) + testNet.Build() testNet.Defaults() testNet.ApplyParams(PoolParamSets["Base"], false) // false) // true) // no msg - testNet.InitWeights(ctx) // get GScale here - testNet.NewState(ctx) + testNet.InitWeights() // get GScale here + testNet.NewState(etime.Train) return testNet } @@ -129,7 +129,7 @@ func TestPoolGPUDiffsLayerOnly(t *testing.T) { } cpuValues := netDebugAct(t, "LayerOnly", false, false, 1, true) gpuValues := netDebugAct(t, "LayerOnly", false, true, 1, true) - ReportValDiffs(t, Tol4, cpuValues, gpuValues, "CPU", "GPU", nil) + ReportValDiffs(t, Tol4, cpuValues, gpuValues, "CPU", "GPU") } func TestPoolGPUDiffsPoolOnly(t *testing.T) { @@ -140,7 +140,7 @@ func TestPoolGPUDiffsPoolOnly(t *testing.T) { cpuValues := netDebugAct(t, "PoolOnly", false, false, 1, true) gpuValues := netDebugAct(t, "PoolOnly", false, true, 1, true) // GPU doesn't update layer Gi, GiOrig.. - ReportValDiffs(t, Tol3, cpuValues, gpuValues, "CPU", "GPU", []string{"Gi", "GiOrig"}) + ReportValDiffs(t, Tol3, cpuValues, gpuValues, "CPU", "GPU", "Gi", "GiOrig") } func TestPoolGPUDiffsLayerPoolSame(t *testing.T) { @@ -150,7 +150,7 @@ func TestPoolGPUDiffsLayerPoolSame(t *testing.T) { } cpuValues := netDebugAct(t, "LayerPoolSame", false, false, 1, true) gpuValues := netDebugAct(t, "LayerPoolSame", false, true, 1, true) - ReportValDiffs(t, Tol3, cpuValues, gpuValues, "CPU", "GPU", nil) + ReportValDiffs(t, Tol3, cpuValues, gpuValues, "CPU", "GPU") } func TestPoolGPUDiffsLayerWeakPoolStrong(t *testing.T) { @@ -160,7 +160,7 @@ func TestPoolGPUDiffsLayerWeakPoolStrong(t *testing.T) { } cpuValues := netDebugAct(t, "LayerWeakPoolStrong", false, false, 1, true) gpuValues := netDebugAct(t, "LayerWeakPoolStrong", false, true, 1, true) - ReportValDiffs(t, Tol3, cpuValues, gpuValues, "CPU", "GPU", nil) + ReportValDiffs(t, Tol3, cpuValues, gpuValues, "CPU", "GPU") } func TestPoolGPUDiffsLayerStrongPoolWeak(t *testing.T) { @@ -170,17 +170,16 @@ func TestPoolGPUDiffsLayerStrongPoolWeak(t *testing.T) { } cpuValues := netDebugAct(t, "LayerStrongPoolWeak", false, false, 1, true) gpuValues := netDebugAct(t, "LayerStrongPoolWeak", false, true, 1, true) - ReportValDiffs(t, Tol3, cpuValues, gpuValues, "CPU", "GPU", nil) + ReportValDiffs(t, Tol3, cpuValues, gpuValues, "CPU", "GPU") } // netDebugAct prints selected values (if printValues), // and also returns a map of all values and variables that can be used for a more // fine-grained diff test, e.g., see the GPU version. 
func netDebugAct(t *testing.T, params string, printValues bool, gpu bool, nData int, initWts bool) map[string]float32 { - ctx := NewContext() - testNet := newPoolTestNet(ctx, nData) + testNet := newPoolTestNet(nData) testNet.ApplyParams(PoolParamSets["FullDecay"], false) testNet.ApplyParams(PoolParamSets[params], false) - return RunDebugAct(t, ctx, testNet, printValues, gpu, initWts) + return RunDebugAct(t, testNet, printValues, gpu, initWts) } diff --git a/axon/rubicon.go b/axon/rubicon.go index 16c8c329..46c8690d 100644 --- a/axon/rubicon.go +++ b/axon/rubicon.go @@ -74,19 +74,19 @@ func (dp *DriveParams) Update() { } // VarToZero sets all values of given drive-sized variable to 0 -func (dp *DriveParams) VarToZero(ctx *Context, di uint32, gvar GlobalVectorVars) { +func (dp *DriveParams) VarToZero(di uint32, gvar GlobalVectorVars) { for i := range dp.Base { GlobalVectors.Set(0, int(gvar), int(i), int(di)) } } // ToZero sets all drives to 0 -func (dp *DriveParams) ToZero(ctx *Context, di uint32) { - dp.VarToZero(ctx, di, GvDrives) +func (dp *DriveParams) ToZero(di uint32) { + dp.VarToZero(di, GvDrives) } // ToBaseline sets all drives to their baseline levels -func (dp *DriveParams) ToBaseline(ctx *Context, di uint32) { +func (dp *DriveParams) ToBaseline(di uint32) { for i := range dp.Base { GlobalVectors.Set(dp.Base[i], int(GvDrives), int(i), int(di)) } @@ -94,7 +94,7 @@ func (dp *DriveParams) ToBaseline(ctx *Context, di uint32) { // AddTo increments drive by given amount, subject to 0-1 range clamping. // Returns new val. -func (dp *DriveParams) AddTo(ctx *Context, di uint32, drv uint32, delta float32) float32 { +func (dp *DriveParams) AddTo(di uint32, drv uint32, delta float32) float32 { dv := GlobalVectors.Value(int(GvDrives), int(drv), int(di)) + delta if dv > 1 { dv = 1 @@ -107,7 +107,7 @@ func (dp *DriveParams) AddTo(ctx *Context, di uint32, drv uint32, delta float32) // SoftAdd increments drive by given amount, using soft-bounding to 0-1 extremes. // if delta is positive, multiply by 1-val, else val. Returns new val. -func (dp *DriveParams) SoftAdd(ctx *Context, di uint32, drv uint32, delta float32) float32 { +func (dp *DriveParams) SoftAdd(di uint32, drv uint32, delta float32) float32 { dv := GlobalVectors.Value(int(GvDrives), int(drv), int(di)) if delta > 0 { dv += (1 - dv) * delta @@ -125,7 +125,7 @@ func (dp *DriveParams) SoftAdd(ctx *Context, di uint32, drv uint32, delta float3 // ExpStep updates drive with an exponential step with given dt value // toward given baseline value. -func (dp *DriveParams) ExpStep(ctx *Context, di uint32, drv uint32, dt, base float32) float32 { +func (dp *DriveParams) ExpStep(di uint32, drv uint32, dt, base float32) float32 { dv := GlobalVectors.Value(int(GvDrives), int(drv), int(di)) dv += dt * (base - dv) if dv > 1 { @@ -139,15 +139,15 @@ func (dp *DriveParams) ExpStep(ctx *Context, di uint32, drv uint32, dt, base flo // ExpStepAll updates given drives with an exponential step using dt values // toward baseline values. -func (dp *DriveParams) ExpStepAll(ctx *Context, di uint32) { +func (dp *DriveParams) ExpStepAll(di uint32) { for i := range dp.Base { - dp.ExpStep(ctx, di, uint32(i), dp.Dt[i], dp.Base[i]) + dp.ExpStep(di, uint32(i), dp.Dt[i], dp.Base[i]) } } // EffectiveDrive returns the Max of Drives at given index and DriveMin. // note that index 0 is the novelty / curiosity drive, which doesn't use DriveMin. 
-func (dp *DriveParams) EffectiveDrive(ctx *Context, di uint32, i uint32) float32 { +func (dp *DriveParams) EffectiveDrive(di uint32, i uint32) float32 { if i == 0 { return GlobalVectors.Value(int(GvDrives), int(0), int(di)) } @@ -203,13 +203,13 @@ func (ur *UrgencyParams) UrgeFun(urgency float32) float32 { } // Reset resets the raw urgency back to zero -- at start of new gating event -func (ur *UrgencyParams) Reset(ctx *Context, di uint32) { +func (ur *UrgencyParams) Reset(di uint32) { GlobalScalars.Set(0, int(GvUrgencyRaw), int(di)) GlobalScalars.Set(0, int(GvUrgency), int(di)) } // Urge computes normalized Urge value from Raw, and sets DAtonic from that -func (ur *UrgencyParams) Urge(ctx *Context, di uint32) float32 { +func (ur *UrgencyParams) Urge(di uint32) float32 { urge := ur.UrgeFun(GlobalScalars.Value(int(GvUrgencyRaw), int(di))) if urge < ur.Thr { urge = 0 @@ -220,9 +220,9 @@ func (ur *UrgencyParams) Urge(ctx *Context, di uint32) float32 { } // AddEffort adds an effort increment of urgency and updates the Urge factor -func (ur *UrgencyParams) AddEffort(ctx *Context, di uint32, inc float32) { +func (ur *UrgencyParams) AddEffort(di uint32, inc float32) { GlobalScalars.SetAdd(inc, int(GvUrgencyRaw), int(di)) - ur.Urge(ctx, di) + ur.Urge(di) } ///////////////////////////////////////////////////////// @@ -327,7 +327,7 @@ func (us *USParams) Update() { } // USnegCostFromRaw sets normalized NegUS, Cost values from Raw values -func (us *USParams) USnegCostFromRaw(ctx *Context, di uint32) { +func (us *USParams) USnegCostFromRaw(di uint32) { for i, ng := range us.USnegGains { raw := GlobalVectors.Value(int(GvUSnegRaw), int(i), int(di)) norm := RubiconNormFun(ng * raw) @@ -341,7 +341,7 @@ func (us *USParams) USnegCostFromRaw(ctx *Context, di uint32) { } // USnegToZero sets all values of USneg, USnegRaw to zero -func (us *USParams) USnegToZero(ctx *Context, di uint32) { +func (us *USParams) USnegToZero(di uint32) { for i := range us.USnegGains { GlobalVectors.Set(0, int(GvUSneg), int(i), int(di)) GlobalVectors.Set(0, int(GvUSnegRaw), int(i), int(di)) @@ -349,7 +349,7 @@ func (us *USParams) USnegToZero(ctx *Context, di uint32) { } // CostToZero sets all values of Cost, CostRaw to zero -func (us *USParams) CostToZero(ctx *Context, di uint32) { +func (us *USParams) CostToZero(di uint32) { for i := range us.CostGains { GlobalVectors.Set(0, int(GvCost), int(i), int(di)) GlobalVectors.Set(0, int(GvCostRaw), int(i), int(di)) @@ -357,7 +357,7 @@ func (us *USParams) CostToZero(ctx *Context, di uint32) { } // USposToZero sets all values of USpos to zero -func (us *USParams) USposToZero(ctx *Context, di uint32) { +func (us *USParams) USposToZero(di uint32) { for i := range us.PVposWts { GlobalVectors.Set(0, int(GvUSpos), int(i), int(di)) } @@ -419,7 +419,7 @@ func (lh *LHbParams) Update() { } // Reset resets all LHb vars back to 0 -func (lh *LHbParams) Reset(ctx *Context, di uint32) { +func (lh *LHbParams) Reset(di uint32) { GlobalScalars.Set(0, int(GvLHbDip), int(di)) GlobalScalars.Set(0, int(GvLHbBurst), int(di)) GlobalScalars.Set(0, int(GvLHbPVDA), int(di)) @@ -455,7 +455,7 @@ func (lh *LHbParams) DAFromPVs(pvPos, pvNeg, vsPatchPos, vsPatchPosSum float32) // and PVDA ~= Burst - Dip, for case when there is a primary // positive reward value or a give-up state has triggered. // Returns the overall net reward magnitude, prior to VSPatch discounting. 
-func (lh *LHbParams) DAforUS(ctx *Context, di uint32, pvPos, pvNeg, vsPatchPos, vsPatchPosSum float32) float32 { +func (lh *LHbParams) DAforUS(di uint32, pvPos, pvNeg, vsPatchPos, vsPatchPosSum float32) float32 { burst, dip, da, rew := lh.DAFromPVs(pvPos, pvNeg, vsPatchPos, vsPatchPosSum) GlobalScalars.Set(dip, int(GvLHbDip), int(di)) GlobalScalars.Set(burst, int(GvLHbBurst), int(di)) @@ -472,7 +472,7 @@ func (lh *LHbParams) DAforUS(ctx *Context, di uint32, pvPos, pvNeg, vsPatchPos, // Because the LHb only responds when it decides to GiveUp, // there is no response in this case. // DA is instead driven by CS-based computation, in rubicon_layers.go, VTAParams.VTADA -func (lh *LHbParams) DAforNoUS(ctx *Context, di uint32) float32 { +func (lh *LHbParams) DAforNoUS(di uint32) float32 { GlobalScalars.Set(0, int(GvLHbDip), int(di)) GlobalScalars.Set(0, int(GvLHbBurst), int(di)) GlobalScalars.Set(0, int(GvLHbPVDA), int(di)) @@ -556,7 +556,7 @@ func (gp *GiveUpParams) Prob(cnSum, guSum float32, rnd randx.Rand) (float32, boo // Sums computes the summed weighting factors that drive continue and give up // contributions to the probability function. -func (gp *GiveUpParams) Sums(ctx *Context, di uint32) (cnSum, guSum float32) { +func (gp *GiveUpParams) Sums(di uint32) (cnSum, guSum float32) { negSum := GlobalScalars.Value(int(GvPVnegSum), int(di)) guU := gp.Utility * max(gp.MinUtility, negSum) cnU := gp.Utility * max(gp.MinUtility, GlobalScalars.Value(int(GvPVposEst), int(di))) // todo: var? @@ -696,7 +696,7 @@ func (rp *Rubicon) USnegIndex(simUsIndex int) int { // Two costs (Time, Effort) are also automatically allocated and managed. // The USs specified here need to be managed by the simulation via the SetUS method. // Positive USs each have corresponding Drives. 
-func (rp *Rubicon) SetNUSs(ctx *Context, nPos, nNeg int) { +func (rp *Rubicon) SetNUSs(nPos, nNeg int) { nix := GetNetworkIxs(0) nPos = rp.USposIndex(max(nPos, 1)) nNeg = rp.USnegIndex(max(nNeg, 1)) // ensure at least 1 @@ -711,15 +711,15 @@ func (rp *Rubicon) SetNUSs(ctx *Context, nPos, nNeg int) { } // Reset resets all Rubicon state -func (rp *Rubicon) Reset(ctx *Context, di uint32) { - rp.Drive.ToBaseline(ctx, di) - rp.TimeEffortReset(ctx, di) - rp.Urgency.Reset(ctx, di) - rp.InitUS(ctx, di) - rp.LHb.Reset(ctx, di) - rp.Drive.VarToZero(ctx, di, GvVSPatchD1) - rp.Drive.VarToZero(ctx, di, GvVSPatchD2) - rp.ResetGoalState(ctx, di) +func (rp *Rubicon) Reset(di uint32) { + rp.Drive.ToBaseline(di) + rp.TimeEffortReset(di) + rp.Urgency.Reset(di) + rp.InitUS(di) + rp.LHb.Reset(di) + rp.Drive.VarToZero(di, GvVSPatchD1) + rp.Drive.VarToZero(di, GvVSPatchD2) + rp.ResetGoalState(di) GlobalScalars.Set(0, int(GvVtaDA), int(di)) GlobalScalars.Set(0, int(GvVSMatrixJustGated), int(di)) GlobalScalars.Set(0, int(GvVSMatrixHasGated), int(di)) @@ -728,21 +728,21 @@ func (rp *Rubicon) Reset(ctx *Context, di uint32) { } // InitUS initializes all the USs to zero -func (rp *Rubicon) InitUS(ctx *Context, di uint32) { - rp.USs.USposToZero(ctx, di) - rp.USs.USnegToZero(ctx, di) - rp.USs.CostToZero(ctx, di) +func (rp *Rubicon) InitUS(di uint32) { + rp.USs.USposToZero(di) + rp.USs.USnegToZero(di) + rp.USs.CostToZero(di) GlobalScalars.Set(0, int(GvHasRew), int(di)) GlobalScalars.Set(0, int(GvRew), int(di)) } // InitDrives initializes all the Drives to baseline values (default = 0) -func (rp *Rubicon) InitDrives(ctx *Context, di uint32) { - rp.Drive.ToBaseline(ctx, di) +func (rp *Rubicon) InitDrives(di uint32) { + rp.Drive.ToBaseline(di) } // AddTimeEffort adds a unit of time and an increment of effort -func (rp *Rubicon) AddTimeEffort(ctx *Context, di uint32, effort float32) { +func (rp *Rubicon) AddTimeEffort(di uint32, effort float32) { GlobalScalars.SetAdd(1, int(GvTime), int(di)) tm := GlobalScalars.Value(int(GvTime), int(di)) GlobalVectors.Set(tm, int(GvCostRaw), int(0), int(di)) // time is neg 0 @@ -758,17 +758,17 @@ func (rp *Rubicon) AddTimeEffort(ctx *Context, di uint32, effort float32) { // and Urgency updates otherwise (when not goal engaged) // Call this at the start of the trial, in ApplyRubicon method, // after NewState. 
-func (rp *Rubicon) EffortUrgencyUpdate(ctx *Context, di uint32, effort float32) { +func (rp *Rubicon) EffortUrgencyUpdate(di uint32, effort float32) { if GlobalScalars.Value(int(GvVSMatrixHasGated), int(di)) > 0 { - rp.AddTimeEffort(ctx, di, effort) + rp.AddTimeEffort(di, effort) } else { - rp.Urgency.AddEffort(ctx, di, effort) + rp.Urgency.AddEffort(di, effort) } } // TimeEffortReset resets the raw time and effort back to zero, // at start of new gating event -func (rp *Rubicon) TimeEffortReset(ctx *Context, di uint32) { +func (rp *Rubicon) TimeEffortReset(di uint32) { GlobalScalars.Set(0, int(GvTime), int(di)) GlobalScalars.Set(0, int(GvEffort), int(di)) GlobalVectors.Set(0, int(GvCostRaw), int(0), int(di)) // effort is neg 0 @@ -782,12 +782,12 @@ func (rp *Rubicon) TimeEffortReset(ctx *Context, di uint32) { // This is not called directly in the Rubicon code -- can be used to compute // what the Rubicon code itself will compute -- see LHbPVDA // todo: this is not very meaningful anymore -// func (pp *Rubicon) PVposFromDriveEffort(ctx *Context, usValue, drive, effort float32) float32 { +// func (pp *Rubicon) PVposFromDriveEffort(usValue, drive, effort float32) float32 { // return usValue * drive * (1 - RubiconNormFun(pp.USs.PVnegWts[0]*effort)) // } // RubiconSetDrive sets given Drive to given value -func (rp *Rubicon) SetDrive(ctx *Context, di uint32, dr uint32, val float32) { +func (rp *Rubicon) SetDrive(di uint32, dr uint32, val float32) { GlobalVectors.Set(val, int(GvDrives), int(dr), int(di)) } @@ -795,19 +795,19 @@ func (rp *Rubicon) SetDrive(ctx *Context, di uint32, dr uint32, val float32) { // curiosity sets the strength for the curiosity drive // and drives are strengths of the remaining sim-specified drives, in order. // any drives not so specified are at the InitDrives baseline level. -func (rp *Rubicon) SetDrives(ctx *Context, di uint32, curiosity float32, drives ...float32) { - rp.InitDrives(ctx, di) - rp.SetDrive(ctx, di, 0, curiosity) +func (rp *Rubicon) SetDrives(di uint32, curiosity float32, drives ...float32) { + rp.InitDrives(di) + rp.SetDrive(di, 0, curiosity) for i, v := range drives { - rp.SetDrive(ctx, di, uint32(1+i), v) + rp.SetDrive(di, uint32(1+i), v) } } // DriveUpdate is used when auto-updating drive levels based on US consumption, // which partially satisfies (decrements) corresponding drive, // and on time passing, where drives adapt to their overall baseline levels. -func (rp *Rubicon) DriveUpdate(ctx *Context, di uint32) { - rp.Drive.ExpStepAll(ctx, di) +func (rp *Rubicon) DriveUpdate(di uint32) { + rp.Drive.ExpStepAll(di) nd := rp.NPosUSs for i := uint32(0); i < nd; i++ { us := GlobalVectors.Value(int(GvUSpos), int(i), int(di)) @@ -825,7 +825,7 @@ func (rp *Rubicon) DriveUpdate(ctx *Context, di uint32) { // and sets the global HasRew flag, thus triggering a US learning event. // Note that costs can be used to track negative USs that are not strong // enough to trigger a US learning event. -func (rp *Rubicon) SetUS(ctx *Context, di uint32, valence ValenceTypes, usIndex int, magnitude float32) { +func (rp *Rubicon) SetUS(di uint32, valence ValenceTypes, usIndex int, magnitude float32) { GlobalScalars.Set(1, int(GvHasRew), int(di)) if valence == Positive { usIndex = rp.USposIndex(usIndex) @@ -840,13 +840,13 @@ func (rp *Rubicon) SetUS(ctx *Context, di uint32, valence ValenceTypes, usIndex // ResetGoalState resets all the goal-engaged global values. 
// Critically, this is only called after goal accomplishment, // not after goal gating -- prevents "shortcutting" by re-gating. -func (rp *Rubicon) ResetGoalState(ctx *Context, di uint32) { +func (rp *Rubicon) ResetGoalState(di uint32) { GlobalScalars.Set(0, int(GvVSMatrixHasGated), int(di)) - rp.Urgency.Reset(ctx, di) - rp.TimeEffortReset(ctx, di) - rp.USs.USnegToZero(ctx, di) // all negs restart - rp.USs.CostToZero(ctx, di) - rp.ResetGiveUp(ctx, di) + rp.Urgency.Reset(di) + rp.TimeEffortReset(di) + rp.USs.USnegToZero(di) // all negs restart + rp.USs.CostToZero(di) + rp.ResetGiveUp(di) GlobalScalars.Set(0, int(GvVSPatchPos), int(di)) GlobalScalars.Set(0, int(GvVSPatchPosSum), int(di)) GlobalScalars.Set(0, int(GvVSPatchPosPrev), int(di)) @@ -863,7 +863,7 @@ func (rp *Rubicon) ResetGoalState(ctx *Context, di uint32) { } // ResetGiveUp resets all the give-up related global values. -func (rp *Rubicon) ResetGiveUp(ctx *Context, di uint32) { +func (rp *Rubicon) ResetGiveUp(di uint32) { GlobalScalars.Set(0, int(GvPVposEst), int(di)) GlobalScalars.Set(0, int(GvPVposVar), int(di)) GlobalScalars.Set(0, int(GvGiveUpProb), int(di)) @@ -873,7 +873,7 @@ func (rp *Rubicon) ResetGiveUp(ctx *Context, di uint32) { // NewState is called at very start of new state (trial) of processing. // sets HadRew = HasRew from last trial -- used to then reset various things // after reward. -func (rp *Rubicon) NewState(ctx *Context, di uint32, rnd randx.Rand) { +func (rp *Rubicon) NewState(di uint32, rnd randx.Rand) { hadRewF := GlobalScalars.Value(int(GvHasRew), int(di)) hadRew := num.ToBool(hadRewF) GlobalScalars.Set(hadRewF, int(GvHadRew), int(di)) @@ -884,23 +884,23 @@ func (rp *Rubicon) NewState(ctx *Context, di uint32, rnd randx.Rand) { GlobalScalars.Set(0, int(GvHasRew), int(di)) GlobalScalars.Set(0, int(GvNegUSOutcome), int(di)) - rp.VSPatchNewState(ctx, di) + rp.VSPatchNewState(di) if hadRew { - rp.ResetGoalState(ctx, di) + rp.ResetGoalState(di) } else if GlobalScalars.Value(int(GvVSMatrixJustGated), int(di)) > 0 { GlobalScalars.Set(1, int(GvVSMatrixHasGated), int(di)) - rp.Urgency.Reset(ctx, di) + rp.Urgency.Reset(di) } GlobalScalars.Set(0, int(GvVSMatrixJustGated), int(di)) - rp.USs.USposToZero(ctx, di) // pos USs must be set fresh every time + rp.USs.USposToZero(di) // pos USs must be set fresh every time } // Step does one step (trial) after applying USs, Drives, // and updating Effort. It should be the final call in ApplyRubicon. // Calls PVDA which does all US, PV, LHb, GiveUp updating. -func (rp *Rubicon) Step(ctx *Context, di uint32, rnd randx.Rand) { - rp.PVDA(ctx, di, rnd) +func (rp *Rubicon) Step(di uint32, rnd randx.Rand) { + rp.PVDA(di, rnd) } // SetGoalMaintFromLayer sets the GoalMaint global state variable @@ -908,7 +908,7 @@ func (rp *Rubicon) Step(ctx *Context, di uint32, rnd randx.Rand) { // GoalMaint is normalized 0-1 based on the given max activity level, // with anything out of range clamped to 0-1 range. // Returns (and logs) an error if layer name not found. 
-func (rp *Rubicon) SetGoalMaintFromLayer(ctx *Context, di uint32, net *Network, layName string, maxAct float32) error { +func (rp *Rubicon) SetGoalMaintFromLayer(di uint32, net *Network, layName string, maxAct float32) error { ly := net.LayerByName(layName) if ly == nil { err := fmt.Errorf("SetGoalMaintFromLayer: layer named: %q not found", layName) @@ -929,7 +929,7 @@ func (rp *Rubicon) SetGoalMaintFromLayer(ctx *Context, di uint32, net *Network, // DecodeFromLayer decodes value and variance from the average activity (CaSpkD) // of the given layer name. Use for decoding PVposEst and Var, and PVnegEst and Var -func (rp *Rubicon) DecodeFromLayer(ctx *Context, di uint32, net *Network, layName string) (val, vr float32, err error) { +func (rp *Rubicon) DecodeFromLayer(di uint32, net *Network, layName string) (val, vr float32, err error) { ly := net.LayerByName(layName) if ly == nil { err = fmt.Errorf("DecodeFromLayer: layer named: %q not found", layName) @@ -944,14 +944,14 @@ func (rp *Rubicon) DecodeFromLayer(ctx *Context, di uint32, net *Network, layNam // DecodePVEsts decodes estimated PV outcome values from PVposP and PVnegP // prediction layers, saves in global PVposEst, Var and PVnegEst, Var -func (rp *Rubicon) DecodePVEsts(ctx *Context, di uint32, net *Network) { - posEst, posVar, err := rp.DecodeFromLayer(ctx, di, net, "PVposP") +func (rp *Rubicon) DecodePVEsts(di uint32, net *Network) { + posEst, posVar, err := rp.DecodeFromLayer(di, net, "PVposP") if err == nil { GlobalScalars.Set(posEst, int(GvPVposEst), int(di)) GlobalScalars.Set(posVar, int(GvPVposVar), int(di)) } - negEst, negVar, err := rp.DecodeFromLayer(ctx, di, net, "PVnegP") + negEst, negVar, err := rp.DecodeFromLayer(di, net, "PVnegP") if err == nil { GlobalScalars.Set(negEst, int(GvPVnegEst), int(di)) GlobalScalars.Set(negVar, int(GvPVnegVar), int(di)) @@ -962,7 +962,7 @@ func (rp *Rubicon) DecodePVEsts(ctx *Context, di uint32, net *Network) { // in trial step units, which should decrease to 0 at the goal. // This should be set at the start of every trial. // Also computes the ProgressRate. 
-func (rp *Rubicon) SetGoalDistEst(ctx *Context, di uint32, dist float32) { +func (rp *Rubicon) SetGoalDistEst(di uint32, dist float32) { if GlobalScalars.Value(int(GvVSMatrixHasGated), int(di)) == 0 { GlobalScalars.Set(dist, int(GvGoalDistPrev), int(di)) GlobalScalars.Set(dist, int(GvGoalDistEst), int(di)) @@ -982,7 +982,7 @@ func (rp *Rubicon) SetGoalDistEst(ctx *Context, di uint32, dist float32) { // methods below used in computing Rubicon state, not generally called from sims // HasPosUS returns true if there is at least one non-zero positive US -func (rp *Rubicon) HasPosUS(ctx *Context, di uint32) bool { +func (rp *Rubicon) HasPosUS(di uint32) bool { nd := rp.NPosUSs for i := uint32(0); i < nd; i++ { if GlobalVectors.Value(int(GvUSpos), int(i), int(di)) > 0 { @@ -997,11 +997,11 @@ func (rp *Rubicon) HasPosUS(ctx *Context, di uint32) bool { // its current drive and weighting factor (pvPosSum), // and the normalized version of this sum (PVpos = overall positive PV) // as 1 / (1 + (PVposGain * pvPosSum)) -func (rp *Rubicon) PVpos(ctx *Context, di uint32) (pvPosSum, pvPos float32) { +func (rp *Rubicon) PVpos(di uint32) (pvPosSum, pvPos float32) { nd := rp.NPosUSs wts := rp.USs.PVposWts for i := uint32(0); i < nd; i++ { - pvPosSum += wts[i] * GlobalVectors.Value(int(GvUSpos), int(i), int(di)) * rp.Drive.EffectiveDrive(ctx, di, i) + pvPosSum += wts[i] * GlobalVectors.Value(int(GvUSpos), int(i), int(di)) * rp.Drive.EffectiveDrive(di, i) } pvPos = RubiconNormFun(rp.USs.PVposGain * pvPosSum) return @@ -1012,7 +1012,7 @@ func (rp *Rubicon) PVpos(ctx *Context, di uint32) (pvPosSum, pvPos float32) { // by a weighting factor and summed (usNegSum) // and the normalized version of this sum (PVneg = overall negative PV) // as 1 / (1 + (PVnegGain * PVnegSum)) -func (rp *Rubicon) PVneg(ctx *Context, di uint32) (pvNegSum, pvNeg float32) { +func (rp *Rubicon) PVneg(di uint32) (pvNegSum, pvNeg float32) { nn := rp.NNegUSs wts := rp.USs.PVnegWts for i := uint32(0); i < nn; i++ { @@ -1029,13 +1029,13 @@ func (rp *Rubicon) PVneg(ctx *Context, di uint32) (pvNegSum, pvNeg float32) { // PVsFromUSs updates the current PV summed, weighted, normalized values // from the underlying US values. -func (rp *Rubicon) PVsFromUSs(ctx *Context, di uint32) { - pvPosSum, pvPos := rp.PVpos(ctx, di) +func (rp *Rubicon) PVsFromUSs(di uint32) { + pvPosSum, pvPos := rp.PVpos(di) GlobalScalars.Set(pvPosSum, int(GvPVposSum), int(di)) GlobalScalars.Set(pvPos, int(GvPVpos), int(di)) - GlobalScalars.Set(num.FromBool[float32](rp.HasPosUS(ctx, di)), int(GvHasPosUS), int(di)) + GlobalScalars.Set(num.FromBool[float32](rp.HasPosUS(di)), int(GvHasPosUS), int(di)) - pvNegSum, pvNeg := rp.PVneg(ctx, di) + pvNegSum, pvNeg := rp.PVneg(di) GlobalScalars.Set(pvNegSum, int(GvPVnegSum), int(di)) GlobalScalars.Set(pvNeg, int(GvPVneg), int(di)) } @@ -1043,7 +1043,7 @@ func (rp *Rubicon) PVsFromUSs(ctx *Context, di uint32) { // VSPatchNewState does VSPatch processing in NewState: // updates global VSPatchPos and VSPatchPosSum, sets to RewPred. // uses max across recorded VSPatch activity levels. -func (rp *Rubicon) VSPatchNewState(ctx *Context, di uint32) { +func (rp *Rubicon) VSPatchNewState(di uint32) { prev := GlobalScalars.Value(int(GvVSPatchPos), int(di)) GlobalScalars.Set(prev, int(GvVSPatchPosPrev), int(di)) mx := float32(0) @@ -1078,14 +1078,14 @@ func (rp *Rubicon) VSPatchNewState(ctx *Context, di uint32) { // PVposEstFromUSs returns the estimated positive PV value // based on drives and given US values. 
 // to compute estimates to compare network performance.
-func (rp *Rubicon) PVposEstFromUSs(ctx *Context, di uint32, uss []float32) (pvPosSum, pvPos float32) {
+func (rp *Rubicon) PVposEstFromUSs(di uint32, uss []float32) (pvPosSum, pvPos float32) {
 	nd := rp.NPosUSs
 	if len(uss) < int(nd) {
 		nd = uint32(len(uss))
 	}
 	wts := rp.USs.PVposWts
 	for i := uint32(0); i < nd; i++ {
-		pvPosSum += wts[i] * uss[i] * rp.Drive.EffectiveDrive(ctx, di, i)
+		pvPosSum += wts[i] * uss[i] * rp.Drive.EffectiveDrive(di, i)
 	}
 	pvPos = RubiconNormFun(rp.USs.PVposGain * pvPosSum)
 	return
@@ -1143,8 +1143,8 @@ func (rp *Rubicon) DAFromPVs(pvPos, pvNeg, vsPatchPos, vsPatchPosSum float32) (b
 
 // GiveUpOnGoal determines whether to give up on current goal
 // based on Utility, Timing, and Progress weight factors.
-func (rp *Rubicon) GiveUpOnGoal(ctx *Context, di uint32, rnd randx.Rand) bool {
-	cnSum, guSum := rp.GiveUp.Sums(ctx, di)
+func (rp *Rubicon) GiveUpOnGoal(di uint32, rnd randx.Rand) bool {
+	cnSum, guSum := rp.GiveUp.Sums(di)
 	prob, giveUp := rp.GiveUp.Prob(cnSum, guSum, rnd)
 	GlobalScalars.Set(prob, int(GvGiveUpProb), int(di))
 	GlobalScalars.Set(num.FromBool[float32](giveUp), int(GvGiveUp), int(di))
@@ -1157,9 +1157,9 @@ func (rp *Rubicon) GiveUpOnGoal(ctx *Context, di uint32, rnd randx.Rand) bool {
 // and the resulting values are stored in global variables.
 // Called after updating USs, Effort, Drives at start of trial step,
 // in Step.
-func (rp *Rubicon) PVDA(ctx *Context, di uint32, rnd randx.Rand) {
-	rp.USs.USnegCostFromRaw(ctx, di)
-	rp.PVsFromUSs(ctx, di)
+func (rp *Rubicon) PVDA(di uint32, rnd randx.Rand) {
+	rp.USs.USnegCostFromRaw(di)
+	rp.PVsFromUSs(di)
 
 	hasRew := (GlobalScalars.Value(int(GvHasRew), int(di)) > 0)
 	pvPos := GlobalScalars.Value(int(GvPVpos), int(di))
@@ -1168,30 +1168,30 @@ func (rp *Rubicon) PVDA(ctx *Context, di uint32, rnd randx.Rand) {
 	vsPatchPosSum := GlobalScalars.Value(int(GvVSPatchPosSum), int(di))
 
 	if hasRew {
-		rp.ResetGiveUp(ctx, di)
-		rew := rp.LHb.DAforUS(ctx, di, pvPos, pvNeg, vsPatchPos, vsPatchPosSum) // only when actual pos rew
+		rp.ResetGiveUp(di)
+		rew := rp.LHb.DAforUS(di, pvPos, pvNeg, vsPatchPos, vsPatchPosSum) // only when actual pos rew
 		GlobalScalars.Set(rew, int(GvRew), int(di))
 		return
 	}
 
 	if GlobalScalars.Value(int(GvVSMatrixHasGated), int(di)) > 0 {
-		giveUp := rp.GiveUpOnGoal(ctx, di, rnd)
+		giveUp := rp.GiveUpOnGoal(di, rnd)
 		if giveUp {
-			GlobalScalars.Set(1, int(GvHasRew), int(di))                            // key for triggering reset
-			rew := rp.LHb.DAforUS(ctx, di, pvPos, pvNeg, vsPatchPos, vsPatchPosSum) // only when actual rew
+			GlobalScalars.Set(1, int(GvHasRew), int(di))                       // key for triggering reset
+			rew := rp.LHb.DAforUS(di, pvPos, pvNeg, vsPatchPos, vsPatchPosSum) // only when actual rew
 			GlobalScalars.Set(rew, int(GvRew), int(di))
 			return
 		}
 	}
 
 	// no US regular case
-	rp.LHb.DAforNoUS(ctx, di)
+	rp.LHb.DAforNoUS(di)
 	GlobalScalars.Set(0, int(GvRew), int(di))
 }
 
 // GlobalSetRew is a convenience function for setting the external reward
 // state in Globals variables
-func GlobalSetRew(ctx *Context, di uint32, rew float32, hasRew bool) {
+func GlobalSetRew(di uint32, rew float32, hasRew bool) {
 	GlobalScalars.Set(num.FromBool[float32](hasRew), int(GvHasRew), int(di))
 	if hasRew {
 		GlobalScalars.Set(rew, int(GvRew), int(di))
@@ -1204,7 +1204,7 @@ func GlobalSetRew(ctx *Context, di uint32, rew float32, hasRew bool) {
 
 // RubiconUSStimValue returns stimulus value for US at given index
 // and valence (includes Cost). If US > 0.01, a full 1 US activation is returned.
-func RubiconUSStimValue(ctx *Context, di uint32, usIndex uint32, valence ValenceTypes) float32 {
+func RubiconUSStimValue(di uint32, usIndex uint32, valence ValenceTypes) float32 {
 	nix := GetNetworkIxs(0)
 	us := float32(0)
 	switch valence {
diff --git a/axon/rubicon.goal b/axon/rubicon.goal
index 446162b2..bfc305ec 100644
--- a/axon/rubicon.goal
+++ b/axon/rubicon.goal
@@ -72,19 +72,19 @@ func (dp *DriveParams) Update() {
 }
 
 // VarToZero sets all values of given drive-sized variable to 0
-func (dp *DriveParams) VarToZero(ctx *Context, di uint32, gvar GlobalVectorVars) {
+func (dp *DriveParams) VarToZero(di uint32, gvar GlobalVectorVars) {
 	for i := range dp.Base {
 		GlobalVectors[gvar, i, di] = 0
 	}
 }
 
 // ToZero sets all drives to 0
-func (dp *DriveParams) ToZero(ctx *Context, di uint32) {
-	dp.VarToZero(ctx, di, GvDrives)
+func (dp *DriveParams) ToZero(di uint32) {
+	dp.VarToZero(di, GvDrives)
 }
 
 // ToBaseline sets all drives to their baseline levels
-func (dp *DriveParams) ToBaseline(ctx *Context, di uint32) {
+func (dp *DriveParams) ToBaseline(di uint32) {
 	for i := range dp.Base {
 		GlobalVectors[GvDrives, i, di] = dp.Base[i]
 	}
@@ -92,7 +92,7 @@ func (dp *DriveParams) ToBaseline(ctx *Context, di uint32) {
 
 // AddTo increments drive by given amount, subject to 0-1 range clamping.
 // Returns new val.
-func (dp *DriveParams) AddTo(ctx *Context, di uint32, drv uint32, delta float32) float32 {
+func (dp *DriveParams) AddTo(di uint32, drv uint32, delta float32) float32 {
 	dv := GlobalVectors[GvDrives, drv, di] + delta
 	if dv > 1 {
 		dv = 1
@@ -105,7 +105,7 @@ func (dp *DriveParams) AddTo(ctx *Context, di uint32, drv uint32, delta float32)
 
 // SoftAdd increments drive by given amount, using soft-bounding to 0-1 extremes.
 // if delta is positive, multiply by 1-val, else val. Returns new val.
-func (dp *DriveParams) SoftAdd(ctx *Context, di uint32, drv uint32, delta float32) float32 {
+func (dp *DriveParams) SoftAdd(di uint32, drv uint32, delta float32) float32 {
 	dv := GlobalVectors[GvDrives, drv, di]
 	if delta > 0 {
 		dv += (1 - dv) * delta
@@ -123,7 +123,7 @@ func (dp *DriveParams) SoftAdd(ctx *Context, di uint32, drv uint32, delta float3
 
 // ExpStep updates drive with an exponential step with given dt value
 // toward given baseline value.
-func (dp *DriveParams) ExpStep(ctx *Context, di uint32, drv uint32, dt, base float32) float32 {
+func (dp *DriveParams) ExpStep(di uint32, drv uint32, dt, base float32) float32 {
 	dv := GlobalVectors[GvDrives, drv, di]
 	dv += dt * (base - dv)
 	if dv > 1 {
@@ -137,15 +137,15 @@ func (dp *DriveParams) ExpStep(ctx *Context, di uint32, drv uint32, dt, base flo
 
 // ExpStepAll updates given drives with an exponential step using dt values
 // toward baseline values.
-func (dp *DriveParams) ExpStepAll(ctx *Context, di uint32) {
+func (dp *DriveParams) ExpStepAll(di uint32) {
 	for i := range dp.Base {
-		dp.ExpStep(ctx, di, uint32(i), dp.Dt[i], dp.Base[i])
+		dp.ExpStep(di, uint32(i), dp.Dt[i], dp.Base[i])
 	}
 }
 
 // EffectiveDrive returns the Max of Drives at given index and DriveMin.
 // note that index 0 is the novelty / curiosity drive, which doesn't use DriveMin.
-func (dp *DriveParams) EffectiveDrive(ctx *Context, di uint32, i uint32) float32 {
+func (dp *DriveParams) EffectiveDrive(di uint32, i uint32) float32 {
 	if i == 0 {
 		return GlobalVectors[GvDrives, 0, di]
 	}
@@ -201,13 +201,13 @@ func (ur *UrgencyParams) UrgeFun(urgency float32) float32 {
 }
 
 // Reset resets the raw urgency back to zero -- at start of new gating event
-func (ur *UrgencyParams) Reset(ctx *Context, di uint32) {
+func (ur *UrgencyParams) Reset(di uint32) {
 	GlobalScalars[GvUrgencyRaw, di] = 0
 	GlobalScalars[GvUrgency, di] = 0
 }
 
 // Urge computes normalized Urge value from Raw, and sets DAtonic from that
-func (ur *UrgencyParams) Urge(ctx *Context, di uint32) float32 {
+func (ur *UrgencyParams) Urge(di uint32) float32 {
 	urge := ur.UrgeFun(GlobalScalars[GvUrgencyRaw, di])
 	if urge < ur.Thr {
 		urge = 0
@@ -218,9 +218,9 @@ func (ur *UrgencyParams) Urge(ctx *Context, di uint32) float32 {
 }
 
 // AddEffort adds an effort increment of urgency and updates the Urge factor
-func (ur *UrgencyParams) AddEffort(ctx *Context, di uint32, inc float32) {
+func (ur *UrgencyParams) AddEffort(di uint32, inc float32) {
 	GlobalScalars[GvUrgencyRaw, di] += inc
-	ur.Urge(ctx, di)
+	ur.Urge(di)
 }
 
 /////////////////////////////////////////////////////////
@@ -325,7 +325,7 @@ func (us *USParams) Update() {
 }
 
 // USnegCostFromRaw sets normalized NegUS, Cost values from Raw values
-func (us *USParams) USnegCostFromRaw(ctx *Context, di uint32) {
+func (us *USParams) USnegCostFromRaw(di uint32) {
 	for i, ng := range us.USnegGains {
 		raw := GlobalVectors[GvUSnegRaw, i, di]
 		norm := RubiconNormFun(ng * raw)
@@ -339,7 +339,7 @@ func (us *USParams) USnegCostFromRaw(ctx *Context, di uint32) {
 }
 
 // USnegToZero sets all values of USneg, USnegRaw to zero
-func (us *USParams) USnegToZero(ctx *Context, di uint32) {
+func (us *USParams) USnegToZero(di uint32) {
 	for i := range us.USnegGains {
 		GlobalVectors[GvUSneg, i, di] = 0
 		GlobalVectors[GvUSnegRaw, i, di] = 0
@@ -347,7 +347,7 @@ func (us *USParams) USnegToZero(ctx *Context, di uint32) {
 }
 
 // CostToZero sets all values of Cost, CostRaw to zero
-func (us *USParams) CostToZero(ctx *Context, di uint32) {
+func (us *USParams) CostToZero(di uint32) {
 	for i := range us.CostGains {
 		GlobalVectors[GvCost, i, di] = 0
 		GlobalVectors[GvCostRaw, i, di] = 0
@@ -355,7 +355,7 @@ func (us *USParams) CostToZero(ctx *Context, di uint32) {
 }
 
 // USposToZero sets all values of USpos to zero
-func (us *USParams) USposToZero(ctx *Context, di uint32) {
+func (us *USParams) USposToZero(di uint32) {
 	for i := range us.PVposWts {
 		GlobalVectors[GvUSpos, i, di] = 0
 	}
@@ -417,7 +417,7 @@ func (lh *LHbParams) Update() {
 }
 
 // Reset resets all LHb vars back to 0
-func (lh *LHbParams) Reset(ctx *Context, di uint32) {
+func (lh *LHbParams) Reset(di uint32) {
 	GlobalScalars[GvLHbDip, di] = 0
 	GlobalScalars[GvLHbBurst, di] = 0
 	GlobalScalars[GvLHbPVDA, di] = 0
@@ -453,7 +453,7 @@ func (lh *LHbParams) DAFromPVs(pvPos, pvNeg, vsPatchPos, vsPatchPosSum float32)
 // and PVDA ~= Burst - Dip, for case when there is a primary
 // positive reward value or a give-up state has triggered.
 // Returns the overall net reward magnitude, prior to VSPatch discounting.
-func (lh *LHbParams) DAforUS(ctx *Context, di uint32, pvPos, pvNeg, vsPatchPos, vsPatchPosSum float32) float32 {
+func (lh *LHbParams) DAforUS(di uint32, pvPos, pvNeg, vsPatchPos, vsPatchPosSum float32) float32 {
 	burst, dip, da, rew := lh.DAFromPVs(pvPos, pvNeg, vsPatchPos, vsPatchPosSum)
 	GlobalScalars[GvLHbDip, di] = dip
 	GlobalScalars[GvLHbBurst, di] = burst
@@ -470,7 +470,7 @@ func (lh *LHbParams) DAforUS(ctx *Context, di uint32, pvPos, pvNeg, vsPatchPos,
 // Because the LHb only responds when it decides to GiveUp,
 // there is no response in this case.
 // DA is instead driven by CS-based computation, in rubicon_layers.go, VTAParams.VTADA
-func (lh *LHbParams) DAforNoUS(ctx *Context, di uint32) float32 {
+func (lh *LHbParams) DAforNoUS(di uint32) float32 {
 	GlobalScalars[GvLHbDip, di] = 0
 	GlobalScalars[GvLHbBurst, di] = 0
 	GlobalScalars[GvLHbPVDA, di] = 0
@@ -554,7 +554,7 @@ func (gp *GiveUpParams) Prob(cnSum, guSum float32, rnd randx.Rand) (float32, boo
 
 // Sums computes the summed weighting factors that drive continue and give up
 // contributions to the probability function.
-func (gp *GiveUpParams) Sums(ctx *Context, di uint32) (cnSum, guSum float32) {
+func (gp *GiveUpParams) Sums(di uint32) (cnSum, guSum float32) {
 	negSum := GlobalScalars[GvPVnegSum, di]
 	guU := gp.Utility * max(gp.MinUtility, negSum)
 	cnU := gp.Utility * max(gp.MinUtility, GlobalScalars[GvPVposEst, di]) // todo: var?
@@ -692,7 +692,7 @@ func (rp *Rubicon) USnegIndex(simUsIndex int) int {
 // Two costs (Time, Effort) are also automatically allocated and managed.
 // The USs specified here need to be managed by the simulation via the SetUS method.
 // Positive USs each have corresponding Drives.
-func (rp *Rubicon) SetNUSs(ctx *Context, nPos, nNeg int) {
+func (rp *Rubicon) SetNUSs(nPos, nNeg int) {
 	nix := GetNetworkIxs(0)
 	nPos = rp.USposIndex(max(nPos, 1))
 	nNeg = rp.USnegIndex(max(nNeg, 1)) // ensure at least 1
@@ -707,15 +707,15 @@ func (rp *Rubicon) SetNUSs(ctx *Context, nPos, nNeg int) {
 }
 
 // Reset resets all Rubicon state
-func (rp *Rubicon) Reset(ctx *Context, di uint32) {
-	rp.Drive.ToBaseline(ctx, di)
-	rp.TimeEffortReset(ctx, di)
-	rp.Urgency.Reset(ctx, di)
-	rp.InitUS(ctx, di)
-	rp.LHb.Reset(ctx, di)
-	rp.Drive.VarToZero(ctx, di, GvVSPatchD1)
-	rp.Drive.VarToZero(ctx, di, GvVSPatchD2)
-	rp.ResetGoalState(ctx, di)
+func (rp *Rubicon) Reset(di uint32) {
+	rp.Drive.ToBaseline(di)
+	rp.TimeEffortReset(di)
+	rp.Urgency.Reset(di)
+	rp.InitUS(di)
+	rp.LHb.Reset(di)
+	rp.Drive.VarToZero(di, GvVSPatchD1)
+	rp.Drive.VarToZero(di, GvVSPatchD2)
+	rp.ResetGoalState(di)
 	GlobalScalars[GvVtaDA, di] = 0
 	GlobalScalars[GvVSMatrixJustGated, di] = 0
 	GlobalScalars[GvVSMatrixHasGated, di] = 0
@@ -724,21 +724,21 @@ func (rp *Rubicon) Reset(ctx *Context, di uint32) {
 }
 
 // InitUS initializes all the USs to zero
-func (rp *Rubicon) InitUS(ctx *Context, di uint32) {
-	rp.USs.USposToZero(ctx, di)
-	rp.USs.USnegToZero(ctx, di)
-	rp.USs.CostToZero(ctx, di)
+func (rp *Rubicon) InitUS(di uint32) {
+	rp.USs.USposToZero(di)
+	rp.USs.USnegToZero(di)
+	rp.USs.CostToZero(di)
 	GlobalScalars[GvHasRew, di] = 0
 	GlobalScalars[GvRew, di] = 0
 }
 
 // InitDrives initializes all the Drives to baseline values (default = 0)
-func (rp *Rubicon) InitDrives(ctx *Context, di uint32) {
-	rp.Drive.ToBaseline(ctx, di)
+func (rp *Rubicon) InitDrives(di uint32) {
+	rp.Drive.ToBaseline(di)
 }
 
 // AddTimeEffort adds a unit of time and an increment of effort
-func (rp *Rubicon) AddTimeEffort(ctx *Context, di uint32, effort float32) {
+func (rp *Rubicon) AddTimeEffort(di uint32, effort float32) {
 	GlobalScalars[GvTime, di] += 1
 	tm := GlobalScalars[GvTime, di]
 	GlobalVectors[GvCostRaw, 0, di] = tm // time is neg 0
@@ -754,17 +754,17 @@ func (rp *Rubicon) AddTimeEffort(ctx *Context, di uint32, effort float32) {
 // and Urgency updates otherwise (when not goal engaged)
 // Call this at the start of the trial, in ApplyRubicon method,
 // after NewState.
-func (rp *Rubicon) EffortUrgencyUpdate(ctx *Context, di uint32, effort float32) {
+func (rp *Rubicon) EffortUrgencyUpdate(di uint32, effort float32) {
 	if GlobalScalars[GvVSMatrixHasGated, di] > 0 {
-		rp.AddTimeEffort(ctx, di, effort)
+		rp.AddTimeEffort(di, effort)
 	} else {
-		rp.Urgency.AddEffort(ctx, di, effort)
+		rp.Urgency.AddEffort(di, effort)
 	}
 }
 
 // TimeEffortReset resets the raw time and effort back to zero,
 // at start of new gating event
-func (rp *Rubicon) TimeEffortReset(ctx *Context, di uint32) {
+func (rp *Rubicon) TimeEffortReset(di uint32) {
 	GlobalScalars[GvTime, di] = 0
 	GlobalScalars[GvEffort, di] = 0
 	GlobalVectors[GvCostRaw, 0, di] = 0 // effort is neg 0
@@ -778,12 +778,12 @@ func (rp *Rubicon) TimeEffortReset(ctx *Context, di uint32) {
 // This is not called directly in the Rubicon code -- can be used to compute
 // what the Rubicon code itself will compute -- see LHbPVDA
 // todo: this is not very meaningful anymore
-// func (pp *Rubicon) PVposFromDriveEffort(ctx *Context, usValue, drive, effort float32) float32 {
+// func (pp *Rubicon) PVposFromDriveEffort(usValue, drive, effort float32) float32 {
 // 	return usValue * drive * (1 - RubiconNormFun(pp.USs.PVnegWts[0]*effort))
 // }
 
 // RubiconSetDrive sets given Drive to given value
-func (rp *Rubicon) SetDrive(ctx *Context, di uint32, dr uint32, val float32) {
+func (rp *Rubicon) SetDrive(di uint32, dr uint32, val float32) {
 	GlobalVectors[GvDrives, dr, di] = val
 }
 
@@ -791,19 +791,19 @@ func (rp *Rubicon) SetDrive(ctx *Context, di uint32, dr uint32, val float32) {
 // curiosity sets the strength for the curiosity drive
 // and drives are strengths of the remaining sim-specified drives, in order.
 // any drives not so specified are at the InitDrives baseline level.
-func (rp *Rubicon) SetDrives(ctx *Context, di uint32, curiosity float32, drives ...float32) {
-	rp.InitDrives(ctx, di)
-	rp.SetDrive(ctx, di, 0, curiosity)
+func (rp *Rubicon) SetDrives(di uint32, curiosity float32, drives ...float32) {
+	rp.InitDrives(di)
+	rp.SetDrive(di, 0, curiosity)
 	for i, v := range drives {
-		rp.SetDrive(ctx, di, uint32(1+i), v)
+		rp.SetDrive(di, uint32(1+i), v)
 	}
 }
 
 // DriveUpdate is used when auto-updating drive levels based on US consumption,
 // which partially satisfies (decrements) corresponding drive,
 // and on time passing, where drives adapt to their overall baseline levels.
-func (rp *Rubicon) DriveUpdate(ctx *Context, di uint32) {
-	rp.Drive.ExpStepAll(ctx, di)
+func (rp *Rubicon) DriveUpdate(di uint32) {
+	rp.Drive.ExpStepAll(di)
 	nd := rp.NPosUSs
 	for i := uint32(0); i < nd; i++ {
 		us := GlobalVectors[GvUSpos, i, di]
@@ -821,7 +821,7 @@
 // and sets the global HasRew flag, thus triggering a US learning event.
 // Note that costs can be used to track negative USs that are not strong
 // enough to trigger a US learning event.
-func (rp *Rubicon) SetUS(ctx *Context, di uint32, valence ValenceTypes, usIndex int, magnitude float32) {
+func (rp *Rubicon) SetUS(di uint32, valence ValenceTypes, usIndex int, magnitude float32) {
 	GlobalScalars[GvHasRew, di] = 1
 	if valence == Positive {
 		usIndex = rp.USposIndex(usIndex)
@@ -836,13 +836,13 @@ func (rp *Rubicon) SetUS(ctx *Context, di uint32, valence ValenceTypes, usIndex
 // ResetGoalState resets all the goal-engaged global values.
 // Critically, this is only called after goal accomplishment,
 // not after goal gating -- prevents "shortcutting" by re-gating.
-func (rp *Rubicon) ResetGoalState(ctx *Context, di uint32) {
+func (rp *Rubicon) ResetGoalState(di uint32) {
 	GlobalScalars[GvVSMatrixHasGated, di] = 0
-	rp.Urgency.Reset(ctx, di)
-	rp.TimeEffortReset(ctx, di)
-	rp.USs.USnegToZero(ctx, di) // all negs restart
-	rp.USs.CostToZero(ctx, di)
-	rp.ResetGiveUp(ctx, di)
+	rp.Urgency.Reset(di)
+	rp.TimeEffortReset(di)
+	rp.USs.USnegToZero(di) // all negs restart
+	rp.USs.CostToZero(di)
+	rp.ResetGiveUp(di)
 	GlobalScalars[GvVSPatchPos, di] = 0
 	GlobalScalars[GvVSPatchPosSum, di] = 0
 	GlobalScalars[GvVSPatchPosPrev, di] = 0
@@ -859,7 +859,7 @@ func (rp *Rubicon) ResetGoalState(ctx *Context, di uint32) {
 }
 
 // ResetGiveUp resets all the give-up related global values.
-func (rp *Rubicon) ResetGiveUp(ctx *Context, di uint32) {
+func (rp *Rubicon) ResetGiveUp(di uint32) {
 	GlobalScalars[GvPVposEst, di] = 0
 	GlobalScalars[GvPVposVar, di] = 0
 	GlobalScalars[GvGiveUpProb, di] = 0
@@ -869,7 +869,7 @@ func (rp *Rubicon) ResetGiveUp(ctx *Context, di uint32) {
 // NewState is called at very start of new state (trial) of processing.
 // sets HadRew = HasRew from last trial -- used to then reset various things
 // after reward.
-func (rp *Rubicon) NewState(ctx *Context, di uint32, rnd randx.Rand) {
+func (rp *Rubicon) NewState(di uint32, rnd randx.Rand) {
 	hadRewF := GlobalScalars[GvHasRew, di]
 	hadRew := num.ToBool(hadRewF)
 	GlobalScalars[GvHadRew, di] = hadRewF
@@ -880,23 +880,23 @@ func (rp *Rubicon) NewState(ctx *Context, di uint32, rnd randx.Rand) {
 	GlobalScalars[GvHasRew, di] = 0
 	GlobalScalars[GvNegUSOutcome, di] = 0
 
-	rp.VSPatchNewState(ctx, di)
+	rp.VSPatchNewState(di)
 
 	if hadRew {
-		rp.ResetGoalState(ctx, di)
+		rp.ResetGoalState(di)
 	} else if GlobalScalars[GvVSMatrixJustGated, di] > 0 {
 		GlobalScalars[GvVSMatrixHasGated, di] = 1
-		rp.Urgency.Reset(ctx, di)
+		rp.Urgency.Reset(di)
 	}
 	GlobalScalars[GvVSMatrixJustGated, di] = 0
-	rp.USs.USposToZero(ctx, di) // pos USs must be set fresh every time
+	rp.USs.USposToZero(di) // pos USs must be set fresh every time
 }
 
 // Step does one step (trial) after applying USs, Drives,
 // and updating Effort. It should be the final call in ApplyRubicon.
 // Calls PVDA which does all US, PV, LHb, GiveUp updating.
-func (rp *Rubicon) Step(ctx *Context, di uint32, rnd randx.Rand) {
-	rp.PVDA(ctx, di, rnd)
+func (rp *Rubicon) Step(di uint32, rnd randx.Rand) {
+	rp.PVDA(di, rnd)
 }
 
 // SetGoalMaintFromLayer sets the GoalMaint global state variable
@@ -904,7 +904,7 @@ func (rp *Rubicon) Step(ctx *Context, di uint32, rnd randx.Rand) {
 // GoalMaint is normalized 0-1 based on the given max activity level,
 // with anything out of range clamped to 0-1 range.
 // Returns (and logs) an error if layer name not found.
-func (rp *Rubicon) SetGoalMaintFromLayer(ctx *Context, di uint32, net *Network, layName string, maxAct float32) error {
+func (rp *Rubicon) SetGoalMaintFromLayer(di uint32, net *Network, layName string, maxAct float32) error {
 	ly := net.LayerByName(layName)
 	if ly == nil {
 		err := fmt.Errorf("SetGoalMaintFromLayer: layer named: %q not found", layName)
@@ -925,7 +925,7 @@ func (rp *Rubicon) SetGoalMaintFromLayer(ctx *Context, di uint32, net *Network,
 
 // DecodeFromLayer decodes value and variance from the average activity (CaSpkD)
 // of the given layer name. Use for decoding PVposEst and Var, and PVnegEst and Var
-func (rp *Rubicon) DecodeFromLayer(ctx *Context, di uint32, net *Network, layName string) (val, vr float32, err error) {
+func (rp *Rubicon) DecodeFromLayer(di uint32, net *Network, layName string) (val, vr float32, err error) {
 	ly := net.LayerByName(layName)
 	if ly == nil {
 		err = fmt.Errorf("DecodeFromLayer: layer named: %q not found", layName)
@@ -940,14 +940,14 @@ func (rp *Rubicon) DecodeFromLayer(ctx *Context, di uint32, net *Network, layNam
 
 // DecodePVEsts decodes estimated PV outcome values from PVposP and PVnegP
 // prediction layers, saves in global PVposEst, Var and PVnegEst, Var
-func (rp *Rubicon) DecodePVEsts(ctx *Context, di uint32, net *Network) {
-	posEst, posVar, err := rp.DecodeFromLayer(ctx, di, net, "PVposP")
+func (rp *Rubicon) DecodePVEsts(di uint32, net *Network) {
+	posEst, posVar, err := rp.DecodeFromLayer(di, net, "PVposP")
 	if err == nil {
 		GlobalScalars[GvPVposEst, di] = posEst
 		GlobalScalars[GvPVposVar, di] = posVar
 	}
 
-	negEst, negVar, err := rp.DecodeFromLayer(ctx, di, net, "PVnegP")
+	negEst, negVar, err := rp.DecodeFromLayer(di, net, "PVnegP")
 	if err == nil {
 		GlobalScalars[GvPVnegEst, di] = negEst
 		GlobalScalars[GvPVnegVar, di] = negVar
@@ -958,7 +958,7 @@ func (rp *Rubicon) DecodePVEsts(ctx *Context, di uint32, net *Network) {
 // in trial step units, which should decrease to 0 at the goal.
 // This should be set at the start of every trial.
 // Also computes the ProgressRate.
-func (rp *Rubicon) SetGoalDistEst(ctx *Context, di uint32, dist float32) {
+func (rp *Rubicon) SetGoalDistEst(di uint32, dist float32) {
 	if GlobalScalars[GvVSMatrixHasGated, di] == 0 {
 		GlobalScalars[GvGoalDistPrev, di] = dist
 		GlobalScalars[GvGoalDistEst, di] = dist
@@ -978,7 +978,7 @@ func (rp *Rubicon) SetGoalDistEst(ctx *Context, di uint32, dist float32) {
 // methods below used in computing Rubicon state, not generally called from sims
 
 // HasPosUS returns true if there is at least one non-zero positive US
-func (rp *Rubicon) HasPosUS(ctx *Context, di uint32) bool {
+func (rp *Rubicon) HasPosUS(di uint32) bool {
 	nd := rp.NPosUSs
 	for i := uint32(0); i < nd; i++ {
 		if GlobalVectors[GvUSpos, i, di] > 0 {
@@ -993,11 +993,11 @@ func (rp *Rubicon) HasPosUS(ctx *Context, di uint32) bool {
 // its current drive and weighting factor (pvPosSum),
 // and the normalized version of this sum (PVpos = overall positive PV)
 // as 1 / (1 + (PVposGain * pvPosSum))
-func (rp *Rubicon) PVpos(ctx *Context, di uint32) (pvPosSum, pvPos float32) {
+func (rp *Rubicon) PVpos(di uint32) (pvPosSum, pvPos float32) {
 	nd := rp.NPosUSs
 	wts := rp.USs.PVposWts
 	for i := uint32(0); i < nd; i++ {
-		pvPosSum += wts[i] * GlobalVectors[GvUSpos, i, di] * rp.Drive.EffectiveDrive(ctx, di, i)
+		pvPosSum += wts[i] * GlobalVectors[GvUSpos, i, di] * rp.Drive.EffectiveDrive(di, i)
 	}
 	pvPos = RubiconNormFun(rp.USs.PVposGain * pvPosSum)
 	return
@@ -1008,7 +1008,7 @@ func (rp *Rubicon) PVpos(ctx *Context, di uint32) (pvPosSum, pvPos float32) {
 // by a weighting factor and summed (usNegSum)
 // and the normalized version of this sum (PVneg = overall negative PV)
 // as 1 / (1 + (PVnegGain * PVnegSum))
-func (rp *Rubicon) PVneg(ctx *Context, di uint32) (pvNegSum, pvNeg float32) {
+func (rp *Rubicon) PVneg(di uint32) (pvNegSum, pvNeg float32) {
 	nn := rp.NNegUSs
 	wts := rp.USs.PVnegWts
 	for i := uint32(0); i < nn; i++ {
@@ -1025,13 +1025,13 @@ func (rp *Rubicon) PVneg(ctx *Context, di uint32) (pvNegSum, pvNeg float32) {
 
 // PVsFromUSs updates the current PV summed, weighted, normalized values
 // from the underlying US values.
-func (rp *Rubicon) PVsFromUSs(ctx *Context, di uint32) {
-	pvPosSum, pvPos := rp.PVpos(ctx, di)
+func (rp *Rubicon) PVsFromUSs(di uint32) {
+	pvPosSum, pvPos := rp.PVpos(di)
 	GlobalScalars[GvPVposSum, di] = pvPosSum
 	GlobalScalars[GvPVpos, di] = pvPos
-	GlobalScalars[GvHasPosUS, di] = num.FromBool[float32](rp.HasPosUS(ctx, di))
+	GlobalScalars[GvHasPosUS, di] = num.FromBool[float32](rp.HasPosUS(di))
 
-	pvNegSum, pvNeg := rp.PVneg(ctx, di)
+	pvNegSum, pvNeg := rp.PVneg(di)
 	GlobalScalars[GvPVnegSum, di] = pvNegSum
 	GlobalScalars[GvPVneg, di] = pvNeg
 }
@@ -1039,7 +1039,7 @@ func (rp *Rubicon) PVsFromUSs(ctx *Context, di uint32) {
 // VSPatchNewState does VSPatch processing in NewState:
 // updates global VSPatchPos and VSPatchPosSum, sets to RewPred.
 // uses max across recorded VSPatch activity levels.
-func (rp *Rubicon) VSPatchNewState(ctx *Context, di uint32) {
+func (rp *Rubicon) VSPatchNewState(di uint32) {
 	prev := GlobalScalars[GvVSPatchPos, di]
 	GlobalScalars[GvVSPatchPosPrev, di] = prev
 	mx := float32(0)
@@ -1074,14 +1074,14 @@ func (rp *Rubicon) VSPatchNewState(ctx *Context, di uint32) {
 // PVposEstFromUSs returns the estimated positive PV value
 // based on drives and given US values. This can be used
 // to compute estimates to compare network performance.
-func (rp *Rubicon) PVposEstFromUSs(ctx *Context, di uint32, uss []float32) (pvPosSum, pvPos float32) {
+func (rp *Rubicon) PVposEstFromUSs(di uint32, uss []float32) (pvPosSum, pvPos float32) {
 	nd := rp.NPosUSs
 	if len(uss) < int(nd) {
 		nd = uint32(len(uss))
 	}
 	wts := rp.USs.PVposWts
 	for i := uint32(0); i < nd; i++ {
-		pvPosSum += wts[i] * uss[i] * rp.Drive.EffectiveDrive(ctx, di, i)
+		pvPosSum += wts[i] * uss[i] * rp.Drive.EffectiveDrive(di, i)
 	}
 	pvPos = RubiconNormFun(rp.USs.PVposGain * pvPosSum)
 	return
@@ -1139,8 +1139,8 @@ func (rp *Rubicon) DAFromPVs(pvPos, pvNeg, vsPatchPos, vsPatchPosSum float32) (b
 
 // GiveUpOnGoal determines whether to give up on current goal
 // based on Utility, Timing, and Progress weight factors.
-func (rp *Rubicon) GiveUpOnGoal(ctx *Context, di uint32, rnd randx.Rand) bool {
-	cnSum, guSum := rp.GiveUp.Sums(ctx, di)
+func (rp *Rubicon) GiveUpOnGoal(di uint32, rnd randx.Rand) bool {
+	cnSum, guSum := rp.GiveUp.Sums(di)
 	prob, giveUp := rp.GiveUp.Prob(cnSum, guSum, rnd)
 	GlobalScalars[GvGiveUpProb, di] = prob
 	GlobalScalars[GvGiveUp, di] = num.FromBool[float32](giveUp)
@@ -1153,9 +1153,9 @@ func (rp *Rubicon) GiveUpOnGoal(ctx *Context, di uint32, rnd randx.Rand) bool {
 // and the resulting values are stored in global variables.
 // Called after updating USs, Effort, Drives at start of trial step,
 // in Step.
-func (rp *Rubicon) PVDA(ctx *Context, di uint32, rnd randx.Rand) {
-	rp.USs.USnegCostFromRaw(ctx, di)
-	rp.PVsFromUSs(ctx, di)
+func (rp *Rubicon) PVDA(di uint32, rnd randx.Rand) {
+	rp.USs.USnegCostFromRaw(di)
+	rp.PVsFromUSs(di)
 
 	hasRew := (GlobalScalars[GvHasRew, di] > 0)
 	pvPos := GlobalScalars[GvPVpos, di]
@@ -1164,30 +1164,30 @@ func (rp *Rubicon) PVDA(ctx *Context, di uint32, rnd randx.Rand) {
 	vsPatchPosSum := GlobalScalars[GvVSPatchPosSum, di]
 
 	if hasRew {
-		rp.ResetGiveUp(ctx, di)
-		rew := rp.LHb.DAforUS(ctx, di, pvPos, pvNeg, vsPatchPos, vsPatchPosSum) // only when actual pos rew
+		rp.ResetGiveUp(di)
+		rew := rp.LHb.DAforUS(di, pvPos, pvNeg, vsPatchPos, vsPatchPosSum) // only when actual pos rew
 		GlobalScalars[GvRew, di] = rew
 		return
 	}
 
 	if GlobalScalars[GvVSMatrixHasGated, di] > 0 {
-		giveUp := rp.GiveUpOnGoal(ctx, di, rnd)
+		giveUp := rp.GiveUpOnGoal(di, rnd)
 		if giveUp {
 			GlobalScalars[GvHasRew, di] = 1 // key for triggering reset
-			rew := rp.LHb.DAforUS(ctx, di, pvPos, pvNeg, vsPatchPos, vsPatchPosSum) // only when actual rew
+			rew := rp.LHb.DAforUS(di, pvPos, pvNeg, vsPatchPos, vsPatchPosSum) // only when actual rew
 			GlobalScalars[GvRew, di] = rew
 			return
 		}
	}
 
 	// no US regular case
-	rp.LHb.DAforNoUS(ctx, di)
+	rp.LHb.DAforNoUS(di)
 	GlobalScalars[GvRew, di] = 0
 }
 
 // GlobalSetRew is a convenience function for setting the external reward
 // state in Globals variables
-func GlobalSetRew(ctx *Context, di uint32, rew float32, hasRew bool) {
+func GlobalSetRew(di uint32, rew float32, hasRew bool) {
 	GlobalScalars[GvHasRew, di] = num.FromBool[float32](hasRew)
 	if hasRew {
 		GlobalScalars[GvRew, di] = rew
@@ -1200,7 +1200,7 @@ func GlobalSetRew(ctx *Context, di uint32, rew float32, hasRew bool) {
 
 // RubiconUSStimValue returns stimulus value for US at given index
 // and valence (includes Cost). If US > 0.01, a full 1 US activation is returned.
-func RubiconUSStimValue(ctx *Context, di uint32, usIndex uint32, valence ValenceTypes) float32 {
+func RubiconUSStimValue(di uint32, usIndex uint32, valence ValenceTypes) float32 {
 	nix := GetNetworkIxs(0)
 	us := float32(0)
 	switch valence {