diff --git a/axon/act-layer.go b/axon/act-layer.go
index 1d1d8edd..9ee4fa25 100644
--- a/axon/act-layer.go
+++ b/axon/act-layer.go
@@ -715,6 +715,8 @@ func (ly *LayerParams) CyclePost(ctx *Context, di uint32) {
 	lpi := ly.PoolIndex(0)
 	ly.CyclePostLayer(ctx, lpi, di)
 	switch ly.Type {
+	case MatrixLayer, BGThalLayer:
+		ly.GatedFromSpkMax(ctx, di)
 	case CeMLayer:
 		ly.CyclePostCeMLayer(ctx, lpi, di)
 	case VSPatchLayer:
@@ -850,6 +852,24 @@ func (ly *LayerParams) CyclePostVSPatchLayer(ctx *Context, pi, di uint32, spi in
 
 //////// Phase timescale
 
+// DecayStateNeuronsAll decays neural activation state by given proportion
+// (default decay values are ly.Params.Acts.Decay.Act, Glong, AHP)
+// for all data parallel indexes. Does not decay pool or layer state.
+// This is used for minus phase of Pulvinar layers to clear state in prep
+// for driver plus phase.
+func (ly *LayerParams) DecayStateNeuronsAll(ctx *Context, decay, glong, ahp float32) {
+	nn := ly.Indexes.NNeurons
+	for lni := uint32(0); lni < nn; lni++ {
+		ni := ly.Indexes.NeurSt + lni
+		if NeuronIsOff(ni) {
+			continue
+		}
+		for di := uint32(0); di < ctx.NData; di++ {
+			ly.Acts.DecayState(ctx, ni, di, decay, glong, ahp)
+		}
+	}
+}
+
 // NewStateLayer does NewState at the layer level, called
 func (ly *LayerParams) NewStateLayer(ctx *Context) {
 	actMinusAvg := float32(0)
@@ -951,6 +971,17 @@ func (ly *LayerParams) MinusPhaseNeuron(ctx *Context, ni, di uint32) {
 	Neurons.Set(Neurons.Value(int(ni), int(di), int(CaSpkP)), int(ni), int(di), int(CaSpkPM))
 }
 
+// MinusPhasePost does special algorithm processing at end of minus
+func (ly *LayerParams) MinusPhasePost(ctx *Context) {
+	switch ly.Type {
+	case MatrixLayer:
+		ly.MatrixGated(ctx) // need gated state for decisions about action processing, so do in minus too
+	case PulvinarLayer:
+		ly.DecayStateNeuronsAll(ctx, 1, 1, 0)
+	default:
+	}
+}
+
 // PlusPhaseStartNeuron does neuron level plus-phase start:
 // applies Target inputs as External inputs.
 func (ly *LayerParams) PlusPhaseStartNeuron(ctx *Context, ni, di uint32) {
@@ -1196,16 +1227,6 @@ func (ly *Layer) UpdateExtFlags(ctx *Context) {
 	}
 }
 
-// MinusPhasePost does special algorithm processing at end of minus
-func (ly *Layer) MinusPhasePost(ctx *Context) {
-	switch ly.Type {
-	case MatrixLayer:
-		ly.MatrixGated(ctx) // need gated state for decisions about action processing, so do in minus too
-	case PulvinarLayer:
-		ly.DecayStateNeuronsAll(ctx, 1, 1, 0)
-	}
-}
-
 // PlusPhasePost does special algorithm processing at end of plus
 func (ly *Layer) PlusPhasePost(ctx *Context) {
 	ly.PlusPhaseActAvg(ctx)
@@ -1236,7 +1257,7 @@ func (ly *Layer) PlusPhasePost(ctx *Context) {
 	}
 	switch ly.Type {
 	case MatrixLayer:
-		ly.MatrixGated(ctx)
+		ly.Params.MatrixGated(ctx)
 	}
 }
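For context on what the Pulvinar minus-phase call DecayStateNeuronsAll(ctx, 1, 1, 0) does, the sketch below mirrors its neuron-outer, data-parallel-inner loop with a toy decay rule. The toyState fields and decayToward helper are illustrative assumptions, not the actual Acts.DecayState implementation, which updates many more state variables:

```go
package main

import "fmt"

// toyState stands in for a few of the per-neuron, per-data state variables;
// the real DecayState operates on many more (Ge, Gi, NMDA, AHP channels, ...).
type toyState struct {
	Act, GnmdaLrn, Ahp float32
}

// decayToward moves v toward zero by proportion p (0 = no change, 1 = full reset).
func decayToward(v, p float32) float32 { return v - p*v }

func main() {
	nNeurons, nData := uint32(3), uint32(2)
	state := make([]toyState, nNeurons*nData)
	for i := range state {
		state[i] = toyState{Act: 0.8, GnmdaLrn: 0.5, Ahp: 0.2}
	}
	// Mirror of DecayStateNeuronsAll(ctx, 1, 1, 0) as used for Pulvinar in
	// MinusPhasePost: fully clear Act and long-timescale conductances, keep AHP.
	decay, glong, ahp := float32(1), float32(1), float32(0)
	for ni := uint32(0); ni < nNeurons; ni++ {
		for di := uint32(0); di < nData; di++ {
			s := &state[ni*nData+di]
			s.Act = decayToward(s.Act, decay)
			s.GnmdaLrn = decayToward(s.GnmdaLrn, glong)
			s.Ahp = decayToward(s.Ahp, ahp)
		}
	}
	fmt.Printf("%+v\n", state[0]) // {Act:0 GnmdaLrn:0 Ahp:0.2}
}
```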
diff --git a/axon/act-layer.goal b/axon/act-layer.goal
index ca792580..219f5bf0 100644
--- a/axon/act-layer.goal
+++ b/axon/act-layer.goal
@@ -713,6 +713,8 @@ func (ly *LayerParams) CyclePost(ctx *Context, di uint32) {
 	lpi := ly.PoolIndex(0)
 	ly.CyclePostLayer(ctx, lpi, di)
 	switch ly.Type {
+	case MatrixLayer, BGThalLayer:
+		ly.GatedFromSpkMax(ctx, di)
 	case CeMLayer:
 		ly.CyclePostCeMLayer(ctx, lpi, di)
 	case VSPatchLayer:
@@ -849,6 +851,24 @@ func (ly *LayerParams) CyclePostVSPatchLayer(ctx *Context, pi, di uint32, spi in
 
 //////// Phase timescale
 
+// DecayStateNeuronsAll decays neural activation state by given proportion
+// (default decay values are ly.Params.Acts.Decay.Act, Glong, AHP)
+// for all data parallel indexes. Does not decay pool or layer state.
+// This is used for minus phase of Pulvinar layers to clear state in prep
+// for driver plus phase.
+func (ly *LayerParams) DecayStateNeuronsAll(ctx *Context, decay, glong, ahp float32) {
+	nn := ly.Indexes.NNeurons
+	for lni := uint32(0); lni < nn; lni++ {
+		ni := ly.Indexes.NeurSt + lni
+		if NeuronIsOff(ni) {
+			continue
+		}
+		for di := uint32(0); di < ctx.NData; di++ {
+			ly.Acts.DecayState(ctx, ni, di, decay, glong, ahp)
+		}
+	}
+}
+
 // NewStateLayer does NewState at the layer level, called
 func (ly *LayerParams) NewStateLayer(ctx *Context) {
 	actMinusAvg := float32(0)
@@ -950,6 +970,17 @@ func (ly *LayerParams) MinusPhaseNeuron(ctx *Context, ni, di uint32) {
 	Neurons[ni, di, CaSpkPM] = Neurons[ni, di, CaSpkP]
 }
 
+// MinusPhasePost does special algorithm processing at end of minus
+func (ly *LayerParams) MinusPhasePost(ctx *Context) {
+	switch ly.Type {
+	case MatrixLayer:
+		ly.MatrixGated(ctx) // need gated state for decisions about action processing, so do in minus too
+	case PulvinarLayer:
+		ly.DecayStateNeuronsAll(ctx, 1, 1, 0)
+	default:
+	}
+}
+
 // PlusPhaseStartNeuron does neuron level plus-phase start:
 // applies Target inputs as External inputs.
 func (ly *LayerParams) PlusPhaseStartNeuron(ctx *Context, ni, di uint32) {
@@ -1195,16 +1226,6 @@ func (ly *Layer) UpdateExtFlags(ctx *Context) {
 	}
 }
 
-// MinusPhasePost does special algorithm processing at end of minus
-func (ly *Layer) MinusPhasePost(ctx *Context) {
-	switch ly.Type {
-	case MatrixLayer:
-		ly.MatrixGated(ctx) // need gated state for decisions about action processing, so do in minus too
-	case PulvinarLayer:
-		ly.DecayStateNeuronsAll(ctx, 1, 1, 0)
-	}
-}
-
 // PlusPhasePost does special algorithm processing at end of plus
 func (ly *Layer) PlusPhasePost(ctx *Context) {
 	ly.PlusPhaseActAvg(ctx)
@@ -1235,7 +1256,7 @@ func (ly *Layer) PlusPhasePost(ctx *Context) {
 	}
 	switch ly.Type {
 	case MatrixLayer:
-		ly.MatrixGated(ctx)
+		ly.Params.MatrixGated(ctx)
 	}
 }

diff --git a/axon/act-net.go b/axon/act-net.go
index a72b5996..3a2f11e5 100644
--- a/axon/act-net.go
+++ b/axon/act-net.go
@@ -35,8 +35,6 @@ func (nt *Network) Cycle(ncyc int, getNeurons bool) {
 
 	if getNeurons {
 		RunDoneLayersNeurons()
-	} else {
-		RunDoneLayers()
 	}
 
 	// todo: fix this:
@@ -105,7 +103,6 @@ func (nt *Network) ApplyExts() {
 	ctx := nt.Context()
 	nd := int(nix.NNeurons * ctx.NData)
 	RunApplyExtsNeuron(nd)
-	// note: not completed until cycle is run
 }
 
 // MinusPhase does updating after end of minus phase.
@@ -116,22 +113,7 @@ func (nt *Network) MinusPhase() {
 	pd := int(nix.NPools * ctx.NData)
 	RunMinusPhasePool(pd)
 	RunMinusPhaseNeuron(nd)
-	RunDoneLayersNeurons()
-	nt.MinusPhasePost()
-	ToGPULayersNeurons()
-	// todo:
-	// nt.GPU.SyncStateToGPU()
-}
-
-// MinusPhasePost does special CPU post processing.
-func (nt *Network) MinusPhasePost() {
-	ctx := nt.Context()
-	for _, ly := range nt.Layers {
-		if ly.Off {
-			continue
-		}
-		ly.MinusPhasePost(ctx)
-	}
+	RunMinusPhasePost(int(nix.NLayers))
 }
 
 // PlusPhaseStart does updating at the start of the plus phase:
@@ -141,7 +123,6 @@ func (nt *Network) PlusPhaseStart() {
 	ctx := nt.Context()
 	nd := int(nix.NNeurons * ctx.NData)
 	RunPlusPhaseStartNeuron(nd)
-	RunDone()
 }
 
 // PlusPhase does updating after end of plus phase
@@ -357,6 +338,12 @@ func MinusPhaseNeuron(i uint32) { //gosl:kernel
 	Layers[li].MinusPhaseNeuron(ctx, ni, di)
 }
 
+// MinusPhasePost does special algorithm post processing.
+func MinusPhasePost(li uint32) { //gosl:kernel
+	ctx := GetCtx(0)
+	Layers[li].MinusPhasePost(ctx)
+}
+
 // PlusPhaseStartNeuron is the kernel over Neurons * Data to
 // do neuron-level updating at start of plus phase.
 func PlusPhaseStartNeuron(i uint32) { //gosl:kernel

diff --git a/axon/act-net.goal b/axon/act-net.goal
index 0fbf227e..4582721c 100644
--- a/axon/act-net.goal
+++ b/axon/act-net.goal
@@ -33,8 +33,6 @@ func (nt *Network) Cycle(ncyc int, getNeurons bool) {
 
 	if getNeurons {
 		RunDoneLayersNeurons()
-	} else {
-		RunDoneLayers()
 	}
 
 	// todo: fix this:
@@ -99,7 +97,6 @@ func (nt *Network) ApplyExts() {
 	ctx := nt.Context()
 	nd := int(nix.NNeurons * ctx.NData)
 	RunApplyExtsNeuron(nd)
-	// note: not completed until cycle is run
 }
 
 // MinusPhase does updating after end of minus phase.
@@ -110,22 +107,7 @@ func (nt *Network) MinusPhase() {
 	pd := int(nix.NPools * ctx.NData)
 	RunMinusPhasePool(pd)
 	RunMinusPhaseNeuron(nd)
-	RunDoneLayersNeurons()
-	nt.MinusPhasePost()
-	ToGPULayersNeurons()
-	// todo:
-	// nt.GPU.SyncStateToGPU()
-}
-
-// MinusPhasePost does special CPU post processing.
-func (nt *Network) MinusPhasePost() {
-	ctx := nt.Context()
-	for _, ly := range nt.Layers {
-		if ly.Off {
-			continue
-		}
-		ly.MinusPhasePost(ctx)
-	}
+	RunMinusPhasePost(int(nix.NLayers))
 }
 
 // PlusPhaseStart does updating at the start of the plus phase:
@@ -135,7 +117,6 @@ func (nt *Network) PlusPhaseStart() {
 	ctx := nt.Context()
 	nd := int(nix.NNeurons * ctx.NData)
 	RunPlusPhaseStartNeuron(nd)
-	RunDone()
 }
 
 // PlusPhase does updating after end of plus phase
@@ -351,6 +332,12 @@ func MinusPhaseNeuron(i uint32) { //gosl:kernel
 	Layers[li].MinusPhaseNeuron(ctx, ni, di)
 }
 
+// MinusPhasePost does special algorithm post processing.
+func MinusPhasePost(li uint32) { //gosl:kernel
+	ctx := GetCtx(0)
+	Layers[li].MinusPhasePost(ctx)
+}
+
 // PlusPhaseStartNeuron is the kernel over Neurons * Data to
 // do neuron-level updating at start of plus phase.
 func PlusPhaseStartNeuron(i uint32) { //gosl:kernel
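The net-level change above replaces a CPU loop over nt.Layers (plus an explicit ToGPULayersNeurons sync) with a per-layer kernel launched via RunMinusPhasePost(int(nix.NLayers)). A minimal sketch of that dispatch pattern, assuming only that the CPU fallback calls the kernel once per index (as gpu.VectorizeFunc does):

```go
package main

import "fmt"

// layer stands in for LayerParams; only what the sketch needs.
type layer struct {
	name string
	typ  string
}

// minusPhasePost mirrors the per-layer kernel body: one invocation per layer
// index, switching on layer type for special-case processing.
func minusPhasePost(layers []layer, li uint32) {
	ly := &layers[li]
	switch ly.typ {
	case "Matrix":
		fmt.Println(ly.name, ": MatrixGated")
	case "Pulvinar":
		fmt.Println(ly.name, ": DecayStateNeuronsAll(1, 1, 0)")
	}
}

// runKernel emulates the CPU fallback: call the kernel body once per index.
// On GPU the same body runs as one dispatch over all indexes.
func runKernel(n int, f func(i uint32)) {
	for i := 0; i < n; i++ {
		f(uint32(i))
	}
}

func main() {
	layers := []layer{{"VMatrixGo", "Matrix"}, {"Pulv", "Pulvinar"}, {"V1", "Super"}}
	runKernel(len(layers), func(i uint32) { minusPhasePost(layers, i) })
}
```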
diff --git a/axon/gosl.go b/axon/gosl.go
index 5a6ba35f..fc3460c3 100644
--- a/axon/gosl.go
+++ b/axon/gosl.go
@@ -75,6 +75,7 @@ func GPUInit() {
 		gpu.NewComputePipelineShaderFS(shaders, "shaders/LayerGi.wgsl", sy)
 		gpu.NewComputePipelineShaderFS(shaders, "shaders/MinusPhaseNeuron.wgsl", sy)
 		gpu.NewComputePipelineShaderFS(shaders, "shaders/MinusPhasePool.wgsl", sy)
+		gpu.NewComputePipelineShaderFS(shaders, "shaders/MinusPhasePost.wgsl", sy)
 		gpu.NewComputePipelineShaderFS(shaders, "shaders/NewStateLayer.wgsl", sy)
 		gpu.NewComputePipelineShaderFS(shaders, "shaders/NewStateNeuron.wgsl", sy)
 		gpu.NewComputePipelineShaderFS(shaders, "shaders/PlusPhaseNeuron.wgsl", sy)
@@ -745,6 +746,48 @@ func RunOneMinusPhasePool(n int, syncVars ...GPUVars) {
 		RunMinusPhasePoolCPU(n)
 	}
 }
+// RunMinusPhasePost runs the MinusPhasePost kernel with given number of elements,
+// on either the CPU or GPU depending on the UseGPU variable.
+// Can call multiple Run* kernels in a row, which are then all launched
+// in the same command submission on the GPU, which is by far the most efficient.
+// MUST call RunDone (with optional vars to sync) after all Run calls.
+// Alternatively, a single-shot RunOneMinusPhasePost call does Run and Done for a
+// single run-and-sync case.
+func RunMinusPhasePost(n int) {
+	if UseGPU {
+		RunMinusPhasePostGPU(n)
+	} else {
+		RunMinusPhasePostCPU(n)
+	}
+}
+
+// RunMinusPhasePostGPU runs the MinusPhasePost kernel on the GPU. See [RunMinusPhasePost] for more info.
+func RunMinusPhasePostGPU(n int) {
+	sy := GPUSystem
+	pl := sy.ComputePipelines["MinusPhasePost"]
+	ce, _ := sy.BeginComputePass()
+	pl.Dispatch1D(ce, n, 64)
+}
+
+// RunMinusPhasePostCPU runs the MinusPhasePost kernel on the CPU.
+func RunMinusPhasePostCPU(n int) {
+	gpu.VectorizeFunc(0, n, MinusPhasePost)
+}
+
+// RunOneMinusPhasePost runs the MinusPhasePost kernel with given number of elements,
+// on either the CPU or GPU depending on the UseGPU variable.
+// This version then calls RunDone with the given variables to sync
+// after the Run, for a single-shot Run-and-Done call. If multiple kernels
+// can be run in sequence, it is much more efficient to do multiple Run*
+// calls followed by a RunDone call.
+func RunOneMinusPhasePost(n int, syncVars ...GPUVars) {
+	if UseGPU {
+		RunMinusPhasePostGPU(n)
+		RunDone(syncVars...)
+	} else {
+		RunMinusPhasePostCPU(n)
+	}
+}
 // RunNewStateLayer runs the NewStateLayer kernel with given number of elements,
 // on either the CPU or GPU depending on the UseGPU variable.
 // Can call multiple Run* kernels in a row, which are then all launched
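The generated wrappers above document a batching contract: several Run* calls queue kernels into one GPU command submission, and RunDone submits and syncs variables back. The sketch below imitates that contract with a toy queue; commandQueue and its methods are illustrative stand-ins, not the gosl API:

```go
package main

import "fmt"

// commandQueue emulates batching several kernel dispatches into a single
// submission, which is what makes Run* + RunDone cheaper than RunOne* calls.
type commandQueue struct{ pending []string }

func (q *commandQueue) run(kernel string, n int) {
	q.pending = append(q.pending, fmt.Sprintf("%s x%d", kernel, n))
}

// done submits everything queued so far in one go, then syncs results back.
func (q *commandQueue) done(syncVars ...string) {
	fmt.Println("submit:", q.pending)
	fmt.Println("sync back:", syncVars)
	q.pending = nil
}

func main() {
	var q commandQueue
	// Mirrors Network.MinusPhase after this change: pools, neurons, then the
	// new per-layer MinusPhasePost, all in one submission.
	q.run("MinusPhasePool", 8)
	q.run("MinusPhaseNeuron", 1024)
	q.run("MinusPhasePost", 4) // one invocation per layer
	q.done("LayersVar", "PoolsIntVar")
}
```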
diff --git a/axon/init-layer.go b/axon/init-layer.go
index a4859fd6..3459d50a 100644
--- a/axon/init-layer.go
+++ b/axon/init-layer.go
@@ -345,21 +345,3 @@ func (ly *Layer) DecayStatePool(ctx *Context, pool int, decay, glong, ahp float3
 		PoolInhibDecay(pi, di, decay)
 	}
 }
-
-// DecayStateNeuronsAll decays neural activation state by given proportion
-// (default decay values are ly.Params.Acts.Decay.Act, Glong, AHP)
-// for all data parallel indexes. Does not decay pool or layer state.
-// This is used for minus phase of Pulvinar layers to clear state in prep
-// for driver plus phase.
-func (ly *Layer) DecayStateNeuronsAll(ctx *Context, decay, glong, ahp float32) {
-	nn := ly.NNeurons
-	for lni := uint32(0); lni < nn; lni++ {
-		ni := ly.NeurStIndex + lni
-		if NeuronIsOff(ni) {
-			continue
-		}
-		for di := uint32(0); di < ctx.NData; di++ {
-			ly.Params.Acts.DecayState(ctx, ni, di, decay, glong, ahp)
-		}
-	}
-}

diff --git a/axon/init-layer.goal b/axon/init-layer.goal
index cac47af0..724f516e 100644
--- a/axon/init-layer.goal
+++ b/axon/init-layer.goal
@@ -343,22 +343,4 @@ func (ly *Layer) DecayStatePool(ctx *Context, pool int, decay, glong, ahp float3
 	}
 }
 
-// DecayStateNeuronsAll decays neural activation state by given proportion
-// (default decay values are ly.Params.Acts.Decay.Act, Glong, AHP)
-// for all data parallel indexes. Does not decay pool or layer state.
-// This is used for minus phase of Pulvinar layers to clear state in prep
-// for driver plus phase.
-func (ly *Layer) DecayStateNeuronsAll(ctx *Context, decay, glong, ahp float32) {
-	nn := ly.NNeurons
-	for lni := uint32(0); lni < nn; lni++ {
-		ni := ly.NeurStIndex + lni
-		if NeuronIsOff(ni) {
-			continue
-		}
-		for di := uint32(0); di < ctx.NData; di++ {
-			ly.Params.Acts.DecayState(ctx, ni, di, decay, glong, ahp)
-		}
-	}
-}
-

diff --git a/axon/layerparams.go b/axon/layerparams.go
index 258c041d..e35bae00 100644
--- a/axon/layerparams.go
+++ b/axon/layerparams.go
@@ -112,7 +112,7 @@ type LayerParams struct {
 	Pulv PulvParams `display:"inline"`
 
 	// Matrix has parameters for BG Striatum Matrix MSN layers, which are
-	// the main Go / NoGo gating units in BG.
+	// the main Go / NoGo gating units in BG. GateThr also used in BGThal.
 	Matrix MatrixParams `display:"inline"`
 
 	// GP has params for GP (globus pallidus) of the BG layers.
@@ -232,7 +232,7 @@ func (ly *LayerParams) ShouldDisplay(field string) bool {
 	case "Pulv":
 		return ly.Type == PulvinarLayer
 	case "Matrix":
-		return ly.Type == MatrixLayer
+		return ly.Type == MatrixLayer || ly.Type == BGThalLayer
 	case "GP":
 		return ly.Type == GPLayer
 	case "LDT":
@@ -276,7 +276,7 @@ func (ly *LayerParams) AllParams() string {
 		b, _ = json.MarshalIndent(&ly.Pulv, "", " ")
 		str += "Pulv: {\n " + JsonToParams(b)
-	case MatrixLayer:
+	case MatrixLayer, BGThalLayer:
 		b, _ = json.MarshalIndent(&ly.Matrix, "", " ")
 		str += "Matrix: {\n " + JsonToParams(b)
 	case GPLayer:
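With the layerparams.go change, BGThal layers now share the MatrixParams display and reporting path, since GateThr applies to them as well. A minimal sketch of the updated type dispatch, using a toy enum in place of LayerTypes:

```go
package main

import "fmt"

type layerType int

const (
	matrixLayer layerType = iota
	bgThalLayer
	gpLayer
)

// shouldDisplayMatrix mirrors the updated ShouldDisplay case for "Matrix":
// BGThal layers share MatrixParams because GateThr applies to them too.
func shouldDisplayMatrix(t layerType) bool {
	return t == matrixLayer || t == bgThalLayer
}

func main() {
	for _, t := range []layerType{matrixLayer, bgThalLayer, gpLayer} {
		fmt.Println(t, shouldDisplayMatrix(t))
	}
}
```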
diff --git a/axon/layertypes.go b/axon/layertypes.go
index a5ea941e..0061cd6d 100644
--- a/axon/layertypes.go
+++ b/axon/layertypes.go
@@ -39,8 +39,7 @@ const (
 	// or learning directly. It is rarely used in axon.
 	CompareLayer
 
-	/////////////
-	// Deep
+	//////// Deep
 
 	// CT are layer 6 corticothalamic projecting neurons,
 	// which drive "top down" predictions in Pulvinar layers.
@@ -85,8 +84,7 @@ const (
 	// prediction layers, and other layers that require predictive dynamic
 	PTPredLayer
 
-	/////////////////////////////
-	// PCORE Basal Ganglia (BG)
+	//////// PCORE Basal Ganglia (BG)
 
 	// MatrixLayer represents the matrisome medium spiny neurons (MSNs)
 	// that are the main Go / NoGo gating units in BG.
@@ -120,8 +118,7 @@ const (
 	// For visualization and / or motor action signaling.
 	VSGatedLayer
 
-	/////////////
-	// Rubicon
+	//////// Rubicon
 
 	// BLALayer represents a basolateral amygdala layer
 	// which learns to associate arbitrary stimuli (CSs)
@@ -185,8 +182,7 @@ const (
 	// vial Global state values to all layers.
 	VTALayer
 
-	/////////////
-	// RL
+	//////// RL
 
 	// RewLayer represents positive (first unit) or negative (second unit)
 	// reward values, showing spiking rates for each, and Act always represents

diff --git a/axon/pcore-layer.go b/axon/pcore-layer.go
index 0d9a134a..2a957125 100644
--- a/axon/pcore-layer.go
+++ b/axon/pcore-layer.go
@@ -24,13 +24,16 @@ import (
 // Must set Learn.NeuroMod.DAMod = D1Mod or D2Mod via SetBuildConfig("DAMod").
 type MatrixParams struct {
 
-	// threshold on layer Avg SpkMax for Matrix Go and VThal layers to count as having gated
+	// GateThr is the threshold on layer Avg SpkMax for Matrix Go and BG Thal
+	// layers to count as having gated.
 	GateThr float32 `default:"0.05"`
 
-	// is this a ventral striatum (VS) matrix layer? if true, the gating status of this layer is recorded in the Global state, and used for updating effort and other factors.
+	// IsVS is this a ventral striatum (VS) matrix layer? If true, the gating
+	// status of this layer is recorded in the Global state,
+	// and used for updating effort and other factors.
 	IsVS slbool.Bool
 
-	// index of other matrix (Go if we are NoGo and vice-versa). Set during Build from BuildConfig OtherMatrixName
+	// index of other matrix (Go if we are NoGo and vice-versa). Set during Build from BuildConfig OtherMatrixName
 	OtherMatrixIndex int32 `edit:"-"`
 
 	// index of thalamus layer that we gate. needed to get gating information. Set during Build from BuildConfig ThalLay1Name if present -- -1 if not used
@@ -95,56 +98,57 @@ func (gp *GPParams) Defaults() {
 func (gp *GPParams) Update() {
 }
 
-//gosl:end
-
 // MatrixGated is called after std PlusPhase, on CPU, has Pool info
 // downloaded from GPU, to set Gated flag based on SpkMax activity
-func (ly *Layer) MatrixGated(ctx *Context) {
-	if ly.Params.Learn.NeuroMod.DAMod != D1Mod {
-		lpi := ly.Params.PoolIndex(0)
-		oly := ly.Network.Layers[int(ly.Params.Matrix.OtherMatrixIndex)]
-		opi := oly.Params.PoolIndex(0)
+func (ly *LayerParams) MatrixGated(ctx *Context) {
+	lpi := ly.PoolIndex(0)
+	if ly.Learn.NeuroMod.DAMod != D1Mod {
+		oly := Layers[ly.Matrix.OtherMatrixIndex]
+		olpi := oly.PoolSt
 		// note: NoGo layers don't track gating at the sub-pool level!
 		for di := uint32(0); di < ctx.NData; di++ {
-			PoolsInt.Set(PoolsInt.Value(int(opi), int(di), int(PoolGated)), int(lpi), int(di), int(PoolGated))
+			PoolsInt.Set(PoolsInt.Value(int(olpi), int(di), int(PoolGated)), int(lpi), int(di), int(PoolGated))
 		}
 		return
 	}
-	// todo: Context requires data parallel state!
-
 	for di := uint32(0); di < ctx.NData; di++ {
-		mtxGated, poolIndex := ly.GatedFromSpkMax(di, ly.Params.Matrix.GateThr)
-
+		mtxGated := PoolsInt.Value(int(lpi), int(di), int(PoolGated)) > 0
 		thalGated := false
-		if ly.Params.Matrix.ThalLay1Index >= 0 {
-			tly := ly.Network.Layers[int(ly.Params.Matrix.ThalLay1Index)]
-			gt, _ := tly.GatedFromSpkMax(di, ly.Params.Matrix.GateThr)
-			thalGated = thalGated || gt
+		if ly.Matrix.ThalLay1Index >= 0 {
+			tly := Layers[ly.Matrix.ThalLay1Index]
+			tlpi := tly.PoolSt
+			gt := PoolsInt.Value(int(tlpi), int(di), int(PoolGated))
+			thalGated = thalGated || gt > 0
 		}
-		if ly.Params.Matrix.ThalLay2Index >= 0 {
-			tly := ly.Network.Layers[int(ly.Params.Matrix.ThalLay2Index)]
-			gt, _ := tly.GatedFromSpkMax(di, ly.Params.Matrix.GateThr)
-			thalGated = thalGated || gt
+		if ly.Matrix.ThalLay2Index >= 0 {
+			tly := Layers[ly.Matrix.ThalLay2Index]
+			tlpi := tly.PoolSt
+			gt := PoolsInt.Value(int(tlpi), int(di), int(PoolGated))
+			thalGated = thalGated || gt > 0
 		}
-		if ly.Params.Matrix.ThalLay3Index >= 0 {
-			tly := ly.Network.Layers[int(ly.Params.Matrix.ThalLay3Index)]
-			gt, _ := tly.GatedFromSpkMax(di, ly.Params.Matrix.GateThr)
-			thalGated = thalGated || gt
+		if ly.Matrix.ThalLay3Index >= 0 {
+			tly := Layers[ly.Matrix.ThalLay3Index]
+			tlpi := tly.PoolSt
+			gt := PoolsInt.Value(int(tlpi), int(di), int(PoolGated))
+			thalGated = thalGated || gt > 0
 		}
-		if ly.Params.Matrix.ThalLay4Index >= 0 {
-			tly := ly.Network.Layers[int(ly.Params.Matrix.ThalLay4Index)]
-			gt, _ := tly.GatedFromSpkMax(di, ly.Params.Matrix.GateThr)
-			thalGated = thalGated || gt
+		if ly.Matrix.ThalLay4Index >= 0 {
+			tly := Layers[ly.Matrix.ThalLay4Index]
+			tlpi := tly.PoolSt
+			gt := PoolsInt.Value(int(tlpi), int(di), int(PoolGated))
+			thalGated = thalGated || gt > 0
 		}
-		if ly.Params.Matrix.ThalLay5Index >= 0 {
-			tly := ly.Network.Layers[int(ly.Params.Matrix.ThalLay5Index)]
-			gt, _ := tly.GatedFromSpkMax(di, ly.Params.Matrix.GateThr)
-			thalGated = thalGated || gt
+		if ly.Matrix.ThalLay5Index >= 0 {
+			tly := Layers[ly.Matrix.ThalLay5Index]
+			tlpi := tly.PoolSt
+			gt := PoolsInt.Value(int(tlpi), int(di), int(PoolGated))
+			thalGated = thalGated || gt > 0
 		}
-		if ly.Params.Matrix.ThalLay6Index >= 0 {
-			tly := ly.Network.Layers[int(ly.Params.Matrix.ThalLay6Index)]
-			gt, _ := tly.GatedFromSpkMax(di, ly.Params.Matrix.GateThr)
-			thalGated = thalGated || gt
+		if ly.Matrix.ThalLay6Index >= 0 {
+			tly := Layers[ly.Matrix.ThalLay6Index]
+			tlpi := tly.PoolSt
+			gt := PoolsInt.Value(int(tlpi), int(di), int(PoolGated))
+			thalGated = thalGated || gt > 0
 		}
 
 		mtxGated = mtxGated && thalGated
@@ -156,15 +160,24 @@
 		// that this will make sense and not doing yet..
 
 		if !mtxGated { // nobody did if thal didn't
-			for spi := uint32(0); spi < ly.NPools; spi++ {
-				pi := ly.Params.PoolIndex(spi)
+			for spi := uint32(0); spi < ly.Indexes.NPools; spi++ {
+				pi := ly.PoolIndex(spi)
 				PoolsInt.Set(0, int(pi), int(di), int(PoolGated))
 			}
 		}
-		if ctx.PlusPhase.IsTrue() && ly.Params.Matrix.IsVS.IsTrue() {
+		if ctx.PlusPhase.IsTrue() && ly.Matrix.IsVS.IsTrue() {
 			GlobalScalars.Set(num.FromBool[float32](mtxGated), int(GvVSMatrixJustGated), int(di))
 			if mtxGated {
-				GlobalVectors.Set(1, int(GvVSMatrixPoolGated), int(poolIndex), int(di))
+				poolIndex := int32(-1)
+				for spi := uint32(1); spi < ly.Indexes.NPools; spi++ {
+					pi := ly.PoolIndex(spi)
+					if poolIndex < 0 && PoolsInt.Value(int(pi), int(di), int(PoolGated)) > 0 {
+						poolIndex = int32(pi)
+					}
+				}
+				if poolIndex > 0 {
+					GlobalVectors.Set(float32(1.0), int(GvVSMatrixPoolGated), int(poolIndex), int(di))
+				}
 			}
 		}
 	}
@@ -172,21 +185,17 @@ func (ly *Layer) MatrixGated(ctx *Context) {
 
 // GatedFromSpkMax updates the Gated state in Pools of given layer,
 // based on Avg SpkMax being above given threshold.
-// returns true if any gated, and the pool index if 4D layer (0 = first).
-func (ly *Layer) GatedFromSpkMax(di uint32, thr float32) (bool, int) {
+func (ly *LayerParams) GatedFromSpkMax(ctx *Context, di uint32) {
 	anyGated := false
-	poolIndex := -1
-	lpi := ly.Params.PoolIndex(0)
-	if ly.Is4D() {
-		for spi := uint32(1); spi < ly.NPools; spi++ {
-			pi := ly.Params.PoolIndex(spi)
+	lpi := ly.PoolIndex(0)
+	thr := ly.Matrix.GateThr
+	if ly.Indexes.NPools > 1 {
+		for spi := uint32(1); spi < ly.Indexes.NPools; spi++ {
+			pi := ly.PoolIndex(spi)
 			spkavg := PoolAvgMax(AMSpkMax, AMCycle, Avg, pi, di)
 			gthr := spkavg > thr
 			if gthr {
 				anyGated = true
-				if poolIndex < 0 {
-					poolIndex = int(spi) - 1
-				}
 				PoolsInt.Set(1, int(pi), int(di), int(PoolGated))
 			} else {
 				PoolsInt.Set(0, int(pi), int(di), int(PoolGated))
@@ -203,16 +212,17 @@ func (ly *Layer) GatedFromSpkMax(di uint32, thr float32) (bool, int) {
 	} else {
 		PoolsInt.Set(0, int(lpi), int(di), int(PoolGated))
 	}
-	return anyGated, poolIndex
 }
 
 // AnyGated returns true if the layer-level pool Gated flag is true,
 // which indicates if any of the layers gated.
-func (ly *Layer) AnyGated(di uint32) bool {
-	lpi := ly.Params.PoolIndex(0)
+func (ly *LayerParams) AnyGated(di uint32) bool {
+	lpi := ly.PoolIndex(0)
 	return PoolsInt.Value(int(lpi), int(di), int(PoolGated)) > 0
 }
+//gosl:end
+
 
 func (ly *Layer) MatrixDefaults() {
 	ly.Params.Acts.Decay.Act = 1
 	ly.Params.Acts.Decay.Glong = 1 // prevent carryover of NMDA

diff --git a/axon/pcore-layer.goal b/axon/pcore-layer.goal
index 3509ce3d..daa2154c 100644
--- a/axon/pcore-layer.goal
+++ b/axon/pcore-layer.goal
@@ -22,13 +22,16 @@ import (
 // Must set Learn.NeuroMod.DAMod = D1Mod or D2Mod via SetBuildConfig("DAMod").
 type MatrixParams struct {
 
-	// threshold on layer Avg SpkMax for Matrix Go and VThal layers to count as having gated
+	// GateThr is the threshold on layer Avg SpkMax for Matrix Go and BG Thal
+	// layers to count as having gated.
 	GateThr float32 `default:"0.05"`
 
-	// is this a ventral striatum (VS) matrix layer? if true, the gating status of this layer is recorded in the Global state, and used for updating effort and other factors.
+	// IsVS is this a ventral striatum (VS) matrix layer? If true, the gating
+	// status of this layer is recorded in the Global state,
+	// and used for updating effort and other factors.
 	IsVS slbool.Bool
 
-	// index of other matrix (Go if we are NoGo and vice-versa). Set during Build from BuildConfig OtherMatrixName
+	// index of other matrix (Go if we are NoGo and vice-versa). Set during Build from BuildConfig OtherMatrixName
 	OtherMatrixIndex int32 `edit:"-"`
 
 	// index of thalamus layer that we gate. needed to get gating information. Set during Build from BuildConfig ThalLay1Name if present -- -1 if not used
@@ -93,56 +96,57 @@ func (gp *GPParams) Defaults() {
 func (gp *GPParams) Update() {
 }
 
-//gosl:end
-
 // MatrixGated is called after std PlusPhase, on CPU, has Pool info
 // downloaded from GPU, to set Gated flag based on SpkMax activity
-func (ly *Layer) MatrixGated(ctx *Context) {
-	if ly.Params.Learn.NeuroMod.DAMod != D1Mod {
-		lpi := ly.Params.PoolIndex(0)
-		oly := ly.Network.Layers[int(ly.Params.Matrix.OtherMatrixIndex)]
-		opi := oly.Params.PoolIndex(0)
+func (ly *LayerParams) MatrixGated(ctx *Context) {
+	lpi := ly.PoolIndex(0)
+	if ly.Learn.NeuroMod.DAMod != D1Mod {
+		oly := Layers[ly.Matrix.OtherMatrixIndex]
+		olpi := oly.PoolSt
 		// note: NoGo layers don't track gating at the sub-pool level!
 		for di := uint32(0); di < ctx.NData; di++ {
-			PoolsInt[lpi, di, PoolGated] = PoolsInt[opi, di, PoolGated]
+			PoolsInt[lpi, di, PoolGated] = PoolsInt[olpi, di, PoolGated]
 		}
 		return
 	}
-	// todo: Context requires data parallel state!
-
 	for di := uint32(0); di < ctx.NData; di++ {
-		mtxGated, poolIndex := ly.GatedFromSpkMax(di, ly.Params.Matrix.GateThr)
-
+		mtxGated := PoolsInt[lpi, di, PoolGated] > 0
 		thalGated := false
-		if ly.Params.Matrix.ThalLay1Index >= 0 {
-			tly := ly.Network.Layers[int(ly.Params.Matrix.ThalLay1Index)]
-			gt, _ := tly.GatedFromSpkMax(di, ly.Params.Matrix.GateThr)
-			thalGated = thalGated || gt
+		if ly.Matrix.ThalLay1Index >= 0 {
+			tly := Layers[ly.Matrix.ThalLay1Index]
+			tlpi := tly.PoolSt
+			gt := PoolsInt[tlpi, di, PoolGated]
+			thalGated = thalGated || gt > 0
 		}
-		if ly.Params.Matrix.ThalLay2Index >= 0 {
-			tly := ly.Network.Layers[int(ly.Params.Matrix.ThalLay2Index)]
-			gt, _ := tly.GatedFromSpkMax(di, ly.Params.Matrix.GateThr)
-			thalGated = thalGated || gt
+		if ly.Matrix.ThalLay2Index >= 0 {
+			tly := Layers[ly.Matrix.ThalLay2Index]
+			tlpi := tly.PoolSt
+			gt := PoolsInt[tlpi, di, PoolGated]
+			thalGated = thalGated || gt > 0
 		}
-		if ly.Params.Matrix.ThalLay3Index >= 0 {
-			tly := ly.Network.Layers[int(ly.Params.Matrix.ThalLay3Index)]
-			gt, _ := tly.GatedFromSpkMax(di, ly.Params.Matrix.GateThr)
-			thalGated = thalGated || gt
+		if ly.Matrix.ThalLay3Index >= 0 {
+			tly := Layers[ly.Matrix.ThalLay3Index]
+			tlpi := tly.PoolSt
+			gt := PoolsInt[tlpi, di, PoolGated]
+			thalGated = thalGated || gt > 0
 		}
-		if ly.Params.Matrix.ThalLay4Index >= 0 {
-			tly := ly.Network.Layers[int(ly.Params.Matrix.ThalLay4Index)]
-			gt, _ := tly.GatedFromSpkMax(di, ly.Params.Matrix.GateThr)
-			thalGated = thalGated || gt
+		if ly.Matrix.ThalLay4Index >= 0 {
+			tly := Layers[ly.Matrix.ThalLay4Index]
+			tlpi := tly.PoolSt
+			gt := PoolsInt[tlpi, di, PoolGated]
+			thalGated = thalGated || gt > 0
 		}
-		if ly.Params.Matrix.ThalLay5Index >= 0 {
-			tly := ly.Network.Layers[int(ly.Params.Matrix.ThalLay5Index)]
-			gt, _ := tly.GatedFromSpkMax(di, ly.Params.Matrix.GateThr)
-			thalGated = thalGated || gt
+		if ly.Matrix.ThalLay5Index >= 0 {
+			tly := Layers[ly.Matrix.ThalLay5Index]
+			tlpi := tly.PoolSt
+			gt := PoolsInt[tlpi, di, PoolGated]
+			thalGated = thalGated || gt > 0
 		}
-		if ly.Params.Matrix.ThalLay6Index >= 0 {
-			tly := ly.Network.Layers[int(ly.Params.Matrix.ThalLay6Index)]
-			gt, _ := tly.GatedFromSpkMax(di, ly.Params.Matrix.GateThr)
-			thalGated = thalGated || gt
+		if ly.Matrix.ThalLay6Index >= 0 {
+			tly := Layers[ly.Matrix.ThalLay6Index]
+			tlpi := tly.PoolSt
+			gt := PoolsInt[tlpi, di, PoolGated]
+			thalGated = thalGated || gt > 0
 		}
 
 		mtxGated = mtxGated && thalGated
@@ -154,15 +158,24 @@
 		// that this will make sense and not doing yet..
 
 		if !mtxGated { // nobody did if thal didn't
-			for spi := uint32(0); spi < ly.NPools; spi++ {
-				pi := ly.Params.PoolIndex(spi)
+			for spi := uint32(0); spi < ly.Indexes.NPools; spi++ {
+				pi := ly.PoolIndex(spi)
 				PoolsInt[pi, di, PoolGated] = 0
 			}
 		}
-		if ctx.PlusPhase.IsTrue() && ly.Params.Matrix.IsVS.IsTrue() {
+		if ctx.PlusPhase.IsTrue() && ly.Matrix.IsVS.IsTrue() {
 			GlobalScalars[GvVSMatrixJustGated, di] = num.FromBool[float32](mtxGated)
 			if mtxGated {
-				GlobalVectors[GvVSMatrixPoolGated, poolIndex, di] = 1
+				poolIndex := int32(-1)
+				for spi := uint32(1); spi < ly.Indexes.NPools; spi++ {
+					pi := ly.PoolIndex(spi)
+					if poolIndex < 0 && PoolsInt[pi, di, PoolGated] > 0 {
+						poolIndex = int32(pi)
+					}
+				}
+				if poolIndex > 0 {
+					GlobalVectors[GvVSMatrixPoolGated, poolIndex, di] = float32(1.0)
+				}
 			}
 		}
 	}
@@ -170,21 +183,17 @@ func (ly *Layer) MatrixGated(ctx *Context) {
 
 // GatedFromSpkMax updates the Gated state in Pools of given layer,
 // based on Avg SpkMax being above given threshold.
-// returns true if any gated, and the pool index if 4D layer (0 = first).
-func (ly *Layer) GatedFromSpkMax(di uint32, thr float32) (bool, int) {
+func (ly *LayerParams) GatedFromSpkMax(ctx *Context, di uint32) {
 	anyGated := false
-	poolIndex := -1
-	lpi := ly.Params.PoolIndex(0)
-	if ly.Is4D() {
-		for spi := uint32(1); spi < ly.NPools; spi++ {
-			pi := ly.Params.PoolIndex(spi)
+	lpi := ly.PoolIndex(0)
+	thr := ly.Matrix.GateThr
+	if ly.Indexes.NPools > 1 {
+		for spi := uint32(1); spi < ly.Indexes.NPools; spi++ {
+			pi := ly.PoolIndex(spi)
 			spkavg := PoolAvgMax(AMSpkMax, AMCycle, Avg, pi, di)
 			gthr := spkavg > thr
 			if gthr {
 				anyGated = true
-				if poolIndex < 0 {
-					poolIndex = int(spi) - 1
-				}
 				PoolsInt[pi, di, PoolGated] = 1
 			} else {
 				PoolsInt[pi, di, PoolGated] = 0
@@ -201,16 +210,17 @@ func (ly *Layer) GatedFromSpkMax(di uint32, thr float32) (bool, int) {
 	} else {
 		PoolsInt[lpi, di, PoolGated] = 0
 	}
-	return anyGated, poolIndex
 }
 
 // AnyGated returns true if the layer-level pool Gated flag is true,
 // which indicates if any of the layers gated.
-func (ly *Layer) AnyGated(di uint32) bool {
-	lpi := ly.Params.PoolIndex(0)
+func (ly *LayerParams) AnyGated(di uint32) bool {
+	lpi := ly.PoolIndex(0)
 	return PoolsInt[lpi, di, PoolGated] > 0
 }
+//gosl:end
+
 
 func (ly *Layer) MatrixDefaults() {
 	ly.Params.Acts.Decay.Act = 1
 	ly.Params.Acts.Decay.Glong = 1 // prevent carryover of NMDA
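GatedFromSpkMax no longer returns the (anyGated, poolIndex) pair: gating is written only into PoolsInt, and MatrixGated re-derives the first gated pool by scanning PoolsInt afterwards. The following self-contained sketch shows the thresholding logic, with a plain slice of pool SpkMax averages standing in for PoolAvgMax:

```go
package main

import "fmt"

// gatedFromSpkMax mirrors the new logic: for a multi-pool layer, each sub-pool
// gates if its average SpkMax exceeds thr, and the layer-wide pool (index 0)
// records whether any sub-pool gated. Returns the per-pool gated flags.
func gatedFromSpkMax(spkMaxAvg []float32, thr float32) []int32 {
	nPools := len(spkMaxAvg) // index 0 = layer-wide pool
	gated := make([]int32, nPools)
	anyGated := false
	if nPools > 1 {
		for pi := 1; pi < nPools; pi++ {
			if spkMaxAvg[pi] > thr {
				gated[pi] = 1
				anyGated = true
			}
		}
	} else {
		anyGated = spkMaxAvg[0] > thr
	}
	if anyGated {
		gated[0] = 1
	}
	return gated
}

func main() {
	// GateThr default is 0.05; pools 2 and 3 exceed it here.
	fmt.Println(gatedFromSpkMax([]float32{0, 0.01, 0.12, 0.3}, 0.05))
	// [1 0 1 1]
}
```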
diff --git a/axon/shaders/CyclePost.wgsl b/axon/shaders/CyclePost.wgsl
index 075504d3..87b8f278 100644
--- a/axon/shaders/CyclePost.wgsl
+++ b/axon/shaders/CyclePost.wgsl
@@ -85,6 +85,9 @@ fn LayerParams_CyclePost(ly: ptr<function,LayerParams>, ctx: ptr<function,Context>, di: u32) {
 	var lpi = LayerParams_PoolIndex(ly, u32(u32(0)));
 	LayerParams_CyclePostLayer(ly, ctx, lpi, di);
 	switch ((*ly).Type) {
+	case MatrixLayer, BGThalLayer: {
+		LayerParams_GatedFromSpkMax(ly, ctx, di);
+	}
 	case CeMLayer: {
 		LayerParams_CyclePostCeMLayer(ly, ctx, lpi, di);
 	}
@@ ... @@
+fn LayerParams_GatedFromSpkMax(ly: ptr<function,LayerParams>, ctx: ptr<function,Context>, di: u32) {
+	var anyGated = false;
+	var lpi = LayerParams_PoolIndex(ly, u32(u32(0)));
+	var thr = (*ly).Matrix.GateThr;
+	if ((*ly).Indexes.NPools > 1) {
+		for (var spi = u32(1); spi < (*ly).Indexes.NPools; spi++) {
+			var pi = LayerParams_PoolIndex(ly, spi);
+			var spkavg = PoolAvgMax(AMSpkMax, AMCycle, Avg, pi, di);
+			var gthr = spkavg > thr;
+			if (gthr) {
+				anyGated = true;
+				PoolsInt[IndexI323D(PoolsInt[0], PoolsInt[1], PoolsInt[2], u32(pi),u32(di),u32(PoolGated))] = 1;
+			} else {
+				PoolsInt[IndexI323D(PoolsInt[0], PoolsInt[1], PoolsInt[2], u32(pi),u32(di),u32(PoolGated))] = 0;
+			}
+		}
+	} else {
+		var spkavg = PoolAvgMax(AMSpkMax, AMCycle, Avg, lpi, di);
+		if (spkavg > thr) {
+			anyGated = true;
+		}
+	}
+	if (anyGated) {
+		PoolsInt[IndexI323D(PoolsInt[0], PoolsInt[1], PoolsInt[2], u32(lpi),u32(di),u32(PoolGated))] = 1;
+	} else {
+		PoolsInt[IndexI323D(PoolsInt[0], PoolsInt[1], PoolsInt[2], u32(lpi),u32(di),u32(PoolGated))] = 0;
+	}
+}
 
 ///////////// import: "pcore-path.go"
 struct MatrixPathParams {
inhibition"}, {Name: "LayInhib", Doc: "LayInhib has indexes of layers that contribute between-layer inhibition\n to this layer. Set these indexes via BuildConfig LayInhibXName (X = 1, 2...)."}, {Name: "Learn", Doc: "Learn has learning parameters and methods that operate at the neuron level."}, {Name: "Bursts", Doc: "Bursts has [BurstParams] that determine how the 5IB Burst activation\nis computed from CaSpkP integrated spiking values in Super layers."}, {Name: "CT", Doc: "CT has params for the CT corticothalamic layer and PTPred layer that\ngenerates predictions over the Pulvinar using context. Uses the CtxtGe\nexcitatory input plus stronger NMDA channels to maintain context trace."}, {Name: "Pulv", Doc: "Pulv has parameters for how the plus-phase (outcome) state of Pulvinar\nthalamic relay cell neurons is computed from the corresponding driver\nneuron Burst activation (or CaSpkP if not Super)."}, {Name: "Matrix", Doc: "Matrix has parameters for BG Striatum Matrix MSN layers, which are\nthe main Go / NoGo gating units in BG."}, {Name: "GP", Doc: "GP has params for GP (globus pallidus) of the BG layers."}, {Name: "LDT", Doc: "LDT has parameters for laterodorsal tegmentum ACh salience neuromodulatory\nsignal, driven by superior colliculus stimulus novelty, US input / absence,\nand OFC / ACC inhibition."}, {Name: "VTA", Doc: "VTA has parameters for ventral tegmental area dopamine (DA) based on\nLHb PVDA (primary value -- at US time, computed at start of each trial\nand stored in LHbPVDA global value) and Amygdala (CeM) CS / learned\nvalue (LV) activations, which update every cycle."}, {Name: "RWPred", Doc: "RWPred has parameters for reward prediction using a simple Rescorla-Wagner\nlearning rule (i.e., PV learning in the Rubicon framework)."}, {Name: "RWDa", Doc: "RWDa has parameters for reward prediction dopamine using a simple\nRescorla-Wagner learning rule (i.e., PV learning in the Rubicon framework)."}, {Name: "TDInteg", Doc: "TDInteg has parameters for temporal differences (TD) reward integration layer."}, {Name: "TDDa", Doc: "TDDa has parameters for dopamine (DA) signal as the temporal difference\n(TD) between the TDIntegLayer activations in the minus and plus phase."}, {Name: "Indexes", Doc: "Indexes has recv and send pathway array access info."}}}) +var _ = types.AddType(&types.Type{Name: "github.com/emer/axon/v2/axon.LayerParams", IDName: "layer-params", Doc: "LayerParams contains all of the layer parameters.\nThese values must remain constant over the course of computation.\nOn the GPU, they are loaded into a uniform.", Fields: []types.Field{{Name: "Type", Doc: "Type is the functional type of layer, which determines the code path\nfor specialized layer types, and is synchronized with [Layer.Type]."}, {Name: "Index", Doc: "Index of this layer in [Layers] list."}, {Name: "MaxData", Doc: "MaxData is the maximum number of data parallel elements."}, {Name: "PoolSt", Doc: "PoolSt is the start of pools for this layer; first one is always the layer-wide pool."}, {Name: "Acts", Doc: "Activation parameters and methods for computing activations"}, {Name: "Inhib", Doc: "Inhibition parameters and methods for computing layer-level inhibition"}, {Name: "LayInhib", Doc: "LayInhib has indexes of layers that contribute between-layer inhibition\n to this layer. 
Set these indexes via BuildConfig LayInhibXName (X = 1, 2...)."}, {Name: "Learn", Doc: "Learn has learning parameters and methods that operate at the neuron level."}, {Name: "Bursts", Doc: "Bursts has [BurstParams] that determine how the 5IB Burst activation\nis computed from CaSpkP integrated spiking values in Super layers."}, {Name: "CT", Doc: "CT has params for the CT corticothalamic layer and PTPred layer that\ngenerates predictions over the Pulvinar using context. Uses the CtxtGe\nexcitatory input plus stronger NMDA channels to maintain context trace."}, {Name: "Pulv", Doc: "Pulv has parameters for how the plus-phase (outcome) state of Pulvinar\nthalamic relay cell neurons is computed from the corresponding driver\nneuron Burst activation (or CaSpkP if not Super)."}, {Name: "Matrix", Doc: "Matrix has parameters for BG Striatum Matrix MSN layers, which are\nthe main Go / NoGo gating units in BG. GateThr also used in BGThal."}, {Name: "GP", Doc: "GP has params for GP (globus pallidus) of the BG layers."}, {Name: "LDT", Doc: "LDT has parameters for laterodorsal tegmentum ACh salience neuromodulatory\nsignal, driven by superior colliculus stimulus novelty, US input / absence,\nand OFC / ACC inhibition."}, {Name: "VTA", Doc: "VTA has parameters for ventral tegmental area dopamine (DA) based on\nLHb PVDA (primary value -- at US time, computed at start of each trial\nand stored in LHbPVDA global value) and Amygdala (CeM) CS / learned\nvalue (LV) activations, which update every cycle."}, {Name: "RWPred", Doc: "RWPred has parameters for reward prediction using a simple Rescorla-Wagner\nlearning rule (i.e., PV learning in the Rubicon framework)."}, {Name: "RWDa", Doc: "RWDa has parameters for reward prediction dopamine using a simple\nRescorla-Wagner learning rule (i.e., PV learning in the Rubicon framework)."}, {Name: "TDInteg", Doc: "TDInteg has parameters for temporal differences (TD) reward integration layer."}, {Name: "TDDa", Doc: "TDDa has parameters for dopamine (DA) signal as the temporal difference\n(TD) between the TDIntegLayer activations in the minus and plus phase."}, {Name: "Indexes", Doc: "Indexes has recv and send pathway array access info."}}}) var _ = types.AddType(&types.Type{Name: "github.com/emer/axon/v2/axon.LayerTypes", IDName: "layer-types", Doc: "LayerTypes enumerates all the different types of layers,\nfor the different algorithm types supported.\nClass parameter styles automatically key off of these types."}) @@ -94,7 +94,7 @@ var _ = types.AddType(&types.Type{Name: "github.com/emer/axon/v2/axon.LearnSynPa var _ = types.AddType(&types.Type{Name: "github.com/emer/axon/v2/axon.ViewTimes", IDName: "view-times", Doc: "ViewTimes are the options for when the NetView can be updated."}) -var _ = types.AddType(&types.Type{Name: "github.com/emer/axon/v2/axon.NetViewUpdate", IDName: "net-view-update", Doc: "NetViewUpdate manages time scales for updating the NetView.\nUse one of these for each mode you want to control separately.", Fields: []types.Field{{Name: "On", Doc: "toggles update of display on"}, {Name: "Time", Doc: "Time scale to update the network view."}, {Name: "View", Doc: "View is the network view."}}}) +var _ = types.AddType(&types.Type{Name: "github.com/emer/axon/v2/axon.NetViewUpdate", IDName: "net-view-update", Doc: "NetViewUpdate manages time scales for updating the NetView.\nUse one of these for each mode you want to control separately.", Fields: []types.Field{{Name: "On", Doc: "On toggles update of display on"}, {Name: "Time", Doc: "Time scale to update the 
network view (Cycle to Trial timescales)."}, {Name: "CounterFunc", Doc: "CounterFunc returns the counter string showing current counters etc."}, {Name: "View", Doc: "View is the network view."}}}) var _ = types.AddType(&types.Type{Name: "github.com/emer/axon/v2/axon.NetworkIndexes", IDName: "network-indexes", Doc: "NetworkIndexes are indexes and sizes for processing network.", Directives: []types.Directive{{Tool: "gosl", Directive: "start"}}, Fields: []types.Field{{Name: "MaxData", Doc: "MaxData is the maximum number of data inputs that can be processed\nin parallel in one pass of the network.\nNeuron storage is allocated to hold this amount during\nBuild process, and this value reflects that."}, {Name: "MaxDelay", Doc: "MaxDelay is the maximum synaptic delay across all pathways at the time of\n[Network.Build]. This determines the size of the spike sending delay buffers."}, {Name: "NLayers", Doc: "NLayers is the number of layers in the network."}, {Name: "NNeurons", Doc: "NNeurons is the total number of neurons."}, {Name: "NPools", Doc: "NPools is the total number of pools."}, {Name: "NPaths", Doc: "NPaths is the total number of paths."}, {Name: "NSyns", Doc: "NSyns is the total number of synapses."}, {Name: "RubiconNPosUSs", Doc: "RubiconNPosUSs is the total number of Rubicon Drives / positive USs."}, {Name: "RubiconNCosts", Doc: "RubiconNCosts is the total number of Rubicon Costs."}, {Name: "RubiconNNegUSs", Doc: "RubiconNNegUSs is the total number of .Rubicon Negative USs."}, {Name: "GPUMaxBuffFloats", Doc: "GPUMaxBuffFloats is the maximum size in float32 (4 bytes) of a GPU buffer\nneeded for GPU access."}, {Name: "GPUSynCaBanks", Doc: "GPUSyncCaBanks is the total number of SynCa banks of GPUMaxBufferBytes arrays in GPU."}}}) @@ -132,7 +132,7 @@ var _ = types.AddType(&types.Type{Name: "github.com/emer/axon/v2/axon.PathParams var _ = types.AddType(&types.Type{Name: "github.com/emer/axon/v2/axon.PathTypes", IDName: "path-types", Doc: "PathTypes enumerates all the different types of axon pathways,\nfor the different algorithm types supported.\nClass parameter styles automatically key off of these types."}) -var _ = types.AddType(&types.Type{Name: "github.com/emer/axon/v2/axon.MatrixParams", IDName: "matrix-params", Doc: "MatrixParams has parameters for BG Striatum Matrix MSN layers\nThese are the main Go / NoGo gating units in BG.\nDA, ACh learning rate modulation is pre-computed on the recv neuron\nRLRate variable via NeuroMod. Also uses Pool.Gated for InvertNoGate,\nupdated in PlusPhase prior to DWt call.\nMust set Learn.NeuroMod.DAMod = D1Mod or D2Mod via SetBuildConfig(\"DAMod\").", Directives: []types.Directive{{Tool: "gosl", Directive: "start"}}, Fields: []types.Field{{Name: "GateThr", Doc: "threshold on layer Avg SpkMax for Matrix Go and VThal layers to count as having gated"}, {Name: "IsVS", Doc: "is this a ventral striatum (VS) matrix layer? if true, the gating status of this layer is recorded in the Global state, and used for updating effort and other factors."}, {Name: "OtherMatrixIndex", Doc: "index of other matrix (Go if we are NoGo and vice-versa). Set during Build from BuildConfig OtherMatrixName"}, {Name: "ThalLay1Index", Doc: "index of thalamus layer that we gate. needed to get gating information. Set during Build from BuildConfig ThalLay1Name if present -- -1 if not used"}, {Name: "ThalLay2Index", Doc: "index of thalamus layer that we gate. needed to get gating information. 
Set during Build from BuildConfig ThalLay2Name if present -- -1 if not used"}, {Name: "ThalLay3Index", Doc: "index of thalamus layer that we gate. needed to get gating information. Set during Build from BuildConfig ThalLay3Name if present -- -1 if not used"}, {Name: "ThalLay4Index", Doc: "index of thalamus layer that we gate. needed to get gating information. Set during Build from BuildConfig ThalLay4Name if present -- -1 if not used"}, {Name: "ThalLay5Index", Doc: "index of thalamus layer that we gate. needed to get gating information. Set during Build from BuildConfig ThalLay5Name if present -- -1 if not used"}, {Name: "ThalLay6Index", Doc: "index of thalamus layer that we gate. needed to get gating information. Set during Build from BuildConfig ThalLay6Name if present -- -1 if not used"}, {Name: "pad"}, {Name: "pad1"}, {Name: "pad2"}}}) +var _ = types.AddType(&types.Type{Name: "github.com/emer/axon/v2/axon.MatrixParams", IDName: "matrix-params", Doc: "MatrixParams has parameters for BG Striatum Matrix MSN layers\nThese are the main Go / NoGo gating units in BG.\nDA, ACh learning rate modulation is pre-computed on the recv neuron\nRLRate variable via NeuroMod. Also uses Pool.Gated for InvertNoGate,\nupdated in PlusPhase prior to DWt call.\nMust set Learn.NeuroMod.DAMod = D1Mod or D2Mod via SetBuildConfig(\"DAMod\").", Directives: []types.Directive{{Tool: "gosl", Directive: "start"}}, Fields: []types.Field{{Name: "GateThr", Doc: "GateThr is the threshold on layer Avg SpkMax for Matrix Go and BG Thal\nlayers to count as having gated."}, {Name: "IsVS", Doc: "IsVS is this a ventral striatum (VS) matrix layer? If true, the gating\nstatus of this layer is recorded in the Global state,\nand used for updating effort and other factors."}, {Name: "OtherMatrixIndex", Doc: "index of other matrix (Go if we are NoGo and vice-versa). Set during Build from BuildConfig OtherMatrixName"}, {Name: "ThalLay1Index", Doc: "index of thalamus layer that we gate. needed to get gating information. Set during Build from BuildConfig ThalLay1Name if present -- -1 if not used"}, {Name: "ThalLay2Index", Doc: "index of thalamus layer that we gate. needed to get gating information. Set during Build from BuildConfig ThalLay2Name if present -- -1 if not used"}, {Name: "ThalLay3Index", Doc: "index of thalamus layer that we gate. needed to get gating information. Set during Build from BuildConfig ThalLay3Name if present -- -1 if not used"}, {Name: "ThalLay4Index", Doc: "index of thalamus layer that we gate. needed to get gating information. Set during Build from BuildConfig ThalLay4Name if present -- -1 if not used"}, {Name: "ThalLay5Index", Doc: "index of thalamus layer that we gate. needed to get gating information. Set during Build from BuildConfig ThalLay5Name if present -- -1 if not used"}, {Name: "ThalLay6Index", Doc: "index of thalamus layer that we gate. needed to get gating information. Set during Build from BuildConfig ThalLay6Name if present -- -1 if not used"}, {Name: "pad"}, {Name: "pad1"}, {Name: "pad2"}}}) var _ = types.AddType(&types.Type{Name: "github.com/emer/axon/v2/axon.GPLayerTypes", IDName: "gp-layer-types", Doc: "GPLayerTypes is a GPLayer axon-specific layer type enum."})
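The generated WGSL indexes PoolsInt through IndexI323D, passing the first three array elements as layout information. The exact gosl encoding is not shown in this diff; the sketch below illustrates only the generic row-major stride arithmetic such a helper performs (index3D and the stride layout here are assumptions):

```go
package main

import "fmt"

// index3D flattens a 3D coordinate into a 1D offset using per-dimension
// strides, the same arithmetic an IndexI323D-style helper performs (plus
// whatever header offset the real gosl layout adds, omitted here).
func index3D(s0, s1, s2 uint32, i0, i1, i2 uint32) uint32 {
	return i0*s0 + i1*s1 + i2*s2
}

func main() {
	// A [nPools=4][nData=2][nVars=3] tensor stored row-major:
	// strides are {nData*nVars, nVars, 1}.
	s0, s1, s2 := uint32(2*3), uint32(3), uint32(1)
	poolsInt := make([]int32, 4*2*3)
	pi, di, poolGated := uint32(2), uint32(1), uint32(0)
	poolsInt[index3D(s0, s1, s2, pi, di, poolGated)] = 1
	fmt.Println(poolsInt[index3D(s0, s1, s2, pi, di, poolGated)]) // 1
}
```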