diff --git a/axon/gosl.go b/axon/gosl.go index 27aac156..05fa72e5 100644 --- a/axon/gosl.go +++ b/axon/gosl.go @@ -67,7 +67,7 @@ func GPUInit() { gpu.NewComputePipelineShaderFS(shaders, "shaders/CycleNeuron.wgsl", sy) gpu.NewComputePipelineShaderFS(shaders, "shaders/CyclePost.wgsl", sy) gpu.NewComputePipelineShaderFS(shaders, "shaders/DWtFromDiSyn.wgsl", sy) - gpu.NewComputePipelineShaderFS(shaders, "shaders/DWtSubMeanPath.wgsl", sy) + gpu.NewComputePipelineShaderFS(shaders, "shaders/DWtSubMeanNeuron.wgsl", sy) gpu.NewComputePipelineShaderFS(shaders, "shaders/DWtSyn.wgsl", sy) gpu.NewComputePipelineShaderFS(shaders, "shaders/GPUTestWrite.wgsl", sy) gpu.NewComputePipelineShaderFS(shaders, "shaders/GatherSpikes.wgsl", sy) @@ -84,6 +84,8 @@ func GPUInit() { gpu.NewComputePipelineShaderFS(shaders, "shaders/PlusPhaseStartNeuron.wgsl", sy) gpu.NewComputePipelineShaderFS(shaders, "shaders/PoolGi.wgsl", sy) gpu.NewComputePipelineShaderFS(shaders, "shaders/SendSpike.wgsl", sy) + gpu.NewComputePipelineShaderFS(shaders, "shaders/SlowAdaptLayer.wgsl", sy) + gpu.NewComputePipelineShaderFS(shaders, "shaders/SlowAdaptNeuron.wgsl", sy) gpu.NewComputePipelineShaderFS(shaders, "shaders/WtFromDWtLayer.wgsl", sy) gpu.NewComputePipelineShaderFS(shaders, "shaders/WtFromDWtSyn.wgsl", sy) vars := sy.Vars() @@ -412,46 +414,46 @@ func RunOneDWtFromDiSyn(n int, syncVars ...GPUVars) { RunDWtFromDiSynCPU(n) } } -// RunDWtSubMeanPath runs the DWtSubMeanPath kernel with given number of elements, +// RunDWtSubMeanNeuron runs the DWtSubMeanNeuron kernel with given number of elements, // on either the CPU or GPU depending on the UseGPU variable. // Can call multiple Run* kernels in a row, which are then all launched // in the same command submission on the GPU, which is by far the most efficient. // MUST call RunDone (with optional vars to sync) after all Run calls. -// Alternatively, a single-shot RunOneDWtSubMeanPath call does Run and Done for a +// Alternatively, a single-shot RunOneDWtSubMeanNeuron call does Run and Done for a // single run-and-sync case. -func RunDWtSubMeanPath(n int) { +func RunDWtSubMeanNeuron(n int) { if UseGPU { - RunDWtSubMeanPathGPU(n) + RunDWtSubMeanNeuronGPU(n) } else { - RunDWtSubMeanPathCPU(n) + RunDWtSubMeanNeuronCPU(n) } } -// RunDWtSubMeanPathGPU runs the DWtSubMeanPath kernel on the GPU. See [RunDWtSubMeanPath] for more info. -func RunDWtSubMeanPathGPU(n int) { +// RunDWtSubMeanNeuronGPU runs the DWtSubMeanNeuron kernel on the GPU. See [RunDWtSubMeanNeuron] for more info. +func RunDWtSubMeanNeuronGPU(n int) { sy := GPUSystem - pl := sy.ComputePipelines["DWtSubMeanPath"] + pl := sy.ComputePipelines["DWtSubMeanNeuron"] ce, _ := sy.BeginComputePass() pl.Dispatch1D(ce, n, 64) } -// RunDWtSubMeanPathCPU runs the DWtSubMeanPath kernel on the CPU. -func RunDWtSubMeanPathCPU(n int) { - gpu.VectorizeFunc(0, n, DWtSubMeanPath) +// RunDWtSubMeanNeuronCPU runs the DWtSubMeanNeuron kernel on the CPU. +func RunDWtSubMeanNeuronCPU(n int) { + gpu.VectorizeFunc(0, n, DWtSubMeanNeuron) } -// RunOneDWtSubMeanPath runs the DWtSubMeanPath kernel with given number of elements, +// RunOneDWtSubMeanNeuron runs the DWtSubMeanNeuron kernel with given number of elements, // on either the CPU or GPU depending on the UseGPU variable. // This version then calls RunDone with the given variables to sync // after the Run, for a single-shot Run-and-Done call. If multiple kernels // can be run in sequence, it is much more efficient to do multiple Run* // calls followed by a RunDone call. 
-func RunOneDWtSubMeanPath(n int, syncVars ...GPUVars) { +func RunOneDWtSubMeanNeuron(n int, syncVars ...GPUVars) { if UseGPU { - RunDWtSubMeanPathGPU(n) + RunDWtSubMeanNeuronGPU(n) RunDone(syncVars...) } else { - RunDWtSubMeanPathCPU(n) + RunDWtSubMeanNeuronCPU(n) } } // RunDWtSyn runs the DWtSyn kernel with given number of elements, @@ -1126,6 +1128,90 @@ func RunOneSendSpike(n int, syncVars ...GPUVars) { RunSendSpikeCPU(n) } } +// RunSlowAdaptLayer runs the SlowAdaptLayer kernel with given number of elements, +// on either the CPU or GPU depending on the UseGPU variable. +// Can call multiple Run* kernels in a row, which are then all launched +// in the same command submission on the GPU, which is by far the most efficient. +// MUST call RunDone (with optional vars to sync) after all Run calls. +// Alternatively, a single-shot RunOneSlowAdaptLayer call does Run and Done for a +// single run-and-sync case. +func RunSlowAdaptLayer(n int) { + if UseGPU { + RunSlowAdaptLayerGPU(n) + } else { + RunSlowAdaptLayerCPU(n) + } +} + +// RunSlowAdaptLayerGPU runs the SlowAdaptLayer kernel on the GPU. See [RunSlowAdaptLayer] for more info. +func RunSlowAdaptLayerGPU(n int) { + sy := GPUSystem + pl := sy.ComputePipelines["SlowAdaptLayer"] + ce, _ := sy.BeginComputePass() + pl.Dispatch1D(ce, n, 64) +} + +// RunSlowAdaptLayerCPU runs the SlowAdaptLayer kernel on the CPU. +func RunSlowAdaptLayerCPU(n int) { + gpu.VectorizeFunc(0, n, SlowAdaptLayer) +} + +// RunOneSlowAdaptLayer runs the SlowAdaptLayer kernel with given number of elements, +// on either the CPU or GPU depending on the UseGPU variable. +// This version then calls RunDone with the given variables to sync +// after the Run, for a single-shot Run-and-Done call. If multiple kernels +// can be run in sequence, it is much more efficient to do multiple Run* +// calls followed by a RunDone call. +func RunOneSlowAdaptLayer(n int, syncVars ...GPUVars) { + if UseGPU { + RunSlowAdaptLayerGPU(n) + RunDone(syncVars...) + } else { + RunSlowAdaptLayerCPU(n) + } +} +// RunSlowAdaptNeuron runs the SlowAdaptNeuron kernel with given number of elements, +// on either the CPU or GPU depending on the UseGPU variable. +// Can call multiple Run* kernels in a row, which are then all launched +// in the same command submission on the GPU, which is by far the most efficient. +// MUST call RunDone (with optional vars to sync) after all Run calls. +// Alternatively, a single-shot RunOneSlowAdaptNeuron call does Run and Done for a +// single run-and-sync case. +func RunSlowAdaptNeuron(n int) { + if UseGPU { + RunSlowAdaptNeuronGPU(n) + } else { + RunSlowAdaptNeuronCPU(n) + } +} + +// RunSlowAdaptNeuronGPU runs the SlowAdaptNeuron kernel on the GPU. See [RunSlowAdaptNeuron] for more info. +func RunSlowAdaptNeuronGPU(n int) { + sy := GPUSystem + pl := sy.ComputePipelines["SlowAdaptNeuron"] + ce, _ := sy.BeginComputePass() + pl.Dispatch1D(ce, n, 64) +} + +// RunSlowAdaptNeuronCPU runs the SlowAdaptNeuron kernel on the CPU. +func RunSlowAdaptNeuronCPU(n int) { + gpu.VectorizeFunc(0, n, SlowAdaptNeuron) +} + +// RunOneSlowAdaptNeuron runs the SlowAdaptNeuron kernel with given number of elements, +// on either the CPU or GPU depending on the UseGPU variable. +// This version then calls RunDone with the given variables to sync +// after the Run, for a single-shot Run-and-Done call. If multiple kernels +// can be run in sequence, it is much more efficient to do multiple Run* +// calls followed by a RunDone call. 
+func RunOneSlowAdaptNeuron(n int, syncVars ...GPUVars) { + if UseGPU { + RunSlowAdaptNeuronGPU(n) + RunDone(syncVars...) + } else { + RunSlowAdaptNeuronCPU(n) + } +} // RunWtFromDWtLayer runs the WtFromDWtLayer kernel with given number of elements, // on either the CPU or GPU depending on the UseGPU variable. // Can call multiple Run* kernels in a row, which are then all launched diff --git a/axon/layer.go b/axon/layer.go index 4dbb046a..5bca80b5 100644 --- a/axon/layer.go +++ b/axon/layer.go @@ -700,7 +700,7 @@ func (ly *Layer) SetWeights(lw *weights.Layer) error { } } } - ly.AvgDifFromTrgAvg(ctx) // update AvgPct based on loaded ActAvg values + ly.Params.AvgDifFromTrgAvg(ctx) // update AvgPct based on loaded ActAvg values return err } diff --git a/axon/layer.goal b/axon/layer.goal index d11a4b32..403de78f 100644 --- a/axon/layer.goal +++ b/axon/layer.goal @@ -698,7 +698,7 @@ func (ly *Layer) SetWeights(lw *weights.Layer) error { } } } - ly.AvgDifFromTrgAvg(ctx) // update AvgPct based on loaded ActAvg values + ly.Params.AvgDifFromTrgAvg(ctx) // update AvgPct based on loaded ActAvg values return err } diff --git a/axon/learn-layer.go b/axon/learn-layer.go index 7e3ae3c6..9f4aa50c 100644 --- a/axon/learn-layer.go +++ b/axon/learn-layer.go @@ -10,7 +10,7 @@ import "cogentcore.org/core/math32" //gosl:start -// DTrgSubMean subtracts the mean from DTrgAvg values +// DTrgSubMean subtracts the mean from DTrgAvg values. // Called by TrgAvgFromD func (ly *LayerParams) DTrgSubMean(ctx *Context) { submean := ly.Learn.TrgAvgAct.SubMean @@ -73,7 +73,7 @@ func (ly *LayerParams) DTrgSubMean(ctx *Context) { } } -// TrgAvgFromD updates TrgAvg from DTrgAvg -- called in PlusPhasePost +// TrgAvgFromD updates TrgAvg from DTrgAvg, called in PlusPhasePost. func (ly *LayerParams) TrgAvgFromD(ctx *Context) { lr := ly.LearnTrgAvgErrLRate() if lr == 0 { @@ -100,46 +100,55 @@ func (ly *LayerParams) WtFromDWtLayer(ctx *Context) { ly.TrgAvgFromD(ctx) } -//gosl:end +// DWtSubMean subtracts the mean DWt for each recv neuron. +func (ly *LayerParams) DWtSubMean(ctx *Context, ri uint32) { + lni := ri - ly.Indexes.NeurSt + rn := ly.Indexes.RecvN + for pi := uint32(0); pi < rn; pi++ { + pti := RecvPathIxs.Value(int(ly.Indexes.RecvSt + pi)) + Paths[pti].DWtSubMean(ctx, pti, ri, lni) + } +} -// SlowAdapt is the layer-level slow adaptation functions. +//////// SlowAdapt + +// SlowAdaptLayer is the layer-level slow adaptation functions. // Calls AdaptInhib and AvgDifFromTrgAvg for Synaptic Scaling. // Does NOT call pathway-level methods. -func (ly *Layer) SlowAdapt(ctx *Context) { +func (ly *LayerParams) SlowAdaptLayer(ctx *Context) { ly.AdaptInhib(ctx) ly.AvgDifFromTrgAvg(ctx) - // note: path level call happens at network level } -// AdaptInhib adapts inhibition -func (ly *Layer) AdaptInhib(ctx *Context) { - if ly.Params.Inhib.ActAvg.AdaptGi.IsFalse() || ly.Params.IsInput() { +// AdaptInhib adapts inhibition. +func (ly *LayerParams) AdaptInhib(ctx *Context) { + if ly.Inhib.ActAvg.AdaptGi.IsFalse() || ly.IsInput() { return } for di := uint32(0); di < ctx.NData; di++ { giMult := LayerStates.Value(int(ly.Index), int(di), int(LayerGiMult)) avg := LayerStates.Value(int(ly.Index), int(di), int(LayerActMAvg)) - ly.Params.Inhib.ActAvg.Adapt(&giMult, avg) + ly.Inhib.ActAvg.Adapt(&giMult, avg) LayerStates.Set(giMult, int(ly.Index), int(di), int(LayerGiMult)) } } // AvgDifFromTrgAvg updates neuron-level AvgDif values from AvgPct - TrgAvg // which is then used for synaptic scaling of LWt values in Path SynScale. 
-func (ly *Layer) AvgDifFromTrgAvg(ctx *Context) { +func (ly *LayerParams) AvgDifFromTrgAvg(ctx *Context) { sp := uint32(0) - if ly.NPools > 1 { + if ly.Indexes.NPools > 1 { sp = 1 } - np := ly.NPools + np := ly.Indexes.NPools for spi := sp; spi < np; spi++ { - pi := ly.Params.PoolIndex(spi) + pi := ly.PoolIndex(spi) nsi := PoolsInt.Value(int(pi), int(0), int(PoolNeurSt)) nei := PoolsInt.Value(int(pi), int(0), int(PoolNeurEd)) plavg := float32(0) nn := 0 for lni := nsi; lni < nei; lni++ { - ni := ly.NeurStIndex + uint32(lni) + ni := ly.Indexes.NeurSt + uint32(lni) if NeuronIsOff(ni) { continue } @@ -155,7 +164,7 @@ func (ly *Layer) AvgDifFromTrgAvg(ctx *Context) { } PoolAvgDifInit(pi, 0) for lni := nsi; lni < nei; lni++ { - ni := ly.NeurStIndex + uint32(lni) + ni := ly.Indexes.NeurSt + uint32(lni) if NeuronIsOff(ni) { continue } @@ -172,12 +181,12 @@ func (ly *Layer) AvgDifFromTrgAvg(ctx *Context) { } } if sp == 1 { // update layer pool - lpi := ly.Params.PoolIndex(0) + lpi := ly.PoolIndex(0) PoolAvgDifInit(lpi, 0) nsi := PoolsInt.Value(int(lpi), int(0), int(PoolNeurSt)) nei := PoolsInt.Value(int(lpi), int(0), int(PoolNeurEd)) for lni := nsi; lni < nei; lni++ { - ni := ly.NeurStIndex + uint32(lni) + ni := ly.Indexes.NeurSt + uint32(lni) if NeuronIsOff(ni) { continue } @@ -192,6 +201,19 @@ func (ly *Layer) AvgDifFromTrgAvg(ctx *Context) { } } +// SlowAdaptNeuron does path & synapse level slow adaptation on SWt and +// overall synaptic scaling, per each receiving neuron ri. +func (ly *LayerParams) SlowAdaptNeuron(ctx *Context, ri uint32) { + lni := ri - ly.Indexes.NeurSt + rn := ly.Indexes.RecvN + for pi := uint32(0); pi < rn; pi++ { + pti := RecvPathIxs.Value(int(ly.Indexes.RecvSt + pi)) + Paths[pti].SlowAdapt(ctx, ly, pti, ri, lni) + } +} + +//gosl:end + // LRateMod sets the LRate modulation parameter for Paths, which is // for dynamic modulation of learning rate (see also LRateSched). // Updates the effective learning rate factor accordingly. diff --git a/axon/learn-layer.goal b/axon/learn-layer.goal index 7847629a..e0479b24 100644 --- a/axon/learn-layer.goal +++ b/axon/learn-layer.goal @@ -8,7 +8,7 @@ import "cogentcore.org/core/math32" //gosl:start -// DTrgSubMean subtracts the mean from DTrgAvg values +// DTrgSubMean subtracts the mean from DTrgAvg values. // Called by TrgAvgFromD func (ly *LayerParams) DTrgSubMean(ctx *Context) { submean := ly.Learn.TrgAvgAct.SubMean @@ -71,7 +71,7 @@ func (ly *LayerParams) DTrgSubMean(ctx *Context) { } } -// TrgAvgFromD updates TrgAvg from DTrgAvg -- called in PlusPhasePost +// TrgAvgFromD updates TrgAvg from DTrgAvg, called in PlusPhasePost. func (ly *LayerParams) TrgAvgFromD(ctx *Context) { lr := ly.LearnTrgAvgErrLRate() if lr == 0 { @@ -98,46 +98,56 @@ func (ly *LayerParams) WtFromDWtLayer(ctx *Context) { ly.TrgAvgFromD(ctx) } -//gosl:end -// SlowAdapt is the layer-level slow adaptation functions. +// DWtSubMean subtracts the mean DWt for each recv neuron. +func (ly *LayerParams) DWtSubMean(ctx *Context, ri uint32) { + lni := ri - ly.Indexes.NeurSt + rn := ly.Indexes.RecvN + for pi := uint32(0); pi < rn; pi++ { + pti := RecvPathIxs.Value(int(ly.Indexes.RecvSt + pi)) + Paths[pti].DWtSubMean(ctx, pti, ri, lni) + } +} + +//////// SlowAdapt + +// SlowAdaptLayer is the layer-level slow adaptation functions. // Calls AdaptInhib and AvgDifFromTrgAvg for Synaptic Scaling. // Does NOT call pathway-level methods. 
-func (ly *Layer) SlowAdapt(ctx *Context) { +func (ly *LayerParams) SlowAdaptLayer(ctx *Context) { ly.AdaptInhib(ctx) ly.AvgDifFromTrgAvg(ctx) - // note: path level call happens at network level } -// AdaptInhib adapts inhibition -func (ly *Layer) AdaptInhib(ctx *Context) { - if ly.Params.Inhib.ActAvg.AdaptGi.IsFalse() || ly.Params.IsInput() { +// AdaptInhib adapts inhibition. +func (ly *LayerParams) AdaptInhib(ctx *Context) { + if ly.Inhib.ActAvg.AdaptGi.IsFalse() || ly.IsInput() { return } for di := uint32(0); di < ctx.NData; di++ { giMult := LayerStates[ly.Index, di, LayerGiMult] avg := LayerStates[ly.Index, di, LayerActMAvg] - ly.Params.Inhib.ActAvg.Adapt(&giMult, avg) + ly.Inhib.ActAvg.Adapt(&giMult, avg) LayerStates[ly.Index, di, LayerGiMult] = giMult } } // AvgDifFromTrgAvg updates neuron-level AvgDif values from AvgPct - TrgAvg // which is then used for synaptic scaling of LWt values in Path SynScale. -func (ly *Layer) AvgDifFromTrgAvg(ctx *Context) { +func (ly *LayerParams) AvgDifFromTrgAvg(ctx *Context) { sp := uint32(0) - if ly.NPools > 1 { + if ly.Indexes.NPools > 1 { sp = 1 } - np := ly.NPools + np := ly.Indexes.NPools for spi := sp; spi < np; spi++ { - pi := ly.Params.PoolIndex(spi) + pi := ly.PoolIndex(spi) nsi := PoolsInt[pi, 0, PoolNeurSt] nei := PoolsInt[pi, 0, PoolNeurEd] plavg := float32(0) nn := 0 for lni := nsi; lni < nei; lni++ { - ni := ly.NeurStIndex + uint32(lni) + ni := ly.Indexes.NeurSt + uint32(lni) if NeuronIsOff(ni) { continue } @@ -153,7 +163,7 @@ func (ly *Layer) AvgDifFromTrgAvg(ctx *Context) { } PoolAvgDifInit(pi, 0) for lni := nsi; lni < nei; lni++ { - ni := ly.NeurStIndex + uint32(lni) + ni := ly.Indexes.NeurSt + uint32(lni) if NeuronIsOff(ni) { continue } @@ -170,12 +180,12 @@ func (ly *Layer) AvgDifFromTrgAvg(ctx *Context) { } } if sp == 1 { // update layer pool - lpi := ly.Params.PoolIndex(0) + lpi := ly.PoolIndex(0) PoolAvgDifInit(lpi, 0) nsi := PoolsInt[lpi, 0, PoolNeurSt] nei := PoolsInt[lpi, 0, PoolNeurEd] for lni := nsi; lni < nei; lni++ { - ni := ly.NeurStIndex + uint32(lni) + ni := ly.Indexes.NeurSt + uint32(lni) if NeuronIsOff(ni) { continue } @@ -190,6 +200,19 @@ func (ly *Layer) AvgDifFromTrgAvg(ctx *Context) { } } +// SlowAdaptNeuron does path & synapse level slow adaptation on SWt and +// overall synaptic scaling, per each receiving neuron ri. +func (ly *LayerParams) SlowAdaptNeuron(ctx *Context, ri uint32) { + lni := ri - ly.Indexes.NeurSt + rn := ly.Indexes.RecvN + for pi := uint32(0); pi < rn; pi++ { + pti := RecvPathIxs.Value(int(ly.Indexes.RecvSt + pi)) + Paths[pti].SlowAdapt(ctx, ly, pti, ri, lni) + } +} + +//gosl:end + // LRateMod sets the LRate modulation parameter for Paths, which is // for dynamic modulation of learning rate (see also LRateSched). // Updates the effective learning rate factor accordingly. 
diff --git a/axon/learn-net.go b/axon/learn-net.go index 453b31f3..faf59492 100644 --- a/axon/learn-net.go +++ b/axon/learn-net.go @@ -25,12 +25,12 @@ func (nt *Network) WtFromDWt() { nix := nt.NetIxs() ctx := nt.Context() RunWtFromDWtLayer(int(nix.NLayers)) - RunDWtSubMeanPath(int(nix.NPaths)) + RunDWtSubMeanNeuron(int(nix.NNeurons)) RunWtFromDWtSyn(int(nix.NSyns)) - RunDoneSynapses() if ctx.SlowInc() { nt.SlowAdapt() } + RunDoneSynapses() } // DWtToWt computes the weight change (learning) based on current @@ -43,29 +43,21 @@ func (nt *Network) DWtToWt() { sd := int(nix.NSyns * ctx.NData) RunDWtSyn(sd) RunDWtFromDiSyn(int(nix.NSyns)) - RunDWtSubMeanPath(int(nix.NPaths)) + RunWtFromDWtLayer(int(nix.NLayers)) + RunDWtSubMeanNeuron(int(nix.NNeurons)) RunWtFromDWtSyn(int(nix.NSyns)) - RunDoneSynapses() - // RunDone() // sig faster if ctx.SlowInc() { nt.SlowAdapt() - ToGPUSynapses() } + RunDone() } // SlowAdapt is the layer-level slow adaptation functions: Synaptic scaling, // and adapting inhibition. func (nt *Network) SlowAdapt() { - ctx := nt.Context() - for _, ly := range nt.Layers { - if ly.Off { - continue - } - ly.SlowAdapt(ctx) - } - for _, pt := range nt.Paths { - pt.SlowAdapt(ctx) - } + nix := nt.NetIxs() + RunSlowAdaptLayer(int(nix.NLayers)) + RunSlowAdaptNeuron(int(nix.NNeurons)) } // LRateMod sets the LRate modulation parameter for Paths, which is @@ -246,11 +238,12 @@ func WtFromDWtLayer(li uint32) { //gosl:kernel Layers[li].WtFromDWtLayer(ctx) } -// DWtSubMeanPath is the kernel over Paths to -// compute DWt - mean(DWt). -func DWtSubMeanPath(pti uint32) { //gosl:kernel +// DWtSubMeanNeuron is the kernel over receiving Neurons to +// compute DWt - mean(DWt) for each recv neuron. +func DWtSubMeanNeuron(ni uint32) { //gosl:kernel ctx := GetCtx(0) - Paths[pti].DWtSubMean(ctx, pti) + li := NeuronIxs.Value(int(ni), int(NrnLayIndex)) + Layers[li].DWtSubMean(ctx, ni) } // WtFromDWtSyn is the kernel over Synapses (not * Data) to @@ -261,4 +254,20 @@ func WtFromDWtSyn(syni uint32) { //gosl:kernel ctx := GetCtx(0) Paths[pti].WtFromDWtSyn(ctx, syni) } +// SlowAdaptLayer is the kernel over Layers (not * Data) to +// run slow adaptation functions. +// Calls AdaptInhib and AvgDifFromTrgAvg for Synaptic Scaling. +func SlowAdaptLayer(li uint32) { //gosl:kernel + ctx := GetCtx(0) + Layers[li].SlowAdaptLayer(ctx) +} + +// SlowAdaptNeuron is the kernel over receiving Neurons to +// compute slow adaptation in receiving pathways.
+func SlowAdaptNeuron(ni uint32) { //gosl:kernel + ctx := GetCtx(0) + li := NeuronIxs.Value(int(ni), int(NrnLayIndex)) + Layers[li].SlowAdaptNeuron(ctx, ni) +} + //gosl:end diff --git a/axon/learn-net.goal b/axon/learn-net.goal index 0d72827d..5ccaa022 100644 --- a/axon/learn-net.goal +++ b/axon/learn-net.goal @@ -23,12 +23,12 @@ func (nt *Network) WtFromDWt() { nix := nt.NetIxs() ctx := nt.Context() RunWtFromDWtLayer(int(nix.NLayers)) - RunDWtSubMeanPath(int(nix.NPaths)) + RunDWtSubMeanNeuron(int(nix.NNeurons)) RunWtFromDWtSyn(int(nix.NSyns)) - RunDoneSynapses() if ctx.SlowInc() { nt.SlowAdapt() } + RunDoneSynapses() } @@ -42,29 +42,21 @@ func (nt *Network) DWtToWt() { sd := int(nix.NSyns * ctx.NData) RunDWtSyn(sd) RunDWtFromDiSyn(int(nix.NSyns)) - RunDWtSubMeanPath(int(nix.NPaths)) + RunWtFromDWtLayer(int(nix.NLayers)) + RunDWtSubMeanNeuron(int(nix.NNeurons)) RunWtFromDWtSyn(int(nix.NSyns)) - RunDoneSynapses() - // RunDone() // sig faster if ctx.SlowInc() { nt.SlowAdapt() - ToGPUSynapses() } + RunDone() } // SlowAdapt is the layer-level slow adaptation functions: Synaptic scaling, // and adapting inhibition. func (nt *Network) SlowAdapt() { - ctx := nt.Context() - for _, ly := range nt.Layers { - if ly.Off { - continue - } - ly.SlowAdapt(ctx) - } - for _, pt := range nt.Paths { - pt.SlowAdapt(ctx) - } + nix := nt.NetIxs() + RunSlowAdaptLayer(int(nix.NLayers)) + RunSlowAdaptNeuron(int(nix.NNeurons)) } // LRateMod sets the LRate modulation parameter for Paths, which is @@ -242,11 +234,12 @@ func WtFromDWtLayer(li uint32) { //gosl:kernel Layers[li].WtFromDWtLayer(ctx) } -// DWtSubMeanPath is the kernel over Paths to -// compute DWt - mean(DWt). -func DWtSubMeanPath(pti uint32) { //gosl:kernel +// DWtSubMeanNeuron is the kernel over receiving Neurons to +// compute DWt - mean(DWt) for each recv neuron. +func DWtSubMeanNeuron(ni uint32) { //gosl:kernel ctx := GetCtx(0) - Paths[pti].DWtSubMean(ctx, pti) + li := NeuronIxs[ni, NrnLayIndex] + Layers[li].DWtSubMean(ctx, ni) } // WtFromDWtSyn is the kernel over Synapses (not * Data) to @@ -257,5 +250,21 @@ func WtFromDWtSyn(syni uint32) { //gosl:kernel ctx := GetCtx(0) Paths[pti].WtFromDWtSyn(ctx, syni) } +// SlowAdaptLayer is the kernel over Layers (not * Data) to +// run slow adaptation functions. +// Calls AdaptInhib and AvgDifFromTrgAvg for Synaptic Scaling. +func SlowAdaptLayer(li uint32) { //gosl:kernel + ctx := GetCtx(0) + Layers[li].SlowAdaptLayer(ctx) +} + +// SlowAdaptNeuron is the kernel over receiving Neurons to +// compute slow adaptation in receiving pathways. +func SlowAdaptNeuron(ni uint32) { //gosl:kernel + ctx := GetCtx(0) + li := NeuronIxs[ni, NrnLayIndex] + Layers[li].SlowAdaptNeuron(ctx, ni) +} + //gosl:end diff --git a/axon/learn-path.go b/axon/learn-path.go index 4734a4ec..ff5d48f9 100644 --- a/axon/learn-path.go +++ b/axon/learn-path.go @@ -6,7 +6,9 @@ package axon -import "cogentcore.org/core/math32" +import ( + "cogentcore.org/core/math32" +) //gosl:start @@ -368,9 +370,10 @@ func (pt *PathParams) DWtFromDi(ctx *Context, syni uint32) { Synapses.SetAdd(dwt, int(syni), int(DWt)) } -// DWtSubMean subtracts the mean from any pathways that have SubMean > 0. +// DWtSubMean subtracts the mean for given recv neuron ri, +// for pathways that have SubMean > 0. // This is called on *receiving* pathways, prior to WtFromDwt.
-func (pt *PathParams) DWtSubMean(ctx *Context, pti uint32) { +func (pt *PathParams) DWtSubMean(ctx *Context, pti, ri, lni uint32) { if pt.Learn.Learn.IsFalse() { return } @@ -378,18 +381,18 @@ func (pt *PathParams) DWtSubMean(ctx *Context, pti uint32) { if sm == 0 { // note default is now 0, so don't exclude Target layers, which should be 0 return } - ri := pt.Indexes.RecvLayer - lni := ri - pt.Indexes.RecvNeurSt cni := pt.Indexes.RecvConSt + lni - synn := int(pt.Indexes.RecvSynSt + PathRecvCon.Value(int(StartNN), int(cni))) + synn := PathRecvCon.Value(int(cni), int(Nitems)) + if synn < 1 { return } - synst := pt.Indexes.RecvSynSt + PathRecvCon.Value(int(StartOff), int(cni)) + synst := pt.Indexes.RecvSynSt + PathRecvCon.Value(int(cni), int(StartOff)) + sumDWt := float32(0) nnz := 0 // non-zero - for ci := range synn { - syni := RecvSynIxs.Value(int(synst) + ci) + for ci := uint32(0); ci < synn; ci++ { + syni := RecvSynIxs.Value(int(synst + ci)) dw := Synapses.Value(int(syni), int(DWt)) if dw != 0 { sumDWt += dw @@ -400,8 +403,8 @@ func (pt *PathParams) DWtSubMean(ctx *Context, pti uint32) { return } sumDWt /= float32(nnz) - for ci := range synn { - syni := RecvSynIxs.Value(int(synst) + ci) + for ci := uint32(0); ci < synn; ci++ { + syni := RecvSynIxs.Value(int(synst + ci)) if Synapses.Value(int(syni), int(DWt)) != 0 { Synapses.SetAdd(-sm*sumDWt, int(syni), int(DWt)) } @@ -452,106 +455,96 @@ func (pt *PathParams) WtFromDWtSynNoLimits(ctx *Context, syni uint32) { Synapses.Set(0.0, int(syni), int(DWt)) } -//gosl:end - -// todo: rewrite below for PathParams target - // SlowAdapt does the slow adaptation: SWt learning and SynScale -func (pj *Path) SlowAdapt(ctx *Context) { - pj.SWtFromWt(ctx) - pj.SynScale(ctx) +func (pt *PathParams) SlowAdapt(ctx *Context, rlay *LayerParams, pti, ri, lni uint32) { + pt.SWtFromWt(ctx, rlay, pti, ri, lni) + pt.SynScale(ctx, rlay, pti, ri, lni) } // SWtFromWt updates structural, slowly adapting SWt value based on // accumulated DSWt values, which are zero-summed with additional soft bounding // relative to SWt limits. 
-func (pj *Path) SWtFromWt(ctx *Context) { - if pj.Params.Learn.Learn.IsFalse() || pj.Params.SWts.Adapt.On.IsFalse() { +func (pt *PathParams) SWtFromWt(ctx *Context, rlay *LayerParams, pti, ri, lni uint32) { + if pt.Learn.Learn.IsFalse() || pt.SWts.Adapt.On.IsFalse() { return } - rlay := pj.Recv - if rlay.Params.IsTarget() { + if rlay.IsTarget() { return } - mx := pj.Params.SWts.Limit.Max - mn := pj.Params.SWts.Limit.Min - lr := pj.Params.SWts.Adapt.LRate - for lni := uint32(0); lni < rlay.NNeurons; lni++ { - syIndexes := pj.RecvSynIxs(lni) - nCons := len(syIndexes) - if nCons < 1 { - continue - } - avgDWt := float32(0) - for _, syi := range syIndexes { - syni := pj.SynStIndex + syi - swt := Synapses.Value(int(syni), int(SWt)) - // softbound for SWt - if Synapses.Value(int(syni), int(DSWt)) >= 0 { - Synapses.SetMul((mx - swt), int(syni), int(DSWt)) - } else { - Synapses.SetMul((swt - mn), int(syni), int(DSWt)) - } - avgDWt += Synapses.Value(int(syni), int(DSWt)) - } - avgDWt /= float32(nCons) - avgDWt *= pj.Params.SWts.Adapt.SubMean - for _, syi := range syIndexes { - syni := pj.SynStIndex + syi - Synapses.SetAdd(lr*(Synapses.Value(int(syni), int(DSWt))-avgDWt), int(syni), int(SWt)) - swt := Synapses.Value(int(syni), int(SWt)) - Synapses.Set(0, int(syni), int(DSWt)) - Synapses.Set(pj.Params.SWts.LWtFromWts(Synapses.Value(int(syni), int(Wt)), swt), int(syni), int(LWt)) - Synapses.Set(pj.Params.SWts.WtValue(swt, Synapses.Value(int(syni), int(LWt))), int(syni), int(Wt)) + mx := pt.SWts.Limit.Max + mn := pt.SWts.Limit.Min + lr := pt.SWts.Adapt.LRate + + cni := pt.Indexes.RecvConSt + lni + synn := PathRecvCon.Value(int(cni), int(Nitems)) + synst := pt.Indexes.RecvSynSt + PathRecvCon.Value(int(cni), int(StartOff)) + + avgDWt := float32(0) + for ci := uint32(0); ci < synn; ci++ { + syni := RecvSynIxs.Value(int(synst + ci)) + swt := Synapses.Value(int(syni), int(SWt)) + // softbound for SWt + if Synapses.Value(int(syni), int(DSWt)) >= 0 { + Synapses.SetMul((mx - swt), int(syni), int(DSWt)) + } else { + Synapses.SetMul((swt - mn), int(syni), int(DSWt)) } + avgDWt += Synapses.Value(int(syni), int(DSWt)) + } + avgDWt /= float32(synn) + avgDWt *= pt.SWts.Adapt.SubMean + for ci := uint32(0); ci < synn; ci++ { + syni := RecvSynIxs.Value(int(synst + ci)) + Synapses.SetAdd(lr*(Synapses.Value(int(syni), int(DSWt))-avgDWt), int(syni), int(SWt)) + swt := Synapses.Value(int(syni), int(SWt)) + Synapses.Set(0.0, int(syni), int(DSWt)) + Synapses.Set(pt.SWts.LWtFromWts(Synapses.Value(int(syni), int(Wt)), swt), int(syni), int(LWt)) + Synapses.Set(pt.SWts.WtValue(swt, Synapses.Value(int(syni), int(LWt))), int(syni), int(Wt)) } } // SynScale performs synaptic scaling based on running average activation vs. targets. // Layer-level AvgDifFromTrgAvg function must be called first. 
-func (pj *Path) SynScale(ctx *Context) { - if pj.Params.Learn.Learn.IsFalse() || pj.Params.IsInhib() { +func (pt *PathParams) SynScale(ctx *Context, rlay *LayerParams, pti, ri, lni uint32) { + if pt.Learn.Learn.IsFalse() || pt.IsInhib() { return } - rlay := pj.Recv - if !rlay.Params.IsLearnTrgAvg() { + if !rlay.IsLearnTrgAvg() { return } - tp := &rlay.Params.Learn.TrgAvgAct - lr := tp.SynScaleRate - for lni := uint32(0); lni < rlay.NNeurons; lni++ { - ri := rlay.NeurStIndex + lni - if NeuronIsOff(ri) { - continue - } - adif := -lr * NeuronAvgs.Value(int(ri), int(AvgDif)) - syIndexes := pj.RecvSynIxs(lni) - for _, syi := range syIndexes { - syni := pj.SynStIndex + syi - lwt := Synapses.Value(int(syni), int(LWt)) - swt := Synapses.Value(int(syni), int(SWt)) - if adif >= 0 { // key to have soft bounding on lwt here! - Synapses.SetAdd((1-lwt)*adif*swt, int(syni), int(LWt)) - } else { - Synapses.SetAdd(lwt*adif*swt, int(syni), int(LWt)) - } - Synapses.Set(pj.Params.SWts.WtValue(swt, Synapses.Value(int(syni), int(LWt))), int(syni), int(Wt)) + lr := rlay.Learn.TrgAvgAct.SynScaleRate + + cni := pt.Indexes.RecvConSt + lni + synn := PathRecvCon.Value(int(cni), int(Nitems)) + synst := pt.Indexes.RecvSynSt + PathRecvCon.Value(int(cni), int(StartOff)) + adif := -lr * NeuronAvgs.Value(int(ri), int(AvgDif)) + for ci := uint32(0); ci < synn; ci++ { + syni := RecvSynIxs.Value(int(synst + ci)) + lwt := Synapses.Value(int(syni), int(LWt)) + swt := Synapses.Value(int(syni), int(SWt)) + if adif >= 0 { // key to have soft bounding on lwt here! + Synapses.SetAdd((1-lwt)*adif*swt, int(syni), int(LWt)) + } else { + Synapses.SetAdd(lwt*adif*swt, int(syni), int(LWt)) } + Synapses.Set(pt.SWts.WtValue(swt, Synapses.Value(int(syni), int(LWt))), int(syni), int(Wt)) } } +//gosl:end + // LRateMod sets the LRate modulation parameter for Paths, which is // for dynamic modulation of learning rate (see also LRateSched). // Updates the effective learning rate factor accordingly. -func (pj *Path) LRateMod(mod float32) { - pj.Params.Learn.LRate.Mod = mod - pj.Params.Learn.LRate.Update() +func (pt *Path) LRateMod(mod float32) { + pt.Params.Learn.LRate.Mod = mod + pt.Params.Learn.LRate.Update() } // LRateSched sets the schedule-based learning rate multiplier. // See also LRateMod. // Updates the effective learning rate factor accordingly. -func (pj *Path) LRateSched(sched float32) { - pj.Params.Learn.LRate.Sched = sched - pj.Params.Learn.LRate.Update() +func (pt *Path) LRateSched(sched float32) { + pt.Params.Learn.LRate.Sched = sched + pt.Params.Learn.LRate.Update() } diff --git a/axon/learn-path.goal b/axon/learn-path.goal index a37079f8..0dd6f612 100644 --- a/axon/learn-path.goal +++ b/axon/learn-path.goal @@ -4,7 +4,9 @@ package axon -import "cogentcore.org/core/math32" +import ( + "cogentcore.org/core/math32" +) //gosl:start @@ -366,9 +368,10 @@ func (pt *PathParams) DWtFromDi(ctx *Context, syni uint32) { Synapses[syni, DWt] += dwt } -// DWtSubMean subtracts the mean from any pathways that have SubMean > 0. +// DWtSubMean subtracts the mean for given recv neuron ri, +// for pathways that have SubMean > 0. // This is called on *receiving* pathways, prior to WtFromDwt. 
-func (pt *PathParams) DWtSubMean(ctx *Context, pti uint32) { +func (pt *PathParams) DWtSubMean(ctx *Context, pti, ri, lni uint32) { if pt.Learn.Learn.IsFalse() { return } @@ -376,18 +379,18 @@ func (pt *PathParams) DWtSubMean(ctx *Context, pti uint32) { if sm == 0 { // note default is now 0, so don't exclude Target layers, which should be 0 return } - ri := pt.Indexes.RecvLayer - lni := ri - pt.Indexes.RecvNeurSt cni := pt.Indexes.RecvConSt + lni - synn := int(pt.Indexes.RecvSynSt + PathRecvCon[StartNN, cni]) + synn := PathRecvCon[cni, Nitems] + if synn < 1 { return } - synst := pt.Indexes.RecvSynSt + PathRecvCon[StartOff, cni] + synst := pt.Indexes.RecvSynSt + PathRecvCon[cni, StartOff] + sumDWt := float32(0) nnz := 0 // non-zero - for ci := range synn { - syni := RecvSynIxs.Value(int(synst) + ci) + for ci := uint32(0); ci < synn; ci++ { + syni := RecvSynIxs.Value(int(synst + ci)) dw := Synapses[syni, DWt] if dw != 0 { sumDWt += dw @@ -398,8 +401,8 @@ func (pt *PathParams) DWtSubMean(ctx *Context, pti uint32) { return } sumDWt /= float32(nnz) - for ci := range synn { - syni := RecvSynIxs.Value(int(synst) + ci) + for ci := uint32(0); ci < synn; ci++ { + syni := RecvSynIxs.Value(int(synst + ci)) if Synapses[syni, DWt] != 0 { Synapses[syni, DWt] += -sm * sumDWt } @@ -450,106 +453,96 @@ func (pt *PathParams) WtFromDWtSynNoLimits(ctx *Context, syni uint32) { Synapses[syni, DWt] = 0.0 } -//gosl:end - -// todo: rewrite below for PathParams target - // SlowAdapt does the slow adaptation: SWt learning and SynScale -func (pj *Path) SlowAdapt(ctx *Context) { - pj.SWtFromWt(ctx) - pj.SynScale(ctx) +func (pt *PathParams) SlowAdapt(ctx *Context, rlay *LayerParams, pti, ri, lni uint32) { + pt.SWtFromWt(ctx, rlay, pti, ri, lni) + pt.SynScale(ctx, rlay, pti, ri, lni) } // SWtFromWt updates structural, slowly adapting SWt value based on // accumulated DSWt values, which are zero-summed with additional soft bounding // relative to SWt limits. 
-func (pj *Path) SWtFromWt(ctx *Context) { - if pj.Params.Learn.Learn.IsFalse() || pj.Params.SWts.Adapt.On.IsFalse() { +func (pt *PathParams) SWtFromWt(ctx *Context, rlay *LayerParams, pti, ri, lni uint32) { + if pt.Learn.Learn.IsFalse() || pt.SWts.Adapt.On.IsFalse() { return } - rlay := pj.Recv - if rlay.Params.IsTarget() { + if rlay.IsTarget() { return } - mx := pj.Params.SWts.Limit.Max - mn := pj.Params.SWts.Limit.Min - lr := pj.Params.SWts.Adapt.LRate - for lni := uint32(0); lni < rlay.NNeurons; lni++ { - syIndexes := pj.RecvSynIxs(lni) - nCons := len(syIndexes) - if nCons < 1 { - continue - } - avgDWt := float32(0) - for _, syi := range syIndexes { - syni := pj.SynStIndex + syi - swt := Synapses[syni, SWt] - // softbound for SWt - if Synapses[syni, DSWt] >= 0 { - Synapses[syni, DSWt] *= (mx - swt) - } else { - Synapses[syni, DSWt] *= (swt - mn) - } - avgDWt += Synapses[syni, DSWt] - } - avgDWt /= float32(nCons) - avgDWt *= pj.Params.SWts.Adapt.SubMean - for _, syi := range syIndexes { - syni := pj.SynStIndex + syi - Synapses[syni, SWt] += lr * (Synapses[syni, DSWt] - avgDWt) - swt := Synapses[syni, SWt] - Synapses[syni, DSWt] = 0 - Synapses[syni, LWt] = pj.Params.SWts.LWtFromWts(Synapses[syni, Wt], swt) - Synapses[syni, Wt] = pj.Params.SWts.WtValue(swt, Synapses[syni, LWt]) + mx := pt.SWts.Limit.Max + mn := pt.SWts.Limit.Min + lr := pt.SWts.Adapt.LRate + + cni := pt.Indexes.RecvConSt + lni + synn := PathRecvCon[cni, Nitems] + synst := pt.Indexes.RecvSynSt + PathRecvCon[cni, StartOff] + + avgDWt := float32(0) + for ci := uint32(0); ci < synn; ci++ { + syni := RecvSynIxs.Value(int(synst + ci)) + swt := Synapses[syni, SWt] + // softbound for SWt + if Synapses[syni, DSWt] >= 0 { + Synapses[syni, DSWt] *= (mx - swt) + } else { + Synapses[syni, DSWt] *= (swt - mn) } + avgDWt += Synapses[syni, DSWt] + } + avgDWt /= float32(synn) + avgDWt *= pt.SWts.Adapt.SubMean + for ci := uint32(0); ci < synn; ci++ { + syni := RecvSynIxs.Value(int(synst + ci)) + Synapses[syni, SWt] += lr * (Synapses[syni, DSWt] - avgDWt) + swt := Synapses[syni, SWt] + Synapses[syni, DSWt] = 0.0 + Synapses[syni, LWt] = pt.SWts.LWtFromWts(Synapses[syni, Wt], swt) + Synapses[syni, Wt] = pt.SWts.WtValue(swt, Synapses[syni, LWt]) } } // SynScale performs synaptic scaling based on running average activation vs. targets. // Layer-level AvgDifFromTrgAvg function must be called first. -func (pj *Path) SynScale(ctx *Context) { - if pj.Params.Learn.Learn.IsFalse() || pj.Params.IsInhib() { +func (pt *PathParams) SynScale(ctx *Context, rlay *LayerParams, pti, ri, lni uint32) { + if pt.Learn.Learn.IsFalse() || pt.IsInhib() { return } - rlay := pj.Recv - if !rlay.Params.IsLearnTrgAvg() { + if !rlay.IsLearnTrgAvg() { return } - tp := &rlay.Params.Learn.TrgAvgAct - lr := tp.SynScaleRate - for lni := uint32(0); lni < rlay.NNeurons; lni++ { - ri := rlay.NeurStIndex + lni - if NeuronIsOff(ri) { - continue - } - adif := -lr * NeuronAvgs[ri, AvgDif] - syIndexes := pj.RecvSynIxs(lni) - for _, syi := range syIndexes { - syni := pj.SynStIndex + syi - lwt := Synapses[syni, LWt] - swt := Synapses[syni, SWt] - if adif >= 0 { // key to have soft bounding on lwt here! 
- Synapses[syni, LWt] += (1 - lwt) * adif * swt - } else { - Synapses[syni, LWt] += lwt * adif * swt - } - Synapses[syni, Wt] = pj.Params.SWts.WtValue(swt, Synapses[syni, LWt]) + lr := rlay.Learn.TrgAvgAct.SynScaleRate + + cni := pt.Indexes.RecvConSt + lni + synn := PathRecvCon[cni, Nitems] + synst := pt.Indexes.RecvSynSt + PathRecvCon[cni, StartOff] + adif := -lr * NeuronAvgs[ri, AvgDif] + for ci := uint32(0); ci < synn; ci++ { + syni := RecvSynIxs.Value(int(synst + ci)) + lwt := Synapses[syni, LWt] + swt := Synapses[syni, SWt] + if adif >= 0 { // key to have soft bounding on lwt here! + Synapses[syni, LWt] += (1 - lwt) * adif * swt + } else { + Synapses[syni, LWt] += lwt * adif * swt } + Synapses[syni, Wt] = pt.SWts.WtValue(swt, Synapses[syni, LWt]) } } +//gosl:end + // LRateMod sets the LRate modulation parameter for Paths, which is // for dynamic modulation of learning rate (see also LRateSched). // Updates the effective learning rate factor accordingly. -func (pj *Path) LRateMod(mod float32) { - pj.Params.Learn.LRate.Mod = mod - pj.Params.Learn.LRate.Update() +func (pt *Path) LRateMod(mod float32) { + pt.Params.Learn.LRate.Mod = mod + pt.Params.Learn.LRate.Update() } // LRateSched sets the schedule-based learning rate multiplier. // See also LRateMod. // Updates the effective learning rate factor accordingly. -func (pj *Path) LRateSched(sched float32) { - pj.Params.Learn.LRate.Sched = sched - pj.Params.Learn.LRate.Update() +func (pt *Path) LRateSched(sched float32) { + pt.Params.Learn.LRate.Sched = sched + pt.Params.Learn.LRate.Update() } diff --git a/axon/learn.go b/axon/learn.go index d4a81c91..567da948 100644 --- a/axon/learn.go +++ b/axon/learn.go @@ -545,19 +545,19 @@ func (sp *SWtParams) SigFromLinWt(lw float32) float32 { // wt is centered at 1, and normed in range +/- 1 around that, // return value is in 0-1 range, centered at .5 func (sp *SWtParams) LinFromSigWt(wt float32) float32 { - wt *= 0.5 - if wt < 0 { - wt = 0 - } else if wt > 1 { - wt = 1 + wte := wt * 0.5 + if wte < 0 { + wte = 0 + } else if wte > 1 { + wte = 1 } if sp.Adapt.SigGain == 1 { - return wt + return wte } if sp.Adapt.SigGain == 6 { - return SigInvFun61(wt) + return SigInvFun61(wte) } - return SigInvFun(wt, sp.Adapt.SigGain, 1) + return SigInvFun(wte, sp.Adapt.SigGain, 1) } // LWtFromWts returns linear, learning LWt from wt and swt. @@ -803,7 +803,7 @@ type LearnSynParams struct { Trace TraceParams `display:"inline"` // kinase calcium Ca integration parameters: using linear regression parameters - KinaseCa kinase.SynCaLinear `display:"inline"` + KinaseCa kinase.SynCaLinear `display:"no-inline"` // hebbian learning option, which overrides the default learning rules Hebb HebbParams `display:"inline"` diff --git a/axon/learn.goal b/axon/learn.goal index 7da47ee0..28724da3 100644 --- a/axon/learn.goal +++ b/axon/learn.goal @@ -543,19 +543,19 @@ func (sp *SWtParams) SigFromLinWt(lw float32) float32 { // wt is centered at 1, and normed in range +/- 1 around that, // return value is in 0-1 range, centered at .5 func (sp *SWtParams) LinFromSigWt(wt float32) float32 { - wt *= 0.5 - if wt < 0 { - wt = 0 - } else if wt > 1 { - wt = 1 + wte := wt * 0.5 + if wte < 0 { + wte = 0 + } else if wte > 1 { + wte = 1 } if sp.Adapt.SigGain == 1 { - return wt + return wte } if sp.Adapt.SigGain == 6 { - return SigInvFun61(wt) + return SigInvFun61(wte) } - return SigInvFun(wt, sp.Adapt.SigGain, 1) + return SigInvFun(wte, sp.Adapt.SigGain, 1) } // LWtFromWts returns linear, learning LWt from wt and swt. 
@@ -801,7 +801,7 @@ type LearnSynParams struct { Trace TraceParams `display:"inline"` // kinase calcium Ca integration parameters: using linear regression parameters - KinaseCa kinase.SynCaLinear `display:"inline"` + KinaseCa kinase.SynCaLinear `display:"no-inline"` // hebbian learning option, which overrides the default learning rules Hebb HebbParams `display:"inline"` diff --git a/axon/shaders/DWtSubMeanPath.wgsl b/axon/shaders/DWtSubMeanNeuron.wgsl similarity index 96% rename from axon/shaders/DWtSubMeanPath.wgsl rename to axon/shaders/DWtSubMeanNeuron.wgsl index fadc1834..67906b8d 100644 --- a/axon/shaders/DWtSubMeanPath.wgsl +++ b/axon/shaders/DWtSubMeanNeuron.wgsl @@ -1,5 +1,5 @@ // Code generated by "gosl"; DO NOT EDIT -// kernel: DWtSubMeanPath +// kernel: DWtSubMeanNeuron // // Layers are all the layer parameters. @group(0) @binding(0) @@ -54,7 +54,7 @@ alias GPUVars = i32; @compute @workgroup_size(64, 1, 1) fn main(@builtin(global_invocation_id) idx: vec3) { - DWtSubMeanPath(idx.x); + DWtSubMeanNeuron(idx.x); } fn IndexU322D(s0: u32, s1: u32, i0: u32, i1: u32) -> u32 { @@ -705,16 +705,25 @@ const LayerRewPredPos: LayerVars = 9; const LayerRewPredNeg: LayerVars = 10; ///////////// import: "learn-layer.go" +fn LayerParams_DWtSubMean(ly: ptr, ctx: ptr, ri: u32) { + var lni = ri - (*ly).Indexes.NeurSt; + var rn = (*ly).Indexes.RecvN; + for (var pi = u32(0); pi < rn; pi++) { + var pti = RecvPathIxs[IndexU321D(RecvPathIxs[0], u32((*ly).Indexes.RecvSt + pi))]; + var paths=Paths[pti]; PathParams_DWtSubMean(&paths, ctx, pti, ri, lni); + } +} ///////////// import: "learn-net.go" -fn DWtSubMeanPath(pti: u32) { //gosl:kernel +fn DWtSubMeanNeuron(ni: u32) { //gosl:kernel var ctx = Ctx[0]; - var paths=Paths[pti]; PathParams_DWtSubMean(&paths, &ctx, pti); + var li = NeuronIxs[IndexU322D(NeuronIxs[0], NeuronIxs[1], u32(ni),u32(NrnLayIndex))]; + var layers=Layers[li]; LayerParams_DWtSubMean(&layers, &ctx, ni); Ctx[0] = ctx; } ///////////// import: "learn-path.go" -fn PathParams_DWtSubMean(pt: ptr, ctx: ptr, pti: u32) { +fn PathParams_DWtSubMean(pt: ptr, ctx: ptr, pti: u32,ri: u32,lni: u32) { if ((*pt).Learn.Learn == 0) { return; } @@ -722,18 +731,16 @@ fn PathParams_DWtSubMean(pt: ptr, ctx: ptr, ctx: ptr Layers: array; +@group(0) @binding(1) +var Paths: array; +// // NetworkIxs have indexes and sizes for entire network (one only). +@group(1) @binding(0) +var NetworkIxs: array; +@group(1) @binding(1) +var NeuronIxs: array; +@group(1) @binding(2) +var SynapseIxs: array; +@group(1) @binding(3) +var PathSendCon: array; +@group(1) @binding(4) +var RecvPathIxs: array; +@group(1) @binding(5) +var PathRecvCon: array; +@group(1) @binding(6) +var RecvSynIxs: array; +// // Ctx is the current context state (one only). +@group(2) @binding(0) +var Ctx: array; +@group(2) @binding(1) +var Neurons: array; +@group(2) @binding(2) +var NeuronAvgs: array; +@group(2) @binding(3) +var LayerStates: array; +@group(2) @binding(4) +var GlobalScalars: array; +@group(2) @binding(5) +var GlobalVectors: array; +@group(2) @binding(6) +var Exts: array; +// // Pools are the [PoolVars] float32 state values for layer and sub-pool inhibition, // Including the float32 AvgMax values by Phase and variable: use [AvgMaxVarIndex]. 
// [Layer * Pools][PoolVars+AvgMax][Data] +@group(3) @binding(0) +var Pools: array; +@group(3) @binding(1) +var PoolsInt: array>; +@group(3) @binding(2) +var PathGBuf: array; +@group(3) @binding(3) +var PathGSyns: array; +@group(3) @binding(4) +var Synapses: array; +@group(3) @binding(5) +var SynapseTraces: array; + +alias GPUVars = i32; + +@compute @workgroup_size(64, 1, 1) +fn main(@builtin(global_invocation_id) idx: vec3) { + SlowAdaptLayer(idx.x); +} + +fn IndexU322D(s0: u32, s1: u32, i0: u32, i1: u32) -> u32 { + return u32(2) + s0 * i0 + s1 * i1; +} + +fn IndexU321D(s0: u32, i0: u32) -> u32 { + return u32(1) + s0 * i0; +} + +fn IndexF323D(s0: f32, s1: f32, s2: f32, i0: u32, i1: u32, i2: u32) -> u32 { + return u32(3) + bitcast(s0) * i0 + bitcast(s1) * i1 + bitcast(s2) * i2; +} + +fn IndexF322D(s0: f32, s1: f32, i0: u32, i1: u32) -> u32 { + return u32(2) + bitcast(s0) * i0 + bitcast(s1) * i1; +} + +fn IndexI323D(s0: i32, s1: i32, s2: i32, i0: u32, i1: u32, i2: u32) -> u32 { + return u32(3) + u32(s0) * i0 + u32(s1) * i1 + u32(s2) * i2; +} + + +///////////// import: "vars.go" + +///////////// import: "act-layer.go" +fn LayerParams_IsInput(ly: ptr) -> bool { + switch ((*ly).Type) { + case InputLayer: { + return true; + } + default: { + return false; + } + } +} + +///////////// import: "act-net.go" + +///////////// import: "act-path.go" +alias PathGTypes = i32; //enums:enum +const ExcitatoryG: PathGTypes = 0; +const InhibitoryG: PathGTypes = 1; +const ModulatoryG: PathGTypes = 2; +const MaintG: PathGTypes = 3; +const ContextG: PathGTypes = 4; +struct SynComParams { + GType: PathGTypes, + Delay: u32, + MaxDelay: u32, + DelLen: u32, +} +struct PathScaleParams { + Rel: f32, + Abs: f32, + pad: f32, + pad1: f32, +} + +///////////// import: "act.go" +fn NeuronHasFlag(flag: NeuronFlags, ni: u32,di: u32) -> bool { + return (NeuronFlags(bitcast(Neurons[IndexF323D(Neurons[0], Neurons[1], Neurons[2], u32(ni),u32(di),u32(NeurFlags))])) & flag) > 0; // weird: != 0 does NOT work on GPU +} +fn NeuronIsOff(ni: u32) -> bool { + return NeuronHasFlag(NeuronOff, ni, u32(u32(0))); +} +struct SpikeParams { + Thr: f32, + VmR: f32, + Tr: i32, + RTau: f32, + Exp: i32, + ExpSlope: f32, + ExpThr: f32, + MaxHz: f32, + ISITau: f32, + ISIDt: f32, + RDt: f32, + pad: i32, +} +struct DendParams { + GbarExp: f32, + GbarR: f32, + SSGi: f32, + HasMod: i32, + ModGain: f32, + ModACh: i32, + ModBase: f32, + pad: i32, +} +struct ActInitParams { + Vm: f32, + Act: f32, + GeBase: f32, + GiBase: f32, + GeVar: f32, + GiVar: f32, + pad: i32, + pad1: i32, +} +struct DecayParams { + Act: f32, + Glong: f32, + AHP: f32, + LearnCa: f32, + OnRew: i32, + pad: f32, + pad1: f32, + pad2: f32, +} +struct DtParams { + Integ: f32, + VmTau: f32, + VmDendTau: f32, + VmSteps: i32, + GeTau: f32, + GiTau: f32, + IntTau: f32, + LongAvgTau: f32, + MaxCycStart: i32, + VmDt: f32, + VmDendDt: f32, + DtStep: f32, + GeDt: f32, + GiDt: f32, + IntDt: f32, + LongAvgDt: f32, +} +struct SpikeNoiseParams { + On: i32, + GeHz: f32, + Ge: f32, + GiHz: f32, + Gi: f32, + MaintGe: i32, + GeExpInt: f32, + GiExpInt: f32, +} +struct ClampParams { + IsInput: i32, + IsTarget: i32, + Ge: f32, + Add: i32, + ErrThr: f32, + pad: f32, + pad1: f32, + pad2: f32, +} +struct SMaintParams { + On: i32, + NNeurons: f32, + Gbar: f32, + Inhib: f32, + ISI: F32, +} +struct PopCodeParams { + On: i32, + Ge: f32, + Min: f32, + Max: f32, + MinAct: f32, + MinSigma: f32, + MaxSigma: f32, + Clip: i32, +} +struct ActParams { + Spikes: SpikeParams, + Dend: DendParams, + Init: ActInitParams, + Decay: 
DecayParams, + Dt: DtParams, + Gbar: Chans, + Erev: Chans, + Clamp: ClampParams, + Noise: SpikeNoiseParams, + VmRange: F32, + Mahp: MahpParams, + Sahp: SahpParams, + KNa: KNaMedSlow, + Kir: KirParams, + NMDA: NMDAParams, + MaintNMDA: NMDAParams, + GabaB: GABABParams, + VGCC: VGCCParams, + AK: AKsParams, + SKCa: SKCaParams, + SMaint: SMaintParams, + PopCode: PopCodeParams, +} + +///////////// import: "chans-ak.go" +struct AKsParams { + Gbar: f32, + Hf: f32, + Mf: f32, + Voff: f32, + Vmax: f32, + pad: i32, + pad1: i32, + pad2: i32, +} + +///////////// import: "chans-chans.go" +struct Chans { + E: f32, + L: f32, + I: f32, + K: f32, +} + +///////////// import: "chans-gabab.go" +struct GABABParams { + Gbar: f32, + RiseTau: f32, + DecayTau: f32, + Gbase: f32, + GiSpike: f32, + MaxTime: f32, + TauFact: f32, + RiseDt: f32, + DecayDt: f32, + pad: f32, + pad1: f32, + pad2: f32, +} + +///////////// import: "chans-kir.go" +struct KirParams { + Gbar: f32, + MinfOff: f32, + MinfTau: f32, + RiseOff: f32, + RiseTau: f32, + DecayOff: f32, + DecayTau: f32, + Mrest: f32, +} + +///////////// import: "chans-kna.go" +struct KNaParams { + On: i32, + Rise: f32, + Max: f32, + Tau: f32, + Dt: f32, + pad: i32, + pad1: i32, + pad2: i32, +} +struct KNaMedSlow { + On: i32, + TrialSlow: i32, + pad: i32, + pad1: i32, + Med: KNaParams, + Slow: KNaParams, +} + +///////////// import: "chans-mahp.go" +struct MahpParams { + Gbar: f32, + Voff: f32, + Vslope: f32, + TauMax: f32, + Tadj: f32, + DtMax: f32, + pad: i32, + pad2: i32, +} + +///////////// import: "chans-nmda.go" +struct NMDAParams { + Gbar: f32, + Tau: f32, + ITau: f32, + MgC: f32, + Voff: f32, + Dt: f32, + IDt: f32, + MgFact: f32, +} + +///////////// import: "chans-sahp.go" +struct SahpParams { + Gbar: f32, + CaTau: f32, + Off: f32, + Slope: f32, + TauMax: f32, + CaDt: f32, + DtMax: f32, + pad: i32, +} + +///////////// import: "chans-skca.go" +struct SKCaParams { + Gbar: f32, + C50: f32, + ActTau: f32, + DeTau: f32, + KCaR: f32, + CaRDecayTau: f32, + CaInThr: f32, + CaInTau: f32, + ActDt: f32, + DeDt: f32, + CaRDecayDt: f32, + CaInDt: f32, +} + +///////////// import: "chans-vgcc.go" +struct VGCCParams { + Gbar: f32, + Ca: f32, + pad: i32, + pad1: i32, +} + +///////////// import: "context.go" +struct Context { + NData: u32, + Mode: i32, + Testing: i32, + Phase: i32, + PlusPhase: i32, + PhaseCycle: i32, + Cycle: i32, + ThetaCycles: i32, + CyclesTotal: i32, + Time: f32, + TrialsTotal: i32, + TimePerCycle: f32, + SlowInterval: i32, + SlowCounter: i32, + pad: i32, + pad1: i32, + RandCounter: RandCounter, +} + +///////////// import: "deep-layer.go" +struct BurstParams { + ThrRel: f32, + ThrAbs: f32, + pad: f32, + pad1: f32, +} +struct CTParams { + GeGain: f32, + DecayTau: f32, + OFCposPT: i32, + DecayDt: f32, +} +struct PulvParams { + DriveScale: f32, + FullDriveAct: f32, + DriveLayIndex: i32, + pad: f32, +} + +///////////// import: "deep-path.go" + +///////////// import: "enumgen.go" +const PathGTypesN: PathGTypes = 5; +const GlobalScalarVarsN: GlobalScalarVars = 57; +const GlobalVectorVarsN: GlobalVectorVars = 10; +const GPUVarsN: GPUVars = 22; +const LayerTypesN: LayerTypes = 30; +const LayerVarsN: LayerVars = 11; +const ViewTimesN: ViewTimes = 7; +const DAModTypesN: DAModTypes = 4; +const ValenceTypesN: ValenceTypes = 3; +const NeuronFlagsN: NeuronFlags = 9; +const NeuronVarsN: NeuronVars = 90; +const NeuronAvgVarsN: NeuronAvgVars = 7; +const NeuronIndexVarsN: NeuronIndexVars = 3; +const PathTypesN: PathTypes = 12; +const GPLayerTypesN: GPLayerTypes = 3; +const 
PoolIntVarsN: PoolIntVars = 10; +const AvgMaxN: AvgMax = 2; +const AvgMaxPhasesN: AvgMaxPhases = 4; +const AvgMaxVarsN: AvgMaxVars = 7; +const SynapseVarsN: SynapseVars = 5; +const SynapseTraceVarsN: SynapseTraceVars = 3; +const SynapseIndexVarsN: SynapseIndexVars = 3; + +///////////// import: "fsfffb-enumgen.go" +const InhibVarsN: InhibVars = 16; + +///////////// import: "fsfffb-fsfffb.go" +struct GiParams { + On: i32, + Gi: f32, + FB: f32, + FSTau: f32, + SS: f32, + SSfTau: f32, + SSiTau: f32, + FS0: f32, + FFAvgTau: f32, + FFPrv: f32, + ClampExtMin: f32, + FSDt: f32, + SSfDt: f32, + SSiDt: f32, + FFAvgDt: f32, + pad: f32, +} + +///////////// import: "fsfffb-inhib.go" +alias InhibVars = i32; //enums:enum +const FFsRaw: InhibVars = 0; +const FBsRaw: InhibVars = 1; +const GeExtRaw: InhibVars = 2; +const FFs: InhibVars = 3; +const FBs: InhibVars = 4; +const GeExts: InhibVars = 5; +const FSi: InhibVars = 6; +const SSi: InhibVars = 7; +const SSf: InhibVars = 8; +const FSGi: InhibVars = 9; +const SSGi: InhibVars = 10; +const TotalGi: InhibVars = 11; +const GiOrig: InhibVars = 12; +const LayGi: InhibVars = 13; +const FFAvg: InhibVars = 14; +const FFAvgPrv: InhibVars = 15; + +///////////// import: "globals.go" +alias GlobalScalarVars = i32; //enums:enum +const GvRew: GlobalScalarVars = 0; +const GvHasRew: GlobalScalarVars = 1; +const GvRewPred: GlobalScalarVars = 2; +const GvPrevPred: GlobalScalarVars = 3; +const GvHadRew: GlobalScalarVars = 4; +const GvDA: GlobalScalarVars = 5; +const GvDAtonic: GlobalScalarVars = 6; +const GvACh: GlobalScalarVars = 7; +const GvNE: GlobalScalarVars = 8; +const GvSer: GlobalScalarVars = 9; +const GvAChRaw: GlobalScalarVars = 10; +const GvGoalMaint: GlobalScalarVars = 11; +const GvVSMatrixJustGated: GlobalScalarVars = 12; +const GvVSMatrixHasGated: GlobalScalarVars = 13; +const GvCuriosityPoolGated: GlobalScalarVars = 14; +const GvTime: GlobalScalarVars = 15; +const GvEffort: GlobalScalarVars = 16; +const GvUrgencyRaw: GlobalScalarVars = 17; +const GvUrgency: GlobalScalarVars = 18; +const GvHasPosUS: GlobalScalarVars = 19; +const GvHadPosUS: GlobalScalarVars = 20; +const GvNegUSOutcome: GlobalScalarVars = 21; +const GvHadNegUSOutcome: GlobalScalarVars = 22; +const GvPVposSum: GlobalScalarVars = 23; +const GvPVpos: GlobalScalarVars = 24; +const GvPVnegSum: GlobalScalarVars = 25; +const GvPVneg: GlobalScalarVars = 26; +const GvPVposEst: GlobalScalarVars = 27; +const GvPVposVar: GlobalScalarVars = 28; +const GvPVnegEst: GlobalScalarVars = 29; +const GvPVnegVar: GlobalScalarVars = 30; +const GvGoalDistEst: GlobalScalarVars = 31; +const GvGoalDistPrev: GlobalScalarVars = 32; +const GvProgressRate: GlobalScalarVars = 33; +const GvGiveUpUtility: GlobalScalarVars = 34; +const GvContUtility: GlobalScalarVars = 35; +const GvGiveUpTiming: GlobalScalarVars = 36; +const GvContTiming: GlobalScalarVars = 37; +const GvGiveUpProgress: GlobalScalarVars = 38; +const GvContProgress: GlobalScalarVars = 39; +const GvGiveUpSum: GlobalScalarVars = 40; +const GvContSum: GlobalScalarVars = 41; +const GvGiveUpProb: GlobalScalarVars = 42; +const GvGiveUp: GlobalScalarVars = 43; +const GvGaveUp: GlobalScalarVars = 44; +const GvVSPatchPos: GlobalScalarVars = 45; +const GvVSPatchPosThr: GlobalScalarVars = 46; +const GvVSPatchPosRPE: GlobalScalarVars = 47; +const GvVSPatchPosSum: GlobalScalarVars = 48; +const GvVSPatchPosPrev: GlobalScalarVars = 49; +const GvVSPatchPosVar: GlobalScalarVars = 50; +const GvLHbDip: GlobalScalarVars = 51; +const GvLHbBurst: GlobalScalarVars = 52; +const GvLHbPVDA: 
GlobalScalarVars = 53; +const GvCeMpos: GlobalScalarVars = 54; +const GvCeMneg: GlobalScalarVars = 55; +const GvVtaDA: GlobalScalarVars = 56; +const MaxGlobalVecN = 16; +alias GlobalVectorVars = i32; //enums:enum +const GvCost: GlobalVectorVars = 0; +const GvCostRaw: GlobalVectorVars = 1; +const GvUSneg: GlobalVectorVars = 2; +const GvUSnegRaw: GlobalVectorVars = 3; +const GvDrives: GlobalVectorVars = 4; +const GvUSpos: GlobalVectorVars = 5; +const GvVSPatchD1: GlobalVectorVars = 6; +const GvVSPatchD2: GlobalVectorVars = 7; +const GvOFCposPTMaint: GlobalVectorVars = 8; +const GvVSMatrixPoolGated: GlobalVectorVars = 9; + +///////////// import: "hip_paths.go" +struct HipPathParams { + Hebb: f32, + Err: f32, + SAvgCor: f32, + SAvgThr: f32, + SNominal: f32, + pad: f32, + pad1: f32, + pad2: f32, +} + +///////////// import: "inhib.go" +struct ActAvgParams { + Nominal: f32, + AdaptGi: i32, + Offset: f32, + HiTol: f32, + LoTol: f32, + AdaptRate: f32, + pad: f32, + pad1: f32, +} +fn ActAvgParams_Adapt(aa: ptr, gimult: ptr, act: f32) -> bool { + var trg = (*aa).Nominal + (*aa).Offset; + var del = (act - trg) / trg; + if (del < -(*aa).LoTol || del > (*aa).HiTol) { + *gimult += (*aa).AdaptRate * del;return true; + }return false; +} +struct InhibParams { + ActAvg: ActAvgParams, + Layer: GiParams, + Pool: GiParams, +} + +///////////// import: "init-layer.go" + +///////////// import: "kinase-params.go" +struct CaDtParams { //types:add + MTau: f32, + PTau: f32, + DTau: f32, + MDt: f32, + PDt: f32, + DDt: f32, + pad: i32, + pad1: i32, +} +struct NeurCaParams { + SpikeG: f32, + SynTau: f32, + SynDt: f32, + pad: i32, + Dt: CaDtParams, +} +struct SynCaParams { //types:add + CaScale: f32, + pad: i32, + pad1: i32, + pad2: i32, + Dt: CaDtParams, +} +struct BinWeights { //types:add + Bin0: f32, + Bin1: f32, + Bin2: f32, + Bin3: f32, + Bin4: f32, + Bin5: f32, + Bin6: f32, + Bin7: f32, +} +struct SynCaLinear { //types:add + CaP: BinWeights, + CaD: BinWeights, + CaGain: f32, + pad: f32, + pad1: f32, + pad2: f32, +} + +///////////// import: "layerparams.go" +struct LayerIndexes { + NPools: u32, + NeurSt: u32, + NNeurons: u32, + RecvSt: u32, + RecvN: u32, + SendSt: u32, + SendN: u32, + ExtsSt: u32, + ShpPlY: i32, + ShpPlX: i32, + ShpUnY: i32, + ShpUnX: i32, +} +struct LayerInhibIndexes { + Index1: i32, + Index2: i32, + Index3: i32, + Index4: i32, +} +struct LayerParams { + Type: LayerTypes, + Index: u32, + MaxData: u32, + PoolSt: u32, + Acts: ActParams, + Inhib: InhibParams, + LayInhib: LayerInhibIndexes, + Learn: LearnNeurParams, + Bursts: BurstParams, + CT: CTParams, + Pulv: PulvParams, + Matrix: MatrixParams, + GP: GPParams, + LDT: LDTParams, + VTA: VTAParams, + RWPred: RWPredParams, + RWDa: RWDaParams, + TDInteg: TDIntegParams, + TDDa: TDDaParams, + Indexes: LayerIndexes, +} +fn LayerParams_PoolIndex(ly: ptr, pi: u32) -> u32 { + return (*ly).PoolSt + pi; +} + +///////////// import: "layertypes.go" +alias LayerTypes = i32; //enums:enum +const SuperLayer: LayerTypes = 0; +const InputLayer: LayerTypes = 1; +const TargetLayer: LayerTypes = 2; +const CompareLayer: LayerTypes = 3; +const CTLayer: LayerTypes = 4; +const PulvinarLayer: LayerTypes = 5; +const TRNLayer: LayerTypes = 6; +const PTMaintLayer: LayerTypes = 7; +const PTPredLayer: LayerTypes = 8; +const MatrixLayer: LayerTypes = 9; +const STNLayer: LayerTypes = 10; +const GPLayer: LayerTypes = 11; +const BGThalLayer: LayerTypes = 12; +const VSGatedLayer: LayerTypes = 13; +const BLALayer: LayerTypes = 14; +const CeMLayer: LayerTypes = 15; +const VSPatchLayer: 
LayerTypes = 16; +const LHbLayer: LayerTypes = 17; +const DrivesLayer: LayerTypes = 18; +const UrgencyLayer: LayerTypes = 19; +const USLayer: LayerTypes = 20; +const PVLayer: LayerTypes = 21; +const LDTLayer: LayerTypes = 22; +const VTALayer: LayerTypes = 23; +const RewLayer: LayerTypes = 24; +const RWPredLayer: LayerTypes = 25; +const RWDaLayer: LayerTypes = 26; +const TDPredLayer: LayerTypes = 27; +const TDIntegLayer: LayerTypes = 28; +const TDDaLayer: LayerTypes = 29; + +///////////// import: "layervars.go" +alias LayerVars = i32; //enums:enum +const LayerActMAvg: LayerVars = 0; +const LayerActPAvg: LayerVars = 1; +const LayerAvgMaxGeM: LayerVars = 2; +const LayerAvgMaxGiM: LayerVars = 3; +const LayerGiMult: LayerVars = 4; +const LayerPhaseDiff: LayerVars = 5; +const LayerPhaseDiffAvg: LayerVars = 6; +const LayerPhaseDiffVar: LayerVars = 7; +const LayerRT: LayerVars = 8; +const LayerRewPredPos: LayerVars = 9; +const LayerRewPredNeg: LayerVars = 10; + +///////////// import: "learn-layer.go" +fn LayerParams_SlowAdaptLayer(ly: ptr, ctx: ptr) { + LayerParams_AdaptInhib(ly, ctx); + LayerParams_AvgDifFromTrgAvg(ly, ctx); +} +fn LayerParams_AdaptInhib(ly: ptr, ctx: ptr) { + if ((*ly).Inhib.ActAvg.AdaptGi == 0 || LayerParams_IsInput(ly)) { + return; + } + for (var di = u32(0); di < (*ctx).NData; di++) { + var giMult = LayerStates[IndexF323D(LayerStates[0], LayerStates[1], LayerStates[2], u32((*ly).Index),u32(di),u32(LayerGiMult))]; + var avg = LayerStates[IndexF323D(LayerStates[0], LayerStates[1], LayerStates[2], u32((*ly).Index),u32(di),u32(LayerActMAvg))]; + ActAvgParams_Adapt(&(*ly).Inhib.ActAvg, &giMult, avg); + LayerStates[IndexF323D(LayerStates[0], LayerStates[1], LayerStates[2], u32((*ly).Index),u32(di),u32(LayerGiMult))] = giMult; + } +} +fn LayerParams_AvgDifFromTrgAvg(ly: ptr, ctx: ptr) { + var sp = u32(0); + if ((*ly).Indexes.NPools > 1) { + sp = u32(1); + } + var np = (*ly).Indexes.NPools; + for (var spi = sp; spi < np; spi++) { + var pi = LayerParams_PoolIndex(ly, spi); + var nsi = PoolsInt[IndexI323D(PoolsInt[0], PoolsInt[1], PoolsInt[2], u32(pi),u32(0),u32(PoolNeurSt))]; + var nei = PoolsInt[IndexI323D(PoolsInt[0], PoolsInt[1], PoolsInt[2], u32(pi),u32(0),u32(PoolNeurEd))]; + var plavg = f32(0); + var nn = 0; + for (var lni = nsi; lni < nei; lni++) { + var ni = (*ly).Indexes.NeurSt + u32(lni); + if (NeuronIsOff(ni)) { + continue; + } + plavg += NeuronAvgs[IndexF322D(NeuronAvgs[0], NeuronAvgs[1], u32(ni),u32(ActAvg))]; + nn++; + } + if (nn == 0) { + continue; + } + plavg /= f32(nn); + if (plavg < 0.0001) { // gets unstable below here + continue; + } + PoolAvgDifInit(pi, u32(u32(0))); + for (var lni = nsi; lni < nei; lni++) { + var ni = (*ly).Indexes.NeurSt + u32(lni); + if (NeuronIsOff(ni)) { + continue; + } + var apct = NeuronAvgs[IndexF322D(NeuronAvgs[0], NeuronAvgs[1], u32(ni),u32(ActAvg))] / plavg; + var adif = apct - NeuronAvgs[IndexF322D(NeuronAvgs[0], NeuronAvgs[1], u32(ni),u32(TrgAvg))]; + NeuronAvgs[IndexF322D(NeuronAvgs[0], NeuronAvgs[1], u32(ni),u32(AvgPct))] = apct; + NeuronAvgs[IndexF322D(NeuronAvgs[0], NeuronAvgs[1], u32(ni),u32(AvgDif))] = adif; + PoolAvgDifUpdate(pi, u32(u32(0)), abs(adif)); + } + PoolAvgDifCalc(pi, u32(u32(0))); + for (var di = u32(1); di < (*ctx).NData; di++) { // copy to other datas + Pools[IndexF323D(Pools[0], Pools[1], Pools[2], u32(pi),u32(di),u32(AvgMaxVarIndex(AMAvgDif, AMCycle, Avg)))] = Pools[IndexF323D(Pools[0], Pools[1], Pools[2], u32(pi),u32(0),u32(AvgMaxVarIndex(AMAvgDif, AMCycle, Avg)))]; + Pools[IndexF323D(Pools[0], Pools[1], 
Pools[2], u32(pi),u32(di),u32(AvgMaxVarIndex(AMAvgDif, AMCycle, Max)))] = Pools[IndexF323D(Pools[0], Pools[1], Pools[2], u32(pi),u32(0),u32(AvgMaxVarIndex(AMAvgDif, AMCycle, Max)))]; + } + } + if (sp == 1) { // update layer pool + var lpi = LayerParams_PoolIndex(ly, u32(u32(0))); + PoolAvgDifInit(lpi, u32(u32(0))); + var nsi = PoolsInt[IndexI323D(PoolsInt[0], PoolsInt[1], PoolsInt[2], u32(lpi),u32(0),u32(PoolNeurSt))]; + var nei = PoolsInt[IndexI323D(PoolsInt[0], PoolsInt[1], PoolsInt[2], u32(lpi),u32(0),u32(PoolNeurEd))]; + for (var lni = nsi; lni < nei; lni++) { + var ni = (*ly).Indexes.NeurSt + u32(lni); + if (NeuronIsOff(ni)) { + continue; + } + PoolAvgDifUpdate(lpi, u32(u32(0)), abs(NeuronAvgs[IndexF322D(NeuronAvgs[0], NeuronAvgs[1], u32(ni),u32(AvgDif))])); + } + PoolAvgDifCalc(lpi, u32(u32(0))); + for (var di = u32(1); di < (*ctx).NData; di++) { // copy to other datas + Pools[IndexF323D(Pools[0], Pools[1], Pools[2], u32(lpi),u32(di),u32(AvgMaxVarIndex(AMAvgDif, AMCycle, Avg)))] = Pools[IndexF323D(Pools[0], Pools[1], Pools[2], u32(lpi),u32(0),u32(AvgMaxVarIndex(AMAvgDif, AMCycle, Avg)))]; + Pools[IndexF323D(Pools[0], Pools[1], Pools[2], u32(lpi),u32(di),u32(AvgMaxVarIndex(AMAvgDif, AMCycle, Max)))] = Pools[IndexF323D(Pools[0], Pools[1], Pools[2], u32(lpi),u32(0),u32(AvgMaxVarIndex(AMAvgDif, AMCycle, Max)))]; + } + } +} + +///////////// import: "learn-net.go" +fn SlowAdaptLayer(li: u32) { //gosl:kernel + var ctx = Ctx[0]; + var layers=Layers[li]; LayerParams_SlowAdaptLayer(&layers, &ctx); + Ctx[0] = ctx; +} + +///////////// import: "learn-path.go" + +///////////// import: "learn.go" +struct CaLrnParams { + Norm: f32, + SpkVGCC: i32, + SpkVgccCa: f32, + VgccTau: f32, + Dt: CaDtParams, + UpdateThr: f32, + VgccDt: f32, + NormInv: f32, + pad: i32, +} +struct TrgAvgActParams { + GiBaseInit: f32, + RescaleOn: i32, + ErrLRate: f32, + SynScaleRate: f32, + SubMean: f32, + Permute: i32, + Pool: i32, + pad: i32, + TrgRange: F32, +} +struct RLRateParams { + On: i32, + SigmoidLinear: i32, + SigmoidMin: f32, + Diff: i32, + SpkThr: f32, + DiffThr: f32, + Min: f32, + pad: i32, +} +struct LearnNeurParams { + CaLearn: CaLrnParams, + CaSpk: NeurCaParams, + LrnNMDA: NMDAParams, + TrgAvgAct: TrgAvgActParams, + RLRate: RLRateParams, + NeuroMod: NeuroModParams, +} +struct SWtInitParams { + SPct: f32, + Mean: f32, + Var: f32, + Sym: i32, +} +struct SWtAdaptParams { + On: i32, + LRate: f32, + SubMean: f32, + SigGain: f32, +} +struct SWtParams { + Init: SWtInitParams, + Adapt: SWtAdaptParams, + Limit: F32, +} +struct LRateParams { + Base: f32, + Sched: f32, + Mod: f32, + Eff: f32, +} +struct TraceParams { + Tau: f32, + SubMean: f32, + LearnThr: f32, + Dt: f32, +} +struct LRateMod { + On: i32, + Base: f32, + pad: i32, + pad1: i32, + Range: F32, +} +struct HebbParams { + On: i32, + Up: f32, + Down: f32, + pad: f32, +} +struct LearnSynParams { + Learn: i32, + pad: i32, + pad1: i32, + pad2: i32, + LRate: LRateParams, + Trace: TraceParams, + KinaseCa: SynCaLinear, + Hebb: HebbParams, +} + +///////////// import: "looper.go" +alias ViewTimes = i32; //enums:enum +const Cycle: ViewTimes = 0; +const FastSpike: ViewTimes = 1; +const Gamma: ViewTimes = 2; +const Beta: ViewTimes = 3; +const Alpha: ViewTimes = 4; +const Phase: ViewTimes = 5; +const Theta: ViewTimes = 6; + +///////////// import: "math32-fastexp.go" + +///////////// import: "minmax-avgmax.go" +const MaxFloat32: f32 = 3.402823466e+38; +const MinFloat32: f32 = 1.175494351e-38; +struct AvgMax32 { + Avg: f32, + Max: f32, + Sum: f32, + MaxIndex: i32, + N: i32, + 
pad: i32, + pad1: i32, + pad2: i32, +} + +///////////// import: "minmax-minmax32.go" +struct F32 { + Min: f32, + Max: f32, + pad: i32, + pad1: i32, // for gpu use +} + +///////////// import: "network.go" +struct NetworkIndexes { + MaxData: u32, + MaxDelay: u32, + NLayers: u32, + NNeurons: u32, + NPools: u32, + NPaths: u32, + NSyns: u32, + RubiconNPosUSs: u32, + RubiconNCosts: u32, + RubiconNNegUSs: u32, + GPUMaxBuffFloats: u32, + GPUSynCaBanks: u32, +} + +///////////// import: "neuromod.go" +alias DAModTypes = i32; //enums:enum +const NoDAMod: DAModTypes = 0; +const D1Mod: DAModTypes = 1; +const D2Mod: DAModTypes = 2; +const D1AbsMod: DAModTypes = 3; +alias ValenceTypes = i32; //enums:enum +const Positive: ValenceTypes = 0; +const Negative: ValenceTypes = 1; +const Cost: ValenceTypes = 2; +struct NeuroModParams { + DAMod: DAModTypes, + Valence: ValenceTypes, + DAModGain: f32, + DALRateSign: i32, + DALRateMod: f32, + AChLRateMod: f32, + AChDisInhib: f32, + BurstGain: f32, + DipGain: f32, + pad: f32, + pad1: f32, + pad2: f32, +} + +///////////// import: "neuron.go" +alias NeuronFlags = i32; //enums:enum +const NeuronOff: NeuronFlags = 1; +const NeuronHasExt: NeuronFlags = 2; +const NeuronHasTarg: NeuronFlags = 4; +const NeuronHasCmpr: NeuronFlags = 8; +alias NeuronVars = i32; //enums:enum +const Spike: NeuronVars = 0; +const Spiked: NeuronVars = 1; +const Act: NeuronVars = 2; +const ActInt: NeuronVars = 3; +const Ge: NeuronVars = 4; +const Gi: NeuronVars = 5; +const Gk: NeuronVars = 6; +const Inet: NeuronVars = 7; +const Vm: NeuronVars = 8; +const VmDend: NeuronVars = 9; +const ISI: NeuronVars = 10; +const ISIAvg: NeuronVars = 11; +const Ext: NeuronVars = 12; +const Target: NeuronVars = 13; +const CaSpkM: NeuronVars = 14; +const CaSpkP: NeuronVars = 15; +const CaSpkD: NeuronVars = 16; +const CaSpkPM: NeuronVars = 17; +const CaLrn: NeuronVars = 18; +const NrnCaM: NeuronVars = 19; +const NrnCaP: NeuronVars = 20; +const NrnCaD: NeuronVars = 21; +const CaDiff: NeuronVars = 22; +const RLRate: NeuronVars = 23; +const GnmdaSyn: NeuronVars = 24; +const Gnmda: NeuronVars = 25; +const GnmdaLrn: NeuronVars = 26; +const GnmdaMaint: NeuronVars = 27; +const NmdaCa: NeuronVars = 28; +const Gvgcc: NeuronVars = 29; +const VgccM: NeuronVars = 30; +const VgccH: NeuronVars = 31; +const VgccCa: NeuronVars = 32; +const VgccCaInt: NeuronVars = 33; +const Burst: NeuronVars = 34; +const BurstPrv: NeuronVars = 35; +const CtxtGe: NeuronVars = 36; +const CtxtGeRaw: NeuronVars = 37; +const CtxtGeOrig: NeuronVars = 38; +const GgabaB: NeuronVars = 39; +const GABAB: NeuronVars = 40; +const GABABx: NeuronVars = 41; +const Gak: NeuronVars = 42; +const SSGiDend: NeuronVars = 43; +const GknaMed: NeuronVars = 44; +const GknaSlow: NeuronVars = 45; +const Gkir: NeuronVars = 46; +const KirM: NeuronVars = 47; +const Gsk: NeuronVars = 48; +const SKCaIn: NeuronVars = 49; +const SKCaR: NeuronVars = 50; +const SKCaM: NeuronVars = 51; +const Gmahp: NeuronVars = 52; +const MahpN: NeuronVars = 53; +const Gsahp: NeuronVars = 54; +const SahpCa: NeuronVars = 55; +const SahpN: NeuronVars = 56; +const ActM: NeuronVars = 57; +const ActP: NeuronVars = 58; +const SpkSt1: NeuronVars = 59; +const SpkSt2: NeuronVars = 60; +const SpkMax: NeuronVars = 61; +const SpkMaxCa: NeuronVars = 62; +const SpkBin0: NeuronVars = 63; +const SpkBin1: NeuronVars = 64; +const SpkBin2: NeuronVars = 65; +const SpkBin3: NeuronVars = 66; +const SpkBin4: NeuronVars = 67; +const SpkBin5: NeuronVars = 68; +const SpkBin6: NeuronVars = 69; +const SpkBin7: NeuronVars = 70; 
+const SpkPrv: NeuronVars = 71; +const GeNoise: NeuronVars = 72; +const GeNoiseP: NeuronVars = 73; +const GiNoise: NeuronVars = 74; +const GiNoiseP: NeuronVars = 75; +const GeExt: NeuronVars = 76; +const GeRaw: NeuronVars = 77; +const GeSyn: NeuronVars = 78; +const GiRaw: NeuronVars = 79; +const GiSyn: NeuronVars = 80; +const GeInt: NeuronVars = 81; +const GeIntNorm: NeuronVars = 82; +const GiInt: NeuronVars = 83; +const GModRaw: NeuronVars = 84; +const GModSyn: NeuronVars = 85; +const SMaintP: NeuronVars = 86; +const GMaintRaw: NeuronVars = 87; +const GMaintSyn: NeuronVars = 88; +const NeurFlags: NeuronVars = 89; +alias NeuronAvgVars = i32; //enums:enum +const ActAvg: NeuronAvgVars = 0; +const AvgPct: NeuronAvgVars = 1; +const TrgAvg: NeuronAvgVars = 2; +const DTrgAvg: NeuronAvgVars = 3; +const AvgDif: NeuronAvgVars = 4; +const GeBase: NeuronAvgVars = 5; +const GiBase: NeuronAvgVars = 6; +alias NeuronIndexVars = i32; //enums:enum +const NrnNeurIndex: NeuronIndexVars = 0; +const NrnLayIndex: NeuronIndexVars = 1; +const NrnSubPool: NeuronIndexVars = 2; + +///////////// import: "pathparams.go" +const StartOff: i32 = 0; +const Nitems: i32 = 1; +const StartNN: i32 = 2; +struct StartN { + Start: u32, + N: u32, + pad: u32, + pad1: u32, // todo: see if we can do without these? +} +struct PathIndexes { + RecvLayer: u32, + RecvNeurSt: u32, + RecvNeurN: u32, + SendLayer: u32, + SendNeurSt: u32, + SendNeurN: u32, + SynapseSt: u32, + SendConSt: u32, + RecvConSt: u32, + RecvSynSt: u32, + NPathNeurSt: u32, + pad: u32, +} +struct GScaleValues { + Scale: f32, + Rel: f32, + pad: f32, + pad1: f32, +} +struct PathParams { + Type: PathTypes, + Index: u32, + pad: i32, + pad1: i32, + Indexes: PathIndexes, + Com: SynComParams, + PathScale: PathScaleParams, + SWts: SWtParams, + Learn: LearnSynParams, + GScale: GScaleValues, + RLPred: RLPredPathParams, + Matrix: MatrixPathParams, + BLA: BLAPathParams, + Hip: HipPathParams, +} + +///////////// import: "pathtypes.go" +alias PathTypes = i32; //enums:enum +const ForwardPath: PathTypes = 0; +const BackPath: PathTypes = 1; +const LateralPath: PathTypes = 2; +const InhibPath: PathTypes = 3; +const CTCtxtPath: PathTypes = 4; +const RWPath: PathTypes = 5; +const TDPredPath: PathTypes = 6; +const BLAPath: PathTypes = 7; +const HipPath: PathTypes = 8; +const VSPatchPath: PathTypes = 9; +const VSMatrixPath: PathTypes = 10; +const DSMatrixPath: PathTypes = 11; + +///////////// import: "pcore-layer.go" +struct MatrixParams { + GateThr: f32, + IsVS: i32, + OtherMatrixIndex: i32, + ThalLay1Index: i32, + ThalLay2Index: i32, + ThalLay3Index: i32, + ThalLay4Index: i32, + ThalLay5Index: i32, + ThalLay6Index: i32, + pad: i32, + pad1: i32, + pad2: i32, +} +alias GPLayerTypes = i32; //enums:enum +const GPePr: GPLayerTypes = 0; +const GPeAk: GPLayerTypes = 1; +const GPi: GPLayerTypes = 2; +struct GPParams { + GPType: GPLayerTypes, + pad: u32, + pad1: u32, + pad2: u32, +} + +///////////// import: "pcore-path.go" +struct MatrixPathParams { + Credit: f32, + BasePF: f32, + Delta: f32, + VSRewLearn: i32, +} + +///////////// import: "pool.go" +alias PoolIntVars = i32; //enums:enum +const PoolNeurSt: PoolIntVars = 0; +const PoolNeurEd: PoolIntVars = 1; +const PoolLayerIdx: PoolIntVars = 2; +const PoolIsLayer: PoolIntVars = 3; +const Clamped: PoolIntVars = 4; +const PoolGated: PoolIntVars = 5; +const FFsRawInt: PoolIntVars = 6; +const FBsRawInt: PoolIntVars = 7; +const GeExtRawInt: PoolIntVars = 8; +const PoolIntAvgMaxStart: PoolIntVars = 9; +alias AvgMax = i32; //enums:enum +const Avg: AvgMax = 
0; +const Max: AvgMax = 1; +alias AvgMaxPhases = i32; //enums:enum -trim-prefix AM +const AMCycle: AvgMaxPhases = 0; +const AMMinus: AvgMaxPhases = 1; +const AMPlus: AvgMaxPhases = 2; +const AMPrev: AvgMaxPhases = 3; +alias AvgMaxVars = i32; //enums:enum -trim-prefix AM +const AMCaSpkP: AvgMaxVars = 0; +const AMCaSpkD: AvgMaxVars = 1; +const AMSpkMax: AvgMaxVars = 2; +const AMAct: AvgMaxVars = 3; +const AMGeInt: AvgMaxVars = 4; +const AMGiInt: AvgMaxVars = 5; +const AMAvgDif: AvgMaxVars = 6; +const poolFloatAvgMaxStart = InhibVarsN; +const PoolVarsN = poolFloatAvgMaxStart + InhibVars(i32(AvgMaxVarsN)*i32(AvgMaxN)*i32(AvgMaxPhasesN)); +const PoolIntVarsTot = PoolIntAvgMaxStart + PoolIntVars(i32(AvgMaxVarsN)*i32(AvgMaxN)); +const avgMaxToNeuron = array(CaSpkP, CaSpkD, SpkMax, Act, GeInt, GiInt); +fn AvgMaxVarIndex(vr: AvgMaxVars, phase: AvgMaxPhases, am: AvgMax) -> u32 { + return u32(poolFloatAvgMaxStart) + u32(vr)*u32(AvgMaxN)*u32(AvgMaxPhasesN) + u32(phase)*u32(AvgMaxN) + u32(am); +} +fn AvgMaxIntVarIndex(vr: AvgMaxVars, am: AvgMax) -> u32 { + return u32(PoolIntAvgMaxStart) + u32(vr)*u32(AvgMaxN) + u32(am); +} +fn PoolNNeurons(pi: u32) -> i32 { + return PoolsInt[IndexI323D(PoolsInt[0], PoolsInt[1], PoolsInt[2], u32(pi),u32(0),u32(PoolNeurEd))] - PoolsInt[IndexI323D(PoolsInt[0], PoolsInt[1], PoolsInt[ + 2], u32(pi),u32(0),u32(PoolNeurSt))]; +} +fn PoolAvgMaxUpdateVar(vr: AvgMaxVars, pi: u32,di: u32, val: f32) { + var n = f32(PoolNNeurons(pi)); + var floatToInt = f32(u32(1) << 20); + var floatToSum = floatToInt / n; + var vis = AvgMaxIntVarIndex(vr, Avg); + var vim = AvgMaxIntVarIndex(vr, Max); + atomicAdd(&PoolsInt[IndexI323D(PoolsInt[0], PoolsInt[1], PoolsInt[2], u32(pi),u32(di),u32(vis))], i32(val*floatToSum)); + atomicMax(&PoolsInt[IndexI323D(PoolsInt[0], PoolsInt[1], PoolsInt[2], u32(pi),u32(di),u32(vim))], i32(val*floatToInt)); +} +fn PoolAvgMaxCalcVar(vr: AvgMaxVars, pi: u32,di: u32) { + var floatFromInt = f32(1.0) / f32(u32(1)<<20); + var vis = AvgMaxIntVarIndex(vr, Avg); + var sum = PoolsInt[IndexI323D(PoolsInt[0], PoolsInt[1], PoolsInt[2], u32(pi),u32(di),u32(vis))]; + if (sum < 0) { + sum = i32(u32(1) << 20); + } + Pools[IndexF323D(Pools[0], Pools[1], Pools[2], u32(pi),u32(di),u32(AvgMaxVarIndex(vr, AMCycle, Avg)))] = f32(sum) * floatFromInt; + PoolsInt[IndexI323D(PoolsInt[0], PoolsInt[1], PoolsInt[2], u32(pi),u32(di),u32(vis))] = 0; + var vim = AvgMaxIntVarIndex(vr, Max); + var mx = PoolsInt[IndexI323D(PoolsInt[0], PoolsInt[1], PoolsInt[2], u32(pi),u32(di),u32(vim))]; + if (mx < 0) { + mx = i32(u32(1) << 20); + } + PoolsInt[IndexI323D(PoolsInt[0], PoolsInt[1], PoolsInt[2], u32(pi),u32(di),u32(vim))] = 0; + Pools[IndexF323D(Pools[0], Pools[1], Pools[2], u32(pi),u32(di),u32(AvgMaxVarIndex(vr, AMCycle, Max)))] = f32(mx) * floatFromInt; +} +fn PoolAvgDifInit(pi: u32,di: u32) { + PoolsInt[IndexI323D(PoolsInt[0], PoolsInt[1], PoolsInt[2], u32(pi),u32(di),u32(AvgMaxIntVarIndex(AMAvgDif, Avg)))] = 0; + PoolsInt[IndexI323D(PoolsInt[0], PoolsInt[1], PoolsInt[2], u32(pi),u32(di),u32(AvgMaxIntVarIndex(AMAvgDif, Max)))] = 0; +} +fn PoolAvgDifUpdate(pi: u32,di: u32, avdif: f32) { + PoolAvgMaxUpdateVar(AMAvgDif, pi, di, avdif); +} +fn PoolAvgDifCalc(pi: u32,di: u32) { + PoolAvgMaxCalcVar(AMAvgDif, pi, di); +} + +///////////// import: "rand.go" +alias RandFunIndex = u32; +const RandFunActPGe: RandFunIndex = 0; +const RandFunActPGi: RandFunIndex = 1; +const RandFunActSMaintP: RandFunIndex = 2; +const RandFunIndexN: RandFunIndex = 3; + +///////////// import: "rl-layer.go" +struct RWPredParams { + 
PredRange: F32, +} +struct RWDaParams { + TonicGe: f32, + RWPredLayIndex: i32, + pad: u32, + pad1: u32, +} +struct TDIntegParams { + Discount: f32, + PredGain: f32, + TDPredLayIndex: i32, + pad: u32, +} +struct TDDaParams { + TonicGe: f32, + TDIntegLayIndex: i32, + pad: u32, + pad1: u32, +} + +///////////// import: "rl-path.go" +struct RLPredPathParams { + OppSignLRate: f32, + DaTol: f32, + pad: f32, + pad1: f32, +} + +///////////// import: "rubicon-layer.go" +struct LDTParams { + SrcThr: f32, + Rew: i32, + MaintInhib: f32, + SrcLay1Index: i32, + SrcLay2Index: i32, + SrcLay3Index: i32, + SrcLay4Index: i32, + pad: f32, +} +struct VTAParams { + CeMGain: f32, + LHbGain: f32, + AChThr: f32, + pad: f32, +} + +///////////// import: "rubicon-path.go" +struct BLAPathParams { + NegDeltaLRate: f32, + AChThr: f32, + USTrace: f32, + pad: f32, +} + +///////////// import: "rubicon.go" + +///////////// import: "stats.go" + +///////////// import: "synapse.go" +alias SynapseVars = i32; //enums:enum +const Wt: SynapseVars = 0; +const LWt: SynapseVars = 1; +const SWt: SynapseVars = 2; +const DWt: SynapseVars = 3; +const DSWt: SynapseVars = 4; +alias SynapseTraceVars = i32; //enums:enum +const Tr: SynapseTraceVars = 0; +const DTr: SynapseTraceVars = 1; +const DiDWt: SynapseTraceVars = 2; +alias SynapseIndexVars = i32; //enums:enum +const SynRecvIndex: SynapseIndexVars = 0; +const SynSendIndex: SynapseIndexVars = 1; +const SynPathIndex: SynapseIndexVars = 2; + +///////////// import: "slrand.wgsl" +fn Philox2x32round(counter: su64, key: u32) -> su64 { + let mul = Uint32Mul64(u32(0xD256D193), counter.x); + var ctr: su64; + ctr.x = mul.y ^ key ^ counter.y; + ctr.y = mul.x; + return ctr; +} +fn Philox2x32bumpkey(key: u32) -> u32 { + return key + u32(0x9E3779B9); +} +fn Philox2x32(counter: su64, key: u32) -> vec2 { + var ctr = Philox2x32round(counter, key); // 1 + var ky = Philox2x32bumpkey(key); + ctr = Philox2x32round(ctr, ky); // 2 + ky = Philox2x32bumpkey(ky); + ctr = Philox2x32round(ctr, ky); // 3 + ky = Philox2x32bumpkey(ky); + ctr = Philox2x32round(ctr, ky); // 4 + ky = Philox2x32bumpkey(ky); + ctr = Philox2x32round(ctr, ky); // 5 + ky = Philox2x32bumpkey(ky); + ctr = Philox2x32round(ctr, ky); // 6 + ky = Philox2x32bumpkey(ky); + ctr = Philox2x32round(ctr, ky); // 7 + ky = Philox2x32bumpkey(ky); + ctr = Philox2x32round(ctr, ky); // 8 + ky = Philox2x32bumpkey(ky); + ctr = Philox2x32round(ctr, ky); // 9 + ky = Philox2x32bumpkey(ky); + return Philox2x32round(ctr, ky); // 10 +} +fn RandUint32Vec2(counter: su64, funcIndex: u32, key: u32) -> vec2 { + return Philox2x32(Uint64Add32(counter, funcIndex), key); +} +fn RandUint32(counter: su64, funcIndex: u32, key: u32) -> u32 { + return Philox2x32(Uint64Add32(counter, funcIndex), key).x; +} +fn RandFloat32Vec2(counter: su64, funcIndex: u32, key: u32) -> vec2 { + return Uint32ToFloat32Vec2(RandUint32Vec2(counter, funcIndex, key)); +} +fn RandFloat32(counter: su64, funcIndex: u32, key: u32) -> f32 { + return Uint32ToFloat32(RandUint32(counter, funcIndex, key)); +} +fn RandFloat32Range11Vec2(counter: su64, funcIndex: u32, key: u32) -> vec2 { + return Uint32ToFloat32Vec2(RandUint32Vec2(counter, funcIndex, key)); +} +fn RandFloat32Range11(counter: su64, funcIndex: u32, key: u32) -> f32 { + return Uint32ToFloat32Range11(RandUint32(counter, funcIndex, key)); +} +fn RandBoolP(counter: su64, funcIndex: u32, key: u32, p: f32) -> bool { + return (RandFloat32(counter, funcIndex, key) < p); +} +fn sincospi(x: f32) -> vec2 { + let PIf = 3.1415926535897932; + var r: vec2; + r.x = 
cos(PIf*x); + r.y = sin(PIf*x); + return r; +} +fn RandFloat32NormVec2(counter: su64, funcIndex: u32, key: u32) -> vec2 { + let ur = RandUint32Vec2(counter, funcIndex, key); + var f = sincospi(Uint32ToFloat32Range11(ur.x)); + let r = sqrt(-2.0 * log(Uint32ToFloat32(ur.y))); // guaranteed to avoid 0. + return f * r; +} +fn RandFloat32Norm(counter: su64, funcIndex: u32, key: u32) -> f32 { + return RandFloat32Vec2(counter, funcIndex, key).x; +} +fn RandUint32N(counter: su64, funcIndex: u32, key: u32, n: u32) -> u32 { + let v = RandFloat32(counter, funcIndex, key); + return u32(v * f32(n)); +} +struct RandCounter { + Counter: su64, + HiSeed: u32, + pad: u32, +} +fn RandCounter_Reset(ct: ptr) { + (*ct).Counter.x = u32(0); + (*ct).Counter.y = (*ct).HiSeed; +} +fn RandCounter_Seed(ct: ptr, seed: u32) { + (*ct).HiSeed = seed; + RandCounter_Reset(ct); +} +fn RandCounter_Add(ct: ptr, inc: u32) { + (*ct).Counter = Uint64Add32((*ct).Counter, inc); +} + +///////////// import: "sltype.wgsl" +alias su64 = vec2; +fn Uint32Mul64(a: u32, b: u32) -> su64 { + let LOMASK = (((u32(1))<<16)-1); + var r: su64; + r.x = a * b; /* full low multiply */ + let ahi = a >> 16; + let alo = a & LOMASK; + let bhi = b >> 16; + let blo = b & LOMASK; + let ahbl = ahi * blo; + let albh = alo * bhi; + let ahbl_albh = ((ahbl&LOMASK) + (albh&LOMASK)); + var hit = ahi*bhi + (ahbl>>16) + (albh>>16); + hit += ahbl_albh >> 16; /* carry from the sum of lo(ahbl) + lo(albh) ) */ + /* carry from the sum with alo*blo */ + if ((r.x >> u32(16)) < (ahbl_albh&LOMASK)) { + hit += u32(1); + } + r.y = hit; + return r; +} +/* +fn Uint32Mul64(a: u32, b: u32) -> su64 { + return su64(a) * su64(b); +} +*/ +fn Uint64Add32(a: su64, b: u32) -> su64 { + if (b == 0) { + return a; + } + var s = a; + if (s.x > u32(0xffffffff) - b) { + s.y++; + s.x = (b - 1) - (u32(0xffffffff) - s.x); + } else { + s.x += b; + } + return s; +} +fn Uint64Incr(a: su64) -> su64 { + var s = a; + if(s.x == 0xffffffff) { + s.y++; + s.x = u32(0); + } else { + s.x++; + } + return s; +} +fn Uint32ToFloat32(val: u32) -> f32 { + let factor = f32(1.0) / (f32(u32(0xffffffff)) + f32(1.0)); + let halffactor = f32(0.5) * factor; + var f = f32(val) * factor + halffactor; + if (f == 1.0) { // exclude 1 + return bitcast(0x3F7FFFFF); + } + return f; +} +fn Uint32ToFloat32Vec2(val: vec2) -> vec2 { + var r: vec2; + r.x = Uint32ToFloat32(val.x); + r.y = Uint32ToFloat32(val.y); + return r; +} +fn Uint32ToFloat32Range11(val: u32) -> f32 { + let factor = f32(1.0) / (f32(i32(0x7fffffff)) + f32(1.0)); + let halffactor = f32(0.5) * factor; + return (f32(val) * factor + halffactor); +} +fn Uint32ToFloat32Range11Vec2(val: vec2) -> vec2 { + var r: vec2; + r.x = Uint32ToFloat32Range11(val.x); + r.y = Uint32ToFloat32Range11(val.y); + return r; +} \ No newline at end of file diff --git a/axon/shaders/SlowAdaptNeuron.wgsl b/axon/shaders/SlowAdaptNeuron.wgsl new file mode 100644 index 00000000..95804d02 --- /dev/null +++ b/axon/shaders/SlowAdaptNeuron.wgsl @@ -0,0 +1,1533 @@ +// Code generated by "gosl"; DO NOT EDIT +// kernel: SlowAdaptNeuron + +// // Layers are all the layer parameters. +@group(0) @binding(0) +var Layers: array; +@group(0) @binding(1) +var Paths: array; +// // NetworkIxs have indexes and sizes for entire network (one only). 
+@group(1) @binding(0) +var NetworkIxs: array; +@group(1) @binding(1) +var NeuronIxs: array; +@group(1) @binding(2) +var SynapseIxs: array; +@group(1) @binding(3) +var PathSendCon: array; +@group(1) @binding(4) +var RecvPathIxs: array; +@group(1) @binding(5) +var PathRecvCon: array; +@group(1) @binding(6) +var RecvSynIxs: array; +// // Ctx is the current context state (one only). +@group(2) @binding(0) +var Ctx: array; +@group(2) @binding(1) +var Neurons: array; +@group(2) @binding(2) +var NeuronAvgs: array; +@group(2) @binding(3) +var LayerStates: array; +@group(2) @binding(4) +var GlobalScalars: array; +@group(2) @binding(5) +var GlobalVectors: array; +@group(2) @binding(6) +var Exts: array; +// // Pools are the [PoolVars] float32 state values for layer and sub-pool inhibition, // Including the float32 AvgMax values by Phase and variable: use [AvgMaxVarIndex]. // [Layer * Pools][PoolVars+AvgMax][Data] +@group(3) @binding(0) +var Pools: array; +@group(3) @binding(1) +var PoolsInt: array; +@group(3) @binding(2) +var PathGBuf: array; +@group(3) @binding(3) +var PathGSyns: array; +@group(3) @binding(4) +var Synapses: array; +@group(3) @binding(5) +var SynapseTraces: array; + +alias GPUVars = i32; + +@compute @workgroup_size(64, 1, 1) +fn main(@builtin(global_invocation_id) idx: vec3) { + SlowAdaptNeuron(idx.x); +} + +fn IndexU322D(s0: u32, s1: u32, i0: u32, i1: u32) -> u32 { + return u32(2) + s0 * i0 + s1 * i1; +} + +fn IndexU321D(s0: u32, i0: u32) -> u32 { + return u32(1) + s0 * i0; +} + +fn IndexF323D(s0: f32, s1: f32, s2: f32, i0: u32, i1: u32, i2: u32) -> u32 { + return u32(3) + bitcast(s0) * i0 + bitcast(s1) * i1 + bitcast(s2) * i2; +} + +fn IndexF322D(s0: f32, s1: f32, i0: u32, i1: u32) -> u32 { + return u32(2) + bitcast(s0) * i0 + bitcast(s1) * i1; +} + +fn IndexI323D(s0: i32, s1: i32, s2: i32, i0: u32, i1: u32, i2: u32) -> u32 { + return u32(3) + u32(s0) * i0 + u32(s1) * i1 + u32(s2) * i2; +} + + +///////////// import: "vars.go" + +///////////// import: "act-layer.go" +fn LayerParams_IsTarget(ly: ptr) -> bool { + switch ((*ly).Type) { + case TargetLayer: { + return true; + } + case PulvinarLayer: { + return true; + } + default: { + return false; + } + } +} +fn LayerParams_IsLearnTrgAvg(ly: ptr) -> bool { + if ((*ly).Acts.Clamp.IsInput == 1 || (*ly).Acts.Clamp.IsTarget == 1 || (*ly).Learn.TrgAvgAct.RescaleOn == 0) { + return false; + }return true; +} + +///////////// import: "act-net.go" + +///////////// import: "act-path.go" +alias PathGTypes = i32; //enums:enum +const ExcitatoryG: PathGTypes = 0; +const InhibitoryG: PathGTypes = 1; +const ModulatoryG: PathGTypes = 2; +const MaintG: PathGTypes = 3; +const ContextG: PathGTypes = 4; +struct SynComParams { + GType: PathGTypes, + Delay: u32, + MaxDelay: u32, + DelLen: u32, +} +struct PathScaleParams { + Rel: f32, + Abs: f32, + pad: f32, + pad1: f32, +} + +///////////// import: "act.go" +struct SpikeParams { + Thr: f32, + VmR: f32, + Tr: i32, + RTau: f32, + Exp: i32, + ExpSlope: f32, + ExpThr: f32, + MaxHz: f32, + ISITau: f32, + ISIDt: f32, + RDt: f32, + pad: i32, +} +struct DendParams { + GbarExp: f32, + GbarR: f32, + SSGi: f32, + HasMod: i32, + ModGain: f32, + ModACh: i32, + ModBase: f32, + pad: i32, +} +struct ActInitParams { + Vm: f32, + Act: f32, + GeBase: f32, + GiBase: f32, + GeVar: f32, + GiVar: f32, + pad: i32, + pad1: i32, +} +struct DecayParams { + Act: f32, + Glong: f32, + AHP: f32, + LearnCa: f32, + OnRew: i32, + pad: f32, + pad1: f32, + pad2: f32, +} +struct DtParams { + Integ: f32, + VmTau: f32, + VmDendTau: f32, + VmSteps: 
i32, + GeTau: f32, + GiTau: f32, + IntTau: f32, + LongAvgTau: f32, + MaxCycStart: i32, + VmDt: f32, + VmDendDt: f32, + DtStep: f32, + GeDt: f32, + GiDt: f32, + IntDt: f32, + LongAvgDt: f32, +} +struct SpikeNoiseParams { + On: i32, + GeHz: f32, + Ge: f32, + GiHz: f32, + Gi: f32, + MaintGe: i32, + GeExpInt: f32, + GiExpInt: f32, +} +struct ClampParams { + IsInput: i32, + IsTarget: i32, + Ge: f32, + Add: i32, + ErrThr: f32, + pad: f32, + pad1: f32, + pad2: f32, +} +struct SMaintParams { + On: i32, + NNeurons: f32, + Gbar: f32, + Inhib: f32, + ISI: F32, +} +struct PopCodeParams { + On: i32, + Ge: f32, + Min: f32, + Max: f32, + MinAct: f32, + MinSigma: f32, + MaxSigma: f32, + Clip: i32, +} +struct ActParams { + Spikes: SpikeParams, + Dend: DendParams, + Init: ActInitParams, + Decay: DecayParams, + Dt: DtParams, + Gbar: Chans, + Erev: Chans, + Clamp: ClampParams, + Noise: SpikeNoiseParams, + VmRange: F32, + Mahp: MahpParams, + Sahp: SahpParams, + KNa: KNaMedSlow, + Kir: KirParams, + NMDA: NMDAParams, + MaintNMDA: NMDAParams, + GabaB: GABABParams, + VGCC: VGCCParams, + AK: AKsParams, + SKCa: SKCaParams, + SMaint: SMaintParams, + PopCode: PopCodeParams, +} + +///////////// import: "chans-ak.go" +struct AKsParams { + Gbar: f32, + Hf: f32, + Mf: f32, + Voff: f32, + Vmax: f32, + pad: i32, + pad1: i32, + pad2: i32, +} + +///////////// import: "chans-chans.go" +struct Chans { + E: f32, + L: f32, + I: f32, + K: f32, +} + +///////////// import: "chans-gabab.go" +struct GABABParams { + Gbar: f32, + RiseTau: f32, + DecayTau: f32, + Gbase: f32, + GiSpike: f32, + MaxTime: f32, + TauFact: f32, + RiseDt: f32, + DecayDt: f32, + pad: f32, + pad1: f32, + pad2: f32, +} + +///////////// import: "chans-kir.go" +struct KirParams { + Gbar: f32, + MinfOff: f32, + MinfTau: f32, + RiseOff: f32, + RiseTau: f32, + DecayOff: f32, + DecayTau: f32, + Mrest: f32, +} + +///////////// import: "chans-kna.go" +struct KNaParams { + On: i32, + Rise: f32, + Max: f32, + Tau: f32, + Dt: f32, + pad: i32, + pad1: i32, + pad2: i32, +} +struct KNaMedSlow { + On: i32, + TrialSlow: i32, + pad: i32, + pad1: i32, + Med: KNaParams, + Slow: KNaParams, +} + +///////////// import: "chans-mahp.go" +struct MahpParams { + Gbar: f32, + Voff: f32, + Vslope: f32, + TauMax: f32, + Tadj: f32, + DtMax: f32, + pad: i32, + pad2: i32, +} + +///////////// import: "chans-nmda.go" +struct NMDAParams { + Gbar: f32, + Tau: f32, + ITau: f32, + MgC: f32, + Voff: f32, + Dt: f32, + IDt: f32, + MgFact: f32, +} + +///////////// import: "chans-sahp.go" +struct SahpParams { + Gbar: f32, + CaTau: f32, + Off: f32, + Slope: f32, + TauMax: f32, + CaDt: f32, + DtMax: f32, + pad: i32, +} + +///////////// import: "chans-skca.go" +struct SKCaParams { + Gbar: f32, + C50: f32, + ActTau: f32, + DeTau: f32, + KCaR: f32, + CaRDecayTau: f32, + CaInThr: f32, + CaInTau: f32, + ActDt: f32, + DeDt: f32, + CaRDecayDt: f32, + CaInDt: f32, +} + +///////////// import: "chans-vgcc.go" +struct VGCCParams { + Gbar: f32, + Ca: f32, + pad: i32, + pad1: i32, +} + +///////////// import: "context.go" +struct Context { + NData: u32, + Mode: i32, + Testing: i32, + Phase: i32, + PlusPhase: i32, + PhaseCycle: i32, + Cycle: i32, + ThetaCycles: i32, + CyclesTotal: i32, + Time: f32, + TrialsTotal: i32, + TimePerCycle: f32, + SlowInterval: i32, + SlowCounter: i32, + pad: i32, + pad1: i32, + RandCounter: RandCounter, +} + +///////////// import: "deep-layer.go" +struct BurstParams { + ThrRel: f32, + ThrAbs: f32, + pad: f32, + pad1: f32, +} +struct CTParams { + GeGain: f32, + DecayTau: f32, + OFCposPT: i32, + 
DecayDt: f32, +} +struct PulvParams { + DriveScale: f32, + FullDriveAct: f32, + DriveLayIndex: i32, + pad: f32, +} + +///////////// import: "deep-path.go" + +///////////// import: "enumgen.go" +const PathGTypesN: PathGTypes = 5; +const GlobalScalarVarsN: GlobalScalarVars = 57; +const GlobalVectorVarsN: GlobalVectorVars = 10; +const GPUVarsN: GPUVars = 22; +const LayerTypesN: LayerTypes = 30; +const LayerVarsN: LayerVars = 11; +const ViewTimesN: ViewTimes = 7; +const DAModTypesN: DAModTypes = 4; +const ValenceTypesN: ValenceTypes = 3; +const NeuronFlagsN: NeuronFlags = 9; +const NeuronVarsN: NeuronVars = 90; +const NeuronAvgVarsN: NeuronAvgVars = 7; +const NeuronIndexVarsN: NeuronIndexVars = 3; +const PathTypesN: PathTypes = 12; +const GPLayerTypesN: GPLayerTypes = 3; +const PoolIntVarsN: PoolIntVars = 10; +const AvgMaxN: AvgMax = 2; +const AvgMaxPhasesN: AvgMaxPhases = 4; +const AvgMaxVarsN: AvgMaxVars = 7; +const SynapseVarsN: SynapseVars = 5; +const SynapseTraceVarsN: SynapseTraceVars = 3; +const SynapseIndexVarsN: SynapseIndexVars = 3; + +///////////// import: "fsfffb-enumgen.go" +const InhibVarsN: InhibVars = 16; + +///////////// import: "fsfffb-fsfffb.go" +struct GiParams { + On: i32, + Gi: f32, + FB: f32, + FSTau: f32, + SS: f32, + SSfTau: f32, + SSiTau: f32, + FS0: f32, + FFAvgTau: f32, + FFPrv: f32, + ClampExtMin: f32, + FSDt: f32, + SSfDt: f32, + SSiDt: f32, + FFAvgDt: f32, + pad: f32, +} + +///////////// import: "fsfffb-inhib.go" +alias InhibVars = i32; //enums:enum +const FFsRaw: InhibVars = 0; +const FBsRaw: InhibVars = 1; +const GeExtRaw: InhibVars = 2; +const FFs: InhibVars = 3; +const FBs: InhibVars = 4; +const GeExts: InhibVars = 5; +const FSi: InhibVars = 6; +const SSi: InhibVars = 7; +const SSf: InhibVars = 8; +const FSGi: InhibVars = 9; +const SSGi: InhibVars = 10; +const TotalGi: InhibVars = 11; +const GiOrig: InhibVars = 12; +const LayGi: InhibVars = 13; +const FFAvg: InhibVars = 14; +const FFAvgPrv: InhibVars = 15; + +///////////// import: "globals.go" +alias GlobalScalarVars = i32; //enums:enum +const GvRew: GlobalScalarVars = 0; +const GvHasRew: GlobalScalarVars = 1; +const GvRewPred: GlobalScalarVars = 2; +const GvPrevPred: GlobalScalarVars = 3; +const GvHadRew: GlobalScalarVars = 4; +const GvDA: GlobalScalarVars = 5; +const GvDAtonic: GlobalScalarVars = 6; +const GvACh: GlobalScalarVars = 7; +const GvNE: GlobalScalarVars = 8; +const GvSer: GlobalScalarVars = 9; +const GvAChRaw: GlobalScalarVars = 10; +const GvGoalMaint: GlobalScalarVars = 11; +const GvVSMatrixJustGated: GlobalScalarVars = 12; +const GvVSMatrixHasGated: GlobalScalarVars = 13; +const GvCuriosityPoolGated: GlobalScalarVars = 14; +const GvTime: GlobalScalarVars = 15; +const GvEffort: GlobalScalarVars = 16; +const GvUrgencyRaw: GlobalScalarVars = 17; +const GvUrgency: GlobalScalarVars = 18; +const GvHasPosUS: GlobalScalarVars = 19; +const GvHadPosUS: GlobalScalarVars = 20; +const GvNegUSOutcome: GlobalScalarVars = 21; +const GvHadNegUSOutcome: GlobalScalarVars = 22; +const GvPVposSum: GlobalScalarVars = 23; +const GvPVpos: GlobalScalarVars = 24; +const GvPVnegSum: GlobalScalarVars = 25; +const GvPVneg: GlobalScalarVars = 26; +const GvPVposEst: GlobalScalarVars = 27; +const GvPVposVar: GlobalScalarVars = 28; +const GvPVnegEst: GlobalScalarVars = 29; +const GvPVnegVar: GlobalScalarVars = 30; +const GvGoalDistEst: GlobalScalarVars = 31; +const GvGoalDistPrev: GlobalScalarVars = 32; +const GvProgressRate: GlobalScalarVars = 33; +const GvGiveUpUtility: GlobalScalarVars = 34; +const GvContUtility: 
GlobalScalarVars = 35; +const GvGiveUpTiming: GlobalScalarVars = 36; +const GvContTiming: GlobalScalarVars = 37; +const GvGiveUpProgress: GlobalScalarVars = 38; +const GvContProgress: GlobalScalarVars = 39; +const GvGiveUpSum: GlobalScalarVars = 40; +const GvContSum: GlobalScalarVars = 41; +const GvGiveUpProb: GlobalScalarVars = 42; +const GvGiveUp: GlobalScalarVars = 43; +const GvGaveUp: GlobalScalarVars = 44; +const GvVSPatchPos: GlobalScalarVars = 45; +const GvVSPatchPosThr: GlobalScalarVars = 46; +const GvVSPatchPosRPE: GlobalScalarVars = 47; +const GvVSPatchPosSum: GlobalScalarVars = 48; +const GvVSPatchPosPrev: GlobalScalarVars = 49; +const GvVSPatchPosVar: GlobalScalarVars = 50; +const GvLHbDip: GlobalScalarVars = 51; +const GvLHbBurst: GlobalScalarVars = 52; +const GvLHbPVDA: GlobalScalarVars = 53; +const GvCeMpos: GlobalScalarVars = 54; +const GvCeMneg: GlobalScalarVars = 55; +const GvVtaDA: GlobalScalarVars = 56; +const MaxGlobalVecN = 16; +alias GlobalVectorVars = i32; //enums:enum +const GvCost: GlobalVectorVars = 0; +const GvCostRaw: GlobalVectorVars = 1; +const GvUSneg: GlobalVectorVars = 2; +const GvUSnegRaw: GlobalVectorVars = 3; +const GvDrives: GlobalVectorVars = 4; +const GvUSpos: GlobalVectorVars = 5; +const GvVSPatchD1: GlobalVectorVars = 6; +const GvVSPatchD2: GlobalVectorVars = 7; +const GvOFCposPTMaint: GlobalVectorVars = 8; +const GvVSMatrixPoolGated: GlobalVectorVars = 9; + +///////////// import: "hip_paths.go" +struct HipPathParams { + Hebb: f32, + Err: f32, + SAvgCor: f32, + SAvgThr: f32, + SNominal: f32, + pad: f32, + pad1: f32, + pad2: f32, +} + +///////////// import: "inhib.go" +struct ActAvgParams { + Nominal: f32, + AdaptGi: i32, + Offset: f32, + HiTol: f32, + LoTol: f32, + AdaptRate: f32, + pad: f32, + pad1: f32, +} +struct InhibParams { + ActAvg: ActAvgParams, + Layer: GiParams, + Pool: GiParams, +} + +///////////// import: "init-layer.go" + +///////////// import: "kinase-params.go" +struct CaDtParams { //types:add + MTau: f32, + PTau: f32, + DTau: f32, + MDt: f32, + PDt: f32, + DDt: f32, + pad: i32, + pad1: i32, +} +struct NeurCaParams { + SpikeG: f32, + SynTau: f32, + SynDt: f32, + pad: i32, + Dt: CaDtParams, +} +struct SynCaParams { //types:add + CaScale: f32, + pad: i32, + pad1: i32, + pad2: i32, + Dt: CaDtParams, +} +struct BinWeights { //types:add + Bin0: f32, + Bin1: f32, + Bin2: f32, + Bin3: f32, + Bin4: f32, + Bin5: f32, + Bin6: f32, + Bin7: f32, +} +struct SynCaLinear { //types:add + CaP: BinWeights, + CaD: BinWeights, + CaGain: f32, + pad: f32, + pad1: f32, + pad2: f32, +} + +///////////// import: "layerparams.go" +struct LayerIndexes { + NPools: u32, + NeurSt: u32, + NNeurons: u32, + RecvSt: u32, + RecvN: u32, + SendSt: u32, + SendN: u32, + ExtsSt: u32, + ShpPlY: i32, + ShpPlX: i32, + ShpUnY: i32, + ShpUnX: i32, +} +struct LayerInhibIndexes { + Index1: i32, + Index2: i32, + Index3: i32, + Index4: i32, +} +struct LayerParams { + Type: LayerTypes, + Index: u32, + MaxData: u32, + PoolSt: u32, + Acts: ActParams, + Inhib: InhibParams, + LayInhib: LayerInhibIndexes, + Learn: LearnNeurParams, + Bursts: BurstParams, + CT: CTParams, + Pulv: PulvParams, + Matrix: MatrixParams, + GP: GPParams, + LDT: LDTParams, + VTA: VTAParams, + RWPred: RWPredParams, + RWDa: RWDaParams, + TDInteg: TDIntegParams, + TDDa: TDDaParams, + Indexes: LayerIndexes, +} + +///////////// import: "layertypes.go" +alias LayerTypes = i32; //enums:enum +const SuperLayer: LayerTypes = 0; +const InputLayer: LayerTypes = 1; +const TargetLayer: LayerTypes = 2; +const CompareLayer: 
LayerTypes = 3; +const CTLayer: LayerTypes = 4; +const PulvinarLayer: LayerTypes = 5; +const TRNLayer: LayerTypes = 6; +const PTMaintLayer: LayerTypes = 7; +const PTPredLayer: LayerTypes = 8; +const MatrixLayer: LayerTypes = 9; +const STNLayer: LayerTypes = 10; +const GPLayer: LayerTypes = 11; +const BGThalLayer: LayerTypes = 12; +const VSGatedLayer: LayerTypes = 13; +const BLALayer: LayerTypes = 14; +const CeMLayer: LayerTypes = 15; +const VSPatchLayer: LayerTypes = 16; +const LHbLayer: LayerTypes = 17; +const DrivesLayer: LayerTypes = 18; +const UrgencyLayer: LayerTypes = 19; +const USLayer: LayerTypes = 20; +const PVLayer: LayerTypes = 21; +const LDTLayer: LayerTypes = 22; +const VTALayer: LayerTypes = 23; +const RewLayer: LayerTypes = 24; +const RWPredLayer: LayerTypes = 25; +const RWDaLayer: LayerTypes = 26; +const TDPredLayer: LayerTypes = 27; +const TDIntegLayer: LayerTypes = 28; +const TDDaLayer: LayerTypes = 29; + +///////////// import: "layervars.go" +alias LayerVars = i32; //enums:enum +const LayerActMAvg: LayerVars = 0; +const LayerActPAvg: LayerVars = 1; +const LayerAvgMaxGeM: LayerVars = 2; +const LayerAvgMaxGiM: LayerVars = 3; +const LayerGiMult: LayerVars = 4; +const LayerPhaseDiff: LayerVars = 5; +const LayerPhaseDiffAvg: LayerVars = 6; +const LayerPhaseDiffVar: LayerVars = 7; +const LayerRT: LayerVars = 8; +const LayerRewPredPos: LayerVars = 9; +const LayerRewPredNeg: LayerVars = 10; + +///////////// import: "learn-layer.go" +fn LayerParams_SlowAdaptNeuron(ly: ptr, ctx: ptr, ri: u32) { + var lni = ri - (*ly).Indexes.NeurSt; + var rn = (*ly).Indexes.RecvN; + for (var pi = u32(0); pi < rn; pi++) { + var pti = RecvPathIxs[IndexU321D(RecvPathIxs[0], u32((*ly).Indexes.RecvSt + pi))]; + var paths=Paths[pti]; PathParams_SlowAdapt(&paths, ctx, ly, pti, ri, lni); + } +} + +///////////// import: "learn-net.go" +fn SlowAdaptNeuron(ni: u32) { //gosl:kernel + var ctx = Ctx[0]; + var li = NeuronIxs[IndexU322D(NeuronIxs[0], NeuronIxs[1], u32(ni),u32(NrnLayIndex))]; + var layers=Layers[li]; LayerParams_SlowAdaptNeuron(&layers, &ctx, ni); + Ctx[0] = ctx; +} + +///////////// import: "learn-path.go" +fn PathParams_SlowAdapt(pt: ptr, ctx: ptr, rlay: ptr, pti: u32,ri: u32,lni: u32) { + PathParams_SWtFromWt(pt, ctx, rlay, pti, ri, lni); + PathParams_SynScale(pt, ctx, rlay, pti, ri, lni); +} +fn PathParams_SWtFromWt(pt: ptr, ctx: ptr, rlay: ptr, pti: u32,ri: u32,lni: u32) { + if ((*pt).Learn.Learn == 0 || (*pt).SWts.Adapt.On == 0) { + return; + } + if (LayerParams_IsTarget(rlay)) { + return; + } + var mx = (*pt).SWts.Limit.Max; + var mn = (*pt).SWts.Limit.Min; + var lr = (*pt).SWts.Adapt.LRate; + var cni = (*pt).Indexes.RecvConSt + lni; + var synn = PathRecvCon[IndexU322D(PathRecvCon[0], PathRecvCon[1], u32(cni),u32(Nitems))]; + var synst = (*pt).Indexes.RecvSynSt + PathRecvCon[IndexU322D(PathRecvCon[0], PathRecvCon[1], u32(cni),u32(StartOff))]; + var avgDWt = f32(0); + for (var ci = u32(0); ci < synn; ci++) { + var syni = RecvSynIxs[IndexU321D(RecvSynIxs[0], u32(synst + ci))]; + var swt = Synapses[IndexF322D(Synapses[0], Synapses[ + 1], u32(syni),u32(SWt))]; + if (Synapses[IndexF322D(Synapses[0], Synapses[1], u32(syni),u32(DSWt))] >= 0) { + Synapses[IndexF322D(Synapses[0], Synapses[1], u32(syni),u32(DSWt))] *= (mx - swt); + } else { + Synapses[IndexF322D(Synapses[0], Synapses[1], u32(syni),u32(DSWt))] *= (swt - mn); + } + avgDWt += Synapses[IndexF322D(Synapses[0], Synapses[1], u32(syni),u32(DSWt))]; + } + avgDWt /= f32(synn); + avgDWt *= (*pt).SWts.Adapt.SubMean; + for (var ci = u32(0); ci < 
synn; ci++) { + var syni = RecvSynIxs[IndexU321D(RecvSynIxs[0], u32(synst + ci))]; + Synapses[IndexF322D(Synapses[0], Synapses[1], u32(syni),u32(SWt))] += lr * (Synapses[IndexF322D(Synapses[0], Synapses[1], u32(syni),u32(DSWt))] - avgDWt); + var swt = Synapses[IndexF322D(Synapses[0], Synapses[1], u32(syni),u32(SWt))]; + Synapses[IndexF322D(Synapses[0], Synapses[1], u32(syni),u32(DSWt))] = 0.0; + Synapses[IndexF322D(Synapses[0], Synapses[1], u32(syni),u32(LWt))] = SWtParams_LWtFromWts(&(*pt).SWts, Synapses[IndexF322D(Synapses[0], Synapses[1], u32(syni),u32(Wt))], swt); + Synapses[IndexF322D(Synapses[0], Synapses[1], u32(syni),u32(Wt))] = SWtParams_WtValue(&(*pt).SWts, swt, Synapses[IndexF322D(Synapses[0], Synapses[1], u32(syni),u32(LWt))]); + } +} +fn PathParams_SynScale(pt: ptr, ctx: ptr, rlay: ptr, pti: u32,ri: u32,lni: u32) { + if ((*pt).Learn.Learn == 0 || PathParams_IsInhib(pt)) { + return; + } + if (!LayerParams_IsLearnTrgAvg(rlay)) { + return; + } + var lr = (*rlay).Learn.TrgAvgAct.SynScaleRate; + var cni = (*pt).Indexes.RecvConSt + lni; + var synn = PathRecvCon[IndexU322D(PathRecvCon[0], PathRecvCon[1], u32(cni),u32(Nitems))]; + var synst = (*pt).Indexes.RecvSynSt + PathRecvCon[IndexU322D(PathRecvCon[0], PathRecvCon[1], u32(cni),u32(StartOff))]; + var adif = -lr * NeuronAvgs[IndexF322D(NeuronAvgs[0], NeuronAvgs[1], u32(ri),u32(AvgDif))]; + for (var ci = u32(0); ci < synn; ci++) { + var syni = RecvSynIxs[IndexU321D(RecvSynIxs[0], u32(synst + ci))]; + var lwt = Synapses[IndexF322D(Synapses[0], Synapses[1], u32(syni),u32(LWt))]; + var swt = Synapses[IndexF322D(Synapses[0], Synapses[1], u32(syni),u32(SWt))]; + if (adif >= 0) { // key to have soft bounding on lwt here! + Synapses[IndexF322D(Synapses[0], Synapses[1], u32(syni),u32(LWt))] += (1 - lwt) * adif * swt; + } else { + Synapses[IndexF322D(Synapses[0], Synapses[1], u32(syni),u32(LWt))] += lwt * adif * swt; + } + Synapses[IndexF322D(Synapses[0], Synapses[1], u32(syni),u32(Wt))] = SWtParams_WtValue(&(*pt).SWts, swt, Synapses[IndexF322D(Synapses[0], Synapses[1], u32(syni),u32(LWt))]); + } +} + +///////////// import: "learn.go" +struct CaLrnParams { + Norm: f32, + SpkVGCC: i32, + SpkVgccCa: f32, + VgccTau: f32, + Dt: CaDtParams, + UpdateThr: f32, + VgccDt: f32, + NormInv: f32, + pad: i32, +} +struct TrgAvgActParams { + GiBaseInit: f32, + RescaleOn: i32, + ErrLRate: f32, + SynScaleRate: f32, + SubMean: f32, + Permute: i32, + Pool: i32, + pad: i32, + TrgRange: F32, +} +struct RLRateParams { + On: i32, + SigmoidLinear: i32, + SigmoidMin: f32, + Diff: i32, + SpkThr: f32, + DiffThr: f32, + Min: f32, + pad: i32, +} +struct LearnNeurParams { + CaLearn: CaLrnParams, + CaSpk: NeurCaParams, + LrnNMDA: NMDAParams, + TrgAvgAct: TrgAvgActParams, + RLRate: RLRateParams, + NeuroMod: NeuroModParams, +} +fn SigFun(w: f32,gain: f32,off: f32) -> f32 { + if (w <= 0) { + return f32(0); + } + if (w >= 1) { + return f32(1); + }return (1 / (1 + pow((off*(1-w))/w, gain))); +} +fn SigFun61(w: f32) -> f32 { + if (w <= 0) { + return f32(0); + } + if (w >= 1) { + return f32(1); + } + var pw = (1 - w) / w;return (1 / (1 + pw*pw*pw*pw*pw*pw)); +} +fn SigInvFun(w: f32,gain: f32,off: f32) -> f32 { + if (w <= 0) { + return f32(0); + } + if (w >= 1) { + return f32(1); + }return 1.0 / (1.0 + pow((1.0-w)/w, 1/gain)/off); +} +fn SigInvFun61(w: f32) -> f32 { + if (w <= 0) { + return f32(0); + } + if (w >= 1) { + return f32(1); + } + var rval = 1.0 / (1.0 + pow((1.0-w)/w, 1.0/6.0));return rval; +} +struct SWtInitParams { + SPct: f32, + Mean: f32, + Var: f32, + Sym: i32, +} 
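Aside (not part of the generated diff): the SigFun61/SigInvFun61 pair just above is the gain-6 sigmoid contrast enhancement that SWtParams_SigFromLinWt and SWtParams_LinFromSigWt use to map between the linear weight LWt and the effective weight (the shader additionally scales the result by 2.0 to center at 1). A minimal standalone Go sketch of that mapping and its inverse, using hypothetical helper names, is shown below; it only illustrates the formulas already visible in the shader.

```go
package main

import (
	"fmt"
	"math"
)

// sigFun61 mirrors SigFun61 above: 1 / (1 + ((1-w)/w)^6), clamped at 0 and 1.
func sigFun61(w float64) float64 {
	if w <= 0 {
		return 0
	}
	if w >= 1 {
		return 1
	}
	pw := (1 - w) / w
	return 1 / (1 + math.Pow(pw, 6))
}

// sigInvFun61 mirrors SigInvFun61 above: 1 / (1 + ((1-w)/w)^(1/6)),
// which inverts sigFun61 exactly on (0, 1).
func sigInvFun61(w float64) float64 {
	if w <= 0 {
		return 0
	}
	if w >= 1 {
		return 1
	}
	return 1 / (1 + math.Pow((1-w)/w, 1.0/6.0))
}

func main() {
	// Round-trip check: sigInvFun61(sigFun61(w)) recovers w.
	for _, w := range []float64{0.1, 0.3, 0.5, 0.7, 0.9} {
		s := sigFun61(w)
		fmt.Printf("lin=%.2f  sig=%.4f  roundtrip=%.4f\n", w, s, sigInvFun61(s))
	}
}
```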
+struct SWtAdaptParams { + On: i32, + LRate: f32, + SubMean: f32, + SigGain: f32, +} +struct SWtParams { + Init: SWtInitParams, + Adapt: SWtAdaptParams, + Limit: F32, +} +fn SWtParams_WtValue(sp: ptr, swt: f32,lwt: f32) -> f32 { + return swt * SWtParams_SigFromLinWt(sp, lwt); +} +fn SWtParams_SigFromLinWt(sp: ptr, lw: f32) -> f32 { + var wt: f32; + if ((*sp).Adapt.SigGain == 1) { + wt = lw; + } else if ((*sp).Adapt.SigGain == 6) { + wt = SigFun61(lw); + } else { + wt = SigFun(lw, (*sp).Adapt.SigGain, f32(f32(1))); + }return 2.0 * wt; // center at 1 instead of .5 +} +fn SWtParams_LinFromSigWt(sp: ptr, wt: f32) -> f32 { + var wte = wt * 0.5; + if (wte < 0) { + wte = f32(0); + } else if (wte > 1) { + wte = f32(1); + } + if ((*sp).Adapt.SigGain == 1) { + return wte; + } + if ((*sp).Adapt.SigGain == 6) { + return SigInvFun61(wte); + }return SigInvFun(wte, (*sp).Adapt.SigGain, f32(f32(1))); +} +fn SWtParams_LWtFromWts(sp: ptr, wt: f32,swt: f32) -> f32 { + var rwt = wt / swt;return SWtParams_LinFromSigWt(sp, rwt); +} +struct LRateParams { + Base: f32, + Sched: f32, + Mod: f32, + Eff: f32, +} +struct TraceParams { + Tau: f32, + SubMean: f32, + LearnThr: f32, + Dt: f32, +} +struct LRateMod { + On: i32, + Base: f32, + pad: i32, + pad1: i32, + Range: F32, +} +struct HebbParams { + On: i32, + Up: f32, + Down: f32, + pad: f32, +} +struct LearnSynParams { + Learn: i32, + pad: i32, + pad1: i32, + pad2: i32, + LRate: LRateParams, + Trace: TraceParams, + KinaseCa: SynCaLinear, + Hebb: HebbParams, +} + +///////////// import: "looper.go" +alias ViewTimes = i32; //enums:enum +const Cycle: ViewTimes = 0; +const FastSpike: ViewTimes = 1; +const Gamma: ViewTimes = 2; +const Beta: ViewTimes = 3; +const Alpha: ViewTimes = 4; +const Phase: ViewTimes = 5; +const Theta: ViewTimes = 6; + +///////////// import: "math32-fastexp.go" + +///////////// import: "minmax-avgmax.go" +const MaxFloat32: f32 = 3.402823466e+38; +const MinFloat32: f32 = 1.175494351e-38; +struct AvgMax32 { + Avg: f32, + Max: f32, + Sum: f32, + MaxIndex: i32, + N: i32, + pad: i32, + pad1: i32, + pad2: i32, +} + +///////////// import: "minmax-minmax32.go" +struct F32 { + Min: f32, + Max: f32, + pad: i32, + pad1: i32, // for gpu use +} + +///////////// import: "network.go" +struct NetworkIndexes { + MaxData: u32, + MaxDelay: u32, + NLayers: u32, + NNeurons: u32, + NPools: u32, + NPaths: u32, + NSyns: u32, + RubiconNPosUSs: u32, + RubiconNCosts: u32, + RubiconNNegUSs: u32, + GPUMaxBuffFloats: u32, + GPUSynCaBanks: u32, +} + +///////////// import: "neuromod.go" +alias DAModTypes = i32; //enums:enum +const NoDAMod: DAModTypes = 0; +const D1Mod: DAModTypes = 1; +const D2Mod: DAModTypes = 2; +const D1AbsMod: DAModTypes = 3; +alias ValenceTypes = i32; //enums:enum +const Positive: ValenceTypes = 0; +const Negative: ValenceTypes = 1; +const Cost: ValenceTypes = 2; +struct NeuroModParams { + DAMod: DAModTypes, + Valence: ValenceTypes, + DAModGain: f32, + DALRateSign: i32, + DALRateMod: f32, + AChLRateMod: f32, + AChDisInhib: f32, + BurstGain: f32, + DipGain: f32, + pad: f32, + pad1: f32, + pad2: f32, +} + +///////////// import: "neuron.go" +alias NeuronFlags = i32; //enums:enum +const NeuronOff: NeuronFlags = 1; +const NeuronHasExt: NeuronFlags = 2; +const NeuronHasTarg: NeuronFlags = 4; +const NeuronHasCmpr: NeuronFlags = 8; +alias NeuronVars = i32; //enums:enum +const Spike: NeuronVars = 0; +const Spiked: NeuronVars = 1; +const Act: NeuronVars = 2; +const ActInt: NeuronVars = 3; +const Ge: NeuronVars = 4; +const Gi: NeuronVars = 5; +const Gk: NeuronVars = 6; 
+const Inet: NeuronVars = 7; +const Vm: NeuronVars = 8; +const VmDend: NeuronVars = 9; +const ISI: NeuronVars = 10; +const ISIAvg: NeuronVars = 11; +const Ext: NeuronVars = 12; +const Target: NeuronVars = 13; +const CaSpkM: NeuronVars = 14; +const CaSpkP: NeuronVars = 15; +const CaSpkD: NeuronVars = 16; +const CaSpkPM: NeuronVars = 17; +const CaLrn: NeuronVars = 18; +const NrnCaM: NeuronVars = 19; +const NrnCaP: NeuronVars = 20; +const NrnCaD: NeuronVars = 21; +const CaDiff: NeuronVars = 22; +const RLRate: NeuronVars = 23; +const GnmdaSyn: NeuronVars = 24; +const Gnmda: NeuronVars = 25; +const GnmdaLrn: NeuronVars = 26; +const GnmdaMaint: NeuronVars = 27; +const NmdaCa: NeuronVars = 28; +const Gvgcc: NeuronVars = 29; +const VgccM: NeuronVars = 30; +const VgccH: NeuronVars = 31; +const VgccCa: NeuronVars = 32; +const VgccCaInt: NeuronVars = 33; +const Burst: NeuronVars = 34; +const BurstPrv: NeuronVars = 35; +const CtxtGe: NeuronVars = 36; +const CtxtGeRaw: NeuronVars = 37; +const CtxtGeOrig: NeuronVars = 38; +const GgabaB: NeuronVars = 39; +const GABAB: NeuronVars = 40; +const GABABx: NeuronVars = 41; +const Gak: NeuronVars = 42; +const SSGiDend: NeuronVars = 43; +const GknaMed: NeuronVars = 44; +const GknaSlow: NeuronVars = 45; +const Gkir: NeuronVars = 46; +const KirM: NeuronVars = 47; +const Gsk: NeuronVars = 48; +const SKCaIn: NeuronVars = 49; +const SKCaR: NeuronVars = 50; +const SKCaM: NeuronVars = 51; +const Gmahp: NeuronVars = 52; +const MahpN: NeuronVars = 53; +const Gsahp: NeuronVars = 54; +const SahpCa: NeuronVars = 55; +const SahpN: NeuronVars = 56; +const ActM: NeuronVars = 57; +const ActP: NeuronVars = 58; +const SpkSt1: NeuronVars = 59; +const SpkSt2: NeuronVars = 60; +const SpkMax: NeuronVars = 61; +const SpkMaxCa: NeuronVars = 62; +const SpkBin0: NeuronVars = 63; +const SpkBin1: NeuronVars = 64; +const SpkBin2: NeuronVars = 65; +const SpkBin3: NeuronVars = 66; +const SpkBin4: NeuronVars = 67; +const SpkBin5: NeuronVars = 68; +const SpkBin6: NeuronVars = 69; +const SpkBin7: NeuronVars = 70; +const SpkPrv: NeuronVars = 71; +const GeNoise: NeuronVars = 72; +const GeNoiseP: NeuronVars = 73; +const GiNoise: NeuronVars = 74; +const GiNoiseP: NeuronVars = 75; +const GeExt: NeuronVars = 76; +const GeRaw: NeuronVars = 77; +const GeSyn: NeuronVars = 78; +const GiRaw: NeuronVars = 79; +const GiSyn: NeuronVars = 80; +const GeInt: NeuronVars = 81; +const GeIntNorm: NeuronVars = 82; +const GiInt: NeuronVars = 83; +const GModRaw: NeuronVars = 84; +const GModSyn: NeuronVars = 85; +const SMaintP: NeuronVars = 86; +const GMaintRaw: NeuronVars = 87; +const GMaintSyn: NeuronVars = 88; +const NeurFlags: NeuronVars = 89; +alias NeuronAvgVars = i32; //enums:enum +const ActAvg: NeuronAvgVars = 0; +const AvgPct: NeuronAvgVars = 1; +const TrgAvg: NeuronAvgVars = 2; +const DTrgAvg: NeuronAvgVars = 3; +const AvgDif: NeuronAvgVars = 4; +const GeBase: NeuronAvgVars = 5; +const GiBase: NeuronAvgVars = 6; +alias NeuronIndexVars = i32; //enums:enum +const NrnNeurIndex: NeuronIndexVars = 0; +const NrnLayIndex: NeuronIndexVars = 1; +const NrnSubPool: NeuronIndexVars = 2; + +///////////// import: "pathparams.go" +const StartOff: i32 = 0; +const Nitems: i32 = 1; +const StartNN: i32 = 2; +struct StartN { + Start: u32, + N: u32, + pad: u32, + pad1: u32, // todo: see if we can do without these? 
+} +struct PathIndexes { + RecvLayer: u32, + RecvNeurSt: u32, + RecvNeurN: u32, + SendLayer: u32, + SendNeurSt: u32, + SendNeurN: u32, + SynapseSt: u32, + SendConSt: u32, + RecvConSt: u32, + RecvSynSt: u32, + NPathNeurSt: u32, + pad: u32, +} +struct GScaleValues { + Scale: f32, + Rel: f32, + pad: f32, + pad1: f32, +} +struct PathParams { + Type: PathTypes, + Index: u32, + pad: i32, + pad1: i32, + Indexes: PathIndexes, + Com: SynComParams, + PathScale: PathScaleParams, + SWts: SWtParams, + Learn: LearnSynParams, + GScale: GScaleValues, + RLPred: RLPredPathParams, + Matrix: MatrixPathParams, + BLA: BLAPathParams, + Hip: HipPathParams, +} +fn PathParams_IsInhib(pt: ptr) -> bool { + return (*pt).Com.GType == InhibitoryG; +} + +///////////// import: "pathtypes.go" +alias PathTypes = i32; //enums:enum +const ForwardPath: PathTypes = 0; +const BackPath: PathTypes = 1; +const LateralPath: PathTypes = 2; +const InhibPath: PathTypes = 3; +const CTCtxtPath: PathTypes = 4; +const RWPath: PathTypes = 5; +const TDPredPath: PathTypes = 6; +const BLAPath: PathTypes = 7; +const HipPath: PathTypes = 8; +const VSPatchPath: PathTypes = 9; +const VSMatrixPath: PathTypes = 10; +const DSMatrixPath: PathTypes = 11; + +///////////// import: "pcore-layer.go" +struct MatrixParams { + GateThr: f32, + IsVS: i32, + OtherMatrixIndex: i32, + ThalLay1Index: i32, + ThalLay2Index: i32, + ThalLay3Index: i32, + ThalLay4Index: i32, + ThalLay5Index: i32, + ThalLay6Index: i32, + pad: i32, + pad1: i32, + pad2: i32, +} +alias GPLayerTypes = i32; //enums:enum +const GPePr: GPLayerTypes = 0; +const GPeAk: GPLayerTypes = 1; +const GPi: GPLayerTypes = 2; +struct GPParams { + GPType: GPLayerTypes, + pad: u32, + pad1: u32, + pad2: u32, +} + +///////////// import: "pcore-path.go" +struct MatrixPathParams { + Credit: f32, + BasePF: f32, + Delta: f32, + VSRewLearn: i32, +} + +///////////// import: "pool.go" +alias PoolIntVars = i32; //enums:enum +const PoolNeurSt: PoolIntVars = 0; +const PoolNeurEd: PoolIntVars = 1; +const PoolLayerIdx: PoolIntVars = 2; +const PoolIsLayer: PoolIntVars = 3; +const Clamped: PoolIntVars = 4; +const PoolGated: PoolIntVars = 5; +const FFsRawInt: PoolIntVars = 6; +const FBsRawInt: PoolIntVars = 7; +const GeExtRawInt: PoolIntVars = 8; +const PoolIntAvgMaxStart: PoolIntVars = 9; +alias AvgMax = i32; //enums:enum +const Avg: AvgMax = 0; +const Max: AvgMax = 1; +alias AvgMaxPhases = i32; //enums:enum -trim-prefix AM +const AMCycle: AvgMaxPhases = 0; +const AMMinus: AvgMaxPhases = 1; +const AMPlus: AvgMaxPhases = 2; +const AMPrev: AvgMaxPhases = 3; +alias AvgMaxVars = i32; //enums:enum -trim-prefix AM +const AMCaSpkP: AvgMaxVars = 0; +const AMCaSpkD: AvgMaxVars = 1; +const AMSpkMax: AvgMaxVars = 2; +const AMAct: AvgMaxVars = 3; +const AMGeInt: AvgMaxVars = 4; +const AMGiInt: AvgMaxVars = 5; +const AMAvgDif: AvgMaxVars = 6; +const poolFloatAvgMaxStart = InhibVarsN; +const PoolVarsN = poolFloatAvgMaxStart + InhibVars(i32(AvgMaxVarsN)*i32(AvgMaxN)*i32(AvgMaxPhasesN)); +const PoolIntVarsTot = PoolIntAvgMaxStart + PoolIntVars(i32(AvgMaxVarsN)*i32(AvgMaxN)); +const avgMaxToNeuron = array(CaSpkP, CaSpkD, SpkMax, Act, GeInt, GiInt); + +///////////// import: "rand.go" +alias RandFunIndex = u32; +const RandFunActPGe: RandFunIndex = 0; +const RandFunActPGi: RandFunIndex = 1; +const RandFunActSMaintP: RandFunIndex = 2; +const RandFunIndexN: RandFunIndex = 3; + +///////////// import: "rl-layer.go" +struct RWPredParams { + PredRange: F32, +} +struct RWDaParams { + TonicGe: f32, + RWPredLayIndex: i32, + pad: u32, + pad1: u32, +} 
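Aside (not part of the generated diff): the poolFloatAvgMaxStart / PoolVarsN / PoolIntVarsTot constants above encode the per-pool variable layout — 16 inhibition floats followed by an AvgMax block of 7 variables × 2 (Avg/Max) × 4 phases, and 9 bookkeeping ints followed by 7 × 2 running accumulators. The standalone Go sketch below (illustrative only, with assumed helper names) works out that arithmetic and the flat-index formula used by AvgMaxVarIndex in the SlowAdaptLayer kernel earlier in this diff.

```go
package main

import "fmt"

// Counts taken from the enumgen constants in the shader above.
const (
	inhibVarsN         = 16 // InhibVarsN: FFsRaw .. FFAvgPrv
	avgMaxVarsN        = 7  // AvgMaxVarsN: CaSpkP .. AvgDif
	avgMaxN            = 2  // Avg, Max
	avgMaxPhasesN      = 4  // Cycle, Minus, Plus, Prev
	poolIntAvgMaxStart = 9  // PoolIntAvgMaxStart
)

// Float pool vars per (pool, data) row: inhibition vars then the AvgMax block.
const poolVarsN = inhibVarsN + avgMaxVarsN*avgMaxN*avgMaxPhasesN // 72

// Int pool vars: bookkeeping ints then one running Avg and Max accumulator per var.
const poolIntVarsTot = poolIntAvgMaxStart + avgMaxVarsN*avgMaxN // 23

// avgMaxVarIndex mirrors AvgMaxVarIndex in the SlowAdaptLayer kernel:
// offset of a (variable, phase, Avg-or-Max) cell within one pool's float row.
func avgMaxVarIndex(vr, phase, am int) int {
	return inhibVarsN + vr*avgMaxN*avgMaxPhasesN + phase*avgMaxN + am
}

func main() {
	fmt.Println("PoolVarsN =", poolVarsN, "PoolIntVarsTot =", poolIntVarsTot)
	// Example: AvgDif (var 6), Cycle phase (0), Max (1) -> 65.
	fmt.Println("AvgMaxVarIndex(AvgDif, Cycle, Max) =", avgMaxVarIndex(6, 0, 1))
}
```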
+struct TDIntegParams { + Discount: f32, + PredGain: f32, + TDPredLayIndex: i32, + pad: u32, +} +struct TDDaParams { + TonicGe: f32, + TDIntegLayIndex: i32, + pad: u32, + pad1: u32, +} + +///////////// import: "rl-path.go" +struct RLPredPathParams { + OppSignLRate: f32, + DaTol: f32, + pad: f32, + pad1: f32, +} + +///////////// import: "rubicon-layer.go" +struct LDTParams { + SrcThr: f32, + Rew: i32, + MaintInhib: f32, + SrcLay1Index: i32, + SrcLay2Index: i32, + SrcLay3Index: i32, + SrcLay4Index: i32, + pad: f32, +} +struct VTAParams { + CeMGain: f32, + LHbGain: f32, + AChThr: f32, + pad: f32, +} + +///////////// import: "rubicon-path.go" +struct BLAPathParams { + NegDeltaLRate: f32, + AChThr: f32, + USTrace: f32, + pad: f32, +} + +///////////// import: "rubicon.go" + +///////////// import: "stats.go" + +///////////// import: "synapse.go" +alias SynapseVars = i32; //enums:enum +const Wt: SynapseVars = 0; +const LWt: SynapseVars = 1; +const SWt: SynapseVars = 2; +const DWt: SynapseVars = 3; +const DSWt: SynapseVars = 4; +alias SynapseTraceVars = i32; //enums:enum +const Tr: SynapseTraceVars = 0; +const DTr: SynapseTraceVars = 1; +const DiDWt: SynapseTraceVars = 2; +alias SynapseIndexVars = i32; //enums:enum +const SynRecvIndex: SynapseIndexVars = 0; +const SynSendIndex: SynapseIndexVars = 1; +const SynPathIndex: SynapseIndexVars = 2; + +///////////// import: "slrand.wgsl" +fn Philox2x32round(counter: su64, key: u32) -> su64 { + let mul = Uint32Mul64(u32(0xD256D193), counter.x); + var ctr: su64; + ctr.x = mul.y ^ key ^ counter.y; + ctr.y = mul.x; + return ctr; +} +fn Philox2x32bumpkey(key: u32) -> u32 { + return key + u32(0x9E3779B9); +} +fn Philox2x32(counter: su64, key: u32) -> vec2 { + var ctr = Philox2x32round(counter, key); // 1 + var ky = Philox2x32bumpkey(key); + ctr = Philox2x32round(ctr, ky); // 2 + ky = Philox2x32bumpkey(ky); + ctr = Philox2x32round(ctr, ky); // 3 + ky = Philox2x32bumpkey(ky); + ctr = Philox2x32round(ctr, ky); // 4 + ky = Philox2x32bumpkey(ky); + ctr = Philox2x32round(ctr, ky); // 5 + ky = Philox2x32bumpkey(ky); + ctr = Philox2x32round(ctr, ky); // 6 + ky = Philox2x32bumpkey(ky); + ctr = Philox2x32round(ctr, ky); // 7 + ky = Philox2x32bumpkey(ky); + ctr = Philox2x32round(ctr, ky); // 8 + ky = Philox2x32bumpkey(ky); + ctr = Philox2x32round(ctr, ky); // 9 + ky = Philox2x32bumpkey(ky); + return Philox2x32round(ctr, ky); // 10 +} +fn RandUint32Vec2(counter: su64, funcIndex: u32, key: u32) -> vec2 { + return Philox2x32(Uint64Add32(counter, funcIndex), key); +} +fn RandUint32(counter: su64, funcIndex: u32, key: u32) -> u32 { + return Philox2x32(Uint64Add32(counter, funcIndex), key).x; +} +fn RandFloat32Vec2(counter: su64, funcIndex: u32, key: u32) -> vec2 { + return Uint32ToFloat32Vec2(RandUint32Vec2(counter, funcIndex, key)); +} +fn RandFloat32(counter: su64, funcIndex: u32, key: u32) -> f32 { + return Uint32ToFloat32(RandUint32(counter, funcIndex, key)); +} +fn RandFloat32Range11Vec2(counter: su64, funcIndex: u32, key: u32) -> vec2 { + return Uint32ToFloat32Vec2(RandUint32Vec2(counter, funcIndex, key)); +} +fn RandFloat32Range11(counter: su64, funcIndex: u32, key: u32) -> f32 { + return Uint32ToFloat32Range11(RandUint32(counter, funcIndex, key)); +} +fn RandBoolP(counter: su64, funcIndex: u32, key: u32, p: f32) -> bool { + return (RandFloat32(counter, funcIndex, key) < p); +} +fn sincospi(x: f32) -> vec2 { + let PIf = 3.1415926535897932; + var r: vec2; + r.x = cos(PIf*x); + r.y = sin(PIf*x); + return r; +} +fn RandFloat32NormVec2(counter: su64, funcIndex: u32, key: 
u32) -> vec2 { + let ur = RandUint32Vec2(counter, funcIndex, key); + var f = sincospi(Uint32ToFloat32Range11(ur.x)); + let r = sqrt(-2.0 * log(Uint32ToFloat32(ur.y))); // guaranteed to avoid 0. + return f * r; +} +fn RandFloat32Norm(counter: su64, funcIndex: u32, key: u32) -> f32 { + return RandFloat32Vec2(counter, funcIndex, key).x; +} +fn RandUint32N(counter: su64, funcIndex: u32, key: u32, n: u32) -> u32 { + let v = RandFloat32(counter, funcIndex, key); + return u32(v * f32(n)); +} +struct RandCounter { + Counter: su64, + HiSeed: u32, + pad: u32, +} +fn RandCounter_Reset(ct: ptr) { + (*ct).Counter.x = u32(0); + (*ct).Counter.y = (*ct).HiSeed; +} +fn RandCounter_Seed(ct: ptr, seed: u32) { + (*ct).HiSeed = seed; + RandCounter_Reset(ct); +} +fn RandCounter_Add(ct: ptr, inc: u32) { + (*ct).Counter = Uint64Add32((*ct).Counter, inc); +} + +///////////// import: "sltype.wgsl" +alias su64 = vec2; +fn Uint32Mul64(a: u32, b: u32) -> su64 { + let LOMASK = (((u32(1))<<16)-1); + var r: su64; + r.x = a * b; /* full low multiply */ + let ahi = a >> 16; + let alo = a & LOMASK; + let bhi = b >> 16; + let blo = b & LOMASK; + let ahbl = ahi * blo; + let albh = alo * bhi; + let ahbl_albh = ((ahbl&LOMASK) + (albh&LOMASK)); + var hit = ahi*bhi + (ahbl>>16) + (albh>>16); + hit += ahbl_albh >> 16; /* carry from the sum of lo(ahbl) + lo(albh) ) */ + /* carry from the sum with alo*blo */ + if ((r.x >> u32(16)) < (ahbl_albh&LOMASK)) { + hit += u32(1); + } + r.y = hit; + return r; +} +/* +fn Uint32Mul64(a: u32, b: u32) -> su64 { + return su64(a) * su64(b); +} +*/ +fn Uint64Add32(a: su64, b: u32) -> su64 { + if (b == 0) { + return a; + } + var s = a; + if (s.x > u32(0xffffffff) - b) { + s.y++; + s.x = (b - 1) - (u32(0xffffffff) - s.x); + } else { + s.x += b; + } + return s; +} +fn Uint64Incr(a: su64) -> su64 { + var s = a; + if(s.x == 0xffffffff) { + s.y++; + s.x = u32(0); + } else { + s.x++; + } + return s; +} +fn Uint32ToFloat32(val: u32) -> f32 { + let factor = f32(1.0) / (f32(u32(0xffffffff)) + f32(1.0)); + let halffactor = f32(0.5) * factor; + var f = f32(val) * factor + halffactor; + if (f == 1.0) { // exclude 1 + return bitcast(0x3F7FFFFF); + } + return f; +} +fn Uint32ToFloat32Vec2(val: vec2) -> vec2 { + var r: vec2; + r.x = Uint32ToFloat32(val.x); + r.y = Uint32ToFloat32(val.y); + return r; +} +fn Uint32ToFloat32Range11(val: u32) -> f32 { + let factor = f32(1.0) / (f32(i32(0x7fffffff)) + f32(1.0)); + let halffactor = f32(0.5) * factor; + return (f32(val) * factor + halffactor); +} +fn Uint32ToFloat32Range11Vec2(val: vec2) -> vec2 { + var r: vec2; + r.x = Uint32ToFloat32Range11(val.x); + r.y = Uint32ToFloat32Range11(val.y); + return r; +} \ No newline at end of file diff --git a/axon/shaders/WtFromDWtSyn.wgsl b/axon/shaders/WtFromDWtSyn.wgsl index 1034ba05..ebbd3cb6 100644 --- a/axon/shaders/WtFromDWtSyn.wgsl +++ b/axon/shaders/WtFromDWtSyn.wgsl @@ -755,7 +755,8 @@ fn PathParams_WtFromDWtSynNoLimits(pt: ptr, ctx: ptr