From a7a3dc60ebb0ac3adca4d545233f8ee6ace97468 Mon Sep 17 00:00:00 2001 From: "Randall C. O'Reilly" Date: Sat, 2 Nov 2024 04:19:05 -0700 Subject: [PATCH] dwt building --- axon/gpu_wgsl/{ => done}/gpu_dwt.wgsl | 0 axon/gpu_wgsl/{ => done}/gpu_dwtfmdi.wgsl | 0 axon/layer-cpu.go | 32 --- axon/layer-cpu.goal | 32 --- axon/learn.go | 3 +- axon/learn.goal | 3 +- axon/network.go | 307 ++++++++++++---------- axon/network.goal | 304 +++++++++++---------- axon/path-algo.go | 26 -- axon/path-algo.goal | 26 -- axon/pathparams.go | 17 +- axon/pathparams.goal | 17 +- 12 files changed, 347 insertions(+), 420 deletions(-) rename axon/gpu_wgsl/{ => done}/gpu_dwt.wgsl (100%) rename axon/gpu_wgsl/{ => done}/gpu_dwtfmdi.wgsl (100%) diff --git a/axon/gpu_wgsl/gpu_dwt.wgsl b/axon/gpu_wgsl/done/gpu_dwt.wgsl similarity index 100% rename from axon/gpu_wgsl/gpu_dwt.wgsl rename to axon/gpu_wgsl/done/gpu_dwt.wgsl diff --git a/axon/gpu_wgsl/gpu_dwtfmdi.wgsl b/axon/gpu_wgsl/done/gpu_dwtfmdi.wgsl similarity index 100% rename from axon/gpu_wgsl/gpu_dwtfmdi.wgsl rename to axon/gpu_wgsl/done/gpu_dwtfmdi.wgsl diff --git a/axon/layer-cpu.go b/axon/layer-cpu.go index 21efe3cf2..b9386779b 100644 --- a/axon/layer-cpu.go +++ b/axon/layer-cpu.go @@ -352,38 +352,6 @@ func (ly *Layer) PhaseDiffFromActs(ctx *Context) { ////////////////////////////////////////////////////////////////////////////////////// // Learning -// DWt computes the weight change (learning), based on -// synaptically integrated spiking, computed at the Theta cycle interval. -// This is the trace version for hidden units, and uses syn CaP - CaD for targets. -func (ly *Layer) DWt(ctx *Context, si uint32) { - for _, pj := range ly.SendPaths { - if pj.Off { - continue - } - pj.DWt(ctx, si) - } -} - -// DWtSubMean computes subtractive normalization of the DWts -func (ly *Layer) DWtSubMean(ctx *Context, ri uint32) { - for _, pj := range ly.RecvPaths { - if pj.Off { - continue - } - pj.DWtSubMean(ctx, ri) - } -} - -// WtFromDWt updates weight values from delta weight changes -func (ly *Layer) WtFromDWt(ctx *Context, si uint32) { - for _, pj := range ly.SendPaths { - if pj.Off { - continue - } - pj.WtFromDWt(ctx, si) - } -} - // DTrgSubMean subtracts the mean from DTrgAvg values // Called by TrgAvgFromD func (ly *Layer) DTrgSubMean(ctx *Context) { diff --git a/axon/layer-cpu.goal b/axon/layer-cpu.goal index e43c04167..3c7b42dd8 100644 --- a/axon/layer-cpu.goal +++ b/axon/layer-cpu.goal @@ -350,38 +350,6 @@ func (ly *Layer) PhaseDiffFromActs(ctx *Context) { ////////////////////////////////////////////////////////////////////////////////////// // Learning -// DWt computes the weight change (learning), based on -// synaptically integrated spiking, computed at the Theta cycle interval. -// This is the trace version for hidden units, and uses syn CaP - CaD for targets. 
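Note on the layer-cpu removals above: these per-layer wrappers walked SendPaths/RecvPaths on the CPU, one call per sending or receiving neuron. Their replacements, added in network.go later in this patch, are flat kernels over a synapse-by-data index space. A minimal sketch of the presumed index decomposition (the real code uses ctx.ItemIndex and ctx.DataIndex; this standalone arithmetic is an assumption for illustration only):

```go
// Hypothetical stand-ins for ctx.ItemIndex / ctx.DataIndex: a joint thread
// index over items*data splits into the item and its data-parallel row.
func itemIndex(i, nData uint32) uint32 { return i / nData } // which synapse / neuron
func dataIndex(i, nData uint32) uint32 { return i % nData } // which data row
```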
-func (ly *Layer) DWt(ctx *Context, si uint32) { - for _, pj := range ly.SendPaths { - if pj.Off { - continue - } - pj.DWt(ctx, si) - } -} - -// DWtSubMean computes subtractive normalization of the DWts -func (ly *Layer) DWtSubMean(ctx *Context, ri uint32) { - for _, pj := range ly.RecvPaths { - if pj.Off { - continue - } - pj.DWtSubMean(ctx, ri) - } -} - -// WtFromDWt updates weight values from delta weight changes -func (ly *Layer) WtFromDWt(ctx *Context, si uint32) { - for _, pj := range ly.SendPaths { - if pj.Off { - continue - } - pj.WtFromDWt(ctx, si) - } -} - // DTrgSubMean subtracts the mean from DTrgAvg values // Called by TrgAvgFromD func (ly *Layer) DTrgSubMean(ctx *Context) { diff --git a/axon/learn.go b/axon/learn.go index 00648cc06..64788bb97 100644 --- a/axon/learn.go +++ b/axon/learn.go @@ -675,8 +675,7 @@ func (tp *TraceParams) Update() { // TrFromCa returns updated trace factor as function of a // synaptic calcium update factor and current trace func (tp *TraceParams) TrFromCa(tr float32, ca float32) float32 { - tr += tp.Dt * (ca - tr) - return tr + return tr + tp.Dt*(ca-tr) } ////////////////////////////////////////////////////////////////////////////////////// diff --git a/axon/learn.goal b/axon/learn.goal index f845dfb5a..e25431b9b 100644 --- a/axon/learn.goal +++ b/axon/learn.goal @@ -673,8 +673,7 @@ func (tp *TraceParams) Update() { // TrFromCa returns updated trace factor as function of a // synaptic calcium update factor and current trace func (tp *TraceParams) TrFromCa(tr float32, ca float32) float32 { - tr += tp.Dt * (ca - tr) - return tr + return tr + tp.Dt*(ca-tr) } ////////////////////////////////////////////////////////////////////////////////////// diff --git a/axon/network.go b/axon/network.go index 29583eb69..a2f15b24b 100644 --- a/axon/network.go +++ b/axon/network.go @@ -100,6 +100,151 @@ func (nt *Network) Cycle() { // } } +// InitExt initializes external input state. +// Call prior to applying external inputs to layers. +func (nt *Network) InitExt(ctx *Context) { + // note: important to do this for GPU + // to ensure partial inputs work the same way on CPU and GPU. + for _, ly := range nt.Layers { + if ly.Off { + continue + } + ly.InitExt(ctx) + } +} + +// ApplyExts applies external inputs to layers, based on values +// that were set in prior layer-specific ApplyExt calls. +// This does nothing on the CPU, but is critical for the GPU, +// and should be added to all sims where GPU will be used. +func (nt *Network) ApplyExts(ctx *Context) { + if !UseGPU { + return + } + nix := GetNetworkIxs(0) + nd := int(nix.NNeurons * ctx.NData) + RunApplyExtsNeuron(nd) +} + +// MinusPhase does updating after end of minus phase. +func (nt *Network) MinusPhase(ctx *Context) { + nix := GetNetworkIxs(0) + nd := int(nix.NNeurons * ctx.NData) + pd := int(nix.NPools * ctx.NData) + RunMinusPhasePool(pd) + RunMinusPhaseNeuron(nd) + nt.MinusPhasePost(ctx) + // todo: + // nt.GPU.SyncStateToGPU() +} + +// MinusPhasePost does special CPU post processing. +func (nt *Network) MinusPhasePost(ctx *Context) { + for _, ly := range nt.Layers { + if ly.Off { + continue + } + ly.MinusPhasePost(ctx) + } +} + +// PlusPhaseStart does updating at the start of the plus phase: +// applies Target inputs as External inputs. 
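The learn.go / learn.goal change above is behavior-preserving: TrFromCa remains a first-order exponential approach of the trace toward the synaptic calcium factor, just folded into a single expression. A quick usage sketch (the Dt value is illustrative, not a value set by this patch; in practice Dt is normally derived from a time constant via Update):

```go
tp := TraceParams{Dt: 0.1} // illustrative rate; remaining fields left at zero for the sketch
tr := float32(0)
for i := 0; i < 3; i++ {
	tr = tp.TrFromCa(tr, 1) // 0.1, 0.19, 0.271: geometric approach toward ca = 1
}
```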
+func (nt *Network) PlusPhaseStart(ctx *Context) { + nix := GetNetworkIxs(0) + nd := int(nix.NNeurons * ctx.NData) + RunPlusPhaseStartNeuron(nd) +} + +// PlusPhase does updating after end of plus phase +func (nt *Network) PlusPhase(ctx *Context) { + nix := GetNetworkIxs(0) + nd := int(nix.NNeurons * ctx.NData) + pd := int(nix.NPools * ctx.NData) + RunPlusPhasePool(pd) + RunPlusPhaseNeuron(nd) + nt.PlusPhasePost(ctx) + // todo: + // nt.GPU.SyncStateToGPU() +} + +// PlusPhasePost happens on the CPU always. +func (nt *Network) PlusPhasePost(ctx *Context) { + for _, ly := range nt.Layers { + if ly.Off { + continue + } + ly.PlusPhasePost(ctx) + } +} + +// TargToExt sets external input Ext from target values Target +// This is done at end of MinusPhase to allow targets to drive activity in plus phase. +// This can be called separately to simulate alpha cycles within theta cycles, for example. +func (nt *Network) TargToExt(ctx *Context) { + for _, ly := range nt.Layers { + if ly.Off { + continue + } + ly.TargToExt(ctx) + } +} + +// ClearTargExt clears external inputs Ext that were set from target values Target. +// This can be called to simulate alpha cycles within theta cycles, for example. +func (nt *Network) ClearTargExt(ctx *Context) { + for _, ly := range nt.Layers { + if ly.Off { + continue + } + ly.ClearTargExt(ctx) + } +} + +// SpkSt1 saves current acts into SpkSt1 (using CaSpkP) +func (nt *Network) SpkSt1(ctx *Context) { + for _, ly := range nt.Layers { + if ly.Off { + continue + } + ly.SpkSt1(ctx) + } +} + +// SpkSt2 saves current acts into SpkSt2 (using CaSpkP) +func (nt *Network) SpkSt2(ctx *Context) { + for _, ly := range nt.Layers { + if ly.Off { + continue + } + ly.SpkSt2(ctx) + } +} + +//////// Learn methods + +// DWt computes the weight change (learning) based on current running-average activation values +func (nt *Network) DWt(ctx *Context) { + nix := GetNetworkIxs(0) + sd := int(nix.NSyns * ctx.NData) + RunDWtSyn(sd) // todo: iterate over groups as needed + RunDWtFromDi(int(nix.NSyns)) +} + +// WtFromDWt updates the weights from delta-weight changes. +// Also does ctx.SlowInc() and calls SlowAdapt at SlowInterval +func (nt *Network) WtFromDWt(ctx *Context) { + // todo: fixme + // nix := GetNetworkIxs(0) + // nn := nix.NNeurons + // RunDWtSubMean(nn) + // RunWtFromDWt(int(nix.NSyns)) + // + // if ctx.SlowInc() { + // nt.SlowAdapt(ctx) + // } +} + //gosl:start //////// Kernels for all parallel CPU / GPU compute are here: @@ -231,156 +376,26 @@ func PlusPhaseNeuron(i uint32) { //gosl:kernel Layers[li].PlusPhaseNeuron(ctx, ni, di) } -//gosl:end - -// InitExt initializes external input state. -// Call prior to applying external inputs to layers. -func (nt *Network) InitExt(ctx *Context) { - // note: important to do this for GPU - // to ensure partial inputs work the same way on CPU and GPU. - for _, ly := range nt.Layers { - if ly.Off { - continue - } - ly.InitExt(ctx) - } -} - -// ApplyExts applies external inputs to layers, based on values -// that were set in prior layer-specific ApplyExt calls. -// This does nothing on the CPU, but is critical for the GPU, -// and should be added to all sims where GPU will be used. -func (nt *Network) ApplyExts(ctx *Context) { - if !UseGPU { - return - } - nix := GetNetworkIxs(0) - nd := int(nix.NNeurons * ctx.NData) - RunApplyExtsNeuron(nd) -} - -// MinusPhase does updating after end of minus phase. 
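The new Network.DWt above replaces the commented-out per-neuron mapping with two kernel launches. A serial, CPU-equivalent sketch of what those launches compute (nix and ctx as in the surrounding code; the real dispatch runs these bodies in parallel):

```go
for i := uint32(0); i < nix.NSyns*ctx.NData; i++ {
	DWtSyn(i) // pass 1: one (synapse, data row) each, writing SynapseTraces[DiDWt]
}
for syni := uint32(0); syni < nix.NSyns; syni++ {
	DWtFromDi(syni) // pass 2: reduce DiDWt over data rows into the synapse's DWt
}
```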
-func (nt *Network) MinusPhase(ctx *Context) { - nix := GetNetworkIxs(0) - nd := int(nix.NNeurons * ctx.NData) - pd := int(nix.NPools * ctx.NData) - RunMinusPhasePool(pd) - RunMinusPhaseNeuron(nd) - nt.MinusPhasePost(ctx) - // todo: - // nt.GPU.SyncStateToGPU() -} - -// MinusPhasePost does special CPU post processing. -func (nt *Network) MinusPhasePost(ctx *Context) { - for _, ly := range nt.Layers { - if ly.Off { - continue - } - ly.MinusPhasePost(ctx) - } -} - -// PlusPhaseStart does updating at the start of the plus phase: -// applies Target inputs as External inputs. -func (nt *Network) PlusPhaseStart(ctx *Context) { - nix := GetNetworkIxs(0) - nd := int(nix.NNeurons * ctx.NData) - RunPlusPhaseStartNeuron(nd) -} - -// PlusPhase does updating after end of plus phase -func (nt *Network) PlusPhase(ctx *Context) { - nix := GetNetworkIxs(0) - nd := int(nix.NNeurons * ctx.NData) - pd := int(nix.NPools * ctx.NData) - RunPlusPhasePool(pd) - RunPlusPhaseNeuron(nd) - nt.PlusPhasePost(ctx) - // todo: - // nt.GPU.SyncStateToGPU() -} - -// PlusPhasePost happens on the CPU always. -func (nt *Network) PlusPhasePost(ctx *Context) { - for _, ly := range nt.Layers { - if ly.Off { - continue - } - ly.PlusPhasePost(ctx) - } -} - -// TargToExt sets external input Ext from target values Target -// This is done at end of MinusPhase to allow targets to drive activity in plus phase. -// This can be called separately to simulate alpha cycles within theta cycles, for example. -func (nt *Network) TargToExt(ctx *Context) { - for _, ly := range nt.Layers { - if ly.Off { - continue - } - ly.TargToExt(ctx) - } -} - -// ClearTargExt clears external inputs Ext that were set from target values Target. -// This can be called to simulate alpha cycles within theta cycles, for example. -func (nt *Network) ClearTargExt(ctx *Context) { - for _, ly := range nt.Layers { - if ly.Off { - continue - } - ly.ClearTargExt(ctx) - } -} - -// SpkSt1 saves current acts into SpkSt1 (using CaSpkP) -func (nt *Network) SpkSt1(ctx *Context) { - for _, ly := range nt.Layers { - if ly.Off { - continue - } - ly.SpkSt1(ctx) - } -} - -// SpkSt2 saves current acts into SpkSt2 (using CaSpkP) -func (nt *Network) SpkSt2(ctx *Context) { - for _, ly := range nt.Layers { - if ly.Off { - continue - } - ly.SpkSt2(ctx) - } +// DWtSyn is the kernel over Synapses * Data for computing weight change +// (learning). +func DWtSyn(i uint32) { //gosl:kernel + ctx := GetCtx(0) + di := ctx.DataIndex(i) + syni := ctx.ItemIndex(i) + pti := SynapseIxs.Value(int(SynPathIndex), int(syni)) + si := SynapseIxs.Value(int(SynSendIndex), int(syni)) + ri := SynapseIxs.Value(int(SynRecvIndex), int(syni)) + Paths[pti].DWtSyn(ctx, syni, si, ri, di) } -////////////////////////////////////////////////////////////////////////////////////// -// Learn methods - -// DWt computes the weight change (learning) based on current running-average activation values -func (nt *Network) DWt(ctx *Context) { - // if nt.GPU.On { - // nt.GPU.RunDWt() - // return - // } - // - // nt.NeuronMapPar(ctx, func(ly *Layer, ni uint32) { ly.DWt(ctx, ni) }, "DWt") +// DWtFromDi is the kernel over Synapses for integrating DWt over Di. +func DWtFromDi(syni uint32) { //gosl:kernel + ctx := GetCtx(0) + pti := SynapseIxs.Value(int(SynPathIndex), int(syni)) + Paths[pti].DWtFromDi(ctx, syni) } -// WtFromDWt updates the weights from delta-weight changes. 
-// Also does ctx.SlowInc() and calls SlowAdapt at SlowInterval -func (nt *Network) WtFromDWt(ctx *Context) { - // nt.LayerMapSeq(func(ly *Layer) { ly.WtFromDWtLayer(ctx) }, "WtFromDWtLayer") // lightweight - // // if nt.GPU.On { - // // nt.GPU.RunWtFromDWt() - // // } else { - // nt.NeuronMapPar(ctx, func(ly *Layer, ni uint32) { ly.DWtSubMean(ctx, ni) }, "DWtSubMean") - // nt.NeuronMapPar(ctx, func(ly *Layer, ni uint32) { ly.WtFromDWt(ctx, ni) }, "WtFromDWt") - // } - if ctx.SlowInc() { - nt.SlowAdapt(ctx) - } -} +//gosl:end // SlowAdapt is the layer-level slow adaptation functions: Synaptic scaling, // and adapting inhibition diff --git a/axon/network.goal b/axon/network.goal index 521571d69..09b1ba043 100644 --- a/axon/network.goal +++ b/axon/network.goal @@ -94,6 +94,150 @@ func (nt *Network) Cycle() { // } } +// InitExt initializes external input state. +// Call prior to applying external inputs to layers. +func (nt *Network) InitExt(ctx *Context) { + // note: important to do this for GPU + // to ensure partial inputs work the same way on CPU and GPU. + for _, ly := range nt.Layers { + if ly.Off { + continue + } + ly.InitExt(ctx) + } +} + +// ApplyExts applies external inputs to layers, based on values +// that were set in prior layer-specific ApplyExt calls. +// This does nothing on the CPU, but is critical for the GPU, +// and should be added to all sims where GPU will be used. +func (nt *Network) ApplyExts(ctx *Context) { + if !UseGPU { + return + } + nix := GetNetworkIxs(0) + nd := int(nix.NNeurons * ctx.NData) + RunApplyExtsNeuron(nd) +} + +// MinusPhase does updating after end of minus phase. +func (nt *Network) MinusPhase(ctx *Context) { + nix := GetNetworkIxs(0) + nd := int(nix.NNeurons * ctx.NData) + pd := int(nix.NPools * ctx.NData) + RunMinusPhasePool(pd) + RunMinusPhaseNeuron(nd) + nt.MinusPhasePost(ctx) + // todo: + // nt.GPU.SyncStateToGPU() +} + +// MinusPhasePost does special CPU post processing. +func (nt *Network) MinusPhasePost(ctx *Context) { + for _, ly := range nt.Layers { + if ly.Off { + continue + } + ly.MinusPhasePost(ctx) + } +} + +// PlusPhaseStart does updating at the start of the plus phase: +// applies Target inputs as External inputs. +func (nt *Network) PlusPhaseStart(ctx *Context) { + nix := GetNetworkIxs(0) + nd := int(nix.NNeurons * ctx.NData) + RunPlusPhaseStartNeuron(nd) +} + +// PlusPhase does updating after end of plus phase +func (nt *Network) PlusPhase(ctx *Context) { + nix := GetNetworkIxs(0) + nd := int(nix.NNeurons * ctx.NData) + pd := int(nix.NPools * ctx.NData) + RunPlusPhasePool(pd) + RunPlusPhaseNeuron(nd) + nt.PlusPhasePost(ctx) + // todo: + // nt.GPU.SyncStateToGPU() +} + +// PlusPhasePost happens on the CPU always. +func (nt *Network) PlusPhasePost(ctx *Context) { + for _, ly := range nt.Layers { + if ly.Off { + continue + } + ly.PlusPhasePost(ctx) + } +} + +// TargToExt sets external input Ext from target values Target +// This is done at end of MinusPhase to allow targets to drive activity in plus phase. +// This can be called separately to simulate alpha cycles within theta cycles, for example. +func (nt *Network) TargToExt(ctx *Context) { + for _, ly := range nt.Layers { + if ly.Off { + continue + } + ly.TargToExt(ctx) + } +} + +// ClearTargExt clears external inputs Ext that were set from target values Target. +// This can be called to simulate alpha cycles within theta cycles, for example. 
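The network.goal hunks that follow mirror the network.go changes above: the goal source is the one edited by hand, and the generator emits the .go file, expanding multi-dimensional indexing into tensor accessor calls. The correspondence, as evidenced by the paired hunks in this patch:

```go
// goal source:   pti := SynapseIxs[SynPathIndex, syni]
// generated Go:  pti := SynapseIxs.Value(int(SynPathIndex), int(syni))
//
// goal source:   Synapses[DWt, syni] += dwt
// generated Go:  Synapses.SetAdd(dwt, int(DWt), int(syni))
```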
+func (nt *Network) ClearTargExt(ctx *Context) { + for _, ly := range nt.Layers { + if ly.Off { + continue + } + ly.ClearTargExt(ctx) + } +} + +// SpkSt1 saves current acts into SpkSt1 (using CaSpkP) +func (nt *Network) SpkSt1(ctx *Context) { + for _, ly := range nt.Layers { + if ly.Off { + continue + } + ly.SpkSt1(ctx) + } +} + +// SpkSt2 saves current acts into SpkSt2 (using CaSpkP) +func (nt *Network) SpkSt2(ctx *Context) { + for _, ly := range nt.Layers { + if ly.Off { + continue + } + ly.SpkSt2(ctx) + } +} + +//////// Learn methods + +// DWt computes the weight change (learning) based on current running-average activation values +func (nt *Network) DWt(ctx *Context) { + nix := GetNetworkIxs(0) + sd := int(nix.NSyns * ctx.NData) + RunDWtSyn(sd) // todo: iterate over groups as needed + RunDWtFromDi(int(nix.NSyns)) +} + +// WtFromDWt updates the weights from delta-weight changes. +// Also does ctx.SlowInc() and calls SlowAdapt at SlowInterval +func (nt *Network) WtFromDWt(ctx *Context) { + // todo: fixme + // nix := GetNetworkIxs(0) + // nn := nix.NNeurons + // RunDWtSubMean(nn) + // RunWtFromDWt(int(nix.NSyns)) + // if ctx.SlowInc() { + // nt.SlowAdapt(ctx) + // } +} + //gosl:start //////// Kernels for all parallel CPU / GPU compute are here: @@ -225,155 +369,27 @@ func PlusPhaseNeuron(i uint32) { //gosl:kernel Layers[li].PlusPhaseNeuron(ctx, ni, di) } -//gosl:end - -// InitExt initializes external input state. -// Call prior to applying external inputs to layers. -func (nt *Network) InitExt(ctx *Context) { - // note: important to do this for GPU - // to ensure partial inputs work the same way on CPU and GPU. - for _, ly := range nt.Layers { - if ly.Off { - continue - } - ly.InitExt(ctx) - } -} - -// ApplyExts applies external inputs to layers, based on values -// that were set in prior layer-specific ApplyExt calls. -// This does nothing on the CPU, but is critical for the GPU, -// and should be added to all sims where GPU will be used. -func (nt *Network) ApplyExts(ctx *Context) { - if !UseGPU { - return - } - nix := GetNetworkIxs(0) - nd := int(nix.NNeurons * ctx.NData) - RunApplyExtsNeuron(nd) -} - -// MinusPhase does updating after end of minus phase. -func (nt *Network) MinusPhase(ctx *Context) { - nix := GetNetworkIxs(0) - nd := int(nix.NNeurons * ctx.NData) - pd := int(nix.NPools * ctx.NData) - RunMinusPhasePool(pd) - RunMinusPhaseNeuron(nd) - nt.MinusPhasePost(ctx) - // todo: - // nt.GPU.SyncStateToGPU() -} - -// MinusPhasePost does special CPU post processing. -func (nt *Network) MinusPhasePost(ctx *Context) { - for _, ly := range nt.Layers { - if ly.Off { - continue - } - ly.MinusPhasePost(ctx) - } -} - -// PlusPhaseStart does updating at the start of the plus phase: -// applies Target inputs as External inputs. -func (nt *Network) PlusPhaseStart(ctx *Context) { - nix := GetNetworkIxs(0) - nd := int(nix.NNeurons * ctx.NData) - RunPlusPhaseStartNeuron(nd) -} - -// PlusPhase does updating after end of plus phase -func (nt *Network) PlusPhase(ctx *Context) { - nix := GetNetworkIxs(0) - nd := int(nix.NNeurons * ctx.NData) - pd := int(nix.NPools * ctx.NData) - RunPlusPhasePool(pd) - RunPlusPhaseNeuron(nd) - nt.PlusPhasePost(ctx) - // todo: - // nt.GPU.SyncStateToGPU() -} - -// PlusPhasePost happens on the CPU always. 
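One caveat on the WtFromDWt stub in this hunk: its entire body sits behind the "todo: fixme" comment, so as of this commit DWt values accumulate but are never applied to the weights, and SlowAdapt is never triggered. The commented lines indicate the intended sequence once the corresponding kernels exist (names as written in the comments; not yet functional):

```go
// Intended, per the commented-out body (assumed future kernels):
// RunDWtSubMean(nn)            // subtractive normalization of DWts
// RunWtFromDWt(int(nix.NSyns)) // apply accumulated DWt to weights
// if ctx.SlowInc() {
// 	nt.SlowAdapt(ctx)        // periodic slow adaptation at SlowInterval
// }
```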
-func (nt *Network) PlusPhasePost(ctx *Context) { - for _, ly := range nt.Layers { - if ly.Off { - continue - } - ly.PlusPhasePost(ctx) - } -} - -// TargToExt sets external input Ext from target values Target -// This is done at end of MinusPhase to allow targets to drive activity in plus phase. -// This can be called separately to simulate alpha cycles within theta cycles, for example. -func (nt *Network) TargToExt(ctx *Context) { - for _, ly := range nt.Layers { - if ly.Off { - continue - } - ly.TargToExt(ctx) - } -} - -// ClearTargExt clears external inputs Ext that were set from target values Target. -// This can be called to simulate alpha cycles within theta cycles, for example. -func (nt *Network) ClearTargExt(ctx *Context) { - for _, ly := range nt.Layers { - if ly.Off { - continue - } - ly.ClearTargExt(ctx) - } -} - -// SpkSt1 saves current acts into SpkSt1 (using CaSpkP) -func (nt *Network) SpkSt1(ctx *Context) { - for _, ly := range nt.Layers { - if ly.Off { - continue - } - ly.SpkSt1(ctx) - } +// DWtSyn is the kernel over Synapses * Data for computing weight change +// (learning). +func DWtSyn(i uint32) { //gosl:kernel + ctx := GetCtx(0) + di := ctx.DataIndex(i) + syni := ctx.ItemIndex(i) + pti := SynapseIxs[SynPathIndex, syni] + si := SynapseIxs[SynSendIndex, syni] + ri := SynapseIxs[SynRecvIndex, syni] + Paths[pti].DWtSyn(ctx, syni, si, ri, di) } -// SpkSt2 saves current acts into SpkSt2 (using CaSpkP) -func (nt *Network) SpkSt2(ctx *Context) { - for _, ly := range nt.Layers { - if ly.Off { - continue - } - ly.SpkSt2(ctx) - } +// DWtFromDi is the kernel over Synapses for integrating DWt over Di. +func DWtFromDi(syni uint32) { //gosl:kernel + ctx := GetCtx(0) + pti := SynapseIxs[SynPathIndex, syni] + Paths[pti].DWtFromDi(ctx, syni) } -////////////////////////////////////////////////////////////////////////////////////// -// Learn methods - -// DWt computes the weight change (learning) based on current running-average activation values -func (nt *Network) DWt(ctx *Context) { - // if nt.GPU.On { - // nt.GPU.RunDWt() - // return - // } - // nt.NeuronMapPar(ctx, func(ly *Layer, ni uint32) { ly.DWt(ctx, ni) }, "DWt") -} +//gosl:end -// WtFromDWt updates the weights from delta-weight changes. -// Also does ctx.SlowInc() and calls SlowAdapt at SlowInterval -func (nt *Network) WtFromDWt(ctx *Context) { - // nt.LayerMapSeq(func(ly *Layer) { ly.WtFromDWtLayer(ctx) }, "WtFromDWtLayer") // lightweight - // // if nt.GPU.On { - // // nt.GPU.RunWtFromDWt() - // // } else { - // nt.NeuronMapPar(ctx, func(ly *Layer, ni uint32) { ly.DWtSubMean(ctx, ni) }, "DWtSubMean") - // nt.NeuronMapPar(ctx, func(ly *Layer, ni uint32) { ly.WtFromDWt(ctx, ni) }, "WtFromDWt") - // } - if ctx.SlowInc() { - nt.SlowAdapt(ctx) - } -} // SlowAdapt is the layer-level slow adaptation functions: Synaptic scaling, // and adapting inhibition diff --git a/axon/path-algo.go b/axon/path-algo.go index c767bf120..6dca3a66a 100644 --- a/axon/path-algo.go +++ b/axon/path-algo.go @@ -11,32 +11,6 @@ package axon ////////////////////////////////////////////////////////////////////////////////////// // Learn methods -// DWt computes the weight change (learning), based on -// synaptically integrated spiking, computed at the Theta cycle interval. -// This is the trace version for hidden units, and uses syn CaP - CaD for targets. 
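The Path.DWt removal below also resolves the old inline note that "on GPU, this must be a separate kernel": the single CPU loop that computed per-data DiDWt and immediately summed it is now split so that each pass has exclusive write ownership and needs no atomics. Roughly, with index roles as in the kernels above (diDWt is a placeholder for the per-row weight change computed in pass 1):

```go
// Pass 1 (DWtSyn): thread i owns exactly one (syni, di) cell, so writes never conflict.
SynapseTraces.Set(diDWt, int(DiDWt), int(syni), int(di))

// Pass 2 (DWtFromDi): thread syni owns its whole row, so the reduction is private.
dwt := float32(0)
for di := uint32(0); di < ctx.NData; di++ {
	dwt += SynapseTraces.Value(int(DiDWt), int(syni), int(di))
}
```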
-func (pj *Path) DWt(ctx *Context, si uint32) { - if pj.Params.Learn.Learn.IsFalse() { - return - } - - scon := pj.SendCon[si-pj.Send.NeurStIndex] - rlay := pj.Recv - isTarget := rlay.Params.Acts.Clamp.IsTarget.IsTrue() - for syi := scon.Start; syi < scon.Start+scon.N; syi++ { - syni := pj.SynStIndex + syi - ri := SynapseIxs.Value(int(SynRecvIndex), int(syni)) - dwt := float32(0) - for di := uint32(0); di < ctx.NData; di++ { - lpi := rlay.Params.PoolIndex(0) - pi := rlay.Params.PoolIndex(NeuronIxs.Value(int(NrnSubPool), int(ri))) - pj.Params.DWtSyn(ctx, syni, si, ri, lpi, pi, di, isTarget) - dwt += SynapseTraces.Value(int(DiDWt), int(syni), int(di)) - } - // note: on GPU, this must be a separate kernel, but can be combined here - Synapses.SetAdd(dwt, int(DWt), int(syni)) - } -} - // DWtSubMean subtracts the mean from any pathways that have SubMean > 0. // This is called on *receiving* pathways, prior to WtFromDwt. func (pj *Path) DWtSubMean(ctx *Context, ri uint32) { diff --git a/axon/path-algo.goal b/axon/path-algo.goal index 625b3dfbe..08c3f561c 100644 --- a/axon/path-algo.goal +++ b/axon/path-algo.goal @@ -9,32 +9,6 @@ package axon ////////////////////////////////////////////////////////////////////////////////////// // Learn methods -// DWt computes the weight change (learning), based on -// synaptically integrated spiking, computed at the Theta cycle interval. -// This is the trace version for hidden units, and uses syn CaP - CaD for targets. -func (pj *Path) DWt(ctx *Context, si uint32) { - if pj.Params.Learn.Learn.IsFalse() { - return - } - - scon := pj.SendCon[si-pj.Send.NeurStIndex] - rlay := pj.Recv - isTarget := rlay.Params.Acts.Clamp.IsTarget.IsTrue() - for syi := scon.Start; syi < scon.Start+scon.N; syi++ { - syni := pj.SynStIndex + syi - ri := SynapseIxs[SynRecvIndex, syni] - dwt := float32(0) - for di := uint32(0); di < ctx.NData; di++ { - lpi := rlay.Params.PoolIndex(0) - pi := rlay.Params.PoolIndex(NeuronIxs[NrnSubPool, ri]) - pj.Params.DWtSyn(ctx, syni, si, ri, lpi, pi, di, isTarget) - dwt += SynapseTraces[DiDWt, syni, di] - } - // note: on GPU, this must be a separate kernel, but can be combined here - Synapses[DWt, syni] += dwt - } -} - // DWtSubMean subtracts the mean from any pathways that have SubMean > 0. // This is called on *receiving* pathways, prior to WtFromDwt. func (pj *Path) DWtSubMean(ctx *Context, ri uint32) { diff --git a/axon/pathparams.go b/axon/pathparams.go index 9311b8b28..d60fdcb5e 100644 --- a/axon/pathparams.go +++ b/axon/pathparams.go @@ -321,7 +321,15 @@ func (pt *PathParams) SendSpike(ctx *Context, ni, di, lni uint32) { // DWtSyn is the overall entry point for weight change (learning) at given synapse. // It selects appropriate function based on pathway type. 
// rpl is the receiving layer SubPool -func (pt *PathParams) DWtSyn(ctx *Context, syni, si, ri, lpi, pi, di uint32, isTarget bool) { +func (pt *PathParams) DWtSyn(ctx *Context, syni, si, ri, di uint32) { + if pt.Learn.Learn == 0 { + return + } + rlay := &Layers[pt.Indexes.RecvLayer] + isTarget := rlay.Acts.Clamp.IsTarget > 0 + spi := NeuronIxs.Value(int(NrnSubPool), int(ri)) + pi := rlay.PoolIndex(spi) + lpi := rlay.PoolIndex(0) switch pt.PathType { case RWPath: pt.DWtSynRWPred(ctx, syni, si, ri, lpi, pi, di) @@ -658,11 +666,10 @@ func (pt *PathParams) DWtSynVSPatch(ctx *Context, syni, si, ri, lpi, pi, di uint SynapseTraces.Set(dwt, int(DiDWt), int(syni), int(di)) } -/////////////////////////////////////////////////// -// WtFromDWt +//////// WtFromDWt -// DWtFromDiDWtSyn updates DWt from data parallel DiDWt values -func (pt *PathParams) DWtFromDiDWtSyn(ctx *Context, syni uint32) { +// DWtFromDi updates DWt from data parallel DiDWt values +func (pt *PathParams) DWtFromDi(ctx *Context, syni uint32) { dwt := float32(0) for di := uint32(0); di < ctx.NData; di++ { dwt += SynapseTraces.Value(int(DiDWt), int(syni), int(di)) diff --git a/axon/pathparams.goal b/axon/pathparams.goal index f106af3b0..8e9714609 100644 --- a/axon/pathparams.goal +++ b/axon/pathparams.goal @@ -319,7 +319,15 @@ func (pt *PathParams) SendSpike(ctx *Context, ni, di, lni uint32) { // DWtSyn is the overall entry point for weight change (learning) at given synapse. // It selects appropriate function based on pathway type. // rpl is the receiving layer SubPool -func (pt *PathParams) DWtSyn(ctx *Context, syni, si, ri, lpi, pi, di uint32, isTarget bool) { +func (pt *PathParams) DWtSyn(ctx *Context, syni, si, ri, di uint32) { + if pt.Learn.Learn == 0 { + return + } + rlay := &Layers[pt.Indexes.RecvLayer] + isTarget := rlay.Acts.Clamp.IsTarget > 0 + spi := NeuronIxs[NrnSubPool, ri] + pi := rlay.PoolIndex(spi) + lpi := rlay.PoolIndex(0) switch pt.PathType { case RWPath: pt.DWtSynRWPred(ctx, syni, si, ri, lpi, pi, di) @@ -656,11 +664,10 @@ func (pt *PathParams) DWtSynVSPatch(ctx *Context, syni, si, ri, lpi, pi, di uint SynapseTraces[DiDWt, syni, di] = dwt } -/////////////////////////////////////////////////// -// WtFromDWt +//////// WtFromDWt -// DWtFromDiDWtSyn updates DWt from data parallel DiDWt values -func (pt *PathParams) DWtFromDiDWtSyn(ctx *Context, syni uint32) { +// DWtFromDi updates DWt from data parallel DiDWt values +func (pt *PathParams) DWtFromDi(ctx *Context, syni uint32) { dwt := float32(0) for di := uint32(0); di < ctx.NData; di++ { dwt += SynapseTraces[DiDWt, syni, di]
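Net effect of the pathparams change: DWtSyn now owns the learning gate and the pool-index derivation, so kernel call sites shrink accordingly, and DWtFromDiDWtSyn is renamed to DWtFromDi to match. Comparing the two call sites visible in this patch:

```go
// old (removed Path.DWt):  pj.Params.DWtSyn(ctx, syni, si, ri, lpi, pi, di, isTarget)
// new (DWtSyn kernel):     Paths[pti].DWtSyn(ctx, syni, si, ri, di)

// The dropped arguments are recomputed inside DWtSyn from the path's indexes:
rlay := &Layers[pt.Indexes.RecvLayer]
isTarget := rlay.Acts.Clamp.IsTarget > 0
lpi := rlay.PoolIndex(0) // layer-level pool
pi := rlay.PoolIndex(NeuronIxs.Value(int(NrnSubPool), int(ri)))
```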