Skip to content

Commit

Permalink
NewState implemented in GPU kernel; now learning as effectively in GPU as CPU. Also fixed tests.
Browse files Browse the repository at this point in the history
  • Loading branch information
rcoreilly committed Nov 22, 2024
1 parent 6645131 commit a23b155
Show file tree
Hide file tree
Showing 17 changed files with 349 additions and 230 deletions.
38 changes: 32 additions & 6 deletions axon/act-layer.go

Some generated files are not rendered by default. Learn more about how customized files appear on GitHub.

38 changes: 32 additions & 6 deletions axon/act-layer.goal
Original file line number Diff line number Diff line change
Expand Up @@ -849,6 +849,38 @@ func (ly *LayerParams) CyclePostVSPatchLayer(ctx *Context, pi, di uint32, spi in

//////// Phase timescale

// NewStateLayer does NewState at the layer level: it sets the layer-level
// clamp flags, resets the per-data reaction-time state, runs NewStatePool
// on every pool for every data-parallel index, and then updates the
// long-running activity averages (ActMAvg, ActPAvg) that drive adaptation
// of Gi layer inhibition. Called once per layer (kernel over Layers, not Data).
func (ly *LayerParams) NewStateLayer(ctx *Context) {
	actMinusAvg := float32(0)
	actPlusAvg := float32(0)
	np := uint32(ly.Indexes.NPools)

	// Clamp flags and the layer-pool index are layer-level and invariant
	// across data-parallel indexes: set / compute once, outside the loop.
	// Flags must be set before NewStatePool, which reads Clamp.IsInput.
	ly.Acts.Clamp.IsInput.SetBool(ly.IsInput())
	ly.Acts.Clamp.IsTarget.SetBool(ly.IsTarget())
	lpi := ly.PoolIndex(0)

	for di := uint32(0); di < ctx.NData; di++ {
		actMinusAvg += PoolAvgMax(AMAct, AMMinus, Avg, lpi, di)
		actPlusAvg += PoolAvgMax(AMAct, AMPlus, Avg, lpi, di)

		// reset reaction time: -1 = not yet recorded for this trial
		LayerStates[ly.Index, LayerRT, di] = -1.0

		for spi := uint32(0); spi < np; spi++ {
			pi := ly.PoolIndex(spi)
			ly.NewStatePool(ctx, pi, di) // also calls DecayState on pool
		}
	}

	// note: long-running averages must be based on data aggregated across
	// all data-parallel values, as they drive adaptation of Gi layer inhibition.
	davg := 1 / float32(ctx.NData)
	actMinusAvg *= davg
	actPlusAvg *= davg
	for di := uint32(0); di < ctx.NData; di++ {
		ly.NewStateLayerActAvg(ctx, di, actMinusAvg, actPlusAvg)
	}
}

// NewStateLayerActAvg updates ActAvg.ActMAvg and ActPAvg based on current values
// that have been averaged across NData already.
func (ly *LayerParams) NewStateLayerActAvg(ctx *Context, di uint32, actMinusAvg, actPlusAvg float32) {
Expand All @@ -860,12 +892,6 @@ func (ly *LayerParams) NewStateLayerActAvg(ctx *Context, di uint32, actMinusAvg,
LayerStates[ly.Index, LayerActPAvg, di] = pavg
}

func (ly *LayerParams) NewStateLayer(ctx *Context, di uint32) {
ly.Acts.Clamp.IsInput.SetBool(ly.IsInput())
ly.Acts.Clamp.IsTarget.SetBool(ly.IsTarget())
LayerStates[ly.Index, LayerRT, di] = -1
}

func (ly *LayerParams) NewStatePool(ctx *Context, pi, di uint32) {
PoolsInt[pi, Clamped, di] = 0
if ly.Acts.Clamp.Add.IsFalse() && ly.Acts.Clamp.IsInput.IsTrue() {
Expand Down
41 changes: 41 additions & 0 deletions axon/act-net.go

Some generated files are not rendered by default. Learn more about how customized files appear on GitHub.

41 changes: 41 additions & 0 deletions axon/act-net.goal
Original file line number Diff line number Diff line change
Expand Up @@ -4,6 +4,8 @@

package axon

import "cogentcore.org/core/enums"

// todo: don't even need layer-level ultimately.

// Cycle runs n cycles of activation updating.
Expand Down Expand Up @@ -55,6 +57,21 @@ func (nt *Network) Cycle(ncyc int, getNeurons bool) {
// }
}

// NewState handles all initialization at start of new input pattern.
// This is called *before* applying external input data and operates across
// all data parallel values. The current Context.NData should be set
// properly prior to calling this and subsequent Cycle methods.
// The sequence is order-dependent: the Context is updated first, pushed to
// the GPU, and only then are the per-layer, per-neuron, and per-path
// kernels launched.
func (nt *Network) NewState(mode enums.Enum, testing bool) {
	nix := nt.NetIxs()
	ctx := nt.Context()
	// total work items for the neuron-level kernel: neurons x data-parallel
	nd := int(nix.NNeurons * ctx.NData)
	ctx.NewState(mode, testing)
	// push updated Context / global state to GPU before kernel launches
	// (per function name; confirm against ToGPUCtxGlobal implementation)
	ToGPUCtxGlobal()
	RunNewStateLayer(int(nix.NLayers))
	RunNewStateNeuron(nd)
	RunInitGBuffsPath(int(nix.NPaths))
}

// InitExt initializes external input state.
// Call prior to applying external inputs to layers.
func (nt *Network) InitExt() {
Expand Down Expand Up @@ -289,6 +306,30 @@ func ApplyExtsNeuron(i uint32) { //gosl:kernel
Layers[li].ApplyExtsNeuron(ni, di)
}

// NewStateLayer is the kernel over Layers (not Data)
// which does new state on pools as well.
// One thread per layer; the layer method iterates over all
// data-parallel indexes and pools internally.
func NewStateLayer(li uint32) { //gosl:kernel
	ctx := GetCtx(0)
	Layers[li].NewStateLayer(ctx)
}

// NewStateNeuron is the kernel over Neurons * Data to
// do new state on neurons (decay).
// The flat work-item index i is decomposed into a data-parallel
// index and a neuron index via the Context helpers; the owning
// layer is looked up through the neuron index tables.
func NewStateNeuron(i uint32) { //gosl:kernel
	ctx := GetCtx(0)
	di := ctx.DataIndex(i)
	ni := ctx.ItemIndex(i)
	li := NeuronIxs[ni, NrnLayIndex]
	Layers[li].NewStateNeuron(ctx, ni, di)
}

// InitGBuffsPath is the kernel over Paths to
// initialize PathGBuf, PathGSyns.
// One thread per pathway; the context is passed through to the
// pathway-level InitGBuffs method.
func InitGBuffsPath(pti uint32) { //gosl:kernel
	ctx := GetCtx(0)
	Paths[pti].InitGBuffs(ctx)
}

// MinusPhasePool is the kernel over Pools * Data to
// do pool-level updating after end of minus phase.
func MinusPhasePool(i uint32) { //gosl:kernel
Expand Down
14 changes: 7 additions & 7 deletions axon/act-path.go

Some generated files are not rendered by default. Learn more about how customized files appear on GitHub.

14 changes: 7 additions & 7 deletions axon/act-path.goal
Original file line number Diff line number Diff line change
Expand Up @@ -304,21 +304,21 @@ func (pt *PathParams) SendSpike(ctx *Context, ni, di, lni uint32) {
// This is not typically needed (called during InitWeights, InitActs)
// but can be called when needed. Must be called to completely initialize
// prior activity, e.g., full Glong clearing.
func (pt *PathParams) InitGBuffs() {
func (pt *PathParams) InitGBuffs(ctx *Context) {
nix := GetNetworkIxs(0)
maxd := nix.MaxData
mdel := nix.MaxDelay + 1
rnn := pt.Indexes.RecvNeurN
npst := pt.Indexes.NPathNeurSt
for dl := range mdel {
for ri := range rnn {
for di := range maxd {
PathGBuf[npst+ri, dl, di] = 0.0
for dl := uint32(0); dl < mdel; dl++ {
for ri := uint32(0); ri < rnn; ri++ {
for di := uint32(0); di < maxd; di++ {
PathGBuf[npst+ri, dl, di] = 0
}
}
}
for ri := range rnn {
for di := range maxd {
for ri := uint32(0); ri < rnn; ri++ {
for di := uint32(0); di < maxd; di++ {
PathGSyns[npst+ri, di] = 0.0
}
}
Expand Down
22 changes: 11 additions & 11 deletions axon/act.go

Some generated files are not rendered by default. Learn more about how customized files appear on GitHub.

22 changes: 11 additions & 11 deletions axon/act.goal
Original file line number Diff line number Diff line change
Expand Up @@ -889,15 +889,15 @@ func (ac *ActParams) DecayAHP(ctx *Context, ni, di uint32, decay float32) {
// Called with ac.Decay.Act by Layer during NewState
func (ac *ActParams) DecayState(ctx *Context, ni, di uint32, decay, glong, ahp float32) {
// always reset these -- otherwise get insanely large values that take forever to update
Neurons[ni, ISIAvg, di] = -1
Neurons[ni, ISIAvg, di] = -1.0
Neurons[ni, ActInt, di] = ac.Init.Act
Neurons[ni, Spiked, di] = 0
Neurons[ni, Spiked, di] = 0.0
for i := range 8 {
Neurons[ni, SpkBin0+NeuronVars(i), di] = 0.0
}

if decay > 0 { // no-op for most, but not all..
Neurons[ni, Spike, di] = 0
Neurons[ni, Spike, di] = 0.0
Neurons[ni, Act, di] -= decay * (Neurons[ni, Act, di] - ac.Init.Act)
Neurons[ni, ActInt, di] -= decay * (Neurons[ni, ActInt, di] - ac.Init.Act)
Neurons[ni, GeSyn, di] -= decay * (Neurons[ni, GeSyn, di] - NeuronAvgs[ni, GeBase])
Expand Down Expand Up @@ -943,14 +943,14 @@ func (ac *ActParams) DecayState(ctx *Context, ni, di uint32, decay, glong, ahp f
ac.DecayLearnCa(ctx, ni, di, ac.Decay.LearnCa)
}

Neurons[ni, Inet, di] = 0
Neurons[ni, GeRaw, di] = 0
Neurons[ni, GiRaw, di] = 0
Neurons[ni, GModRaw, di] = 0
Neurons[ni, GModSyn, di] = 0
Neurons[ni, GMaintRaw, di] = 0
Neurons[ni, SSGiDend, di] = 0
Neurons[ni, GeExt, di] = 0
Neurons[ni, Inet, di] = 0.0
Neurons[ni, GeRaw, di] = 0.0
Neurons[ni, GiRaw, di] = 0.0
Neurons[ni, GModRaw, di] = 0.0
Neurons[ni, GModSyn, di] = 0.0
Neurons[ni, GMaintRaw, di] = 0.0
Neurons[ni, SSGiDend, di] = 0.0
Neurons[ni, GeExt, di] = 0.0

Neurons[ni, CtxtGeOrig, di] -= glong * Neurons[ni, CtxtGeOrig, di]
}
Expand Down
Loading

0 comments on commit a23b155

Please sign in to comment.