Skip to content

Commit

Permalink
pca epochs now more in line with prior with tensor speed improvements…
Browse files Browse the repository at this point in the history
…. start on prevcorsim stats.
  • Loading branch information
rcoreilly committed Dec 2, 2024
1 parent d05a547 commit f810bf5
Show file tree
Hide file tree
Showing 7 changed files with 74 additions and 15 deletions.
2 changes: 2 additions & 0 deletions axon/network.go

Some generated files are not rendered by default. Learn more about how customized files appear on GitHub.

2 changes: 2 additions & 0 deletions axon/network.goal
Original file line number Diff line number Diff line change
Expand Up @@ -27,6 +27,7 @@ import (
"cogentcore.org/core/base/timer"
"cogentcore.org/core/core"
"cogentcore.org/core/goal/gosl/sltensor"
"cogentcore.org/core/gpu"
"cogentcore.org/core/icons"
"cogentcore.org/core/tensor"
"cogentcore.org/core/texteditor"
Expand Down Expand Up @@ -1040,6 +1041,7 @@ func (nt *Network) SetAsCurrent() {
PathGSyns = &nt.PathGSyns
Synapses = &nt.Synapses
SynapseTraces = &nt.SynapseTraces
gpu.NumThreads = nt.NThreads
}

// DeleteAll deletes all layers, prepares network for re-configuring and building
Expand Down
File renamed without changes.
60 changes: 55 additions & 5 deletions axon/simstats.go
Original file line number Diff line number Diff line change
Expand Up @@ -354,8 +354,7 @@ func StatLayerState(statsDir *tensorfs.Node, net *Network, smode, slevel enums.E
}
}

// PCAStrongThr is the threshold for counting PCA eigenvalues as "strong"
// Applies to SVD as well.
// PCAStrongThr is the threshold for counting PCA eigenvalues as "strong".
var PCAStrongThr = 0.01

// StatPCA returns a Stats function that computes PCA NStrong, Top5, Next5, and Rest
Expand Down Expand Up @@ -385,8 +384,6 @@ func StatPCA(statsDir, currentDir *tensorfs.Node, net *Network, interval int, tr
sizes := []int{ndata}
sizes = append(sizes, ly.GetSampleShape().Sizes...)
vtsr := pcaDir.Float64(lnm, sizes...)
vecs := curModeDir.Float64("PCA_Vecs_" + lnm)
vals := curModeDir.Float64("PCA_Vals_" + lnm)
if levi == 0 {
ltsr := curModeDir.Float64("PCA_ActM_"+lnm, ly.GetSampleShape().Sizes...)
if start {
Expand All @@ -404,9 +401,10 @@ func StatPCA(statsDir, currentDir *tensorfs.Node, net *Network, interval int, tr
if !start && levi == 1 {
if interval > 0 && epc%interval == 0 {
hasNew = true
vals := curModeDir.Float64("PCA_Vals_" + lnm)
covar := curModeDir.Float64("PCA_Covar_" + lnm)
metric.CovarianceMatrixOut(metric.Covariance, vtsr, covar)
matrix.SVDOut(covar, vecs, vals)
matrix.SVDValuesOut(covar, vals)
ln := vals.Len()
for i := range ln {
v := vals.Float1D(i)
Expand Down Expand Up @@ -464,3 +462,55 @@ func StatPCA(statsDir, currentDir *tensorfs.Node, net *Network, interval int, tr
}
}
}

// StatPrevCorSim returns a Stats function that computes correlations
// between previous trial activity state and current minus phase and
// plus phase state. This is important for predictive learning.
func StatPrevCorSim(statsDir *tensorfs.Node, net *Network, trialLevel enums.Enum, layerNames ...string) func(mode, level enums.Enum, start bool) {
	statNames := []string{"PrevMCorSim", "PrevPCorSim"}
	levels := make([]enums.Enum, 10) // cache of level enums by offset from trialLevel; 10 should be enough
	return func(mode, level enums.Enum, start bool) {
		levi := int(level.Int64() - trialLevel.Int64())
		if levi < 0 {
			return // only record at trial level and above
		}
		levels[levi] = level
		modeDir := statsDir.RecycleDir(mode.String())
		levelDir := modeDir.RecycleDir(level.String())
		ndata := net.Context().NData
		for _, lnm := range layerNames {
			ly := net.LayerByName(lnm) // hoisted: depends only on layer name, not stat name
			for si, statName := range statNames {
				name := lnm + "_" + statName
				tsr := levelDir.Float64(name)
				if start {
					// Reset accumulated rows and install default plot styling once.
					tsr.SetNumRows(0)
					if ps := plot.GetStylersFrom(tsr); ps == nil {
						ps.Add(func(s *plot.Style) {
							s.Range.SetMin(0).SetMax(1)
						})
						plot.SetStylersTo(tsr, ps)
					}
					continue
				}
				switch levi {
				case 0:
					// Trial level: record the stat directly, per data-parallel index.
					for di := range ndata {
						var stat float64
						switch si {
						case 0:
							stat = 1.0 - float64(LayerStates.Value(int(ly.Index), int(di), int(LayerPhaseDiff)))
						case 1:
							// TODO(review): identical to case 0 — PrevPCorSim presumably
							// should read a plus-phase-vs-previous state rather than
							// LayerPhaseDiff again; confirm intended LayerStates variable.
							stat = 1.0 - float64(LayerStates.Value(int(ly.Index), int(di), int(LayerPhaseDiff)))
						}
						tsr.AppendRowFloat(stat)
					}
				default:
					// Higher levels aggregate: append mean of the next level down.
					subd := modeDir.RecycleDir(levels[levi-1].String())
					stat := stats.StatMean.Call(subd.Value(name))
					tsr.AppendRow(stat)
				}
			}
		}
	}
}
2 changes: 2 additions & 0 deletions axon/threads.go
Original file line number Diff line number Diff line change
Expand Up @@ -12,6 +12,7 @@ import (
"time"

"cogentcore.org/core/base/timer"
"cogentcore.org/core/gpu"
)

// SetNThreads sets number of threads to use for CPU parallel processing.
Expand All @@ -28,6 +29,7 @@ func (nt *Network) SetNThreads(nthr int) {
}
}
nt.NThreads = min(maxProcs, nthr)
gpu.NumThreads = nt.NThreads
}

//////////////////////////////////////////////////////////////
Expand Down
21 changes: 12 additions & 9 deletions examples/deep_move/deep_move.go
Original file line number Diff line number Diff line change
Expand Up @@ -27,7 +27,6 @@ import (
"github.com/emer/axon/v2/axon"
"github.com/emer/emergent/v2/egui"
"github.com/emer/emergent/v2/env"
"github.com/emer/emergent/v2/etime"
"github.com/emer/emergent/v2/looper"
"github.com/emer/emergent/v2/netview"
"github.com/emer/emergent/v2/paths"
Expand Down Expand Up @@ -130,6 +129,9 @@ func (ss *Sim) Run() {
tensorfs.CurRoot = ss.Root
ss.Net = axon.NewNetwork(ss.Config.Name)
ss.Params.Config(LayerParams, PathParams, ss.Config.Params.Sheet, ss.Config.Params.Tag)
if ss.Config.Params.Hid2 {
ss.Params.ExtraSheets = "Hid2"
}
ss.RandSeeds.Init(100) // max 100 runs
ss.InitRandSeed(0)
if ss.Config.Run.GPU {
Expand Down Expand Up @@ -162,13 +164,13 @@ func (ss *Sim) ConfigEnv() {
trn = &MoveEnv{}
tst = &MoveEnv{}
} else {
trn = ss.Envs.ByModeDi(etime.Train, di).(*MoveEnv)
tst = ss.Envs.ByModeDi(etime.Test, di).(*MoveEnv)
trn = ss.Envs.ByModeDi(Train, di).(*MoveEnv)
tst = ss.Envs.ByModeDi(Test, di).(*MoveEnv)
}

// note: names must be standard here!
trn.Defaults()
trn.Name = env.ModeDi(etime.Train, di)
trn.Name = env.ModeDi(Train, di)
trn.Debug = false
trn.RandSeed = 73 + int64(di)*73
if ss.Config.Env.Env != nil {
Expand All @@ -178,7 +180,7 @@ func (ss *Sim) ConfigEnv() {
trn.Validate()

tst.Defaults()
tst.Name = env.ModeDi(etime.Test, di)
tst.Name = env.ModeDi(Test, di)
tst.RandSeed = 181 + int64(di)*181
if ss.Config.Env.Env != nil {
reflectx.SetFieldsFromMap(tst, ss.Config.Env.Env)
Expand All @@ -198,7 +200,7 @@ func (ss *Sim) ConfigNet(net *axon.Network) {
net.SetMaxData(ss.Config.Run.NData)
net.SetRandSeed(ss.RandSeeds[0]) // init new separate random seed, using run = 0

ev := ss.Envs.ByModeDi(etime.Train, 0).(*MoveEnv)
ev := ss.Envs.ByModeDi(Train, 0).(*MoveEnv)

net.SetMaxData(ss.Config.Run.NData)
net.SetRandSeed(ss.RandSeeds[0]) // init new separate random seed, using run = 0
Expand Down Expand Up @@ -571,7 +573,7 @@ func (ss *Sim) ConfigStats() {

// up to a point, it is good to use loops over stats in one function,
// to reduce repetition of boilerplate.
statNames := []string{"Depth_CorSim", "HeadDir_CorSim"}
	statNames := []string{"Depth_CorSim", "HeadDir_CorSim", "DepthP_CorSim"}
ss.AddStat(func(mode Modes, level Levels, phase StatsPhase) {
for _, name := range statNames {
modeDir := ss.Stats.RecycleDir(mode.String())
Expand Down Expand Up @@ -618,7 +620,7 @@ func (ss *Sim) ConfigStats() {
}
})

perTrlFunc := axon.StatPerTrialMSec(ss.Stats, "Depth_CorSim", Train, Trial)
perTrlFunc := axon.StatPerTrialMSec(ss.Stats, "DepthP_CorSim", Train, Trial)
ss.AddStat(func(mode Modes, level Levels, phase StatsPhase) {
perTrlFunc(mode, level, phase == Start)
})
Expand Down Expand Up @@ -655,7 +657,7 @@ func (ss *Sim) StatCounters(mode, level enums.Enum) string {
return counters
}
counters += fmt.Sprintf(" TrialName: %s", curModeDir.StringValue("TrialName").String1D(di))
statNames := []string{"CorSim", "UnitErr", "Err"}
statNames := []string{"Depth_CorSim", "HeadDir_CorSim"}
if level == Cycle || curModeDir.Node(statNames[0]) == nil {
return counters
}
Expand Down Expand Up @@ -749,3 +751,4 @@ func (ss *Sim) RunNoGUI() {
axon.CloseLogFiles(ss.Loops, ss.Stats, Cycle)
axon.GPURelease()
}

2 changes: 1 addition & 1 deletion examples/ra25/ra25.go
Original file line number Diff line number Diff line change
Expand Up @@ -182,7 +182,7 @@ type Config struct {
URL string `default:"https://github.com/emer/axon/blob/main/examples/ra25/README.md"`

// Doc is brief documentation of the sim.
Doc string `width:"60" default:"This demonstrates a basic Axon model and provides a template for creating new models. It has a random-associator four-layer axon network that uses the standard supervised learning paradigm to learn mappings between 25 random input / output patterns defined over 5x5 input / output layers (i.e., 25 units)."`
Doc string `width:"60" default:"This demonstrates a basic Axon model and provides a template for creating new models. It has a random-associator four-layer axon network that uses the standard supervised learning paradigm to learn mappings between 25 random input / output patterns defined over 5x5 input / output layers."`

// Includes has a list of additional config files to include.
// After configuration, it contains list of include files added.
Expand Down

0 comments on commit f810bf5

Please sign in to comment.