diff --git a/axon/network.go b/axon/network.go
index 78241637..12a434ae 100644
--- a/axon/network.go
+++ b/axon/network.go
@@ -334,7 +334,7 @@ func (nt *Network) UpdateLayerMaps() {
nt.UpdateLayerNameMap()
nt.LayerClassMap = make(map[string][]string)
for _, ly := range nt.Layers {
- cs := ly.Type.String() + ly.Class
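+ // the space separator keeps the Type name and first Class name
+ // from merging into a single class token when split below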
+ cs := ly.Type.String() + " " + ly.Class
cls := strings.Split(cs, " ")
for _, cl := range cls {
if cl == "" {
diff --git a/axon/network.goal b/axon/network.goal
index 783af0c8..2f88f8e2 100644
--- a/axon/network.goal
+++ b/axon/network.goal
@@ -331,7 +331,7 @@ func (nt *Network) UpdateLayerMaps() {
nt.UpdateLayerNameMap()
nt.LayerClassMap = make(map[string][]string)
for _, ly := range nt.Layers {
- cs := ly.Type.String() + ly.Class
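+ // the space separator keeps the Type name and first Class name
+ // from merging into a single class token when split below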
+ cs := ly.Type.String() + " " + ly.Class
cls := strings.Split(cs, " ")
for _, cl := range cls {
if cl == "" {
diff --git a/axon/simstats.go b/axon/simstats.go
index 6b2ea3a5..3eb94ef6 100644
--- a/axon/simstats.go
+++ b/axon/simstats.go
@@ -140,7 +140,7 @@ func StatLoopCounters(statsDir, currentDir *tensorfs.Node, ls *looper.Stacks, ne
modeDir := statsDir.RecycleDir(mode.String())
curModeDir := currentDir.RecycleDir(mode.String())
levelDir := modeDir.RecycleDir(level.String())
- tsr := tensorfs.Value[int](levelDir, name)
+ tsr := levelDir.Int(name)
if start {
tsr.SetNumRows(0)
if ps := plot.GetStylersFrom(tsr); ps == nil {
@@ -151,7 +151,7 @@ func StatLoopCounters(statsDir, currentDir *tensorfs.Node, ls *looper.Stacks, ne
}
if level.Int64() == trialLevel.Int64() {
for di := range ndata {
- tensorfs.Value[int](curModeDir, name, ndata).SetInt1D(0, di)
+ curModeDir.Int(name, ndata).SetInt1D(0, di)
}
}
continue
@@ -159,14 +159,14 @@ func StatLoopCounters(statsDir, currentDir *tensorfs.Node, ls *looper.Stacks, ne
ctr := st.Loops[lev].Counter.Cur
if level.Int64() == trialLevel.Int64() {
for di := range ndata {
- tensorfs.Value[int](curModeDir, name, ndata).SetInt1D(ctr, di)
+ curModeDir.Int(name, ndata).SetInt1D(ctr, di)
tsr.AppendRowInt(ctr)
if lev.Int64() == trialLevel.Int64() {
ctr++
}
}
} else {
- tensorfs.Scalar[int](curModeDir, name).SetInt1D(ctr, 0)
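+ // a size of 1 makes this a scalar value, replacing tensorfs.Scalar[int]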
+ curModeDir.Int(name, 1).SetInt1D(ctr, 0)
tsr.AppendRowInt(ctr)
}
}
@@ -193,9 +193,9 @@ func StatRunName(statsDir, currentDir *tensorfs.Node, ls *looper.Stacks, net *Ne
name := "RunName"
modeDir := statsDir.RecycleDir(mode.String())
levelDir := modeDir.RecycleDir(level.String())
- tsr := tensorfs.Value[string](levelDir, name)
+ tsr := levelDir.StringValue(name)
ndata := int(net.Context().NData)
- runNm := tensorfs.Scalar[string](currentDir, name).String1D(0)
+ runNm := currentDir.StringValue(name, 1).String1D(0)
if start {
tsr.SetNumRows(0)
@@ -234,7 +234,7 @@ func StatPerTrialMSec(statsDir *tensorfs.Node, statName string, trainMode enums.
name := "PerTrialMSec"
modeDir := statsDir.RecycleDir(mode.String())
levelDir := modeDir.RecycleDir(level.String())
- tsr := tensorfs.Value[float64](levelDir, name)
+ tsr := levelDir.Float64(name)
if start {
tsr.SetNumRows(0)
if ps := plot.GetStylersFrom(tsr); ps == nil {
@@ -284,7 +284,7 @@ func StatLayerActGe(statsDir *tensorfs.Node, net *Network, trainMode, trialLevel
ly := net.LayerByName(lnm)
lpi := ly.Params.PoolIndex(0)
name := lnm + "_" + statName
- tsr := tensorfs.Value[float64](levelDir, name)
+ tsr := levelDir.Float64(name)
if start {
tsr.SetNumRows(0)
if ps := plot.GetStylersFrom(tsr); ps == nil {
@@ -339,7 +339,7 @@ func StatLayerState(statsDir *tensorfs.Node, net *Network, smode, slevel enums.E
name := lnm + "_" + variable
sizes := []int{ndata}
sizes = append(sizes, ly.GetSampleShape().Sizes...)
- tsr := tensorfs.Value[float64](levelDir, name, sizes...)
+ tsr := levelDir.Float64(name, sizes...)
if start {
tsr.SetNumRows(0)
continue
@@ -384,11 +384,11 @@ func StatPCA(statsDir, currentDir *tensorfs.Node, net *Network, interval int, tr
ly := net.LayerByName(lnm)
sizes := []int{ndata}
sizes = append(sizes, ly.GetSampleShape().Sizes...)
- vtsr := tensorfs.Value[float64](pcaDir, lnm, sizes...)
- vecs := tensorfs.Value[float64](curModeDir, "PCA_Vecs_"+lnm).(*tensor.Float64)
- vals := tensorfs.Value[float64](curModeDir, "PCA_Vals_"+lnm).(*tensor.Float64)
+ vtsr := pcaDir.Float64(lnm, sizes...)
+ vecs := curModeDir.Float64("PCA_Vecs_" + lnm)
+ vals := curModeDir.Float64("PCA_Vals_" + lnm)
if levi == 0 {
- ltsr := tensorfs.Value[float64](curModeDir, "PCA_ActM_"+lnm, ly.GetSampleShape().Sizes...)
+ ltsr := curModeDir.Float64("PCA_ActM_"+lnm, ly.GetSampleShape().Sizes...)
if start {
vtsr.SetNumRows(0)
} else {
@@ -404,7 +404,7 @@ func StatPCA(statsDir, currentDir *tensorfs.Node, net *Network, interval int, tr
if !start && levi == 1 {
if interval > 0 && epc%interval == 0 {
hasNew = true
- covar := tensorfs.Value[float64](curModeDir, "PCA_Covar_"+lnm)
+ covar := curModeDir.Float64("PCA_Covar_" + lnm)
metric.CovarianceMatrixOut(metric.Covariance, vtsr, covar)
matrix.SVDOut(covar, vecs, vals)
ln := vals.Len()
@@ -433,7 +433,7 @@ func StatPCA(statsDir, currentDir *tensorfs.Node, net *Network, interval int, tr
}
for si, statName := range statNames {
name := lnm + "_" + statName
- tsr := tensorfs.Value[float64](levelDir, name)
+ tsr := levelDir.Float64(name)
if start {
tsr.SetNumRows(0)
if ps := plot.GetStylersFrom(tsr); ps == nil {
diff --git a/examples/deep_fsa/config.go b/examples/deep_fsa/config.go
index b83e8b78..63fa9f3d 100644
--- a/examples/deep_fsa/config.go
+++ b/examples/deep_fsa/config.go
@@ -4,6 +4,8 @@
package main
+import "cogentcore.org/core/math32/vecint"
+
// EnvConfig has config params for environment
// note: only adding fields for key Env params that matter for both Network and Env
// other params are set via the Env map data mechanism.
@@ -15,11 +17,11 @@ type EnvConfig struct {
// number of units per localist output unit -- 1 works better than 5 here
UnitsPer int `default:"1"`
- // ] names of input letters
+ // InputNames are names of input letters.
InputNames []string `default:"['B','T','S','X','V','P','E']"`
- // map of input names -- initialized during Configenv
- InputNameMap map[string]int
+ // InputNameMap is the map of input names, initialized during ConfigEnv.
+ InputNameMap map[string]int `display:"-"`
}
// InitNameMap is called during ConfigEnv
@@ -33,115 +35,128 @@ func (cfg *EnvConfig) InitNameMap() {
}
}
-// ParamConfig has config parameters related to sim params
+// ParamConfig has config parameters related to sim params.
type ParamConfig struct {
- // network parameters
- Network map[string]any
+ // Hidden1Size is the size of the first hidden layer.
+ Hidden1Size vecint.Vector2i `default:"{'X':10,'Y':10}" nest:"+"`
+
+ // Hidden2Size is the size of the second hidden layer.
+ Hidden2Size vecint.Vector2i `default:"{'X':10,'Y':10}" nest:"+"`
- // Extra Param Sheet name(s) to use (space separated if multiple) -- must be valid name as listed in compiled-in params or loaded params
+ // Sheet is the extra params sheet name(s) to use (space separated
+ // if multiple). Must be valid name as listed in compiled-in params
+ // or loaded params.
Sheet string
- // extra tag to add to file names and logs saved from this run
+ // Tag is an extra tag to add to file names and logs saved from this run.
Tag string
- // user note -- describe the run params etc -- like a git commit message for the run
+ // Note is additional info to describe the run params etc,
+ // like a git commit message for the run.
Note string
- // Name of the JSON file to input saved parameters from.
- File string `nest:"+"`
-
- // Save a snapshot of all current param and config settings in a directory named params_ (or _good if Good is true), then quit -- useful for comparing to later changes and seeing multiple views of current params
+ // SaveAll will save a snapshot of all current param and config settings
+ // in a directory named params_ (or _good if Good is true),
+ // then quit. Useful for comparing to later changes and seeing multiple
+ // views of current params.
SaveAll bool `nest:"+"`
- // for SaveAll, save to params_good for a known good params state. This can be done prior to making a new release after all tests are passing -- add results to git to provide a full diff record of all params over time.
+ // Good is for SaveAll: save to params_good for a known good params state.
+ // This can be done prior to making a new release after all tests are passing.
+ // Add results to git to provide a full diff record of all params over time.
Good bool `nest:"+"`
}
-// RunConfig has config parameters related to running the sim
+// RunConfig has config parameters related to running the sim.
type RunConfig struct {
- // use the GPU for computation -- generally faster even for small models if NData ~16
+ // GPU uses the GPU for computation, generally faster than CPU even for
+ // small models if NData ~16.
GPU bool `default:"true"`
- // number of data-parallel items to process in parallel per trial -- works (and is significantly faster) for both CPU and GPU. Results in an effective mini-batch of learning. 16 learns just as well as 1 -- no diffs.
+ // NData is the number of data-parallel items to process in parallel per trial.
+ // It is significantly faster for both CPU and GPU, and results in an
+ // effective mini-batch of learning.
NData int `default:"16" min:"1"`
- // number of parallel threads for CPU computation -- 0 = use default
+ // NThreads is the number of parallel threads for CPU computation;
+ // 0 = use default.
NThreads int `default:"0"`
- // starting run number -- determines the random seed -- runs counts from there -- can do all runs in parallel by launching separate jobs with each run, runs = 1
+ // Run is the _starting_ run number, which determines the random seed.
+ // Runs counts up from there. Can do all runs in parallel by launching
+ // separate jobs with each starting Run, Runs = 1.
Run int `default:"0"`
- // total number of runs to do when running Train
- NRuns int `default:"5" min:"1"`
+ // Runs is the total number of runs to do when running Train, starting from Run.
+ Runs int `default:"5" min:"1"`
+
+ // Epochs is the total number of epochs per run.
+ Epochs int `default:"100"`
+
+ // Trials is the total number of trials per epoch.
+ // Should be an even multiple of NData.
+ Trials int `default:"32"`
- // total number of epochs per run
- NEpochs int `default:"100"`
+ // Cycles is the total number of cycles per trial: at least 200.
+ Cycles int `default:"200"`
- // total number of trials per epoch. Should be an even multiple of NData.
- NTrials int `default:"196"`
+ // PlusCycles is the total number of plus-phase cycles per trial. For Cycles=300, use 100.
+ PlusCycles int `default:"50"`
- // total number of cycles per trial. at least 200
- NCycles int `default:"200"`
+ // NZero is how many perfect, zero-error epochs are required before stopping a Run.
+ NZero int `default:"2"`
- // total number of plus-phase cycles per trial. for NCycles=300, use 100
- NPlusCycles int `default:"50"`
+ // TestInterval is how often to run through all the test patterns,
+ // in terms of training epochs. Can use 0 or -1 for no testing.
+ TestInterval int `default:"0"`
- // how frequently (in epochs) to compute PCA on hidden representations to measure variance?
+ // PCAInterval is how often (in epochs) to compute PCA on hidden
+ // representations to measure variance.
PCAInterval int `default:"5"`
- // how often to run through all the test patterns, in terms of training epochs -- can use 0 or -1 for no testing
- TestInterval int `default:"-1"`
+ // StartWts is the name of the weights file to load at the start of the first run.
+ StartWts string
}
-// LogConfig has config parameters related to logging data
-type LogConfig struct { //types:add
+// LogConfig has config parameters related to logging data.
+type LogConfig struct {
- // if true, save final weights after each run
+ // SaveWeights will save final weights after each run.
SaveWeights bool
- // if true, save train epoch log to file, as .epc.tsv typically
- Epoch bool `default:"true" nest:"+"`
-
- // if true, save run log to file, as .run.tsv typically
- Run bool `default:"true" nest:"+"`
-
- // if true, save train trial log to file, as .trl.tsv typically. May be large.
- Trial bool `default:"false" nest:"+"`
-
- // if true, save testing epoch log to file, as .tst_epc.tsv typically. In general it is better to copy testing items over to the training epoch log and record there.
- TestEpoch bool `default:"false" nest:"+"`
-
- // if true, save testing trial log to file, as .tst_trl.tsv typically. May be large.
- TestTrial bool `default:"false" nest:"+"`
+ // Train has the list of Train mode levels to save log files for.
+ Train []string `default:"['Run', 'Epoch']" nest:"+"`
- // if true, save network activation etc data from testing trials, for later viewing in netview
- NetData bool
+ // Test has the list of Test mode levels to save log files for.
+ Test []string `nest:"+"`
}
// Config is a standard Sim config -- use as a starting point.
-type Config struct { //types:add
+type Config struct {
- // specify include files here, and after configuration, it contains list of include files added
+ // Includes has a list of additional config files to include.
+ // After configuration, it contains the list of include files added.
Includes []string
- // open the GUI -- does not automatically run -- if false, then runs automatically and quits
+ // GUI means open the GUI. Otherwise it runs automatically and quits,
+ // saving results to log files.
GUI bool `default:"true"`
- // log debugging information
+ // Debug reports debugging information.
Debug bool
- // environment configuration options
+ // Env has environment related configuration options.
Env EnvConfig `display:"add-fields"`
- // parameter related configuration options
+ // Params has parameter related configuration options.
Params ParamConfig `display:"add-fields"`
- // sim running related configuration options
+ // Run has sim running related configuration options.
Run RunConfig `display:"add-fields"`
- // data logging related configuration options
+ // Log has data logging related configuration options.
Log LogConfig `display:"add-fields"`
}
diff --git a/examples/deep_fsa/deep_fsa.go b/examples/deep_fsa/deep_fsa.go
index cd25eef9..c4409b74 100644
--- a/examples/deep_fsa/deep_fsa.go
+++ b/examples/deep_fsa/deep_fsa.go
@@ -1,4 +1,4 @@
-// Copyright (c) 2019, The Emergent Authors. All rights reserved.
+// Copyright (c) 2024, The Emergent Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
@@ -6,46 +6,63 @@
// finite state automaton problem.
package main
-//go:generate core generate -add-types
+//go:generate core generate -add-types -add-funcs
import (
+ "fmt"
"log"
- "os"
- "reflect"
"cogentcore.org/core/base/mpi"
"cogentcore.org/core/base/randx"
+ "cogentcore.org/core/cli"
"cogentcore.org/core/core"
+ "cogentcore.org/core/enums"
"cogentcore.org/core/icons"
"cogentcore.org/core/math32"
- "cogentcore.org/core/math32/minmax"
+ "cogentcore.org/core/plot"
"cogentcore.org/core/tensor/stats/stats"
+ "cogentcore.org/core/tensor/tensorfs"
"cogentcore.org/core/tree"
"github.com/emer/axon/v2/axon"
- "github.com/emer/emergent/v2/econfig"
"github.com/emer/emergent/v2/egui"
- "github.com/emer/emergent/v2/elog"
- "github.com/emer/emergent/v2/emer"
"github.com/emer/emergent/v2/env"
- "github.com/emer/emergent/v2/estats"
- "github.com/emer/emergent/v2/etime"
"github.com/emer/emergent/v2/looper"
- "github.com/emer/emergent/v2/netview"
- "github.com/emer/emergent/v2/params"
"github.com/emer/emergent/v2/paths"
)
func main() {
- sim := &Sim{}
- sim.New()
- sim.ConfigAll()
- if sim.Config.GUI {
- sim.RunGUI()
- } else {
- sim.RunNoGUI()
- }
+ opts := cli.DefaultOptions("deep_fsa", "Deep FSA")
+ opts.DefaultFiles = append(opts.DefaultFiles, "config.toml")
+ cfg := &Config{}
+ cli.Run(opts, cfg, RunSim)
}
+// Modes are the looping modes (Stacks) for running and statistics.
+type Modes int32 //enums:enum
+const (
+ Train Modes = iota
+ Test
+)
+
+// Levels are the looping levels for running and statistics.
+type Levels int32 //enums:enum
+const (
+ Cycle Levels = iota
+ Trial
+ Epoch
+ Run
+)
+
+// StatsPhase is the phase of stats processing for given mode, level.
+// Accumulated values are reset at Start, added each Step.
+type StatsPhase int32 //enums:enum
+const (
+ Start StatsPhase = iota
+ Step
+)
+
+// see params.go for params, config.go for Config
+
// Sim encapsulates the entire simulation model, and we define all the
// functionality as methods on this struct. This structure keeps all relevant
// state information organized and available without having to pass everything around
@@ -54,64 +71,79 @@ func main() {
type Sim struct {
// simulation configuration parameters -- set by .toml config file and / or args
- Config Config `new-window:"+"`
+ Config *Config `new-window:"+"`
- // the network -- click to view / edit parameters for layers, paths, etc
+ // Net is the network: click to view / edit parameters for layers, paths, etc.
Net *axon.Network `new-window:"+" display:"no-inline"`
- // all parameter management
- Params emer.NetParams `display:"add-fields"`
+ // Params manages network parameter setting.
+ Params axon.Params
- // contains looper control loops for running sim
- Loops *looper.Manager `new-window:"+" display:"no-inline"`
+ // Loops are the control loops for running the sim, in different Modes
+ // across stacks of Levels.
+ Loops *looper.Stacks `new-window:"+" display:"no-inline"`
- // contains computed statistic values
- Stats estats.Stats `new-window:"+"`
+ // Envs provides mode-string based storage of environments.
+ Envs env.Envs `new-window:"+" display:"no-inline"`
- // Contains all the logs and information about the logs.'
- Logs elog.Logs `new-window:"+"`
+ // TrainUpdate has Train mode netview update parameters.
+ TrainUpdate axon.NetViewUpdate `display:"inline"`
- // Environments
- Envs env.Envs `new-window:"+" display:"no-inline"`
+ // TestUpdate has Test mode netview update parameters.
+ TestUpdate axon.NetViewUpdate `display:"inline"`
+
+ // Root is the root tensorfs directory, where all stats and other misc sim data goes.
+ Root *tensorfs.Node `display:"-"`
- // axon timing parameters and state
- Context axon.Context `new-window:"+"`
+ // Stats has the stats directory within Root.
+ Stats *tensorfs.Node `display:"-"`
- // netview update parameters
- ViewUpdate netview.ViewUpdate `display:"add-fields"`
+ // Current has the current stats values within Stats.
+ Current *tensorfs.Node `display:"-"`
- // manages all the gui elements
+ // StatFuncs are statistics functions called at given mode and level,
+ // to perform all stats computations. phase = Start does initialization
+ // at the start of the given level, and all configuration (called during Init too).
+ StatFuncs []func(mode Modes, level Levels, phase StatsPhase) `display:"-"`
+
+ // GUI manages all the GUI elements.
GUI egui.GUI `display:"-"`
- // a list of random seeds to use for each run
+ // RandSeeds is a list of random seeds to use for each run.
RandSeeds randx.Seeds `display:"-"`
}
-// New creates new blank elements and initializes defaults
-func (ss *Sim) New() {
- econfig.Config(&ss.Config, "config.toml")
- ss.Net = axon.NewNetwork("DeepFSA")
- ss.Params.Config(ParamSets, ss.Config.Params.Sheet, ss.Config.Params.Tag, ss.Net)
- ss.Stats.Init()
- ss.RandSeeds.Init(100) // max 100 runs
- ss.InitRandSeed(0)
- ss.Context.Defaults()
- ss.Context.ThetaCycles = int32(ss.Config.Run.NCycles)
+// RunSim runs the simulation with given configuration.
+func RunSim(cfg *Config) error {
+ sim := &Sim{}
+ sim.Config = cfg
+ sim.Run()
+ return nil
}
-////////////////////////////////////////////////////////////////////////////////////////////
-// Configs
-
-// ConfigAll configures all the elements using the standard functions
-func (ss *Sim) ConfigAll() {
+func (ss *Sim) Run() {
+ ss.Root, _ = tensorfs.NewDir("Root")
+ ss.Net = axon.NewNetwork("DeepFSA")
+ ss.Params.Config(LayerParams, PathParams, ss.Config.Params.Sheet, ss.Config.Params.Tag)
+ ss.RandSeeds.Init(100) // max 100 runs
+ ss.InitRandSeed(0)
+ if ss.Config.Run.GPU {
+ axon.GPUInit()
+ axon.UseGPU = true
+ }
ss.ConfigEnv()
ss.ConfigNet(ss.Net)
- ss.ConfigLogs()
ss.ConfigLoops()
+ ss.ConfigStats()
if ss.Config.Params.SaveAll {
ss.Config.Params.SaveAll = false
- ss.Net.SaveParamsSnapshot(&ss.Params.Params, &ss.Config, ss.Config.Params.Good)
- os.Exit(0)
+ ss.Net.SaveParamsSnapshot(&ss.Config, ss.Config.Params.Good)
+ return
+ }
+ if ss.Config.GUI {
+ ss.RunGUI()
+ } else {
+ ss.RunNoGUI()
}
}
@@ -126,27 +158,27 @@ func (ss *Sim) ConfigEnv() {
trn = &FSAEnv{}
tst = &FSAEnv{}
} else {
- trn = ss.Envs.ByModeDi(etime.Train, di).(*FSAEnv)
- tst = ss.Envs.ByModeDi(etime.Test, di).(*FSAEnv)
+ trn = ss.Envs.ByModeDi(Train, di).(*FSAEnv)
+ tst = ss.Envs.ByModeDi(Test, di).(*FSAEnv)
}
// note: names must be standard here!
- trn.Name = env.ModeDi(etime.Train, di)
+ trn.Name = env.ModeDi(Train, di)
trn.Seq.Max = 25 // 25 sequences per epoch training
trn.RandSeed = 73 + int64(di)*73
trn.TMatReber()
- if ss.Config.Env.Env != nil {
- params.ApplyMap(trn, ss.Config.Env.Env, ss.Config.Debug)
- }
+ // if ss.Config.Env.Env != nil {
+ // params.ApplyMap(trn, ss.Config.Env.Env, ss.Config.Debug)
+ // }
trn.Validate()
- tst.Name = env.ModeDi(etime.Test, di)
+ tst.Name = env.ModeDi(Test, di)
tst.Seq.Max = 10
tst.RandSeed = 181 + int64(di)*181
tst.TMatReber() // todo: random
- if ss.Config.Env.Env != nil {
- params.ApplyMap(tst, ss.Config.Env.Env, ss.Config.Debug)
- }
+ // if ss.Config.Env.Env != nil {
+ // params.ApplyMap(tst, ss.Config.Env.Env, ss.Config.Debug)
+ // }
tst.Validate()
trn.Init(0)
@@ -158,21 +190,20 @@ func (ss *Sim) ConfigEnv() {
}
func (ss *Sim) ConfigNet(net *axon.Network) {
- ctx := &ss.Context
- net.SetMaxData(ctx, ss.Config.Run.NData)
+ net.SetMaxData(ss.Config.Run.NData)
net.SetRandSeed(ss.RandSeeds[0]) // init new separate random seed, using run = 0
- full := paths.NewFull()
- full.SelfCon = true // unclear if this makes a diff for self cons at all
- // one2one := paths.NewOneToOne()
- // _ = one2one
-
in, inp := net.AddInputPulv4D("Input", 1, 7, ss.Config.Env.UnitsPer, 1, 2)
trg := net.AddLayer2D("Targets", axon.InputLayer, 1, 7) // just for visualization
in.AddClass("InLay")
inp.AddClass("InLay")
trg.AddClass("InLay")
+ full := paths.NewFull()
+ full.SelfCon = true // unclear if this makes a diff for self cons at all
+ // one2one := paths.NewOneToOne()
+ // _ = one2one
+
hid, hidct := net.AddSuperCT2D("Hidden", "", 10, 10, 2, full)
// full > one2one -- one2one weights go to 0 -- this is key for more posterior-cortical CT
// hidct.Shape.SetShape([]int{10, 20}, nil, nil) // 200 == 500 == 1000 >> 100 here!
@@ -193,39 +224,33 @@ func (ss *Sim) ConfigNet(net *axon.Network) {
inp.PlaceBehind(in, 2)
trg.PlaceBehind(inp, 2)
- net.Build(ctx)
+ net.Build()
net.Defaults()
net.SetNThreads(ss.Config.Run.NThreads)
ss.ApplyParams()
- net.InitWeights(ctx)
+ net.InitWeights()
}
func (ss *Sim) ApplyParams() {
- ss.Params.SetAll() // first hard-coded defaults
- if ss.Config.Params.Network != nil {
- ss.Params.SetNetworkMap(ss.Net, ss.Config.Params.Network)
- }
+ ss.Params.ApplyAll(ss.Net)
}
-////////////////////////////////////////////////////////////////////////////////
-// Init, utils
+//////// Init, utils
// Init restarts the run, and initializes everything, including network weights
// and resets the epoch log table
func (ss *Sim) Init() {
- if ss.Config.GUI {
- ss.Stats.SetString("RunName", ss.Params.RunName(0)) // in case user interactively changes tag
- }
ss.Loops.ResetCounters()
+ ss.SetRunName()
ss.InitRandSeed(0)
- ss.ConfigEnv() // re-config env just in case a different set of patterns was
+ // ss.ConfigEnv() // re-config env just in case a different set of patterns was
// selected or patterns have been modified etc
ss.GUI.StopNow = false
ss.ApplyParams()
- ss.Net.GPU.SyncParamsToGPU()
+ ss.InitStats()
ss.NewRun()
- ss.ViewUpdate.RecordSyns()
- ss.ViewUpdate.Update()
+ ss.TrainUpdate.RecordSyns()
+ ss.TrainUpdate.Update(Train, Trial)
}
// InitRandSeed initializes the random seed based on current training run number
@@ -234,128 +259,106 @@ func (ss *Sim) InitRandSeed(run int) {
ss.RandSeeds.Set(run, &ss.Net.Rand)
}
+// CurrentMode returns the current Train / Test mode from Context.
+func (ss *Sim) CurrentMode() Modes {
+ ctx := ss.Net.Context()
+ var md Modes
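+ // ctx.Mode is stored as a plain int32, so convert through the enums interface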
+ md.SetInt64(int64(ctx.Mode))
+ return md
+}
+
+// NetViewUpdater returns the NetViewUpdate for given mode.
+func (ss *Sim) NetViewUpdater(mode enums.Enum) *axon.NetViewUpdate {
+ if mode.Int64() == Train.Int64() {
+ return &ss.TrainUpdate
+ }
+ return &ss.TestUpdate
+}
+
// ConfigLoops configures the control loops: Training, Testing
func (ss *Sim) ConfigLoops() {
- man := looper.NewManager()
-
- ncyc := ss.Config.Run.NCycles
- nplus := ss.Config.Run.NPlusCycles
- trls := int(math32.IntMultipleGE(float32(ss.Config.Run.NTrials), float32(ss.Config.Run.NData)))
-
- man.AddStack(etime.Train).
- AddTime(etime.Run, ss.Config.Run.NRuns).
- AddTime(etime.Epoch, ss.Config.Run.NEpochs).
- AddTimeIncr(etime.Trial, trls, ss.Config.Run.NData).
- AddTime(etime.Cycle, ncyc)
-
- man.AddStack(etime.Test).
- AddTime(etime.Epoch, 1).
- AddTimeIncr(etime.Trial, trls, ss.Config.Run.NData).
- AddTime(etime.Cycle, ncyc)
-
- axon.LooperStdPhases(man, &ss.Context, ss.Net, ncyc-nplus, ncyc-1)
- axon.LooperSimCycleAndLearn(man, ss.Net, &ss.Context, &ss.ViewUpdate) // std algo code
-
- for m, _ := range man.Stacks {
- stack := man.Stacks[m]
- stack.Loops[etime.Trial].OnStart.Add("ApplyInputs", func() {
- ss.ApplyInputs()
- })
- }
+ ls := looper.NewStacks()
- man.GetLoop(etime.Train, etime.Run).OnStart.Add("NewRun", ss.NewRun)
+ trials := int(math32.IntMultipleGE(float32(ss.Config.Run.Trials), float32(ss.Config.Run.NData)))
+ cycles := ss.Config.Run.Cycles
+ plusPhase := ss.Config.Run.PlusCycles
- // Add Testing
- trainEpoch := man.GetLoop(etime.Train, etime.Epoch)
- trainEpoch.OnStart.Add("TestAtInterval", func() {
- if (ss.Config.Run.TestInterval > 0) && ((trainEpoch.Counter.Cur+1)%ss.Config.Run.TestInterval == 0) {
- // Note the +1 so that it doesn't occur at the 0th timestep.
- ss.TestAll()
- }
- })
+ ls.AddStack(Train, Trial).
+ AddLevel(Run, ss.Config.Run.Runs).
+ AddLevel(Epoch, ss.Config.Run.Epochs).
+ AddLevelIncr(Trial, trials, ss.Config.Run.NData).
+ AddLevel(Cycle, cycles)
- /////////////////////////////////////////////
- // Logging
+ ls.AddStack(Test, Trial).
+ AddLevel(Epoch, 1).
+ AddLevelIncr(Trial, trials, ss.Config.Run.NData).
+ AddLevel(Cycle, cycles)
- man.GetLoop(etime.Test, etime.Epoch).OnEnd.Add("LogTestErrors", func() {
- axon.LogTestErrors(&ss.Logs)
- })
- man.GetLoop(etime.Train, etime.Epoch).OnEnd.Add("PCAStats", func() {
- trnEpc := man.Stacks[etime.Train].Loops[etime.Epoch].Counter.Cur
- if ss.Config.Run.PCAInterval > 0 && trnEpc%ss.Config.Run.PCAInterval == 0 {
- axon.PCAStats(ss.Net, &ss.Logs, &ss.Stats)
- ss.Logs.ResetLog(etime.Analyze, etime.Trial)
- }
+ axon.LooperStandard(ls, ss.Net, ss.NetViewUpdater, 50, cycles-plusPhase, cycles-1, Cycle, Trial, Train)
+
+ ls.Stacks[Train].OnInit.Add("Init", func() { ss.Init() })
+
+ ls.AddOnStartToLoop(Trial, "ApplyInputs", func(mode enums.Enum) {
+ ss.ApplyInputs(mode.(Modes))
})
- man.AddOnEndToAll("Log", ss.Log)
- axon.LooperResetLogBelow(man, &ss.Logs)
+ ls.Loop(Train, Run).OnStart.Add("NewRun", ss.NewRun)
- man.GetLoop(etime.Train, etime.Trial).OnEnd.Add("LogAnalyze", func() {
- trnEpc := man.Stacks[etime.Train].Loops[etime.Epoch].Counter.Cur
- if (ss.Config.Run.PCAInterval > 0) && (trnEpc%ss.Config.Run.PCAInterval == 0) {
- ss.Log(etime.Analyze, etime.Trial)
+ trainEpoch := ls.Loop(Train, Epoch)
+ trainEpoch.IsDone.AddBool("NZeroStop", func() bool {
+ stopNz := ss.Config.Run.NZero
+ if stopNz <= 0 {
+ return false
}
+ curModeDir := ss.Current.RecycleDir(Train.String())
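+ // Float1D(-1) indexes from the end, returning the most recent value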
+ curNZero := int(curModeDir.Value("NZero").Float1D(-1))
+ return curNZero >= stopNz
})
- man.GetLoop(etime.Train, etime.Run).OnEnd.Add("RunStats", func() {
- ss.Logs.RunStats("PctCor", "FirstZero", "LastZero")
+ trainEpoch.OnStart.Add("TestAtInterval", func() {
+ if (ss.Config.Run.TestInterval > 0) && ((trainEpoch.Counter.Cur+1)%ss.Config.Run.TestInterval == 0) {
+ ss.TestAll()
+ }
})
- // Save weights to file, to look at later
- man.GetLoop(etime.Train, etime.Run).OnEnd.Add("SaveWeights", func() {
- ctrString := ss.Stats.PrintValues([]string{"Run", "Epoch"}, []string{"%03d", "%05d"}, "_")
- axon.SaveWeightsIfConfigSet(ss.Net, ss.Config.Log.SaveWeights, ctrString, ss.Stats.String("RunName"))
+ ls.AddOnStartToAll("StatsStart", ss.StatsStart)
+ ls.AddOnEndToAll("StatsStep", ss.StatsStep)
+
+ ls.Loop(Train, Run).OnEnd.Add("SaveWeights", func() {
+ ctrString := fmt.Sprintf("%03d_%05d", ls.Loop(Train, Run).Counter.Cur, ls.Loop(Train, Epoch).Counter.Cur)
+ axon.SaveWeightsIfConfigSet(ss.Net, ss.Config.Log.SaveWeights, ctrString, ss.RunName())
})
- // // lrate schedule
- // man.GetLoop(etime.Train, etime.Epoch).OnEnd.Add("LRateSched", func() {
- // trnEpc := ss.Loops.Stacks[etime.Train].Loops[etime.Epoch].Counter.Cur
- // switch trnEpc {
- // case 40:
- // // mpi.Printf("learning rate drop at: %d\n", trnEpc)
- // // ss.Net.LRateSched(0.2) // 0.2
- // case 60:
- // // mpi.Printf("learning rate drop at: %d\n", trnEpc)
- // // ss.Net.LRateSched(0.1) // 0.1
- // }
- // })
-
- ////////////////////////////////////////////
- // GUI
-
- if !ss.Config.GUI {
- if ss.Config.Log.NetData {
- man.GetLoop(etime.Test, etime.Trial).Main.Add("NetDataRecord", func() {
- ss.GUI.NetDataRecord(ss.ViewUpdate.Text)
- })
- }
- } else {
- axon.LooperUpdateNetView(man, &ss.ViewUpdate, ss.Net, ss.NetViewCounters)
- axon.LooperUpdatePlots(man, &ss.GUI)
+ if ss.Config.GUI {
+ axon.LooperUpdateNetView(ls, Cycle, Trial, ss.NetViewUpdater, ss.StatCounters)
+
+ ls.Stacks[Train].OnInit.Add("GUI-Init", func() { ss.GUI.UpdateWindow() })
+ ls.Stacks[Test].OnInit.Add("GUI-Init", func() { ss.GUI.UpdateWindow() })
}
if ss.Config.Debug {
- mpi.Println(man.DocString())
+ mpi.Println(ls.DocString())
}
- ss.Loops = man
+ ss.Loops = ls
}
-// ApplyInputs applies input patterns from given envirbonment.
-// It is good practice to have this be a separate method with appropriate
-// args so that it can be used for various different contexts
-// (training, testing, etc).
-func (ss *Sim) ApplyInputs() {
+// ApplyInputs applies input patterns from given environment for given mode.
+// Any other start-of-trial logic can also be put here.
+func (ss *Sim) ApplyInputs(mode Modes) {
net := ss.Net
- ctx := &ss.Context
+ ctx := ss.Net.Context()
+ ndata := int(ctx.NData)
in := net.LayerByName("Input")
trg := net.LayerByName("Targets")
clrmsk, setmsk, _ := in.ApplyExtFlags()
+ curModeDir := ss.Current.RecycleDir(mode.String())
- net.InitExt(ctx)
+ net.InitExt()
for di := uint32(0); di < ctx.NData; di++ {
- fsenv := ss.Envs.ByModeDi(ctx.Mode, int(di)).(*FSAEnv)
+ fsenv := ss.Envs.ByModeDi(mode, int(di)).(*FSAEnv)
fsenv.Step()
ns := fsenv.NNext.Values[0]
for i := 0; i < ns; i++ {
@@ -368,296 +371,351 @@ func (ss *Sim) ApplyInputs() {
if i == 0 {
for yi := 0; yi < ss.Config.Env.UnitsPer; yi++ {
idx := li*ss.Config.Env.UnitsPer + yi
- in.ApplyExtValue(ctx, uint32(idx), di, 1, clrmsk, setmsk, false)
+ in.ApplyExtValue(uint32(idx), di, 1, clrmsk, setmsk, false)
}
}
- trg.ApplyExtValue(ctx, uint32(li), di, 1, clrmsk, setmsk, false)
+ trg.ApplyExtValue(uint32(li), di, 1, clrmsk, setmsk, false)
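+ // record TrialName per data index, used by stats and netview counters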
+ curModeDir.StringValue("TrialName", ndata).SetString1D(fsenv.String(), int(di))
}
}
- ss.Net.ApplyExts(ctx)
+ ss.Net.ApplyExts()
}
-// NewRun intializes a new run of the model, using the TrainEnv.Run counter
-// for the new run value
+// NewRun initializes a new Run level of the model.
func (ss *Sim) NewRun() {
- ctx := &ss.Context
- ss.InitRandSeed(ss.Loops.GetLoop(etime.Train, etime.Run).Counter.Cur)
+ ctx := ss.Net.Context()
+ ss.InitRandSeed(ss.Loops.Loop(Train, Run).Counter.Cur)
for di := 0; di < int(ctx.NData); di++ {
- ss.Envs.ByModeDi(etime.Train, di).Init(0)
- ss.Envs.ByModeDi(etime.Test, di).Init(0)
+ ss.Envs.ByModeDi(Train, di).Init(0)
+ ss.Envs.ByModeDi(Test, di).Init(0)
}
ctx.Reset()
- ctx.Mode = etime.Train
- ss.Net.InitWeights(ctx)
- ss.InitStats()
- ss.StatCounters(0)
- ss.Logs.ResetLog(etime.Train, etime.Epoch)
- ss.Logs.ResetLog(etime.Test, etime.Epoch)
+ ss.Net.InitWeights()
+ if ss.Config.Run.StartWts != "" { // this is just for testing -- not usually needed
+ ss.Net.OpenWeightsJSON(core.Filename(ss.Config.Run.StartWts))
+ mpi.Printf("Starting with initial weights from: %s\n", ss.Config.Run.StartWts)
+ }
}
// TestAll runs through the full set of testing items
func (ss *Sim) TestAll() {
- ss.Envs.ByMode(etime.Test).Init(0)
- ss.Loops.ResetAndRun(etime.Test)
- ss.Loops.Mode = etime.Train // Important to reset Mode back to Train because this is called from within the Train Run.
+ ctx := ss.Net.Context()
+ for di := 0; di < int(ctx.NData); di++ {
+ ss.Envs.ByModeDi(Test, di).Init(0)
+ }
+ ss.Loops.ResetAndRun(Test)
+ ss.Loops.Mode = Train // reset back to Train: this is called from within Train Run.
}
-// InitStats initializes all the statistics.
-// called at start of new run
-func (ss *Sim) InitStats() {
- // clear rest just to make Sim look initialized
- ss.Stats.SetFloat("UnitErr", 0.0)
- ss.Stats.SetFloat("PhaseDiff", 0.0)
- ss.Stats.SetInt("Output", 0)
- ss.Logs.InitErrStats() // inits TrlErr, FirstZero, LastZero, NZero
-}
+//////// Stats
-// StatCounters saves current counters to Stats, so they are available for logging etc
-// Also saves a string rep of them for ViewUpdate.Text
-func (ss *Sim) StatCounters(di int) {
- ctx := &ss.Context
- mode := ss.Context.Mode
- ss.Loops.Stacks[mode].CountersToStats(&ss.Stats)
- // always use training epoch..
- trnEpc := ss.Loops.Stacks[etime.Train].Loops[etime.Epoch].Counter.Cur
- ss.Stats.SetInt("Epoch", trnEpc)
- trl := ss.Stats.Int("Trial")
- ss.Stats.SetInt("Trial", trl+di)
- ss.Stats.SetInt("Di", di)
- ss.Stats.SetInt("Cycle", int(ctx.Cycle))
- ev := ss.Envs.ByModeDi(ctx.Mode, int(di)).(*FSAEnv)
- ss.Stats.SetString("TrialName", ev.String())
+// AddStat adds a stat compute function.
+func (ss *Sim) AddStat(f func(mode Modes, level Levels, phase StatsPhase)) {
+ ss.StatFuncs = append(ss.StatFuncs, f)
}
-func (ss *Sim) NetViewCounters(tm etime.Times) {
- if ss.ViewUpdate.View == nil {
+// StatsStart is called by Looper at the start of given level, for each iteration.
+// It needs to call RunStats Start at the next level down.
+// e.g., each Epoch is the start of the full set of Trial Steps.
+func (ss *Sim) StatsStart(lmd, ltm enums.Enum) {
+ mode := lmd.(Modes)
+ level := ltm.(Levels)
+ if level <= Trial {
return
}
- di := ss.ViewUpdate.View.Di
- if tm == etime.Trial {
- ss.TrialStats(di) // get trial stats for current di
+ ss.RunStats(mode, level-1, Start)
+}
+
+// StatsStep is called by Looper at each step of iteration,
+// where it accumulates the stat results.
+func (ss *Sim) StatsStep(lmd, ltm enums.Enum) {
+ mode := lmd.(Modes)
+ level := ltm.(Levels)
+ if level == Cycle {
+ return
}
- ss.StatCounters(di)
- ss.ViewUpdate.Text = ss.Stats.Print([]string{"Run", "Epoch", "Trial", "Di", "Cycle", "TrialName", "Output", "TrlErr", "PhaseDiff"})
+ ss.RunStats(mode, level, Step)
+ tensorfs.DirTable(axon.StatsNode(ss.Stats, mode, level), nil).WriteToLog()
}
-// TrialStats computes the trial-level statistics.
-// Aggregation is done directly from log data.
-func (ss *Sim) TrialStats(di int) {
- ctx := &ss.Context
- inp := ss.Net.LayerByName("InputP")
- trg := ss.Net.LayerByName("Targets")
-
- ss.Stats.SetFloat("PhaseDiff", float64(inp.Values[di].PhaseDiff.Cor))
- _, minusIndexes, _ := inp.LocalistErr4D(ctx)
- minusIndex := minusIndexes[di]
- trgExt := axon.Neurons[trg.NeurStIndex+uint32(minusIndex), di, axon.Ext]
- err := true
- if trgExt > 0.5 {
- err = false
+// RunStats runs the StatFuncs for given mode, level and phase.
+func (ss *Sim) RunStats(mode Modes, level Levels, phase StatsPhase) {
+ for _, sf := range ss.StatFuncs {
+ sf(mode, level, phase)
}
- ss.Stats.SetInt("Output", minusIndex)
- ss.Stats.SetFloat("UnitErr", inp.PctUnitErr(ctx)[di])
- if err {
- ss.Stats.SetFloat("TrlErr", 1)
- } else {
- ss.Stats.SetFloat("TrlErr", 0)
+ if phase == Step && ss.GUI.Tabs != nil {
+ nm := mode.String() + "/" + level.String() + " Plot"
+ ss.GUI.Tabs.GoUpdatePlot(nm)
}
}
-// ////////////////////////////////////////////////////////////////////////////
-//
-// Logging
-func (ss *Sim) ConfigLogs() {
- ss.Stats.SetString("RunName", ss.Params.RunName(0)) // used for naming logs, stats, etc
+// SetRunName sets the overall run name, used for naming output logs and weight files
+// based on params extra sheets and tag, and starting run number (for distributed runs).
+func (ss *Sim) SetRunName() string {
+ runName := ss.Params.RunName(ss.Config.Run.Run)
+ ss.Current.StringValue("RunName", 1).SetString1D(runName, 0)
+ return runName
+}
- ss.Logs.AddCounterItems(etime.Run, etime.Epoch, etime.Trial, etime.Cycle)
- ss.Logs.AddStatIntNoAggItem(etime.AllModes, etime.Trial, "Di")
- ss.Logs.AddStatStringItem(etime.AllModes, etime.AllTimes, "RunName")
- ss.Logs.AddStatStringItem(etime.AllModes, etime.Trial, "TrialName")
+// RunName returns the overall run name, used for naming output logs and weight files
+// based on params extra sheets and tag, and starting run number (for distributed runs).
+func (ss *Sim) RunName() string {
+ return ss.Current.StringValue("RunName", 1).String1D(0)
+}
- ss.Logs.AddStatAggItem("PhaseDiff", etime.Run, etime.Epoch, etime.Trial)
- ss.Logs.AddStatAggItem("UnitErr", etime.Run, etime.Epoch, etime.Trial)
- ss.Logs.AddErrStatAggItems("TrlErr", etime.Run, etime.Epoch, etime.Trial)
+// InitStats initializes all the stats by calling Start across all modes and levels.
+func (ss *Sim) InitStats() {
+ for md, st := range ss.Loops.Stacks {
+ mode := md.(Modes)
+ for _, lev := range st.Order {
+ level := lev.(Levels)
+ if level == Cycle {
+ continue
+ }
+ ss.RunStats(mode, level, Start)
+ }
+ }
+ if ss.GUI.Tabs != nil {
+ _, idx := ss.GUI.Tabs.CurrentTab()
+ ss.GUI.Tabs.PlotTensorFS(axon.StatsNode(ss.Stats, Train, Epoch))
+ ss.GUI.Tabs.PlotTensorFS(axon.StatsNode(ss.Stats, Train, Run))
+ ss.GUI.Tabs.PlotTensorFS(axon.StatsNode(ss.Stats, Test, Trial))
+ ss.GUI.Tabs.SelectTabIndex(idx)
+ }
+}
- ss.Logs.AddCopyFromFloatItems(etime.Train, []etime.Times{etime.Epoch, etime.Run}, etime.Test, etime.Epoch, "Tst", "PhaseDiff", "UnitErr", "PctCor", "PctErr")
+// ConfigStats configures functions to do all stats computation
+// in the tensorfs system.
+func (ss *Sim) ConfigStats() {
+ net := ss.Net
+ ss.Stats, _ = ss.Root.Mkdir("Stats")
+ ss.Current, _ = ss.Stats.Mkdir("Current")
- axon.LogAddPulvPhaseDiffItems(&ss.Logs, ss.Net, etime.Train, etime.Run, etime.Epoch, etime.Trial)
+ ss.SetRunName()
- ss.Logs.AddPerTrlMSec("PerTrlMSec", etime.Run, etime.Epoch, etime.Trial)
+ // last arg(s) are levels to exclude
+ counterFunc := axon.StatLoopCounters(ss.Stats, ss.Current, ss.Loops, net, Trial, Cycle)
+ ss.AddStat(func(mode Modes, level Levels, phase StatsPhase) {
+ counterFunc(mode, level, phase == Start)
+ })
+ runNameFunc := axon.StatRunName(ss.Stats, ss.Current, ss.Loops, net, Trial, Cycle)
+ ss.AddStat(func(mode Modes, level Levels, phase StatsPhase) {
+ runNameFunc(mode, level, phase == Start)
+ })
- ss.ConfigLogItems()
+ ss.AddStat(func(mode Modes, level Levels, phase StatsPhase) {
+ if level != Trial {
+ return
+ }
+ name := "TrialName"
+ modeDir := ss.Stats.RecycleDir(mode.String())
+ curModeDir := ss.Current.RecycleDir(mode.String())
+ levelDir := modeDir.RecycleDir(level.String())
+ tsr := levelDir.StringValue(name)
+ ndata := int(ss.Net.Context().NData)
+ if phase == Start {
+ tsr.SetNumRows(0)
+ if ps := plot.GetStylersFrom(tsr); ps == nil {
+ ps.Add(func(s *plot.Style) {
+ s.On = false
+ })
+ plot.SetStylersTo(tsr, ps)
+ }
+ return
+ }
+ for di := range ndata {
+ // saved in apply inputs
+ trlNm := curModeDir.StringValue(name, ndata).String1D(di)
+ tsr.AppendRowString(trlNm)
+ }
+ })
- layers := ss.Net.LayersByType(axon.SuperLayer, axon.CTLayer, axon.TargetLayer)
- axon.LogAddDiagnosticItems(&ss.Logs, layers, etime.Train, etime.Epoch, etime.Trial)
- axon.LogInputLayer(&ss.Logs, ss.Net, etime.Train)
+ // up to a point, it is good to use loops over stats in one function,
+ // to reduce repetition of boilerplate.
+ statNames := []string{"CorSim", "UnitErr", "Err", "NZero", "FirstZero", "LastZero"}
+ ss.AddStat(func(mode Modes, level Levels, phase StatsPhase) {
+ for _, name := range statNames {
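+ // NZero, FirstZero, and LastZero only apply in Train mode above the
+ // Trial level; return (not continue) skips all of them at once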
+ if name == "NZero" && (mode != Train || level == Trial) {
+ return
+ }
+ modeDir := ss.Stats.RecycleDir(mode.String())
+ curModeDir := ss.Current.RecycleDir(mode.String())
+ levelDir := modeDir.RecycleDir(level.String())
+ subDir := modeDir.RecycleDir((level - 1).String()) // note: will fail for Cycle
+ tsr := levelDir.Float64(name)
+ ndata := int(ss.Net.Context().NData)
+ var stat float64
+ if phase == Start {
+ tsr.SetNumRows(0)
+ if ps := plot.GetStylersFrom(tsr); ps == nil {
+ ps.Add(func(s *plot.Style) {
+ s.Range.SetMin(0).SetMax(1)
+ s.On = true
+ switch name {
+ case "NZero":
+ s.On = false
+ case "FirstZero", "LastZero":
+ if level < Run {
+ s.On = false
+ }
+ }
+ })
+ plot.SetStylersTo(tsr, ps)
+ }
+ switch name {
+ case "NZero":
+ if level == Epoch {
+ curModeDir.Float64(name, 1).SetFloat1D(0, 0)
+ }
+ case "FirstZero", "LastZero":
+ if level == Epoch {
+ curModeDir.Float64(name, 1).SetFloat1D(-1, 0)
+ }
+ }
+ continue
+ }
+ switch level {
+ case Trial:
+ out := ss.Net.LayerByName("InputP")
+ for di := range ndata {
+ var stat float64
+ switch name {
+ case "CorSim":
+ stat = 1.0 - float64(axon.LayerStates.Value(int(out.Index), int(di), int(axon.LayerPhaseDiff)))
+ case "UnitErr":
+ stat = out.PctUnitErr(ss.Net.Context())[di]
+ case "Err":
+ uniterr := curModeDir.Float64("UnitErr", ndata).Float1D(di)
+ stat = 1.0
+ if uniterr == 0 {
+ stat = 0
+ }
+ }
+ curModeDir.Float64(name, ndata).SetFloat1D(stat, di)
+ tsr.AppendRowFloat(stat)
+ }
+ case Epoch:
+ nz := curModeDir.Float64("NZero", 1).Float1D(0)
+ switch name {
+ case "NZero":
+ err := stats.StatSum.Call(subDir.Value("Err")).Float1D(0)
+ stat = curModeDir.Float64(name, 1).Float1D(0)
+ if err == 0 {
+ stat++
+ } else {
+ stat = 0
+ }
+ curModeDir.Float64(name, 1).SetFloat1D(stat, 0)
+ case "FirstZero":
+ stat = curModeDir.Float64(name, 1).Float1D(0)
+ if stat < 0 && nz == 1 {
+ stat = curModeDir.Int("Epoch", 1).Float1D(0)
+ }
+ curModeDir.Float64(name, 1).SetFloat1D(stat, 0)
+ case "LastZero":
+ stat = curModeDir.Float64(name, 1).Float1D(0)
+ if stat < 0 && nz >= float64(ss.Config.Run.NZero) {
+ stat = curModeDir.Int("Epoch", 1).Float1D(0)
+ }
+ curModeDir.Float64(name, 1).SetFloat1D(stat, 0)
+ default:
+ stat = stats.StatMean.Call(subDir.Value(name)).Float1D(0)
+ }
+ tsr.AppendRowFloat(stat)
+ case Run:
+ switch name {
+ case "NZero", "FirstZero", "LastZero":
+ stat = subDir.Value(name).Float1D(-1)
+ default:
+ stat = stats.StatMean.Call(subDir.Value(name)).Float1D(0)
+ }
+ tsr.AppendRowFloat(stat)
+ }
+ }
+ })
- axon.LogAddPCAItems(&ss.Logs, ss.Net, etime.Train, etime.Run, etime.Epoch, etime.Trial)
+ perTrlFunc := axon.StatPerTrialMSec(ss.Stats, "Err", Train, Trial)
+ ss.AddStat(func(mode Modes, level Levels, phase StatsPhase) {
+ perTrlFunc(mode, level, phase == Start)
+ })
- ss.Logs.AddLayerTensorItems(ss.Net, "Act", etime.Test, etime.Trial, "InputLayer", "TargetLayer")
+ lays := net.LayersByType(axon.SuperLayer, axon.CTLayer, axon.TargetLayer)
+ actGeFunc := axon.StatLayerActGe(ss.Stats, net, Train, Trial, lays...)
+ ss.AddStat(func(mode Modes, level Levels, phase StatsPhase) {
+ actGeFunc(mode, level, phase == Start)
+ })
- ss.Logs.PlotItems("PhaseDiff", "PctErr")
+ pcaFunc := axon.StatPCA(ss.Stats, ss.Current, net, ss.Config.Run.PCAInterval, Train, Trial, lays...)
+ ss.AddStat(func(mode Modes, level Levels, phase StatsPhase) {
+ trnEpc := ss.Loops.Loop(Train, Epoch).Counter.Cur
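+ // the current train epoch gates PCA to run only every PCAInterval epochs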
+ pcaFunc(mode, level, phase == Start, trnEpc)
+ })
- ss.Logs.CreateTables()
- ss.Logs.SetContext(&ss.Stats, ss.Net)
- // don't plot certain combinations we don't use
- ss.Logs.NoPlot(etime.Train, etime.Cycle)
- ss.Logs.NoPlot(etime.Test, etime.Run)
- // note: Analyze not plotted by default
- ss.Logs.SetMeta(etime.Train, etime.Run, "LegendCol", "RunName")
+ stateFunc := axon.StatLayerState(ss.Stats, net, Test, Trial, true, "ActM", "Input", "InputP")
+ ss.AddStat(func(mode Modes, level Levels, phase StatsPhase) {
+ stateFunc(mode, level, phase == Start)
+ })
}
-func (ss *Sim) ConfigLogItems() {
- layers := ss.Net.LayersByType(axon.SuperLayer, axon.TargetLayer, axon.CTLayer, axon.PulvinarLayer)
- for _, lnm := range layers {
- clnm := lnm
- ly := ss.Net.LayerByName(clnm)
- ss.Logs.AddItem(&elog.Item{
- Name: clnm + "_AvgCaDiff",
- Type: reflect.Float64,
- Range: minmax.F32{Max: 1},
- Write: elog.WriteMap{
- etime.Scope(etime.Train, etime.Trial): func(ctx *elog.Context) {
- tsr := ctx.GetLayerSampleTensor(clnm, "CaDiff")
- avg := stats.MeanTensor(tsr)
- ctx.SetFloat64(avg)
- }, etime.Scope(etime.Train, etime.Epoch): func(ctx *elog.Context) {
- ctx.SetAgg(ctx.Mode, etime.Trial, stats.Mean)
- }}})
- ss.Logs.AddItem(&elog.Item{
- Name: clnm + "_Gnmda",
- Type: reflect.Float64,
- Range: minmax.F32{Max: 1},
- FixMin: true,
- Write: elog.WriteMap{
- etime.Scope(etime.Train, etime.Trial): func(ctx *elog.Context) {
- tsr := ctx.GetLayerSampleTensor(clnm, "Gnmda")
- avg := stats.MeanTensor(tsr)
- ctx.SetFloat64(avg)
- }, etime.Scope(etime.Train, etime.Epoch): func(ctx *elog.Context) {
- ctx.SetAgg(ctx.Mode, etime.Trial, stats.Mean)
- }}})
- ss.Logs.AddItem(&elog.Item{
- Name: clnm + "_GgabaB",
- Type: reflect.Float64,
- Range: minmax.F32{Max: 1},
- FixMin: true,
- Write: elog.WriteMap{
- etime.Scope(etime.Train, etime.Trial): func(ctx *elog.Context) {
- tsr := ctx.GetLayerSampleTensor(clnm, "GgabaB")
- avg := stats.MeanTensor(tsr)
- ctx.SetFloat64(avg)
- }, etime.Scope(etime.Train, etime.Epoch): func(ctx *elog.Context) {
- ctx.SetAgg(ctx.Mode, etime.Trial, stats.Mean)
- }}})
- ss.Logs.AddItem(&elog.Item{
- Name: clnm + "_SSGi",
- Type: reflect.Float64,
- Range: minmax.F32{Max: 1},
- FixMin: true,
- Write: elog.WriteMap{
- etime.Scope(etime.Train, etime.Trial): func(ctx *elog.Context) {
- ctx.SetFloat32(ly.Pools[0].Inhib.SSGi)
- }, etime.Scope(etime.Train, etime.Epoch): func(ctx *elog.Context) {
- ctx.SetAgg(ctx.Mode, etime.Trial, stats.Mean)
- }}})
+// StatCounters returns counters string to show at bottom of netview.
+func (ss *Sim) StatCounters(mode, level enums.Enum) string {
+ counters := ss.Loops.Stacks[mode].CountersString()
+ vu := ss.NetViewUpdater(mode)
+ if vu == nil || vu.View == nil {
+ return counters
}
-}
-
-// Log is the main logging function, handles special things for different scopes
-func (ss *Sim) Log(mode etime.Modes, time etime.Times) {
- if mode != etime.Analyze {
- ss.Context.Mode = mode // Also set specifically in a Loop callback.
+ di := vu.View.Di
+ counters += fmt.Sprintf(" Di: %d", di)
+ curModeDir := ss.Current.RecycleDir(mode.String())
+ if curModeDir.Node("TrialName") == nil {
+ return counters
}
- dt := ss.Logs.Table(mode, time)
- row := dt.Rows
-
- switch {
- case time == etime.Cycle:
- return
- case time == etime.Trial:
- for di := 0; di < ss.Config.Run.NData; di++ {
- ss.TrialStats(di)
- ss.StatCounters(di)
- ss.Logs.LogRowDi(mode, time, row, di)
- }
- return // don't do reg
+ counters += fmt.Sprintf(" TrialName: %s", curModeDir.StringValue("TrialName").String1D(di))
+ statNames := []string{"CorSim", "UnitErr", "Err"}
+ if level == Cycle || curModeDir.Node(statNames[0]) == nil {
+ return counters
}
-
- ss.Logs.LogRow(mode, time, row) // also logs to file, etc
-}
-
-////////////////////////////////////////////////////////////////////////////////////////////
-// Gui
-
-func (ss *Sim) ConfigNetView(nv *netview.NetView) {
- // nv.ViewDefaults()
- // nv.Scene().Camera.Pose.Pos.Set(0, 1.5, 3.0) // more "head on" than default which is more "top down"
- // nv.Scene().Camera.LookAt(math32.Vec3(0, 0, 0), math32.Vec3(0, 1, 0))
-
- nv.ConfigLabels(ss.Config.Env.InputNames)
-
- ly := nv.LayerByName("Targets")
- for li, lnm := range ss.Config.Env.InputNames {
- lbl := nv.LabelByName(lnm)
- lbl.Pose = ly.Pose
- lbl.Pose.Pos.Y += .2
- lbl.Pose.Pos.Z += .02
- lbl.Pose.Pos.X += 0.05 + float32(li)*.06
- lbl.Pose.Scale.SetMul(math32.Vec3(0.6, 0.4, 0.5))
+ for _, name := range statNames {
+ counters += fmt.Sprintf(" %s: %.4g", name, curModeDir.Float64(name).Float1D(di))
}
+ return counters
}
+//////// GUI
+
// ConfigGUI configures the Cogent Core GUI interface for this simulation.
func (ss *Sim) ConfigGUI() {
- title := "DeepAxon Finite State Automaton"
- ss.GUI.MakeBody(ss, "DeepFSA", title, `This demonstrates a basic DeepAxon model on the Finite State Automaton problem (e.g., the Reber grammar). See emergent on GitHub.
`)
+ title := "Axon Random Associator"
+ ss.GUI.MakeBody(ss, "ra25", title, `This demonstrates a basic Axon model. See emergent on GitHub.`)
+ ss.GUI.FS = ss.Root
+ ss.GUI.DataRoot = "Root"
ss.GUI.CycleUpdateInterval = 10
nv := ss.GUI.AddNetView("Network")
nv.Options.MaxRecs = 300
nv.SetNet(ss.Net)
- ss.ViewUpdate.Config(nv, etime.Phase, etime.Phase)
- ss.ConfigNetView(nv)
- ss.GUI.ViewUpdate = &ss.ViewUpdate
+ ss.TrainUpdate.Config(nv, axon.Phase, ss.StatCounters)
+ ss.TestUpdate.Config(nv, axon.Phase, ss.StatCounters)
+ ss.GUI.OnStop = func(mode, level enums.Enum) {
+ vu := ss.NetViewUpdater(mode)
+ vu.UpdateWhenStopped(mode, level) // todo: carry this all the way through
+ }
- ss.GUI.AddPlots(title, &ss.Logs)
+ nv.SceneXYZ().Camera.Pose.Pos.Set(0, 1, 2.75) // more "head on" than default which is more "top down"
+ nv.SceneXYZ().Camera.LookAt(math32.Vec3(0, 0, 0), math32.Vec3(0, 1, 0))
+ ss.GUI.UpdateFiles()
+ ss.InitStats()
ss.GUI.FinalizeGUI(false)
- if ss.Config.Run.GPU {
- ss.Net.ConfigGPUnoGUI(&ss.Context)
- core.TheApp.AddQuitCleanFunc(func() {
- ss.Net.GPU.Destroy()
- })
- }
}
+// todo: persistent run log
func (ss *Sim) MakeToolbar(p *tree.Plan) {
- ss.GUI.AddToolbarItem(p, egui.ToolbarItem{Label: "Init", Icon: icons.Update,
- Tooltip: "Initialize everything including network weights, and start over. Also applies current params.",
- Active: egui.ActiveStopped,
- Func: func() {
- ss.Init()
- ss.GUI.UpdateWindow()
- },
- })
-
- ss.GUI.AddLooperCtrl(p, ss.Loops, []etime.Modes{etime.Train, etime.Test})
+ ss.GUI.AddLooperCtrl(p, ss.Loops)
- ////////////////////////////////////////////////
tree.Add(p, func(w *core.Separator) {})
- ss.GUI.AddToolbarItem(p, egui.ToolbarItem{Label: "Reset RunLog",
- Icon: icons.Reset,
- Tooltip: "Reset the accumulated log of all Runs, which are tagged with the ParamSet used",
- Active: egui.ActiveAlways,
- Func: func() {
- ss.Logs.ResetLog(etime.Train, etime.Run)
- ss.GUI.UpdatePlot(etime.Train, etime.Run)
- },
- })
- ////////////////////////////////////////////////
- tree.Add(p, func(w *core.Separator) {})
- ss.GUI.AddToolbarItem(p, egui.ToolbarItem{Label: "New Seed",
+ ss.GUI.AddToolbarItem(p, egui.ToolbarItem{
+ Label: "New Seed",
Icon: icons.Add,
Tooltip: "Generate a new initial random seed to get different results. By default, Init re-establishes the same initial seed every time.",
Active: egui.ActiveAlways,
@@ -665,12 +723,13 @@ func (ss *Sim) MakeToolbar(p *tree.Plan) {
ss.RandSeeds.NewSeeds()
},
})
- ss.GUI.AddToolbarItem(p, egui.ToolbarItem{Label: "README",
+ ss.GUI.AddToolbarItem(p, egui.ToolbarItem{
+ Label: "README",
Icon: icons.FileMarkdown,
Tooltip: "Opens your browser on the README file that contains instructions for how to run this model.",
Active: egui.ActiveAlways,
Func: func() {
- core.TheApp.OpenURL("https://github.com/emer/axon/blob/main/examples/deep_fsa/README.md")
+ core.TheApp.OpenURL("https://github.com/emer/axon/blob/main/examples/deep_fsa/README.md")
},
})
}
@@ -682,45 +741,25 @@ func (ss *Sim) RunGUI() {
}
func (ss *Sim) RunNoGUI() {
+ ss.Init()
+
if ss.Config.Params.Note != "" {
mpi.Printf("Note: %s\n", ss.Config.Params.Note)
}
if ss.Config.Log.SaveWeights {
mpi.Printf("Saving final weights per run\n")
}
- runName := ss.Params.RunName(ss.Config.Run.Run)
- ss.Stats.SetString("RunName", runName) // used for naming logs, stats, etc
- netName := ss.Net.Name
-
- elog.SetLogFile(&ss.Logs, ss.Config.Log.Trial, etime.Train, etime.Trial, "trl", netName, runName)
- elog.SetLogFile(&ss.Logs, ss.Config.Log.Epoch, etime.Train, etime.Epoch, "epc", netName, runName)
- elog.SetLogFile(&ss.Logs, ss.Config.Log.Run, etime.Train, etime.Run, "run", netName, runName)
- elog.SetLogFile(&ss.Logs, ss.Config.Log.TestEpoch, etime.Test, etime.Epoch, "tst_epc", netName, runName)
- elog.SetLogFile(&ss.Logs, ss.Config.Log.TestTrial, etime.Test, etime.Trial, "tst_trl", netName, runName)
-
- netdata := ss.Config.Log.NetData
- if netdata {
- mpi.Printf("Saving NetView data from testing\n")
- ss.GUI.InitNetData(ss.Net, 200)
- }
-
- ss.Init()
- mpi.Printf("Running %d Runs starting at %d\n", ss.Config.Run.NRuns, ss.Config.Run.Run)
- ss.Loops.GetLoop(etime.Train, etime.Run).Counter.SetCurMaxPlusN(ss.Config.Run.Run, ss.Config.Run.NRuns)
-
- if ss.Config.Run.GPU {
- ss.Net.ConfigGPUnoGUI(&ss.Context)
- }
- mpi.Printf("Set NThreads to: %d\n", ss.Net.NThreads)
-
- ss.Loops.Run(etime.Train)
+ runName := ss.SetRunName()
+ netName := ss.Net.Name
+ cfg := &ss.Config.Log
+ axon.OpenLogFiles(ss.Loops, ss.Stats, netName, runName, [][]string{cfg.Train, cfg.Test})
- ss.Logs.CloseLogFiles()
+ mpi.Printf("Running %d Runs starting at %d\n", ss.Config.Run.Runs, ss.Config.Run.Run)
+ ss.Loops.Loop(Train, Run).Counter.SetCurMaxPlusN(ss.Config.Run.Run, ss.Config.Run.Runs)
- if netdata {
- ss.GUI.SaveNetData(ss.Stats.String("RunName"))
- }
+ ss.Loops.Run(Train)
- ss.Net.GPU.Destroy()
+ axon.CloseLogFiles(ss.Loops, ss.Stats, Cycle)
+ axon.GPURelease()
}
diff --git a/examples/deep_fsa/enumgen.go b/examples/deep_fsa/enumgen.go
new file mode 100644
index 00000000..daae20c8
--- /dev/null
+++ b/examples/deep_fsa/enumgen.go
@@ -0,0 +1,128 @@
+// Code generated by "core generate -add-types -add-funcs"; DO NOT EDIT.
+
+package main
+
+import (
+ "cogentcore.org/core/enums"
+)
+
+var _ModesValues = []Modes{0, 1}
+
+// ModesN is the highest valid value for type Modes, plus one.
+const ModesN Modes = 2
+
+var _ModesValueMap = map[string]Modes{`Train`: 0, `Test`: 1}
+
+var _ModesDescMap = map[Modes]string{0: ``, 1: ``}
+
+var _ModesMap = map[Modes]string{0: `Train`, 1: `Test`}
+
+// String returns the string representation of this Modes value.
+func (i Modes) String() string { return enums.String(i, _ModesMap) }
+
+// SetString sets the Modes value from its string representation,
+// and returns an error if the string is invalid.
+func (i *Modes) SetString(s string) error { return enums.SetString(i, s, _ModesValueMap, "Modes") }
+
+// Int64 returns the Modes value as an int64.
+func (i Modes) Int64() int64 { return int64(i) }
+
+// SetInt64 sets the Modes value from an int64.
+func (i *Modes) SetInt64(in int64) { *i = Modes(in) }
+
+// Desc returns the description of the Modes value.
+func (i Modes) Desc() string { return enums.Desc(i, _ModesDescMap) }
+
+// ModesValues returns all possible values for the type Modes.
+func ModesValues() []Modes { return _ModesValues }
+
+// Values returns all possible values for the type Modes.
+func (i Modes) Values() []enums.Enum { return enums.Values(_ModesValues) }
+
+// MarshalText implements the [encoding.TextMarshaler] interface.
+func (i Modes) MarshalText() ([]byte, error) { return []byte(i.String()), nil }
+
+// UnmarshalText implements the [encoding.TextUnmarshaler] interface.
+func (i *Modes) UnmarshalText(text []byte) error { return enums.UnmarshalText(i, text, "Modes") }
+
+var _LevelsValues = []Levels{0, 1, 2, 3}
+
+// LevelsN is the highest valid value for type Levels, plus one.
+const LevelsN Levels = 4
+
+var _LevelsValueMap = map[string]Levels{`Cycle`: 0, `Trial`: 1, `Epoch`: 2, `Run`: 3}
+
+var _LevelsDescMap = map[Levels]string{0: ``, 1: ``, 2: ``, 3: ``}
+
+var _LevelsMap = map[Levels]string{0: `Cycle`, 1: `Trial`, 2: `Epoch`, 3: `Run`}
+
+// String returns the string representation of this Levels value.
+func (i Levels) String() string { return enums.String(i, _LevelsMap) }
+
+// SetString sets the Levels value from its string representation,
+// and returns an error if the string is invalid.
+func (i *Levels) SetString(s string) error { return enums.SetString(i, s, _LevelsValueMap, "Levels") }
+
+// Int64 returns the Levels value as an int64.
+func (i Levels) Int64() int64 { return int64(i) }
+
+// SetInt64 sets the Levels value from an int64.
+func (i *Levels) SetInt64(in int64) { *i = Levels(in) }
+
+// Desc returns the description of the Levels value.
+func (i Levels) Desc() string { return enums.Desc(i, _LevelsDescMap) }
+
+// LevelsValues returns all possible values for the type Levels.
+func LevelsValues() []Levels { return _LevelsValues }
+
+// Values returns all possible values for the type Levels.
+func (i Levels) Values() []enums.Enum { return enums.Values(_LevelsValues) }
+
+// MarshalText implements the [encoding.TextMarshaler] interface.
+func (i Levels) MarshalText() ([]byte, error) { return []byte(i.String()), nil }
+
+// UnmarshalText implements the [encoding.TextUnmarshaler] interface.
+func (i *Levels) UnmarshalText(text []byte) error { return enums.UnmarshalText(i, text, "Levels") }
+
+var _StatsPhaseValues = []StatsPhase{0, 1}
+
+// StatsPhaseN is the highest valid value for type StatsPhase, plus one.
+const StatsPhaseN StatsPhase = 2
+
+var _StatsPhaseValueMap = map[string]StatsPhase{`Start`: 0, `Step`: 1}
+
+var _StatsPhaseDescMap = map[StatsPhase]string{0: ``, 1: ``}
+
+var _StatsPhaseMap = map[StatsPhase]string{0: `Start`, 1: `Step`}
+
+// String returns the string representation of this StatsPhase value.
+func (i StatsPhase) String() string { return enums.String(i, _StatsPhaseMap) }
+
+// SetString sets the StatsPhase value from its string representation,
+// and returns an error if the string is invalid.
+func (i *StatsPhase) SetString(s string) error {
+ return enums.SetString(i, s, _StatsPhaseValueMap, "StatsPhase")
+}
+
+// Int64 returns the StatsPhase value as an int64.
+func (i StatsPhase) Int64() int64 { return int64(i) }
+
+// SetInt64 sets the StatsPhase value from an int64.
+func (i *StatsPhase) SetInt64(in int64) { *i = StatsPhase(in) }
+
+// Desc returns the description of the StatsPhase value.
+func (i StatsPhase) Desc() string { return enums.Desc(i, _StatsPhaseDescMap) }
+
+// StatsPhaseValues returns all possible values for the type StatsPhase.
+func StatsPhaseValues() []StatsPhase { return _StatsPhaseValues }
+
+// Values returns all possible values for the type StatsPhase.
+func (i StatsPhase) Values() []enums.Enum { return enums.Values(_StatsPhaseValues) }
+
+// MarshalText implements the [encoding.TextMarshaler] interface.
+func (i StatsPhase) MarshalText() ([]byte, error) { return []byte(i.String()), nil }
+
+// UnmarshalText implements the [encoding.TextUnmarshaler] interface.
+func (i *StatsPhase) UnmarshalText(text []byte) error {
+ return enums.UnmarshalText(i, text, "StatsPhase")
+}
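
The generated file gives Modes, Levels, and StatsPhase the standard cogentcore enums interface. A small usage sketch relying only on the methods defined above (fmt and log imports assumed):

    var mode Modes
    if err := mode.SetString("Test"); err != nil { // parse from string form
        log.Fatal(err)
    }
    fmt.Println(mode.String(), mode.Int64()) // Test 1

    for _, lv := range LevelsValues() { // Cycle, Trial, Epoch, Run
        fmt.Println(lv.Int64(), lv.String())
    }

    b, _ := StatsPhase(0).MarshalText() // marshals via String: "Start"
    fmt.Println(string(b))
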
diff --git a/examples/deep_fsa/fsa_env.go b/examples/deep_fsa/fsa_env.go
index 8b7e9a6a..5283b50d 100644
--- a/examples/deep_fsa/fsa_env.go
+++ b/examples/deep_fsa/fsa_env.go
@@ -10,7 +10,6 @@ import (
"cogentcore.org/core/base/randx"
"cogentcore.org/core/tensor"
"github.com/emer/emergent/v2/env"
- "github.com/emer/emergent/v2/etime"
)
// FSAEnv generates states in a finite state automaton (FSA) which is a
@@ -29,7 +28,7 @@ type FSAEnv struct {
Labels tensor.String
// automaton state within FSA that we're in.
- AState env.CurPrvInt
+ AState env.CurPrev[int]
// number of next states in current state output (scalar).
NNext tensor.Int
@@ -62,19 +61,19 @@ type FSAEnv struct {
// InitTMat initializes matrix and labels to given size
func (ev *FSAEnv) InitTMat(nst int) {
- ev.TMat.SetShape([]int{nst, nst}, "cur", "next")
- ev.Labels.SetShape([]int{nst, nst}, "cur", "next")
+ ev.TMat.SetShapeSizes(nst, nst)
+ ev.Labels.SetShapeSizes(nst, nst)
ev.TMat.SetZeros()
ev.Labels.SetZeros()
- ev.NNext.SetShape([]int{1})
- ev.NextStates.SetShape([]int{nst})
- ev.NextLabels.SetShape([]int{nst})
+ ev.NNext.SetShapeSizes(1)
+ ev.NextStates.SetShapeSizes(nst)
+ ev.NextLabels.SetShapeSizes(nst)
}
// SetTMat sets given transition matrix probability and label
func (ev *FSAEnv) SetTMat(fm, to int, p float64, lbl string) {
- ev.TMat.Set([]int{fm, to}, p)
- ev.Labels.Set([]int{fm, to}, lbl)
+ ev.TMat.Set(p, fm, to)
+ ev.Labels.Set(lbl, fm, to)
}
// TMatReber sets the transition matrix to the standard Reber grammar FSA
@@ -104,7 +103,7 @@ func (ev *FSAEnv) Validate() error {
func (ev *FSAEnv) Label() string { return ev.Name }
-func (ev *FSAEnv) State(element string) tensor.Tensor {
+func (ev *FSAEnv) State(element string) tensor.Values {
switch element {
case "NNext":
return &ev.NNext
@@ -116,10 +115,6 @@ func (ev *FSAEnv) State(element string) tensor.Tensor {
return nil
}
-func (ev *FSAEnv) Actions() env.Elements {
- return nil
-}
-
// String returns the current state as a string
func (ev *FSAEnv) String() string {
nn := ev.NNext.Values[0]
@@ -129,8 +124,6 @@ func (ev *FSAEnv) String() string {
func (ev *FSAEnv) Init(run int) {
ev.Rand.NewRand(ev.RandSeed)
- ev.Tick.Scale = etime.Tick
- ev.Trial.Scale = etime.Trial
ev.Seq.Init()
ev.Tick.Init()
ev.Trial.Init()
@@ -149,17 +142,17 @@ func (ev *FSAEnv) NextState() {
ps := ev.TMat.Values[ri : ri+nst]
ls := ev.Labels.Values[ri : ri+nst]
nxt := randx.PChoose64(ps, &ev.Rand) // next state chosen at random
- ev.NextStates.Set1D(0, nxt)
- ev.NextLabels.Set1D(0, ls[nxt])
+ ev.NextStates.Set1D(nxt, 0)
+ ev.NextLabels.Set1D(ls[nxt], 0)
idx := 1
for i, p := range ps {
if i != nxt && p > 0 {
- ev.NextStates.Set1D(idx, i)
- ev.NextLabels.Set1D(idx, ls[i])
+ ev.NextStates.Set1D(i, idx)
+ ev.NextLabels.Set1D(ls[i], idx)
idx++
}
}
- ev.NNext.Set1D(0, idx)
+ ev.NNext.Set1D(idx, 0)
ev.AState.Set(nxt)
}
@@ -174,7 +167,7 @@ func (ev *FSAEnv) Step() bool {
return true
}
-func (ev *FSAEnv) Action(element string, input tensor.Tensor) {
+func (ev *FSAEnv) Action(element string, input tensor.Values) {
// nop
}
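
Besides dropping the unused etime import and the no-op Actions method, this file tracks two tensor API changes: shapes are now set with variadic sizes (SetShapeSizes), and setters take the value first with indices last. A minimal sketch of the new conventions, assuming a tensor.Float64 value; the Float1D read-back matches its use elsewhere in this diff:

    var t tensor.Float64
    t.SetShapeSizes(2, 3) // was: t.SetShape([]int{2, 3}, "row", "col")
    t.Set(0.5, 1, 2)      // was: t.Set([]int{1, 2}, 0.5)
    t.Set1D(0.25, 4)      // was: t.Set1D(4, 0.25)
    _ = t.Float1D(4)      // read back by flat index: 0.25
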
diff --git a/examples/deep_fsa/params.go b/examples/deep_fsa/params.go
index 3bb7ab68..03b753c5 100644
--- a/examples/deep_fsa/params.go
+++ b/examples/deep_fsa/params.go
@@ -1,139 +1,145 @@
-// Copyright (c) 2019, The Emergent Authors. All rights reserved.
+// Copyright (c) 2024, The Emergent Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package main
import (
- "github.com/emer/emergent/v2/params"
+ "github.com/emer/axon/v2/axon"
)
-// ParamSets is the default set of parameters -- Base is always applied, and others can be optionally
-// selected to apply on top of that
-var ParamSets = params.Sets{
+// LayerParams sets the minimal non-default params.
+// Base is always applied, and others can be optionally selected to apply on top of that.
+var LayerParams = axon.LayerSheets{
"Base": {
{Sel: "Layer", Doc: "generic layer params",
- Params: params.Params{
- ly.Inhib.ActAvg.Nominal = "0.15", // 0.15 best
- ly.Inhib.Layer.Gi = "1.0", // 1.0 > 1.1 v1.6.1
- ly.Inhib.Layer.FB = "1", // 1.0 > 0.5
- ly.Inhib.ActAvg.AdaptGi = "false", // not needed; doesn't engage
- ly.Learn.TrgAvgAct.SubMean = "1", // 1 > 0
- ly.Learn.TrgAvgAct.SynScaleRate = "0.005", // 0.005 > others
- ly.Learn.TrgAvgAct.ErrLRate = "0.02", // 0.02 def
- ly.Acts.Gbar.L = "0.2", // std
- ly.Acts.Decay.Act = "0.0", // 0 == 0.2
- ly.Acts.Decay.Glong = "0.0",
- ly.Acts.Dt.LongAvgTau = "20", // 20 > higher for objrec, lvis
- ly.Acts.Dend.GbarExp = "0.2", // 0.2 > 0.5 > 0.1 > 0
- ly.Acts.Dend.GbarR = "3", // 3 / 0.2 > 6 / 0.5
- ly.Acts.Dend.SSGi = "2", // 2 > 3
- ly.Acts.Dt.VmDendTau = "5", // old: 8 > 5 >> 2.81 -- big diff
- ly.Acts.AK.Gbar = "0.1",
- ly.Acts.NMDA.MgC = "1.4", // 1.4, 5 > 1.2, 0 ?
- ly.Acts.NMDA.Voff = "0",
- ly.Acts.NMDA.Gbar = "0.006",
- ly.Acts.GabaB.Gbar = "0.015", // 0.015 def -- makes no diff down to 0.008
- ly.Learn.LrnNMDA.Gbar = "0.006",
- ly.Acts.Sahp.Gbar = "0.1", //
- ly.Acts.Sahp.Off = "0.8", //
- ly.Acts.Sahp.Slope = "0.02", //
- ly.Acts.Sahp.CaTau = "10", //
- // ly.Learn.CaLearn.Dt.PTau = "60", // 60 for 300 cyc, 40 for 200 (scales linearly)
- // ly.Learn.CaLearn.Dt.DTau = "60", // "
- // ly.Learn.CaSpk.Dt.PTau = "60", // "
- // ly.Learn.CaSpk.Dt.DTau = "60", // "
+ Set: func(ly *axon.LayerParams) {
+ ly.Inhib.ActAvg.Nominal = 0.15 // 0.15 best
+ ly.Inhib.Layer.Gi = 1.0 // 1.0 > 1.1 v1.6.1
+ ly.Inhib.Layer.FB = 1 // 1.0 > 0.5
+ ly.Inhib.ActAvg.AdaptGi.SetBool(false) // not needed; doesn't engage
+ ly.Learn.TrgAvgAct.SubMean = 1 // 1 > 0
+ ly.Learn.TrgAvgAct.SynScaleRate = 0.005 // 0.005 > others
+ ly.Learn.TrgAvgAct.ErrLRate = 0.02 // 0.02 def
+ ly.Acts.Gbar.L = 0.2 // std
+ ly.Acts.Decay.Act = 0.0 // 0 == 0.2
+ ly.Acts.Decay.Glong = 0.0
+ ly.Acts.Dt.LongAvgTau = 20 // 20 > higher for objrec, lvis
+ ly.Acts.Dend.GbarExp = 0.2 // 0.2 > 0.5 > 0.1 > 0
+ ly.Acts.Dend.GbarR = 3 // 3 / 0.2 > 6 / 0.5
+ ly.Acts.Dend.SSGi = 2 // 2 > 3
+ ly.Acts.Dt.VmDendTau = 5 // old: 8 > 5 >> 2.81 -- big diff
+ ly.Acts.AK.Gbar = 0.1
+ ly.Acts.NMDA.MgC = 1.4 // 1.4, 5 > 1.2, 0 ?
+ ly.Acts.NMDA.Voff = 0
+ ly.Acts.NMDA.Gbar = 0.006
+ ly.Acts.GabaB.Gbar = 0.015 // 0.015 def -- makes no diff down to 0.008
+ ly.Learn.LrnNMDA.Gbar = 0.006
+ ly.Acts.Sahp.Gbar = 0.1
+ ly.Acts.Sahp.Off = 0.8
+ ly.Acts.Sahp.Slope = 0.02
+ ly.Acts.Sahp.CaTau = 10
+ // ly.Learn.CaLearn.Dt.PTau = 60 // 60 for 300 cyc, 40 for 200 (scales linearly)
+ // ly.Learn.CaLearn.Dt.DTau = 60 // "
+ // ly.Learn.CaSpk.Dt.PTau = 60 // "
+ // ly.Learn.CaSpk.Dt.DTau = 60 // "
}},
{Sel: ".SuperLayer", Doc: "super layer params",
- Params: params.Params{
- ly.Bursts.ThrRel = "0.1", // 0.1, 0.1 best
- ly.Bursts.ThrAbs = "0.1",
+ Set: func(ly *axon.LayerParams) {
+ ly.Bursts.ThrRel = 0.1 // 0.1, 0.1 best
+ ly.Bursts.ThrAbs = 0.1
}},
{Sel: ".InLay", Doc: "input layers need more inhibition",
- Params: params.Params{
- ly.Inhib.Layer.Gi = "0.9", // makes no diff
- ly.Inhib.ActAvg.Nominal = "0.15",
- ly.Acts.Clamp.Ge = "1.5",
+ Set: func(ly *axon.LayerParams) {
+ ly.Inhib.Layer.Gi = 0.9 // makes no diff
+ ly.Inhib.ActAvg.Nominal = 0.15
+ ly.Acts.Clamp.Ge = 1.5
}},
{Sel: ".CTLayer", Doc: "CT NMDA gbar factor is key",
- Params: params.Params{
- ly.Inhib.Layer.Gi = "2.2", // 2.2 > others
- ly.Inhib.Layer.FB = "1",
- ly.Acts.Dend.SSGi = "0", // 0 > higher -- kills nmda maint!
- ly.CT.GeGain = "2.0", // 2.0 > 1.5 for sure (v0.2.1+)
- // ly.CT.DecayTau = "80", // now auto-set
- ly.Acts.Decay.Act = "0.0",
- ly.Acts.Decay.Glong = "0.0",
- ly.Acts.GabaB.Gbar = "0.015", // 0.015 def > 0.01
- ly.Acts.MaintNMDA.Gbar = "0.007", // 0.007 best, but 0.01 > lower if reg nmda weak
- ly.Acts.MaintNMDA.Tau = "200", // 200 > 100 > 300
- ly.Acts.NMDA.Gbar = "0.007", // 0.007 matching maint best
- ly.Acts.NMDA.Tau = "200", // 200 > 100
- ly.Learn.TrgAvgAct.SynScaleRate = "0.005", // 0.005 > 0.0002 (much worse)
- ly.Learn.TrgAvgAct.SubMean = "1", // 1 > 0
+ Set: func(ly *axon.LayerParams) {
+ ly.Inhib.Layer.Gi = 2.2 // 2.2 > others
+ ly.Inhib.Layer.FB = 1
+ ly.Acts.Dend.SSGi = 0 // 0 > higher -- kills nmda maint!
+ ly.CT.GeGain = 2.0 // 2.0 > 1.5 for sure (v0.2.1+)
+ // ly.CT.DecayTau = "80 // now auto-set
+ ly.Acts.Decay.Act = 0.0
+ ly.Acts.Decay.Glong = 0.0
+ ly.Acts.GabaB.Gbar = 0.015 // 0.015 def > 0.01
+ ly.Acts.MaintNMDA.Gbar = 0.007 // 0.007 best, but 0.01 > lower if reg nmda weak
+ ly.Acts.MaintNMDA.Tau = 200 // 200 > 100 > 300
+ ly.Acts.NMDA.Gbar = 0.007 // 0.007 matching maint best
+ ly.Acts.NMDA.Tau = 200 // 200 > 100
+ ly.Learn.TrgAvgAct.SynScaleRate = 0.005 // 0.005 > 0.0002 (much worse)
+ ly.Learn.TrgAvgAct.SubMean = 1 // 1 > 0
}},
{Sel: ".PulvinarLayer", Doc: "pulvinar",
- Params: params.Params{
- ly.Inhib.Layer.Gi = "0.75", // 0.75 > higher v1.6.1
- ly.Inhib.Layer.FB = "1",
- ly.Pulv.DriveScale = "0.2", // 0.2 > 0.1, 0.15, 0.25, 0.3
- ly.Pulv.FullDriveAct = "0.6", // 0.6 def
- ly.Acts.Spikes.Tr = "3", // 1 is best for ra25..
- ly.Acts.Decay.Act = "0.0",
- ly.Acts.Decay.Glong = "0.0", // clear long
- ly.Acts.Decay.AHP = "0.0", // clear ahp
- ly.Learn.RLRate.SigmoidMin = "1.0", // 1 > 0.05 with CaSpkD as var
+ Set: func(ly *axon.LayerParams) {
+ ly.Inhib.Layer.Gi = 0.75 // 0.75 > higher v1.6.1
+ ly.Inhib.Layer.FB = 1
+ ly.Pulv.DriveScale = 0.2 // 0.2 > 0.1, 0.15, 0.25, 0.3
+ ly.Pulv.FullDriveAct = 0.6 // 0.6 def
+ ly.Acts.Spikes.Tr = 3 // 1 is best for ra25..
+ ly.Acts.Decay.Act = 0.0
+ ly.Acts.Decay.Glong = 0.0 // clear long
+ ly.Acts.Decay.AHP = 0.0 // clear ahp
+ ly.Learn.RLRate.SigmoidMin = 1.0 // 1 > 0.05 with CaSpkD as var
}},
+ },
+}
+
+// PathParams sets the minimal non-default params.
+// Base is always applied, and others can be optionally selected to apply on top of that.
+var PathParams = axon.PathSheets{
+ "Base": {
{Sel: "Path", Doc: "std",
- Params: params.Params{
- pt.Learn.Trace.SubMean = "0", // 0 > 1 -- even with CTCtxt = 0
- pt.Learn.LRate.Base = "0.03", // .03 > others -- same as CtCtxt
- pt.SWts.Adapt.LRate = "0.01", // 0.01 or 0.0001 music
- pt.SWts.Init.SPct = "1.0", // 1 works fine here -- .5 also ok
- pt.Com.PFail = "0.0",
- pt.Learn.Trace.Tau = "1", // 1 >> 2 v0.0.9
- pt.Learn.KinaseCa.CaGain = "1.0", // 1 > higher, lower
+ Set: func(pt *axon.PathParams) {
+ pt.Learn.Trace.SubMean = 0 // 0 > 1 -- even with CTCtxt = 0
+ pt.Learn.LRate.Base = 0.03 // .03 > others -- same as CtCtxt
+ pt.SWts.Adapt.LRate = 0.01 // 0.01 or 0.0001 music
+ pt.SWts.Init.SPct = 1.0 // 1 works fine here -- .5 also ok
+ pt.Learn.Trace.Tau = 1 // 1 >> 2 v0.0.9
+ pt.Learn.KinaseCa.CaGain = 1.0 // 1 > higher, lower
}},
{Sel: ".BackPath", Doc: "top-down back-pathways MUST have lower relative weight scale, otherwise network hallucinates",
- Params: params.Params{
- pt.PathScale.Rel = "0.2", // 0.2 > 0.3
+ Set: func(pt *axon.PathParams) {
+ pt.PathScale.Rel = 0.2 // 0.2 > 0.3
}},
{Sel: ".CTCtxtPath", Doc: "all CT context paths",
- Params: params.Params{
- pt.Learn.LRate.Base = "0.02", // 0.02 >= 0.03 > 0.01
- pt.Learn.Trace.Tau = "2", // 2 = 3 > 1 > 4 still v0.0.9
- pt.Learn.Trace.SubMean = "0", // 0 > 1 -- 1 is especially bad
+ Set: func(pt *axon.PathParams) {
+ pt.Learn.LRate.Base = 0.02 // 0.02 >= 0.03 > 0.01
+ pt.Learn.Trace.Tau = 2 // 2 = 3 > 1 > 4 still v0.0.9
+ pt.Learn.Trace.SubMean = 0 // 0 > 1 -- 1 is especially bad
}},
{Sel: ".CTFromSuper", Doc: "full > 1to1",
- Params: params.Params{
- pt.Learn.Learn = "true",
- pt.SWts.Init.Mean = "0.5",
- pt.SWts.Init.Var = "0.25",
+ Set: func(pt *axon.PathParams) {
+ pt.Learn.Learn.SetBool(true)
+ pt.SWts.Init.Mean = 0.5
+ pt.SWts.Init.Var = 0.25
}},
{Sel: ".CTSelfCtxt", Doc: "",
- Params: params.Params{
- pt.PathScale.Rel = "0.5", // 0.5 > 0.2 > 0.8
- pt.SWts.Init.Sym = "true", // true > false
+ Set: func(pt *axon.PathParams) {
+ pt.PathScale.Rel = 0.5 // 0.5 > 0.2 > 0.8
+ pt.SWts.Init.Sym.SetBool(true) // true > false
}},
{Sel: ".CTSelfMaint", Doc: "",
- Params: params.Params{
- pt.PathScale.Abs = "0.5", // 0.5 > 0.4, 0.3 > 0.8 (very bad)
- pt.Com.GType = "MaintG",
- pt.SWts.Init.Sym = "true", // no effect? not sure why
+ Set: func(pt *axon.PathParams) {
+ pt.PathScale.Abs = 0.5 // 0.5 > 0.4, 0.3 > 0.8 (very bad)
+ pt.Com.GType = axon.MaintG
+ pt.SWts.Init.Sym.SetBool(true) // no effect? not sure why
}},
// {Sel: ".CTSelfMaint", Doc: "",
- // Params: params.Params{
- // pt.PathScale.Rel = "0.1",
- // pt.SWts.Init.Sym = "true", // no effect? not sure why
+ // Set: func(pt *axon.PathParams) {
+ // pt.PathScale.Rel = 0.1
+ // pt.SWts.Init.Sym.SetBool(true) // no effect? not sure why
// }},
{Sel: ".FromPulv", Doc: "",
- Params: params.Params{
- pt.PathScale.Rel = "0.1", // 0.1 > 0.2
+ Set: func(pt *axon.PathParams) {
+ pt.PathScale.Rel = 0.1 // 0.1 > 0.2
}},
// {Sel: ".CTToPulv", Doc: "",
- // Params: params.Params{
- // // pt.Learn.LRate.Base = "0.1",
- // // pt.SWts.Adapt.SigGain = "1", // 1 does not work as well with any tested lrates
+ // Set: func(pt *axon.PathParams) {
+ // // pt.Learn.LRate.Base = 0.1
+ // // pt.SWts.Adapt.SigGain = 1 // 1 does not work as well with any tested lrates
// }},
},
}
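
The params.go rewrite is the heart of this migration: string-keyed params.Params maps become typed Set closures in axon.LayerSheets and axon.PathSheets, so field paths and values are checked by the compiler instead of parsed at runtime, and booleans and enums use their real types (SetBool, axon.MaintG). A minimal extra sheet in the new style; the "HiGi" sheet name and its values are made up for illustration, while the field paths come from this file, and the Base/extra-sheet selection scheme is unchanged:

    var ExtraLayerParams = axon.LayerSheets{
        "HiGi": { // hypothetical sheet, selected on top of "Base"
            {Sel: ".SuperLayer", Doc: "stronger inhibition variant",
                Set: func(ly *axon.LayerParams) {
                    ly.Inhib.Layer.Gi = 1.1 // overrides the Base value
                    ly.Inhib.ActAvg.AdaptGi.SetBool(true)
                }},
        },
    }
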
diff --git a/examples/deep_fsa/typegen.go b/examples/deep_fsa/typegen.go
index c2e59a73..efc8a04d 100644
--- a/examples/deep_fsa/typegen.go
+++ b/examples/deep_fsa/typegen.go
@@ -1,4 +1,4 @@
-// Code generated by "core generate -add-types"; DO NOT EDIT.
+// Code generated by "core generate -add-types -add-funcs"; DO NOT EDIT.
package main
@@ -6,16 +6,26 @@ import (
"cogentcore.org/core/types"
)
-var _ = types.AddType(&types.Type{Name: "main.EnvConfig", IDName: "env-config", Doc: "EnvConfig has config params for environment\nnote: only adding fields for key Env params that matter for both Network and Env\nother params are set via the Env map data mechanism.", Fields: []types.Field{{Name: "Env", Doc: "env parameters -- can set any field/subfield on Env struct, using standard TOML formatting"}, {Name: "UnitsPer", Doc: "number of units per localist output unit -- 1 works better than 5 here"}, {Name: "InputNames", Doc: "] names of input letters"}, {Name: "InputNameMap", Doc: "map of input names -- initialized during Configenv"}}})
+var _ = types.AddType(&types.Type{Name: "main.EnvConfig", IDName: "env-config", Doc: "EnvConfig has config params for environment\nnote: only adding fields for key Env params that matter for both Network and Env\nother params are set via the Env map data mechanism.", Fields: []types.Field{{Name: "Env", Doc: "env parameters -- can set any field/subfield on Env struct, using standard TOML formatting"}, {Name: "UnitsPer", Doc: "number of units per localist output unit -- 1 works better than 5 here"}, {Name: "InputNames", Doc: "InputNames are names of input letters."}, {Name: "InputNameMap", Doc: "InputMap is the map of input names, initialized during ConfigEnv."}}})
-var _ = types.AddType(&types.Type{Name: "main.ParamConfig", IDName: "param-config", Doc: "ParamConfig has config parameters related to sim params", Fields: []types.Field{{Name: "Network", Doc: "network parameters"}, {Name: "Sheet", Doc: "Extra Param Sheet name(s) to use (space separated if multiple) -- must be valid name as listed in compiled-in params or loaded params"}, {Name: "Tag", Doc: "extra tag to add to file names and logs saved from this run"}, {Name: "Note", Doc: "user note -- describe the run params etc -- like a git commit message for the run"}, {Name: "File", Doc: "Name of the JSON file to input saved parameters from."}, {Name: "SaveAll", Doc: "Save a snapshot of all current param and config settings in a directory named params_ (or _good if Good is true), then quit -- useful for comparing to later changes and seeing multiple views of current params"}, {Name: "Good", Doc: "for SaveAll, save to params_good for a known good params state. This can be done prior to making a new release after all tests are passing -- add results to git to provide a full diff record of all params over time."}}})
+var _ = types.AddType(&types.Type{Name: "main.ParamConfig", IDName: "param-config", Doc: "ParamConfig has config parameters related to sim params.", Fields: []types.Field{{Name: "Hidden1Size", Doc: "Hidden1Size is the size of hidden 1 layer."}, {Name: "Hidden2Size", Doc: "Hidden2Size is the size of hidden 2 layer."}, {Name: "Sheet", Doc: "Sheet is the extra params sheet name(s) to use (space separated\nif multiple). Must be valid name as listed in compiled-in params\nor loaded params."}, {Name: "Tag", Doc: "Tag is an extra tag to add to file names and logs saved from this run."}, {Name: "Note", Doc: "Note is additional info to describe the run params etc,\nlike a git commit message for the run."}, {Name: "SaveAll", Doc: "SaveAll will save a snapshot of all current param and config settings\nin a directory named params_ (or _good if Good is true),\nthen quit. Useful for comparing to later changes and seeing multiple\nviews of current params."}, {Name: "Good", Doc: "Good is for SaveAll, save to params_good for a known good params state.\nThis can be done prior to making a new release after all tests are passing.\nAdd results to git to provide a full diff record of all params over level."}}})
-var _ = types.AddType(&types.Type{Name: "main.RunConfig", IDName: "run-config", Doc: "RunConfig has config parameters related to running the sim", Fields: []types.Field{{Name: "GPU", Doc: "use the GPU for computation -- generally faster even for small models if NData ~16"}, {Name: "NData", Doc: "number of data-parallel items to process in parallel per trial -- works (and is significantly faster) for both CPU and GPU. Results in an effective mini-batch of learning. 16 learns just as well as 1 -- no diffs."}, {Name: "NThreads", Doc: "number of parallel threads for CPU computation -- 0 = use default"}, {Name: "Run", Doc: "starting run number -- determines the random seed -- runs counts from there -- can do all runs in parallel by launching separate jobs with each run, runs = 1"}, {Name: "NRuns", Doc: "total number of runs to do when running Train"}, {Name: "NEpochs", Doc: "total number of epochs per run"}, {Name: "NTrials", Doc: "total number of trials per epoch. Should be an even multiple of NData."}, {Name: "NCycles", Doc: "total number of cycles per trial. at least 200"}, {Name: "NPlusCycles", Doc: "total number of plus-phase cycles per trial. for NCycles=300, use 100"}, {Name: "PCAInterval", Doc: "how frequently (in epochs) to compute PCA on hidden representations to measure variance?"}, {Name: "TestInterval", Doc: "how often to run through all the test patterns, in terms of training epochs -- can use 0 or -1 for no testing"}}})
+var _ = types.AddType(&types.Type{Name: "main.RunConfig", IDName: "run-config", Doc: "RunConfig has config parameters related to running the sim.", Fields: []types.Field{{Name: "GPU", Doc: "GPU uses the GPU for computation, generally faster than CPU even for\nsmall models if NData ~16."}, {Name: "NData", Doc: "NData is the number of data-parallel items to process in parallel per trial.\nIs significantly faster for both CPU and GPU. Results in an effective\nmini-batch of learning."}, {Name: "NThreads", Doc: "NThreads is the number of parallel threads for CPU computation;\n0 = use default."}, {Name: "Run", Doc: "Run is the _starting_ run number, which determines the random seed.\nNRuns counts up from there. Can do all runs in parallel by launching\nseparate jobs with each starting Run, NRuns = 1."}, {Name: "Runs", Doc: "Runs is the total number of runs to do when running Train, starting from Run."}, {Name: "Epochs", Doc: "Epochs is the total number of epochs per run."}, {Name: "Trials", Doc: "Trials is the total number of trials per epoch.\nShould be an even multiple of NData."}, {Name: "Cycles", Doc: "Cycles is the total number of cycles per trial: at least 200."}, {Name: "PlusCycles", Doc: "PlusCycles is the total number of plus-phase cycles per trial. For Cycles=300, use 100."}, {Name: "NZero", Doc: "NZero is how many perfect, zero-error epochs before stopping a Run."}, {Name: "TestInterval", Doc: "TestInterval is how often (in epochs) to run through all the test patterns,\nin terms of training epochs. Can use 0 or -1 for no testing."}, {Name: "PCAInterval", Doc: "PCAInterval is how often (in epochs) to compute PCA on hidden\nrepresentations to measure variance."}, {Name: "StartWts", Doc: "StartWts is the name of weights file to load at start of first run."}}})
-var _ = types.AddType(&types.Type{Name: "main.LogConfig", IDName: "log-config", Doc: "LogConfig has config parameters related to logging data", Directives: []types.Directive{{Tool: "types", Directive: "add"}}, Fields: []types.Field{{Name: "SaveWeights", Doc: "if true, save final weights after each run"}, {Name: "Epoch", Doc: "if true, save train epoch log to file, as .epc.tsv typically"}, {Name: "Run", Doc: "if true, save run log to file, as .run.tsv typically"}, {Name: "Trial", Doc: "if true, save train trial log to file, as .trl.tsv typically. May be large."}, {Name: "TestEpoch", Doc: "if true, save testing epoch log to file, as .tst_epc.tsv typically. In general it is better to copy testing items over to the training epoch log and record there."}, {Name: "TestTrial", Doc: "if true, save testing trial log to file, as .tst_trl.tsv typically. May be large."}, {Name: "NetData", Doc: "if true, save network activation etc data from testing trials, for later viewing in netview"}}})
+var _ = types.AddType(&types.Type{Name: "main.LogConfig", IDName: "log-config", Doc: "LogConfig has config parameters related to logging data.", Fields: []types.Field{{Name: "SaveWeights", Doc: "SaveWeights will save final weights after each run."}, {Name: "Train", Doc: "Train has the list of Train mode levels to save log files for."}, {Name: "Test", Doc: "Test has the list of Test mode levels to save log files for."}}})
-var _ = types.AddType(&types.Type{Name: "main.Config", IDName: "config", Doc: "Config is a standard Sim config -- use as a starting point.", Directives: []types.Directive{{Tool: "types", Directive: "add"}}, Fields: []types.Field{{Name: "Includes", Doc: "specify include files here, and after configuration, it contains list of include files added"}, {Name: "GUI", Doc: "open the GUI -- does not automatically run -- if false, then runs automatically and quits"}, {Name: "Debug", Doc: "log debugging information"}, {Name: "Env", Doc: "environment configuration options"}, {Name: "Params", Doc: "parameter related configuration options"}, {Name: "Run", Doc: "sim running related configuration options"}, {Name: "Log", Doc: "data logging related configuration options"}}})
+var _ = types.AddType(&types.Type{Name: "main.Config", IDName: "config", Doc: "Config is a standard Sim config -- use as a starting point.", Fields: []types.Field{{Name: "Includes", Doc: "Includes has a list of additional config files to include.\nAfter configuration, it contains list of include files added."}, {Name: "GUI", Doc: "GUI means open the GUI. Otherwise it runs automatically and quits,\nsaving results to log files."}, {Name: "Debug", Doc: "Debug reports debugging information."}, {Name: "Env", Doc: "Env has environment related configuration options."}, {Name: "Params", Doc: "Params has parameter related configuration options."}, {Name: "Run", Doc: "Run has sim running related configuration options."}, {Name: "Log", Doc: "Log has data logging related configuration options."}}})
-var _ = types.AddType(&types.Type{Name: "main.Sim", IDName: "sim", Doc: "Sim encapsulates the entire simulation model, and we define all the\nfunctionality as methods on this struct. This structure keeps all relevant\nstate information organized and available without having to pass everything around\nas arguments to methods, and provides the core GUI interface (note the view tags\nfor the fields which provide hints to how things should be displayed).", Fields: []types.Field{{Name: "Config", Doc: "simulation configuration parameters -- set by .toml config file and / or args"}, {Name: "Net", Doc: "the network -- click to view / edit parameters for layers, paths, etc"}, {Name: "Params", Doc: "all parameter management"}, {Name: "Loops", Doc: "contains looper control loops for running sim"}, {Name: "Stats", Doc: "contains computed statistic values"}, {Name: "Logs", Doc: "Contains all the logs and information about the logs.'"}, {Name: "Envs", Doc: "Environments"}, {Name: "Context", Doc: "axon timing parameters and state"}, {Name: "ViewUpdate", Doc: "netview update parameters"}, {Name: "GUI", Doc: "manages all the gui elements"}, {Name: "RandSeeds", Doc: "a list of random seeds to use for each run"}}})
+var _ = types.AddType(&types.Type{Name: "main.Modes", IDName: "modes", Doc: "Modes are the looping modes (Stacks) for running and statistics."})
+
+var _ = types.AddType(&types.Type{Name: "main.Levels", IDName: "levels", Doc: "Levels are the looping levels for running and statistics."})
+
+var _ = types.AddType(&types.Type{Name: "main.StatsPhase", IDName: "stats-phase", Doc: "StatsPhase is the phase of stats processing for given mode, level.\nAccumulated values are reset at Start, added each Step."})
+
+var _ = types.AddType(&types.Type{Name: "main.Sim", IDName: "sim", Doc: "Sim encapsulates the entire simulation model, and we define all the\nfunctionality as methods on this struct. This structure keeps all relevant\nstate information organized and available without having to pass everything around\nas arguments to methods, and provides the core GUI interface (note the view tags\nfor the fields which provide hints to how things should be displayed).", Fields: []types.Field{{Name: "Config", Doc: "simulation configuration parameters -- set by .toml config file and / or args"}, {Name: "Net", Doc: "Net is the network: click to view / edit parameters for layers, paths, etc."}, {Name: "Params", Doc: "Params manages network parameter setting."}, {Name: "Loops", Doc: "Loops are the the control loops for running the sim, in different Modes\nacross stacks of Levels."}, {Name: "Envs", Doc: "Envs provides mode-string based storage of environments."}, {Name: "TrainUpdate", Doc: "TrainUpdate has Train mode netview update parameters."}, {Name: "TestUpdate", Doc: "TestUpdate has Test mode netview update parameters."}, {Name: "Root", Doc: "Root is the root tensorfs directory, where all stats and other misc sim data goes."}, {Name: "Stats", Doc: "Stats has the stats directory within Root."}, {Name: "Current", Doc: "Current has the current stats values within Stats."}, {Name: "StatFuncs", Doc: "StatFuncs are statistics functions called at given mode and level,\nto perform all stats computations. phase = Start does init at start of given level,\nand all intialization / configuration (called during Init too)."}, {Name: "GUI", Doc: "GUI manages all the GUI elements"}, {Name: "RandSeeds", Doc: "RandSeeds is a list of random seeds to use for each run."}}})
var _ = types.AddType(&types.Type{Name: "main.FSAEnv", IDName: "fsa-env", Doc: "FSAEnv generates states in a finite state automaton (FSA) which is a\nsimple form of grammar for creating non-deterministic but still\noverall structured sequences.", Fields: []types.Field{{Name: "Name", Doc: "name of this environment"}, {Name: "TMat", Doc: "transition matrix, which is a square NxN tensor with outer dim being\ncurrent state and inner dim having probability of transitioning to that state."}, {Name: "Labels", Doc: "transition labels, one for each transition cell in TMat matrix."}, {Name: "AState", Doc: "automaton state within FSA that we're in."}, {Name: "NNext", Doc: "number of next states in current state output (scalar)."}, {Name: "NextStates", Doc: "next states that have non-zero probability, with actual randomly\nchosen next state at start."}, {Name: "NextLabels", Doc: "transition labels for next states that have non-zero probability,\nwith actual randomly chosen one for next state at start."}, {Name: "Seq", Doc: "sequence counter within epoch."}, {Name: "Tick", Doc: "tick counter within sequence."}, {Name: "Trial", Doc: "trial is the step counter within sequence, which is how many steps taken\nwithin current sequence. It resets to 0 at start of each sequence."}, {Name: "Rand", Doc: "random number generator for the env. all random calls must use this.\nset seed here for weight initialization values."}, {Name: "RandSeed", Doc: "random seed."}}})
+
+var _ = types.AddFunc(&types.Func{Name: "main.main"})
+
+var _ = types.AddFunc(&types.Func{Name: "main.RunSim", Doc: "RunSim runs the simulation with given configuration.", Args: []string{"cfg"}, Returns: []string{"error"}})
diff --git a/examples/ra25/ra25.go b/examples/ra25/ra25.go
index a0cf28a1..bc9725a9 100644
--- a/examples/ra25/ra25.go
+++ b/examples/ra25/ra25.go
@@ -1,4 +1,4 @@
-// Copyright (c) 2019, The Emergent Authors. All rights reserved.
+// Copyright (c) 2024, The Emergent Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
@@ -471,7 +471,7 @@ func (ss *Sim) ApplyInputs(mode Modes) {
net.InitExt()
for di := range ndata {
ev.Step()
- tensorfs.Value[string](curModeDir, "TrialName", ndata).SetString1D(ev.String(), di)
+ curModeDir.StringValue("TrialName", ndata).SetString1D(ev.String(), di)
for _, lnm := range lays {
ly := ss.Net.LayerByName(lnm)
st := ev.State(ly.Name)
@@ -576,14 +576,14 @@ func (ss *Sim) RunStats(mode Modes, level Levels, phase StatsPhase) {
// based on params extra sheets and tag, and starting run number (for distributed runs).
func (ss *Sim) SetRunName() string {
runName := ss.Params.RunName(ss.Config.Run.Run)
- tensorfs.Scalar[string](ss.Current, "RunName").SetString1D(runName, 0)
+ ss.Current.StringValue("RunName", 1).SetString1D(runName, 0)
return runName
}
// RunName returns the overall run name, used for naming output logs and weight files
// based on params extra sheets and tag, and starting run number (for distributed runs).
func (ss *Sim) RunName() string {
- return tensorfs.Scalar[string](ss.Current, "RunName").String1D(0)
+ return ss.Current.StringValue("RunName", 1).String1D(0)
}
// InitStats initializes all the stats by calling Start across all modes and levels.
@@ -634,7 +634,7 @@ func (ss *Sim) ConfigStats() {
modeDir := ss.Stats.RecycleDir(mode.String())
curModeDir := ss.Current.RecycleDir(mode.String())
levelDir := modeDir.RecycleDir(level.String())
- tsr := tensorfs.Value[string](levelDir, name)
+ tsr := levelDir.StringValue(name)
ndata := int(ss.Net.Context().NData)
if phase == Start {
tsr.SetNumRows(0)
@@ -648,7 +648,7 @@ func (ss *Sim) ConfigStats() {
}
for di := range ndata {
// saved in apply inputs
- trlNm := tensorfs.Value[string](curModeDir, name, ndata).String1D(di)
+ trlNm := curModeDir.StringValue(name, ndata).String1D(di)
tsr.AppendRowString(trlNm)
}
})
@@ -665,7 +665,7 @@ func (ss *Sim) ConfigStats() {
curModeDir := ss.Current.RecycleDir(mode.String())
levelDir := modeDir.RecycleDir(level.String())
subDir := modeDir.RecycleDir((level - 1).String()) // note: will fail for Cycle
- tsr := tensorfs.Value[float64](levelDir, name)
+ tsr := levelDir.Float64(name)
ndata := int(ss.Net.Context().NData)
var stat float64
if phase == Start {
@@ -688,11 +688,11 @@ func (ss *Sim) ConfigStats() {
switch name {
case "NZero":
if level == Epoch {
- tensorfs.Scalar[float64](curModeDir, name).SetFloat1D(0, 0)
+ curModeDir.Float64(name, 1).SetFloat1D(0, 0)
}
case "FirstZero", "LastZero":
if level == Epoch {
- tensorfs.Scalar[float64](curModeDir, name).SetFloat1D(-1, 0)
+ curModeDir.Float64(name, 1).SetFloat1D(-1, 0)
}
}
continue
@@ -708,39 +708,39 @@ func (ss *Sim) ConfigStats() {
case "UnitErr":
stat = out.PctUnitErr(ss.Net.Context())[di]
case "Err":
- uniterr := tensorfs.Value[float64](curModeDir, "UnitErr", ndata).Float1D(di)
+ uniterr := curModeDir.Float64("UnitErr", ndata).Float1D(di)
stat = 1.0
if uniterr == 0 {
stat = 0
}
}
- tensorfs.Value[float64](curModeDir, name, ndata).SetFloat1D(stat, di)
+ curModeDir.Float64(name, ndata).SetFloat1D(stat, di)
tsr.AppendRowFloat(stat)
}
case Epoch:
- nz := tensorfs.Scalar[float64](curModeDir, "NZero").Float1D(0)
+ nz := curModeDir.Float64("NZero", 1).Float1D(0)
switch name {
case "NZero":
err := stats.StatSum.Call(subDir.Value("Err")).Float1D(0)
- stat = tensorfs.Scalar[float64](curModeDir, name).Float1D(0)
+ stat = curModeDir.Float64(name, 1).Float1D(0)
if err == 0 {
stat++
} else {
stat = 0
}
- tensorfs.Scalar[float64](curModeDir, name).SetFloat1D(stat, 0)
+ curModeDir.Float64(name, 1).SetFloat1D(stat, 0)
case "FirstZero":
- stat = tensorfs.Scalar[float64](curModeDir, name).Float1D(0)
+ stat = curModeDir.Float64(name, 1).Float1D(0)
if stat < 0 && nz == 1 {
- stat = tensorfs.Scalar[float64](curModeDir, "Epoch").Float1D(0)
+ stat = curModeDir.Int("Epoch", 1).Float1D(0)
}
- tensorfs.Scalar[float64](curModeDir, name).SetFloat1D(stat, 0)
+ curModeDir.Float64(name, 1).SetFloat1D(stat, 0)
case "LastZero":
- stat = tensorfs.Scalar[float64](curModeDir, name).Float1D(0)
+ stat = curModeDir.Float64(name, 1).Float1D(0)
if stat < 0 && nz >= float64(ss.Config.Run.NZero) {
- stat = tensorfs.Scalar[float64](curModeDir, "Epoch").Float1D(0)
+ stat = curModeDir.Int("Epoch", 1).Float1D(0)
}
- tensorfs.Scalar[float64](curModeDir, name).SetFloat1D(stat, 0)
+ curModeDir.Float64(name, 1).SetFloat1D(stat, 0)
default:
stat = stats.StatMean.Call(subDir.Value(name)).Float1D(0)
}
@@ -793,13 +793,13 @@ func (ss *Sim) StatCounters(mode, level enums.Enum) string {
if curModeDir.Node("TrialName") == nil {
return counters
}
- counters += fmt.Sprintf(" TrialName: %s", tensorfs.Value[string](curModeDir, "TrialName").String1D(di))
+ counters += fmt.Sprintf(" TrialName: %s", curModeDir.StringValue("TrialName").String1D(di))
statNames := []string{"CorSim", "UnitErr", "Err"}
if level == Cycle || curModeDir.Node(statNames[0]) == nil {
return counters
}
for _, name := range statNames {
- counters += fmt.Sprintf(" %s: %.4g", name, tensorfs.Value[float64](curModeDir, name).Float1D(di))
+ counters += fmt.Sprintf(" %s: %.4g", name, curModeDir.Float64(name).Float1D(di))
}
return counters
}
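
The ra25 hunks migrate the generic tensorfs.Value[T] and tensorfs.Scalar[T] accessors to typed methods on the directory node, with scalars expressed as size-1 tensors. The mapping, as inferred from the hunks in this diff, plus a small usage sketch ("base_000" is a made-up run name):

    // tensorfs.Value[float64](dir, name)    -> dir.Float64(name)
    // tensorfs.Value[float64](dir, name, n) -> dir.Float64(name, n)
    // tensorfs.Value[string](dir, name, n)  -> dir.StringValue(name, n)
    // tensorfs.Value[int](dir, name, n)     -> dir.Int(name, n)
    // tensorfs.Scalar[float64](dir, name)   -> dir.Float64(name, 1)
    // tensorfs.Scalar[string](dir, name)    -> dir.StringValue(name, 1)
    // tensorfs.Scalar[int](dir, name)       -> dir.Int(name, 1)
    // For example, writing then reading a per-run scalar stat:
    rn := ss.Current.StringValue("RunName", 1)
    rn.SetString1D("base_000", 0)
    _ = rn.String1D(0) // "base_000"
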