From bd897844ff2bac605670eca9c72bbc3c720e6a65 Mon Sep 17 00:00:00 2001 From: "Randall C. O'Reilly" Date: Wed, 18 Dec 2024 23:46:51 -0800 Subject: [PATCH] hip opens but patterns need updated patgen --- axon/hip_net.go | 2 +- examples/hip/enumgen.go | 128 ++++++++++++++++++++++ examples/hip/hip.go | 232 ++++++++++++++++++++-------------------- examples/hip/params.go | 30 +++--- examples/hip/typegen.go | 24 +++-- 5 files changed, 277 insertions(+), 139 deletions(-) create mode 100644 examples/hip/enumgen.go diff --git a/axon/hip_net.go b/axon/hip_net.go index c2e2d7c8..8ff0866c 100644 --- a/axon/hip_net.go +++ b/axon/hip_net.go @@ -113,7 +113,7 @@ func (hip *HipConfig) Defaults() { // AddHip adds a new Hippocampal network for episodic memory. // Returns layers most likely to be used for remaining connections and positions. -func (net *Network) AddHip(ctx *Context, hip *HipConfig, space float32) (ec2, ec3, dg, ca3, ca1, ec5 *Layer) { +func (net *Network) AddHip(hip *HipConfig, space float32) (ec2, ec3, dg, ca3, ca1, ec5 *Layer) { // Trisynaptic Pathway (TSP) ec2 = net.AddLayer2D("EC2", SuperLayer, hip.EC2Size.Y, hip.EC2Size.X) ec2.SetSampleIndexesShape(emer.Layer2DSampleIndexes(ec2, 10)) diff --git a/examples/hip/enumgen.go b/examples/hip/enumgen.go new file mode 100644 index 00000000..daae20c8 --- /dev/null +++ b/examples/hip/enumgen.go @@ -0,0 +1,128 @@ +// Code generated by "core generate -add-types -add-funcs"; DO NOT EDIT. + +package main + +import ( + "cogentcore.org/core/enums" +) + +var _ModesValues = []Modes{0, 1} + +// ModesN is the highest valid value for type Modes, plus one. +const ModesN Modes = 2 + +var _ModesValueMap = map[string]Modes{`Train`: 0, `Test`: 1} + +var _ModesDescMap = map[Modes]string{0: ``, 1: ``} + +var _ModesMap = map[Modes]string{0: `Train`, 1: `Test`} + +// String returns the string representation of this Modes value. +func (i Modes) String() string { return enums.String(i, _ModesMap) } + +// SetString sets the Modes value from its string representation, +// and returns an error if the string is invalid. +func (i *Modes) SetString(s string) error { return enums.SetString(i, s, _ModesValueMap, "Modes") } + +// Int64 returns the Modes value as an int64. +func (i Modes) Int64() int64 { return int64(i) } + +// SetInt64 sets the Modes value from an int64. +func (i *Modes) SetInt64(in int64) { *i = Modes(in) } + +// Desc returns the description of the Modes value. +func (i Modes) Desc() string { return enums.Desc(i, _ModesDescMap) } + +// ModesValues returns all possible values for the type Modes. +func ModesValues() []Modes { return _ModesValues } + +// Values returns all possible values for the type Modes. +func (i Modes) Values() []enums.Enum { return enums.Values(_ModesValues) } + +// MarshalText implements the [encoding.TextMarshaler] interface. +func (i Modes) MarshalText() ([]byte, error) { return []byte(i.String()), nil } + +// UnmarshalText implements the [encoding.TextUnmarshaler] interface. +func (i *Modes) UnmarshalText(text []byte) error { return enums.UnmarshalText(i, text, "Modes") } + +var _LevelsValues = []Levels{0, 1, 2, 3} + +// LevelsN is the highest valid value for type Levels, plus one. +const LevelsN Levels = 4 + +var _LevelsValueMap = map[string]Levels{`Cycle`: 0, `Trial`: 1, `Epoch`: 2, `Run`: 3} + +var _LevelsDescMap = map[Levels]string{0: ``, 1: ``, 2: ``, 3: ``} + +var _LevelsMap = map[Levels]string{0: `Cycle`, 1: `Trial`, 2: `Epoch`, 3: `Run`} + +// String returns the string representation of this Levels value. 
+func (i Levels) String() string { return enums.String(i, _LevelsMap) } + +// SetString sets the Levels value from its string representation, +// and returns an error if the string is invalid. +func (i *Levels) SetString(s string) error { return enums.SetString(i, s, _LevelsValueMap, "Levels") } + +// Int64 returns the Levels value as an int64. +func (i Levels) Int64() int64 { return int64(i) } + +// SetInt64 sets the Levels value from an int64. +func (i *Levels) SetInt64(in int64) { *i = Levels(in) } + +// Desc returns the description of the Levels value. +func (i Levels) Desc() string { return enums.Desc(i, _LevelsDescMap) } + +// LevelsValues returns all possible values for the type Levels. +func LevelsValues() []Levels { return _LevelsValues } + +// Values returns all possible values for the type Levels. +func (i Levels) Values() []enums.Enum { return enums.Values(_LevelsValues) } + +// MarshalText implements the [encoding.TextMarshaler] interface. +func (i Levels) MarshalText() ([]byte, error) { return []byte(i.String()), nil } + +// UnmarshalText implements the [encoding.TextUnmarshaler] interface. +func (i *Levels) UnmarshalText(text []byte) error { return enums.UnmarshalText(i, text, "Levels") } + +var _StatsPhaseValues = []StatsPhase{0, 1} + +// StatsPhaseN is the highest valid value for type StatsPhase, plus one. +const StatsPhaseN StatsPhase = 2 + +var _StatsPhaseValueMap = map[string]StatsPhase{`Start`: 0, `Step`: 1} + +var _StatsPhaseDescMap = map[StatsPhase]string{0: ``, 1: ``} + +var _StatsPhaseMap = map[StatsPhase]string{0: `Start`, 1: `Step`} + +// String returns the string representation of this StatsPhase value. +func (i StatsPhase) String() string { return enums.String(i, _StatsPhaseMap) } + +// SetString sets the StatsPhase value from its string representation, +// and returns an error if the string is invalid. +func (i *StatsPhase) SetString(s string) error { + return enums.SetString(i, s, _StatsPhaseValueMap, "StatsPhase") +} + +// Int64 returns the StatsPhase value as an int64. +func (i StatsPhase) Int64() int64 { return int64(i) } + +// SetInt64 sets the StatsPhase value from an int64. +func (i *StatsPhase) SetInt64(in int64) { *i = StatsPhase(in) } + +// Desc returns the description of the StatsPhase value. +func (i StatsPhase) Desc() string { return enums.Desc(i, _StatsPhaseDescMap) } + +// StatsPhaseValues returns all possible values for the type StatsPhase. +func StatsPhaseValues() []StatsPhase { return _StatsPhaseValues } + +// Values returns all possible values for the type StatsPhase. +func (i StatsPhase) Values() []enums.Enum { return enums.Values(_StatsPhaseValues) } + +// MarshalText implements the [encoding.TextMarshaler] interface. +func (i StatsPhase) MarshalText() ([]byte, error) { return []byte(i.String()), nil } + +// UnmarshalText implements the [encoding.TextUnmarshaler] interface. 
+func (i *StatsPhase) UnmarshalText(text []byte) error { + return enums.UnmarshalText(i, text, "StatsPhase") +} diff --git a/examples/hip/hip.go b/examples/hip/hip.go index 6e929250..a2c66c42 100644 --- a/examples/hip/hip.go +++ b/examples/hip/hip.go @@ -9,9 +9,7 @@ package main import ( "fmt" - "io/fs" - "cogentcore.org/core/base/errors" "cogentcore.org/core/base/metadata" "cogentcore.org/core/base/mpi" "cogentcore.org/core/base/randx" @@ -30,13 +28,13 @@ import ( "github.com/emer/emergent/v2/egui" "github.com/emer/emergent/v2/env" "github.com/emer/emergent/v2/looper" - "github.com/emer/emergent/v2/patgen" "github.com/emer/emergent/v2/paths" ) func main() { cfg := &Config{} cli.SetFromDefaults(cfg) + cfg.Defaults() opts := cli.DefaultOptions(cfg.Name, cfg.Title) opts.DefaultFiles = append(opts.DefaultFiles, "config.toml") cli.Run(opts, cfg, RunSim) @@ -137,8 +135,7 @@ func (ss *Sim) Run() { axon.GPUInit() axon.UseGPU = true } - // ss.ConfigInputs() - ss.OpenInputs() + ss.ConfigInputs() ss.ConfigEnv() ss.ConfigNet(ss.Net) ss.ConfigLoops() @@ -158,6 +155,76 @@ func (ss *Sim) Run() { } } +//////// Inputs + +func (ss *Sim) ConfigInputs() { + + // todo: redo patgen using tensorfs + + // hp := &ss.Config.Hip + // ecY := hp.EC3NPool.Y + // ecX := hp.EC3NPool.X + // plY := hp.EC3NNrn.Y // good idea to get shorter vars when used frequently + // plX := hp.EC3NNrn.X // makes much more readable + // npats := ss.Config.Run.NTrials + // pctAct := ss.Config.Mod.ECPctAct + // minDiff := ss.Config.Pat.MinDiffPct + // nOn := patgen.NFromPct(pctAct, plY*plX) + // ctxtflip := patgen.NFromPct(ss.Config.Pat.CtxtFlipPct, nOn) + // patgen.AddVocabEmpty(ss.PoolVocab, "empty", npats, plY, plX) + // patgen.AddVocabPermutedBinary(ss.PoolVocab, "A", npats, plY, plX, pctAct, minDiff) + // patgen.AddVocabPermutedBinary(ss.PoolVocab, "B", npats, plY, plX, pctAct, minDiff) + // patgen.AddVocabPermutedBinary(ss.PoolVocab, "C", npats, plY, plX, pctAct, minDiff) + // patgen.AddVocabPermutedBinary(ss.PoolVocab, "lA", npats, plY, plX, pctAct, minDiff) + // patgen.AddVocabPermutedBinary(ss.PoolVocab, "lB", npats, plY, plX, pctAct, minDiff) + // patgen.AddVocabPermutedBinary(ss.PoolVocab, "ctxt", 3, plY, plX, pctAct, minDiff) // totally diff + // + // for i := 0; i < (ecY-1)*ecX*3; i++ { // 12 contexts! 
1: 1 row of stimuli pats; 3: 3 diff ctxt bases + // list := i / ((ecY - 1) * ecX) + // ctxtNm := fmt.Sprintf("ctxt%d", i+1) + // tsr, _ := patgen.AddVocabRepeat(ss.PoolVocab, ctxtNm, npats, "ctxt", list) + // patgen.FlipBitsRows(tsr, ctxtflip, ctxtflip, 1, 0) + // //todo: also support drifting + // //solution 2: drift based on last trial (will require sequential learning) + // //patgen.VocabDrift(ss.PoolVocab, ss.NFlipBits, "ctxt"+strconv.Itoa(i+1)) + // } + // + // patgen.InitPats(ss.TrainAB, "TrainAB", "TrainAB Pats", "Input", "EC5", npats, ecY, ecX, plY, plX) + // patgen.MixPats(ss.TrainAB, ss.PoolVocab, "Input", []string{"A", "B", "ctxt1", "ctxt2", "ctxt3", "ctxt4"}) + // patgen.MixPats(ss.TrainAB, ss.PoolVocab, "EC5", []string{"A", "B", "ctxt1", "ctxt2", "ctxt3", "ctxt4"}) + // + // patgen.InitPats(ss.TestAB, "TestAB", "TestAB Pats", "Input", "EC5", npats, ecY, ecX, plY, plX) + // patgen.MixPats(ss.TestAB, ss.PoolVocab, "Input", []string{"A", "empty", "ctxt1", "ctxt2", "ctxt3", "ctxt4"}) + // patgen.MixPats(ss.TestAB, ss.PoolVocab, "EC5", []string{"A", "B", "ctxt1", "ctxt2", "ctxt3", "ctxt4"}) + // + // patgen.InitPats(ss.TrainAC, "TrainAC", "TrainAC Pats", "Input", "EC5", npats, ecY, ecX, plY, plX) + // patgen.MixPats(ss.TrainAC, ss.PoolVocab, "Input", []string{"A", "C", "ctxt5", "ctxt6", "ctxt7", "ctxt8"}) + // patgen.MixPats(ss.TrainAC, ss.PoolVocab, "EC5", []string{"A", "C", "ctxt5", "ctxt6", "ctxt7", "ctxt8"}) + // + // patgen.InitPats(ss.TestAC, "TestAC", "TestAC Pats", "Input", "EC5", npats, ecY, ecX, plY, plX) + // patgen.MixPats(ss.TestAC, ss.PoolVocab, "Input", []string{"A", "empty", "ctxt5", "ctxt6", "ctxt7", "ctxt8"}) + // patgen.MixPats(ss.TestAC, ss.PoolVocab, "EC5", []string{"A", "C", "ctxt5", "ctxt6", "ctxt7", "ctxt8"}) + // + // patgen.InitPats(ss.PreTrainLure, "PreTrainLure", "PreTrainLure Pats", "Input", "EC5", npats, ecY, ecX, plY, plX) + // patgen.MixPats(ss.PreTrainLure, ss.PoolVocab, "Input", []string{"lA", "lB", "ctxt9", "ctxt10", "ctxt11", "ctxt12"}) // arbitrary ctxt here + // patgen.MixPats(ss.PreTrainLure, ss.PoolVocab, "EC5", []string{"lA", "lB", "ctxt9", "ctxt10", "ctxt11", "ctxt12"}) // arbitrary ctxt here + // + // patgen.InitPats(ss.TestLure, "TestLure", "TestLure Pats", "Input", "EC5", npats, ecY, ecX, plY, plX) + // patgen.MixPats(ss.TestLure, ss.PoolVocab, "Input", []string{"lA", "empty", "ctxt9", "ctxt10", "ctxt11", "ctxt12"}) // arbitrary ctxt here + // patgen.MixPats(ss.TestLure, ss.PoolVocab, "EC5", []string{"lA", "lB", "ctxt9", "ctxt10", "ctxt11", "ctxt12"}) // arbitrary ctxt here + // + // ss.TrainAll = ss.TrainAB.Clone() + // ss.TrainAll.AppendRows(ss.TrainAC) + // ss.TrainAll.AppendRows(ss.PreTrainLure) + // ss.TrainAll.MetaData["name"] = "TrainAll" + // ss.TrainAll.MetaData["desc"] = "All Training Patterns" + // + // ss.TestABAC = ss.TestAB.Clone() + // ss.TestABAC.AppendRows(ss.TestAC) + // ss.TestABAC.MetaData["name"] = "TestABAC" + // ss.TestABAC.MetaData["desc"] = "All Testing Patterns" +} + func (ss *Sim) ConfigEnv() { // Can be called multiple times -- don't re-create var trn, tst *env.FixedTable @@ -182,11 +249,11 @@ func (ss *Sim) ConfigEnv() { // note: names must be standard here! 
trn.Name = Train.String() - trn.Config(table.NewView(inputs)) + trn.Config(table.NewView(inputs)) // todo: TrainAB trn.Validate() tst.Name = Test.String() - tst.Config(table.NewView(inputs)) + tst.Config(table.NewView(inputs)) // todo: TestABAC tst.Sequential = true tst.Validate() @@ -201,34 +268,21 @@ func (ss *Sim) ConfigNet(net *axon.Network) { net.SetMaxData(ss.Config.Run.NData) net.SetRandSeed(ss.RandSeeds[0]) // init new separate random seed, using run = 0 - inp := net.AddLayer2D("Input", axon.InputLayer, 5, 5) - hid1 := net.AddLayer2D("Hidden1", axon.SuperLayer, ss.Config.Params.Hidden1Size.Y, ss.Config.Params.Hidden1Size.X) - hid2 := net.AddLayer2D("Hidden2", axon.SuperLayer, ss.Config.Params.Hidden2Size.Y, ss.Config.Params.Hidden2Size.X) - out := net.AddLayer2D("Output", axon.TargetLayer, 5, 5) - - // use this to position layers relative to each other - // hid2.PlaceRightOf(hid1, 2) - - // note: see emergent/path module for all the options on how to connect - // NewFull returns a new paths.Full connectivity pattern - full := paths.NewFull() - - net.ConnectLayers(inp, hid1, full, axon.ForwardPath) - net.BidirConnectLayers(hid1, hid2, full) - net.BidirConnectLayers(hid2, out, full) - - // net.LateralConnectLayerPath(hid1, full, &axon.HebbPath{}).SetType(InhibPath) - - // note: if you wanted to change a layer type from e.g., Target to Compare, do this: - // out.Type = axon.CompareLayer - // that would mean that the output layer doesn't reflect target values in plus phase - // and thus removes error-driven learning -- but stats are still computed. + hip := &ss.Config.Hip + in := net.AddLayer4D("Input", axon.InputLayer, hip.EC3NPool.Y, hip.EC3NPool.X, hip.EC3NNrn.Y, hip.EC3NNrn.X) + inToEc2 := paths.NewUniformRand() + inToEc2.PCon = ss.Config.Params.InToEc2PCon + onetoone := paths.NewOneToOne() + ec2, ec3, _, _, _, _ := net.AddHip(hip, 2) + net.ConnectLayers(in, ec2, inToEc2, axon.ForwardPath) + net.ConnectLayers(in, ec3, onetoone, axon.ForwardPath) + ec2.PlaceAbove(in) net.Build() net.Defaults() net.SetNThreads(ss.Config.Run.NThreads) ss.ApplyParams() - net.InitWeights() + // net.InitWeights() } func (ss *Sim) ApplyParams() { @@ -305,17 +359,17 @@ func (ss *Sim) ConfigLoops() { ls.Loop(Train, Run).OnStart.Add("NewRun", ss.NewRun) trainEpoch := ls.Loop(Train, Epoch) - trainEpoch.IsDone.AddBool("NZeroStop", func() bool { - stopNz := ss.Config.Run.NZero - if stopNz <= 0 { - return false - } - curModeDir := ss.Current.RecycleDir(Train.String()) - curNZero := int(curModeDir.Value("NZero").Float1D(-1)) - stop := curNZero >= stopNz - return stop - return false - }) + // trainEpoch.IsDone.AddBool("NZeroStop", func() bool { + // stopNz := ss.Config.Run.NZero + // if stopNz <= 0 { + // return false + // } + // curModeDir := ss.Current.RecycleDir(Train.String()) + // curNZero := int(curModeDir.Value("NZero").Float1D(-1)) + // stop := curNZero >= stopNz + // return stop + // return false + // }) trainEpoch.OnStart.Add("TestAtInterval", func() { if (ss.Config.Run.TestInterval > 0) && ((trainEpoch.Counter.Cur+1)%ss.Config.Run.TestInterval == 0) { @@ -375,10 +429,6 @@ func (ss *Sim) NewRun() { ss.Envs.ByMode(Test).Init(0) ctx.Reset() ss.Net.InitWeights() - if ss.Config.Run.StartWeights != "" { - ss.Net.OpenWeightsJSON(core.Filename(ss.Config.Run.StartWeights)) - mpi.Printf("Starting with initial weights from: %s\n", ss.Config.Run.StartWeights) - } } // TestAll runs through the full set of testing items @@ -388,43 +438,6 @@ func (ss *Sim) TestAll() { ss.Loops.Mode = Train // important because this is 
called from Train Run: go back. } -//////// Inputs - -func (ss *Sim) ConfigInputs() { - dt := table.New() - metadata.SetName(dt, "Train") - metadata.SetDoc(dt, "Training inputs") - dt.AddStringColumn("Name") - dt.AddFloat32Column("Input", 5, 5) - dt.AddFloat32Column("Output", 5, 5) - dt.SetNumRows(25) - - patgen.PermutedBinaryMinDiff(dt.ColumnByIndex(1).Tensor.(*tensor.Float32), 6, 1, 0, 3) - patgen.PermutedBinaryMinDiff(dt.ColumnByIndex(2).Tensor.(*tensor.Float32), 6, 1, 0, 3) - dt.SaveCSV("random_5x5_25_gen.tsv", tensor.Tab, table.Headers) - - tensorfs.DirFromTable(ss.Root.RecycleDir("Inputs/Train"), dt) -} - -// OpenTable opens a [table.Table] from embedded content, storing -// the data in the given tensorfs directory. -func (ss *Sim) OpenTable(dir *tensorfs.Node, fsys fs.FS, fnm, name, docs string) (*table.Table, error) { - dt := table.New() - metadata.SetName(dt, name) - metadata.SetDoc(dt, docs) - err := dt.OpenFS(content, fnm, tensor.Tab) - if errors.Log(err) != nil { - return dt, err - } - tensorfs.DirFromTable(dir.RecycleDir(name), dt) - return dt, err -} - -func (ss *Sim) OpenInputs() { - dir := ss.Root.RecycleDir("Inputs") - ss.OpenTable(dir, content, "random_5x5_25.tsv", "Train", "Training inputs") -} - //////// Stats // AddStat adds a stat compute function. @@ -591,29 +604,29 @@ func (ss *Sim) ConfigStats() { tsr.AppendRowFloat(stat) } case Epoch: - nz := curModeDir.Float64("NZero", 1).Float1D(0) + // nz := curModeDir.Float64("NZero", 1).Float1D(0) switch name { - case "NZero": - err := stats.StatSum.Call(subDir.Value("Err")).Float1D(0) - stat = curModeDir.Float64(name, 1).Float1D(0) - if err == 0 { - stat++ - } else { - stat = 0 - } - curModeDir.Float64(name, 1).SetFloat1D(stat, 0) - case "FirstZero": - stat = curModeDir.Float64(name, 1).Float1D(0) - if stat < 0 && nz == 1 { - stat = curModeDir.Int("Epoch", 1).Float1D(0) - } - curModeDir.Float64(name, 1).SetFloat1D(stat, 0) - case "LastZero": - stat = curModeDir.Float64(name, 1).Float1D(0) - if stat < 0 && nz >= float64(ss.Config.Run.NZero) { - stat = curModeDir.Int("Epoch", 1).Float1D(0) - } - curModeDir.Float64(name, 1).SetFloat1D(stat, 0) + // case "NZero": + // err := stats.StatSum.Call(subDir.Value("Err")).Float1D(0) + // stat = curModeDir.Float64(name, 1).Float1D(0) + // if err == 0 { + // stat++ + // } else { + // stat = 0 + // } + // curModeDir.Float64(name, 1).SetFloat1D(stat, 0) + // case "FirstZero": + // stat = curModeDir.Float64(name, 1).Float1D(0) + // if stat < 0 && nz == 1 { + // stat = curModeDir.Int("Epoch", 1).Float1D(0) + // } + // curModeDir.Float64(name, 1).SetFloat1D(stat, 0) + // case "LastZero": + // stat = curModeDir.Float64(name, 1).Float1D(0) + // if stat < 0 && nz >= float64(ss.Config.Run.NZero) { + // stat = curModeDir.Int("Epoch", 1).Float1D(0) + // } + // curModeDir.Float64(name, 1).SetFloat1D(stat, 0) default: stat = stats.StatMean.Call(subDir.Value(name)).Float1D(0) } @@ -641,17 +654,6 @@ func (ss *Sim) ConfigStats() { actGeFunc(mode, level, phase == Start) }) - pcaFunc := axon.StatPCA(ss.Stats, ss.Current, net, ss.Config.Run.PCAInterval, Train, Trial, lays...) 
- ss.AddStat(func(mode Modes, level Levels, phase StatsPhase) { - trnEpc := ss.Loops.Loop(Train, Epoch).Counter.Cur - pcaFunc(mode, level, phase == Start, trnEpc) - }) - - stateFunc := axon.StatLayerState(ss.Stats, net, Test, Trial, true, "ActM", "Input", "Output") - ss.AddStat(func(mode Modes, level Levels, phase StatsPhase) { - stateFunc(mode, level, phase == Start) - }) - runAllFunc := axon.StatLevelAll(ss.Stats, Train, Run, func(s *plot.Style, cl tensor.Values) { name := metadata.Name(cl) switch name { diff --git a/examples/hip/params.go b/examples/hip/params.go index e5d1f156..54a5f030 100644 --- a/examples/hip/params.go +++ b/examples/hip/params.go @@ -19,8 +19,8 @@ var LayerParams = axon.LayerSheets{ // ly.Inhib.Pool.On = true // ly.Act.Gbar.L = 0.1 ly.Inhib.ActAvg.Nominal = 0.05 - ly.Inhib.Layer.On = false - ly.Inhib.Pool.On = true + ly.Inhib.Layer.On.SetBool(false) + ly.Inhib.Pool.On.SetBool(true) ly.Inhib.Pool.Gi = 1.1 ly.Acts.Clamp.Ge = 1.4 // ly.Learn.TrgAvgAct.SubMean = 0 @@ -56,8 +56,8 @@ var LayerParams = axon.LayerSheets{ {Sel: "#CA1", Doc: "CA1 only Pools", Set: func(ly *axon.LayerParams) { ly.Inhib.ActAvg.Nominal = 0.03 - ly.Inhib.Layer.On = false - ly.Inhib.Pool.On = true + ly.Inhib.Layer.On.SetBool(false) + ly.Inhib.Pool.On.SetBool(true) ly.Inhib.Pool.Gi = 1.1 // ly.Learn.TrgAvgAct.SubMean = 0 // ly.Learn.TrgAvgAct.On = false @@ -78,10 +78,10 @@ var PathParams = axon.PathSheets{ // }}, {Sel: ".InhibLateral", Doc: "circle lateral inhibitory connection -- good params, longer time, more ABmem", Set: func(pt *axon.PathParams) { - pt.Learn.Learn = false // ??? not sure + pt.Learn.Learn.SetBool(false) // ??? not sure // pt.SWts.Init.Mean = 1 // 0.1 was the standard Grid model as of 02242023 pt.SWts.Init.Var = 0 - pt.SWts.Init.Sym = false + pt.SWts.Init.Sym.SetBool(false) pt.PathScale.Abs = 0.1 // lower is better for spiking model? }}, // {Sel: ".EcCa1Path", Doc: "encoder pathways -- Abs only affecting ec3toca1 and ec5toca1, not ca1toec5", @@ -91,14 +91,14 @@ var PathParams = axon.PathSheets{ // }}, {Sel: ".HippoCHL", Doc: "hippo CHL pathways -- no norm, moment, but YES wtbal = sig better", Set: func(pt *axon.PathParams) { - pt.Learn.Learn = true + pt.Learn.Learn.SetBool(true) // pt.CHL.Hebb = 0.01 // .01 > .05? > .1? pt.Learn.LRate.Base = 0.2 // .2 }}, {Sel: ".PPath", Doc: "performant path, new Dg error-driven EcCa1Path paths", Set: func(pt *axon.PathParams) { // pt.PathScale.Abs = 0.8 // 0.8 helps preventing CA3 fixation - pt.Learn.Learn = true + pt.Learn.Learn.SetBool(true) pt.Learn.LRate.Base = 0.2 // err driven: .15 > .2 > .25 > .1 }}, {Sel: "#CA1ToEC5", Doc: "extra strong from CA1 to EC5", @@ -108,18 +108,18 @@ var PathParams = axon.PathSheets{ }}, {Sel: "#InputToEC2", Doc: "for CAN ec2", Set: func(pt *axon.PathParams) { - pt.PathScale.Rel = 2.0 // 2 vs. 1: memory much better, FirstPerfect generally longer - pt.Learn.Learn = false // no learning better + pt.PathScale.Rel = 2.0 // 2 vs. 
1: memory much better, FirstPerfect generally longer
+		pt.Learn.Learn.SetBool(false) // no learning better
 	}},
 	{Sel: "#InputToEC3", Doc: "one-to-one input to EC",
 		Set: func(pt *axon.PathParams) {
-			pt.Learn.Learn = false
+			pt.Learn.Learn.SetBool(false)
 			pt.SWts.Init.Mean = 0.8
 			pt.SWts.Init.Var = 0.0
 		}},
 	{Sel: "#EC3ToEC2", Doc: "copied from InputToEC2",
 		Set: func(pt *axon.PathParams) {
-			pt.Learn.Learn = false // no learning better
+			pt.Learn.Learn.SetBool(false) // no learning better
 			//pt.Learn.LRate.Base = 0.01
 			//pt.SWts.Init.Mean = 0.8 // 0.8 is for one to one deterministic connections, not for learning!
 			//pt.SWts.Init.Var = 0
 		}},
@@ -127,14 +127,14 @@
 	{Sel: "#EC5ToEC3", Doc: "one-to-one out to in",
 		Set: func(pt *axon.PathParams) {
-			pt.Learn.Learn = false
+			pt.Learn.Learn.SetBool(false)
 			pt.SWts.Init.Mean = 0.9
 			pt.SWts.Init.Var = 0.01
 			pt.PathScale.Rel = 0.5 // was 0.5
 		}},
 	{Sel: "#DGToCA3", Doc: "Mossy fibers: strong, non-learning",
 		Set: func(pt *axon.PathParams) {
-			pt.Learn.Learn = false // learning here definitely does NOT work!
+			pt.Learn.Learn.SetBool(false) // learning here definitely does NOT work!
 			// pt.SWts.Init.Mean = 0.9 // commenting this out prevents CA3 overactivation
 			pt.SWts.Init.Var = 0.01
 			pt.PathScale.Rel = 4 // err del 4: 4 > 6 > 8
@@ -157,7 +157,7 @@
 			// pt.Hip.Err = 0.8
 			// pt.Hip.SAvgCor = 0.1
 			// pt.Hip.SNominal = 0.02 // !! need to keep it the same as actual layer Nominal
-			pt.Learn.Learn = true // absolutely essential to have on! learning slow if off. key for NoDGLearn
+			pt.Learn.Learn.SetBool(true) // absolutely essential to have on! learning slow if off. key for NoDGLearn
 			pt.PathScale.Abs = 0.7
 			pt.Learn.LRate.Base = 0.2
 		}},
diff --git a/examples/hip/typegen.go b/examples/hip/typegen.go
index cae0dfcf..501d14c9 100644
--- a/examples/hip/typegen.go
+++ b/examples/hip/typegen.go
@@ -1,4 +1,4 @@
-// Code generated by "core generate -add-types"; DO NOT EDIT.
+// Code generated by "core generate -add-types -add-funcs"; DO NOT EDIT.

 package main

@@ -6,16 +6,24 @@ import (
 	"cogentcore.org/core/types"
 )

-var _ = types.AddType(&types.Type{Name: "main.ParamConfig", IDName: "param-config", Doc: "ParamConfig has config parameters related to sim params", Fields: []types.Field{{Name: "Network", Doc: "network parameters"}, {Name: "Sheet", Doc: "Extra Param Sheet name(s) to use (space separated if multiple) -- must be valid name as listed in compiled-in params or loaded params"}, {Name: "Tag", Doc: "extra tag to add to file names and logs saved from this run"}, {Name: "Note", Doc: "user note -- describe the run params etc -- like a git commit message for the run"}, {Name: "File", Doc: "Name of the JSON file to input saved parameters from."}, {Name: "SaveAll", Doc: "Save a snapshot of all current param and config settings in a directory named params_ (or _good if Good is true), then quit -- useful for comparing to later changes and seeing multiple views of current params"}, {Name: "Good", Doc: "for SaveAll, save to params_good for a known good params state.
This can be done prior to making a new release after all tests are passing -- add results to git to provide a full diff record of all params over time."}}}) +var _ = types.AddType(&types.Type{Name: "main.EnvConfig", IDName: "env-config", Doc: "EnvConfig has config params for environment.", Fields: []types.Field{{Name: "Env", Doc: "Env parameters: can set any field/subfield on Env struct,\nusing standard TOML formatting."}, {Name: "MinDiffPct", Doc: "MinDiffPct is the minimum difference between item random patterns,\nas a proportion (0-1) of total active"}, {Name: "DriftCtxt", Doc: "DriftCtxt means use drifting context representations,\notherwise does bit flips from prototype."}, {Name: "CtxtFlipPct", Doc: "CtxtFlipPct is the proportion (0-1) of active bits to flip\nfor each context pattern, relative to a prototype, for non-drifting."}, {Name: "DriftPct", Doc: "DriftPct is percentage of active bits that drift, per step, for drifting context."}}}) -var _ = types.AddType(&types.Type{Name: "main.RunConfig", IDName: "run-config", Doc: "RunConfig has config parameters related to running the sim", Fields: []types.Field{{Name: "StopMem", Doc: "mem % correct level (proportion) above which training on current list stops (switch from AB to AC or stop on AC)"}, {Name: "GPU", Doc: "use the GPU for computation -- generally faster even for small models if NData ~16"}, {Name: "NThreads", Doc: "number of parallel threads for CPU computation -- 0 = use default"}, {Name: "Run", Doc: "starting run number -- determines the random seed -- runs counts from there -- can do all runs in parallel by launching separate jobs with each run, runs = 1"}, {Name: "Runs", Doc: "total number of runs to do when running Train"}, {Name: "Epochs", Doc: "total number of epochs per run"}, {Name: "NTrials", Doc: "total number of trials per epoch. Should be an even multiple of NData."}, {Name: "NData", Doc: "number of data-parallel items to process in parallel per trial -- works (and is significantly faster) for both CPU and GPU. Results in an effective mini-batch of learning."}, {Name: "TestInterval", Doc: "how often to run through all the test patterns, in terms of training epochs -- can use 0 or -1 for no testing"}}}) +var _ = types.AddType(&types.Type{Name: "main.ParamConfig", IDName: "param-config", Doc: "ParamConfig has config parameters related to sim params.", Fields: []types.Field{{Name: "InToEc2PCon", Doc: "InToEc2PCon is percent connectivity from Input to EC2."}, {Name: "ECPctAct", Doc: "ECPctAct is percent activation in EC pool, used in patgen for input generation."}, {Name: "Sheet", Doc: "Sheet is the extra params sheet name(s) to use (space separated\nif multiple). Must be valid name as listed in compiled-in params\nor loaded params."}, {Name: "Tag", Doc: "Tag is an extra tag to add to file names and logs saved from this run."}, {Name: "Note", Doc: "Note is additional info to describe the run params etc,\nlike a git commit message for the run."}, {Name: "SaveAll", Doc: "SaveAll will save a snapshot of all current param and config settings\nin a directory named params_ (or _good if Good is true),\nthen quit. 
Useful for comparing to later changes and seeing multiple\nviews of current params."}, {Name: "Good", Doc: "Good is for SaveAll: save to params_good for a known good params state.\nThis can be done prior to making a new release after all tests are passing.\nAdd results to git to provide a full diff record of all params over time."}}})

-var _ = types.AddType(&types.Type{Name: "main.RunConfig", IDName: "run-config", Doc: "RunConfig has config parameters related to running the sim", Fields: []types.Field{{Name: "StopMem", Doc: "mem % correct level (proportion) above which training on current list stops (switch from AB to AC or stop on AC)"}, {Name: "GPU", Doc: "use the GPU for computation -- generally faster even for small models if NData ~16"}, {Name: "NThreads", Doc: "number of parallel threads for CPU computation -- 0 = use default"}, {Name: "Run", Doc: "starting run number -- determines the random seed -- runs counts from there -- can do all runs in parallel by launching separate jobs with each run, runs = 1"}, {Name: "Runs", Doc: "total number of runs to do when running Train"}, {Name: "Epochs", Doc: "total number of epochs per run"}, {Name: "NTrials", Doc: "total number of trials per epoch. Should be an even multiple of NData."}, {Name: "NData", Doc: "number of data-parallel items to process in parallel per trial -- works (and is significantly faster) for both CPU and GPU. Results in an effective mini-batch of learning."}, {Name: "TestInterval", Doc: "how often to run through all the test patterns, in terms of training epochs -- can use 0 or -1 for no testing"}}})

+var _ = types.AddType(&types.Type{Name: "main.RunConfig", IDName: "run-config", Doc: "RunConfig has config parameters related to running the sim.", Fields: []types.Field{{Name: "GPU", Doc: "GPU uses the GPU for computation, generally faster than CPU even for\nsmall models if NData ~16."}, {Name: "NData", Doc: "NData is the number of data-parallel items to process in parallel per trial.\nIs significantly faster for both CPU and GPU. Results in an effective\nmini-batch of learning."}, {Name: "NThreads", Doc: "NThreads is the number of parallel threads for CPU computation;\n0 = use default."}, {Name: "MemThr", Doc: "MemThr is the threshold on proportion on / off error to count an item as remembered."}, {Name: "StopMem", Doc: "StopMem is memory pct correct level (proportion) above which training\non current list stops (switch from AB to AC or stop on AC)."}, {Name: "Run", Doc: "Run is the _starting_ run number, which determines the random seed.\nNRuns counts up from there. Can do all runs in parallel by launching\nseparate jobs with each starting Run, NRuns = 1."}, {Name: "Runs", Doc: "Runs is the total number of runs to do when running Train, starting from Run."}, {Name: "Epochs", Doc: "Epochs is the total number of epochs per run."}, {Name: "Trials", Doc: "Trials is the total number of trials per epoch.\nShould be an even multiple of NData."}, {Name: "Cycles", Doc: "Cycles is the total number of cycles per trial: at least 200."}, {Name: "PlusCycles", Doc: "PlusCycles is the total number of plus-phase cycles per trial. For Cycles=300, use 100."}, {Name: "TestInterval", Doc: "TestInterval is how often to run through all the test patterns,\nin terms of training epochs.
Can use 0 or -1 for no testing."}}}) -var _ = types.AddType(&types.Type{Name: "main.PatConfig", IDName: "pat-config", Doc: "PatConfig have the pattern parameters", Fields: []types.Field{{Name: "MinDiffPct", Doc: "minimum difference between item random patterns, as a proportion (0-1) of total active"}, {Name: "DriftCtxt", Doc: "use drifting context representations -- otherwise does bit flips from prototype"}, {Name: "CtxtFlipPct", Doc: "proportion (0-1) of active bits to flip for each context pattern, relative to a prototype, for non-drifting"}, {Name: "DriftPct", Doc: "percentage of active bits that drift, per step, for drifting context"}}}) +var _ = types.AddType(&types.Type{Name: "main.LogConfig", IDName: "log-config", Doc: "LogConfig has config parameters related to logging data.", Fields: []types.Field{{Name: "SaveWeights", Doc: "SaveWeights will save final weights after each run."}, {Name: "Train", Doc: "Train has the list of Train mode levels to save log files for."}, {Name: "Test", Doc: "Test has the list of Test mode levels to save log files for."}}}) -var _ = types.AddType(&types.Type{Name: "main.ModConfig", IDName: "mod-config", Fields: []types.Field{{Name: "InToEc2PCon", Doc: "percent connectivity from Input to EC2"}, {Name: "ECPctAct", Doc: "percent activation in EC pool, used in patgen for input generation\npercent activation in EC pool, used in patgen for input generation"}, {Name: "MemThr", Doc: "memory threshold"}}}) +var _ = types.AddType(&types.Type{Name: "main.Config", IDName: "config", Doc: "Config has the overall Sim configuration options.", Fields: []types.Field{{Name: "Name", Doc: "Name is the short name of the sim."}, {Name: "Title", Doc: "Title is the longer title of the sim."}, {Name: "URL", Doc: "URL is a link to the online README or other documentation for this sim."}, {Name: "Doc", Doc: "Doc is brief documentation of the sim."}, {Name: "Includes", Doc: "Includes has a list of additional config files to include.\nAfter configuration, it contains list of include files added."}, {Name: "GUI", Doc: "GUI means open the GUI. 
Otherwise it runs automatically and quits,\nsaving results to log files."}, {Name: "Debug", Doc: "Debug reports debugging information."}, {Name: "Hip", Doc: "Hip has hippocampus sizing parameters."}, {Name: "Env", Doc: "Env has environment configuration options."}, {Name: "Params", Doc: "Params has parameter related configuration options."}, {Name: "Run", Doc: "Run has sim running related configuration options."}, {Name: "Log", Doc: "Log has data logging related configuration options."}}}) -var _ = types.AddType(&types.Type{Name: "main.Config", IDName: "config", Doc: "Config is a standard Sim config -- use as a starting point.", Fields: []types.Field{{Name: "Includes", Doc: "specify include files here, and after configuration, it contains list of include files added"}, {Name: "GUI", Doc: "open the GUI -- does not automatically run -- if false, then runs automatically and quits"}, {Name: "Debug", Doc: "log debugging information"}, {Name: "Mod", Doc: "misc model parameters"}, {Name: "Hip", Doc: "Hippocampus sizing parameters"}, {Name: "Pat", Doc: "parameters for the input patterns"}, {Name: "Params", Doc: "parameter related configuration options"}, {Name: "Run", Doc: "sim running related configuration options"}, {Name: "Log", Doc: "data logging related configuration options"}}}) +var _ = types.AddType(&types.Type{Name: "main.Modes", IDName: "modes", Doc: "Modes are the looping modes (Stacks) for running and statistics."}) -var _ = types.AddType(&types.Type{Name: "main.Sim", IDName: "sim", Doc: "Sim encapsulates the entire simulation model, and we define all the\nfunctionality as methods on this struct. This structure keeps all relevant\nstate information organized and available without having to pass everything around\nas arguments to methods, and provides the core GUI interface (note the view tags\nfor the fields which provide hints to how things should be displayed).", Fields: []types.Field{{Name: "Config", Doc: "simulation configuration parameters -- set by .toml config file and / or args"}, {Name: "Net", Doc: "the network -- click to view / edit parameters for layers, paths, etc"}, {Name: "Params", Doc: "all parameter management"}, {Name: "Loops", Doc: "contains looper control loops for running sim"}, {Name: "Stats", Doc: "contains computed statistic values"}, {Name: "Logs", Doc: "Contains all the logs and information about the logs.'"}, {Name: "PretrainMode", Doc: "if true, run in pretrain mode"}, {Name: "PoolVocab", Doc: "pool patterns vocabulary"}, {Name: "TrainAB", Doc: "AB training patterns to use"}, {Name: "TrainAC", Doc: "AC training patterns to use"}, {Name: "TestAB", Doc: "AB testing patterns to use"}, {Name: "TestAC", Doc: "AC testing patterns to use"}, {Name: "PreTrainLure", Doc: "Lure pretrain patterns to use"}, {Name: "TestLure", Doc: "Lure testing patterns to use"}, {Name: "TrainAll", Doc: "all training patterns -- for pretrain"}, {Name: "TestABAC", Doc: "TestAB + TestAC"}, {Name: "Envs", Doc: "Environments"}, {Name: "Context", Doc: "axon timing parameters and state"}, {Name: "ViewUpdate", Doc: "netview update parameters"}, {Name: "GUI", Doc: "manages all the gui elements"}, {Name: "RandSeeds", Doc: "a list of random seeds to use for each run"}}}) +var _ = types.AddType(&types.Type{Name: "main.Levels", IDName: "levels", Doc: "Levels are the looping levels for running and statistics."}) + +var _ = types.AddType(&types.Type{Name: "main.StatsPhase", IDName: "stats-phase", Doc: "StatsPhase is the phase of stats processing for given mode, level.\nAccumulated values are reset at 
Start, added each Step."})

var _ = types.AddType(&types.Type{Name: "main.Sim", IDName: "sim", Doc: "Sim encapsulates the entire simulation model, and we define all the\nfunctionality as methods on this struct. This structure keeps all relevant\nstate information organized and available without having to pass everything around\nas arguments to methods, and provides the core GUI interface (note the view tags\nfor the fields which provide hints to how things should be displayed).", Fields: []types.Field{{Name: "Config", Doc: "simulation configuration parameters -- set by .toml config file and / or args"}, {Name: "Net", Doc: "Net is the network: click to view / edit parameters for layers, paths, etc."}, {Name: "Params", Doc: "Params manages network parameter setting."}, {Name: "Loops", Doc: "Loops are the control loops for running the sim, in different Modes\nacross stacks of Levels."}, {Name: "Envs", Doc: "Envs provides mode-string based storage of environments."}, {Name: "TrainUpdate", Doc: "TrainUpdate has Train mode netview update parameters."}, {Name: "TestUpdate", Doc: "TestUpdate has Test mode netview update parameters."}, {Name: "Root", Doc: "Root is the root tensorfs directory, where all stats and other misc sim data goes."}, {Name: "Stats", Doc: "Stats has the stats directory within Root."}, {Name: "Current", Doc: "Current has the current stats values within Stats."}, {Name: "StatFuncs", Doc: "StatFuncs are statistics functions called at given mode and level,\nto perform all stats computations. phase = Start does init at start of given level,\nand all initialization / configuration (called during Init too)."}, {Name: "GUI", Doc: "GUI manages all the GUI elements."}, {Name: "RandSeeds", Doc: "RandSeeds is a list of random seeds to use for each run."}}})

var _ = types.AddFunc(&types.Func{Name: "main.main"})

var _ = types.AddFunc(&types.Func{Name: "main.RunSim", Doc: "RunSim runs the simulation with given configuration.", Args: []string{"cfg"}, Returns: []string{"error"}})
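
--
Usage note (a sketch, not part of the applied diff): AddHip no longer takes a
*Context, so callers pass just the HipConfig and layer spacing, as in the
updated ConfigNet above. A minimal standalone version, assuming the axon v2
module path, axon.NewNetwork, and the HipConfig Defaults from axon/hip_net.go:

	package main

	import "github.com/emer/axon/v2/axon"

	func main() {
		net := axon.NewNetwork("Hip")
		hip := &axon.HipConfig{}
		hip.Defaults()
		// New signature: config and spacing only; no *Context argument.
		ec2, ec3, dg, ca3, ca1, ec5 := net.AddHip(hip, 2)
		_, _, _, _, _, _ = ec2, ec3, dg, ca3, ca1, ec5 // wire these as in ConfigNet
		net.Build()
		net.Defaults()
	}

As in ConfigNet, an Input layer would then feed ec2 via a paths.NewUniformRand()
pathway (PCon from Params.InToEc2PCon) and ec3 via paths.NewOneToOne().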
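--
Generated-enum note (sketch): the new enumgen.go gives Modes, Levels, and
StatsPhase the standard cogentcore enums API, so mode names round-trip through
strings for config files and logs. Train/Test constants are assumed to be
declared with the Modes type in hip.go (the generated maps show Train=0,
Test=1):

	package main

	import "fmt"

	func demoModes() {
		var m Modes
		if err := m.SetString("Test"); err != nil {
			panic(err) // invalid names are rejected by SetString
		}
		fmt.Println(m.String(), m.Int64()) // prints: Test 1
	}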