Commit
use type-specific shortcuts in tensorfs, and deep fsa updates
rcoreilly committed Nov 28, 2024
1 parent b4f0669 commit 1903362
Showing 10 changed files with 860 additions and 669 deletions.
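The main change across axon/simstats.go below is replacing the generic tensorfs.Value[T] and tensorfs.Scalar[T] accessors with type-specific shortcut methods on *tensorfs.Node (Int, Float64, StringValue). A minimal sketch of the before/after pattern, using only calls that appear in the hunks in this diff; the package name, helper function, and import path are illustrative assumptions, not part of the commit:

```go
package example

// Import path assumed for the tensorfs version at this commit; adjust to
// wherever tensorfs lives in your Cogent Core tree.
import "cogentcore.org/core/tensor/tensorfs"

// statCounterSketch shows the old vs. new accessor style (hypothetical helper).
func statCounterSketch(levelDir, curModeDir *tensorfs.Node, name string, ndata, ctr int) {
	// Before: generic, type-parameterized accessors.
	//   tsr := tensorfs.Value[int](levelDir, name)
	//   tensorfs.Value[int](curModeDir, name, ndata).SetInt1D(ctr, 0)
	//   tensorfs.Scalar[int](curModeDir, name).SetInt1D(ctr, 0)

	// After: type-specific shortcuts on the node itself.
	tsr := levelDir.Int(name)                    // int tensor named name in levelDir
	curModeDir.Int(name, ndata).SetInt1D(ctr, 0) // sized int tensor; set first element
	curModeDir.Int(name, 1).SetInt1D(ctr, 0)     // scalar as a size-1 tensor
	tsr.AppendRowInt(ctr)                        // accumulate the counter as a new row
}
```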
2 changes: 1 addition & 1 deletion axon/network.go

Some generated files are not rendered by default.

2 changes: 1 addition & 1 deletion axon/network.goal
@@ -331,7 +331,7 @@ func (nt *Network) UpdateLayerMaps() {
nt.UpdateLayerNameMap()
nt.LayerClassMap = make(map[string][]string)
for _, ly := range nt.Layers {
cs := ly.Type.String() + ly.Class
cs := ly.Type.String() + " " + ly.Class
cls := strings.Split(cs, " ")
for _, cl := range cls {
if cl == "" {
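The one-character change above adds a space between the layer type name and the space-separated class list, so that strings.Split can pull the type name out as its own class entry. A small, self-contained illustration (the type and class values are hypothetical):

```go
package main

import (
	"fmt"
	"strings"
)

func main() {
	// Hypothetical layer type and class list illustrating the fix above.
	typeName, class := "SuperLayer", "Hidden Excite"

	fused := typeName + class        // old: "SuperLayerHidden Excite"
	spaced := typeName + " " + class // new: "SuperLayer Hidden Excite"

	fmt.Println(strings.Split(fused, " "))  // 2 entries: "SuperLayerHidden", "Excite"
	fmt.Println(strings.Split(spaced, " ")) // 3 entries: "SuperLayer", "Hidden", "Excite"
}
```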
30 changes: 15 additions & 15 deletions axon/simstats.go
@@ -140,7 +140,7 @@ func StatLoopCounters(statsDir, currentDir *tensorfs.Node, ls *looper.Stacks, ne
modeDir := statsDir.RecycleDir(mode.String())
curModeDir := currentDir.RecycleDir(mode.String())
levelDir := modeDir.RecycleDir(level.String())
tsr := tensorfs.Value[int](levelDir, name)
tsr := levelDir.Int(name)
if start {
tsr.SetNumRows(0)
if ps := plot.GetStylersFrom(tsr); ps == nil {
@@ -151,22 +151,22 @@ func StatLoopCounters(statsDir, currentDir *tensorfs.Node, ls *looper.Stacks, ne
}
if level.Int64() == trialLevel.Int64() {
for di := range ndata {
tensorfs.Value[int](curModeDir, name, ndata).SetInt1D(0, di)
curModeDir.Int(name, ndata).SetInt1D(0, di)
}
}
continue
}
ctr := st.Loops[lev].Counter.Cur
if level.Int64() == trialLevel.Int64() {
for di := range ndata {
tensorfs.Value[int](curModeDir, name, ndata).SetInt1D(ctr, di)
curModeDir.Int(name, ndata).SetInt1D(ctr, di)
tsr.AppendRowInt(ctr)
if lev.Int64() == trialLevel.Int64() {
ctr++
}
}
} else {
tensorfs.Scalar[int](curModeDir, name).SetInt1D(ctr, 0)
curModeDir.Int(name, 1).SetInt1D(ctr, 0)
tsr.AppendRowInt(ctr)
}
}
@@ -193,9 +193,9 @@ func StatRunName(statsDir, currentDir *tensorfs.Node, ls *looper.Stacks, net *Ne
name := "RunName"
modeDir := statsDir.RecycleDir(mode.String())
levelDir := modeDir.RecycleDir(level.String())
tsr := tensorfs.Value[string](levelDir, name)
tsr := levelDir.StringValue(name)
ndata := int(net.Context().NData)
runNm := tensorfs.Scalar[string](currentDir, name).String1D(0)
runNm := currentDir.StringValue(name, 1).String1D(0)

if start {
tsr.SetNumRows(0)
@@ -234,7 +234,7 @@ func StatPerTrialMSec(statsDir *tensorfs.Node, statName string, trainMode enums.
name := "PerTrialMSec"
modeDir := statsDir.RecycleDir(mode.String())
levelDir := modeDir.RecycleDir(level.String())
tsr := tensorfs.Value[float64](levelDir, name)
tsr := levelDir.Float64(name)
if start {
tsr.SetNumRows(0)
if ps := plot.GetStylersFrom(tsr); ps == nil {
@@ -284,7 +284,7 @@ func StatLayerActGe(statsDir *tensorfs.Node, net *Network, trainMode, trialLevel
ly := net.LayerByName(lnm)
lpi := ly.Params.PoolIndex(0)
name := lnm + "_" + statName
tsr := tensorfs.Value[float64](levelDir, name)
tsr := levelDir.Float64(name)
if start {
tsr.SetNumRows(0)
if ps := plot.GetStylersFrom(tsr); ps == nil {
@@ -339,7 +339,7 @@ func StatLayerState(statsDir *tensorfs.Node, net *Network, smode, slevel enums.E
name := lnm + "_" + variable
sizes := []int{ndata}
sizes = append(sizes, ly.GetSampleShape().Sizes...)
tsr := tensorfs.Value[float64](levelDir, name, sizes...)
tsr := levelDir.Float64(name, sizes...)
if start {
tsr.SetNumRows(0)
continue
@@ -384,11 +384,11 @@ func StatPCA(statsDir, currentDir *tensorfs.Node, net *Network, interval int, tr
ly := net.LayerByName(lnm)
sizes := []int{ndata}
sizes = append(sizes, ly.GetSampleShape().Sizes...)
vtsr := tensorfs.Value[float64](pcaDir, lnm, sizes...)
vecs := tensorfs.Value[float64](curModeDir, "PCA_Vecs_"+lnm).(*tensor.Float64)
vals := tensorfs.Value[float64](curModeDir, "PCA_Vals_"+lnm).(*tensor.Float64)
vtsr := pcaDir.Float64(lnm, sizes...)
vecs := curModeDir.Float64("PCA_Vecs_" + lnm)
vals := curModeDir.Float64("PCA_Vals_" + lnm)
if levi == 0 {
ltsr := tensorfs.Value[float64](curModeDir, "PCA_ActM_"+lnm, ly.GetSampleShape().Sizes...)
ltsr := curModeDir.Float64("PCA_ActM_"+lnm, ly.GetSampleShape().Sizes...)
if start {
vtsr.SetNumRows(0)
} else {
@@ -404,7 +404,7 @@ func StatPCA(statsDir, currentDir *tensorfs.Node, net *Network, interval int, tr
if !start && levi == 1 {
if interval > 0 && epc%interval == 0 {
hasNew = true
covar := tensorfs.Value[float64](curModeDir, "PCA_Covar_"+lnm)
covar := curModeDir.Float64("PCA_Covar_" + lnm)
metric.CovarianceMatrixOut(metric.Covariance, vtsr, covar)
matrix.SVDOut(covar, vecs, vals)
ln := vals.Len()
@@ -433,7 +433,7 @@ func StatPCA(statsDir, currentDir *tensorfs.Node, net *Network, interval int, tr
}
for si, statName := range statNames {
name := lnm + "_" + statName
tsr := tensorfs.Value[float64](levelDir, name)
tsr := levelDir.Float64(name)
if start {
tsr.SetNumRows(0)
if ps := plot.GetStylersFrom(tsr); ps == nil {
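The same shortcut pattern covers sized float64 tensors and string values, as in StatLayerState and StatRunName above. A hedged sketch, again using only calls visible in this diff; the helper name and import path are assumptions:

```go
package example

import "cogentcore.org/core/tensor/tensorfs" // path assumed, as above

// recordLayerStateSketch mirrors the sized-tensor and string-value usage
// in simstats.go after this commit (hypothetical helper).
func recordLayerStateSketch(levelDir, currentDir *tensorfs.Node, name string, ndata int, sampleSizes []int) {
	// Float64 with explicit sizes replaces tensorfs.Value[float64](dir, name, sizes...).
	sizes := append([]int{ndata}, sampleSizes...)
	tsr := levelDir.Float64(name, sizes...)
	tsr.SetNumRows(0) // reset accumulated rows at the start of a run

	// StringValue(name, 1) replaces tensorfs.Scalar[string](dir, name).
	runName := currentDir.StringValue("RunName", 1).String1D(0)
	_ = runName // the real stats code uses this to label rows
}
```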
133 changes: 74 additions & 59 deletions examples/deep_fsa/config.go
@@ -4,6 +4,8 @@

package main

import "cogentcore.org/core/math32/vecint"

// EnvConfig has config params for environment
// note: only adding fields for key Env params that matter for both Network and Env
// other params are set via the Env map data mechanism.
@@ -15,11 +17,11 @@ type EnvConfig struct {
// number of units per localist output unit -- 1 works better than 5 here
UnitsPer int `default:"1"`

// ] names of input letters
// InputNames are names of input letters.
InputNames []string `default:"['B','T','S','X','V','P','E']"`

// map of input names -- initialized during Configenv
InputNameMap map[string]int
// InputNameMap is the map of input names, initialized during ConfigEnv.
InputNameMap map[string]int `display:"-"`
}

// InitNameMap is called during ConfigEnv
@@ -33,115 +35,128 @@ func (cfg *EnvConfig) InitNameMap() {
}
}

// ParamConfig has config parameters related to sim params
// ParamConfig has config parameters related to sim params.
type ParamConfig struct {

// network parameters
Network map[string]any
// Hidden1Size is the size of hidden 1 layer.
Hidden1Size vecint.Vector2i `default:"{'X':10,'Y':10}" nest:"+"`

// Hidden2Size is the size of hidden 2 layer.
Hidden2Size vecint.Vector2i `default:"{'X':10,'Y':10}" nest:"+"`

// Extra Param Sheet name(s) to use (space separated if multiple) -- must be valid name as listed in compiled-in params or loaded params
// Sheet is the extra params sheet name(s) to use (space separated
// if multiple). Must be valid name as listed in compiled-in params
// or loaded params.
Sheet string

// extra tag to add to file names and logs saved from this run
// Tag is an extra tag to add to file names and logs saved from this run.
Tag string

// user note -- describe the run params etc -- like a git commit message for the run
// Note is additional info to describe the run params etc,
// like a git commit message for the run.
Note string

// Name of the JSON file to input saved parameters from.
File string `nest:"+"`

// Save a snapshot of all current param and config settings in a directory named params_<datestamp> (or _good if Good is true), then quit -- useful for comparing to later changes and seeing multiple views of current params
// SaveAll will save a snapshot of all current param and config settings
// in a directory named params_<datestamp> (or _good if Good is true),
// then quit. Useful for comparing to later changes and seeing multiple
// views of current params.
SaveAll bool `nest:"+"`

// for SaveAll, save to params_good for a known good params state. This can be done prior to making a new release after all tests are passing -- add results to git to provide a full diff record of all params over time.
// Good is for SaveAll: save to params_good for a known good params state.
// This can be done prior to making a new release after all tests are passing.
// Add results to git to provide a full diff record of all params over time.
Good bool `nest:"+"`
}

// RunConfig has config parameters related to running the sim
// RunConfig has config parameters related to running the sim.
type RunConfig struct {

// use the GPU for computation -- generally faster even for small models if NData ~16
// GPU uses the GPU for computation, generally faster than CPU even for
// small models if NData ~16.
GPU bool `default:"true"`

// number of data-parallel items to process in parallel per trial -- works (and is significantly faster) for both CPU and GPU. Results in an effective mini-batch of learning. 16 learns just as well as 1 -- no diffs.
// NData is the number of data-parallel items to process in parallel per trial.
// It is significantly faster for both CPU and GPU, and results in an effective
// mini-batch of learning.
NData int `default:"16" min:"1"`

// number of parallel threads for CPU computation -- 0 = use default
// NThreads is the number of parallel threads for CPU computation;
// 0 = use default.
NThreads int `default:"0"`

// starting run number -- determines the random seed -- runs counts from there -- can do all runs in parallel by launching separate jobs with each run, runs = 1
// Run is the _starting_ run number, which determines the random seed.
// Runs counts up from there. Can do all runs in parallel by launching
// separate jobs with each starting Run, Runs = 1.
Run int `default:"0"`

// total number of runs to do when running Train
NRuns int `default:"5" min:"1"`
// Runs is the total number of runs to do when running Train, starting from Run.
Runs int `default:"5" min:"1"`

// Epochs is the total number of epochs per run.
Epochs int `default:"100"`

// Trials is the total number of trials per epoch.
// Should be an even multiple of NData.
Trials int `default:"32"`

// total number of epochs per run
NEpochs int `default:"100"`
// Cycles is the total number of cycles per trial: at least 200.
Cycles int `default:"200"`

// total number of trials per epoch. Should be an even multiple of NData.
NTrials int `default:"196"`
// PlusCycles is the total number of plus-phase cycles per trial. For Cycles=300, use 100.
PlusCycles int `default:"50"`

// total number of cycles per trial. at least 200
NCycles int `default:"200"`
// NZero is how many perfect, zero-error epochs before stopping a Run.
NZero int `default:"2"`

// total number of plus-phase cycles per trial. for NCycles=300, use 100
NPlusCycles int `default:"50"`
// TestInterval is how often (in training epochs) to run through all the
// test patterns. Can use 0 or -1 for no testing.
TestInterval int `default:"0"`

// how frequently (in epochs) to compute PCA on hidden representations to measure variance?
// PCAInterval is how often (in epochs) to compute PCA on hidden
// representations to measure variance.
PCAInterval int `default:"5"`

// how often to run through all the test patterns, in terms of training epochs -- can use 0 or -1 for no testing
TestInterval int `default:"-1"`
// StartWts is the name of weights file to load at start of first run.
StartWts string
}

// LogConfig has config parameters related to logging data
type LogConfig struct { //types:add
// LogConfig has config parameters related to logging data.
type LogConfig struct {

// if true, save final weights after each run
// SaveWeights will save final weights after each run.
SaveWeights bool

// if true, save train epoch log to file, as .epc.tsv typically
Epoch bool `default:"true" nest:"+"`

// if true, save run log to file, as .run.tsv typically
Run bool `default:"true" nest:"+"`

// if true, save train trial log to file, as .trl.tsv typically. May be large.
Trial bool `default:"false" nest:"+"`

// if true, save testing epoch log to file, as .tst_epc.tsv typically. In general it is better to copy testing items over to the training epoch log and record there.
TestEpoch bool `default:"false" nest:"+"`

// if true, save testing trial log to file, as .tst_trl.tsv typically. May be large.
TestTrial bool `default:"false" nest:"+"`
// Train has the list of Train mode levels to save log files for.
Train []string `default:"['Run', 'Epoch']" nest:"+"`

// if true, save network activation etc data from testing trials, for later viewing in netview
NetData bool
// Test has the list of Test mode levels to save log files for.
Test []string `nest:"+"`
}

// Config is a standard Sim config -- use as a starting point.
type Config struct { //types:add
type Config struct {

// specify include files here, and after configuration, it contains list of include files added
// Includes has a list of additional config files to include.
// After configuration, it contains list of include files added.
Includes []string

// open the GUI -- does not automatically run -- if false, then runs automatically and quits
// GUI means open the GUI. Otherwise it runs automatically and quits,
// saving results to log files.
GUI bool `default:"true"`

// log debugging information
// Debug reports debugging information.
Debug bool

// environment configuration options
// Env has environment related configuration options.
Env EnvConfig `display:"add-fields"`

// parameter related configuration options
// Params has parameter related configuration options.
Params ParamConfig `display:"add-fields"`

// sim running related configuration options
// Run has sim running related configuration options.
Run RunConfig `display:"add-fields"`

// data logging related configuration options
// Log has data logging related configuration options.
Log LogConfig `display:"add-fields"`
}
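For reference, a hypothetical snippet showing the renamed RunConfig fields (Runs, Epochs, Trials, Cycles, PlusCycles replacing the old N-prefixed names), plus the new Hidden1Size/Hidden2Size and Log.Train fields; the helper function is illustrative only, and the values mirror the default struct tags above:

```go
package main

import "cogentcore.org/core/math32/vecint"

// configExample builds a Config with the post-commit field names
// (hypothetical helper; defaults mirror the struct tags above).
func configExample() *Config {
	cfg := &Config{}
	cfg.Run.GPU = true
	cfg.Run.NData = 16
	cfg.Run.Runs = 5        // was NRuns
	cfg.Run.Epochs = 100    // was NEpochs
	cfg.Run.Trials = 32     // was NTrials
	cfg.Run.Cycles = 200    // was NCycles
	cfg.Run.PlusCycles = 50 // was NPlusCycles
	cfg.Params.Hidden1Size = vecint.Vector2i{X: 10, Y: 10}
	cfg.Params.Hidden2Size = vecint.Vector2i{X: 10, Y: 10}
	cfg.Log.Train = []string{"Run", "Epoch"} // Train-mode levels to save logs for
	return cfg
}
```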

