Commit e60c1f4 (1 parent: c4f0331)

kinaseq updated, working for standard equations; will try to optimize summary function to save compute cost

rcoreilly committed Jun 10, 2024
Showing 8 changed files with 861 additions and 842 deletions.
52 changes: 26 additions & 26 deletions axon/logging.go
@@ -67,7 +67,7 @@ func LogAddGlobals(lg *elog.Logs, ctx *Context, mode etime.Modes, times ...etime
di := uint32(lctx.Di)
lctx.SetFloat32(GlbV(ctx, di, gv))
}}})
lg.AddStdAggs(itm, mode, times)
lg.AddStdAggs(itm, mode, times...)

Check failure on line 70 in axon/logging.go (GitHub Actions / build): cannot use ... in call to non-variadic lg.AddStdAggs

if gv == GvDA || gv == GvRew || gv == GvRewPred {
itm := lg.AddItem(&elog.Item{
@@ -86,7 +86,7 @@ func LogAddGlobals(lg *elog.Logs, ctx *Context, mode etime.Modes, times ...etime
}
lctx.SetFloat32(v)
}}})
lg.AddStdAggs(itm, mode, times)
lg.AddStdAggs(itm, mode, times...)

Check failure on line 89 in axon/logging.go (GitHub Actions / build): cannot use ... in call to non-variadic lg.AddStdAggs

itm = lg.AddItem(&elog.Item{
Name: gnm + "_R",
@@ -103,7 +103,7 @@ func LogAddGlobals(lg *elog.Logs, ctx *Context, mode etime.Modes, times ...etime
}
lctx.SetFloat32(v)
}}})
lg.AddStdAggs(itm, mode, times)
lg.AddStdAggs(itm, mode, times...)

Check failure on line 106 in axon/logging.go (GitHub Actions / build): cannot use ... in call to non-variadic lg.AddStdAggs
if gv == GvDA {
itm = lg.AddItem(&elog.Item{
Name: gnm + "_Neg",
@@ -121,7 +121,7 @@ func LogAddGlobals(lg *elog.Logs, ctx *Context, mode etime.Modes, times ...etime
}
lctx.SetFloat32(v)
}}})
lg.AddStdAggs(itm, mode, times)
lg.AddStdAggs(itm, mode, times...)

Check failure on line 124 in axon/logging.go (GitHub Actions / build): cannot use ... in call to non-variadic lg.AddStdAggs
}
}
}
@@ -144,7 +144,7 @@ func LogAddDiagnosticItems(lg *elog.Logs, layerNames []string, mode etime.Modes,
ly := ctx.Layer(clnm).(AxonLayer).AsAxon()
ctx.SetFloat32(ly.Pool(0, uint32(ctx.Di)).AvgMax.Act.Minus.Avg)
}}})
lg.AddStdAggs(itm, mode, times)
lg.AddStdAggs(itm, mode, times...)

Check failure on line 147 in axon/logging.go (GitHub Actions / build): cannot use ... in call to non-variadic lg.AddStdAggs

itm = lg.AddItem(&elog.Item{
Name: clnm + "_ActMMax",
@@ -156,7 +156,7 @@ func LogAddDiagnosticItems(lg *elog.Logs, layerNames []string, mode etime.Modes,
ly := ctx.Layer(clnm).(AxonLayer).AsAxon()
ctx.SetFloat32(ly.Pool(0, uint32(ctx.Di)).AvgMax.Act.Minus.Max)
}}})
lg.AddStdAggs(itm, mode, times)
lg.AddStdAggs(itm, mode, times...)

Check failure on line 159 in axon/logging.go (GitHub Actions / build): cannot use ... in call to non-variadic lg.AddStdAggs

itm = lg.AddItem(&elog.Item{
Name: clnm + "_MaxGeM",
@@ -170,7 +170,7 @@ func LogAddDiagnosticItems(lg *elog.Logs, layerNames []string, mode etime.Modes,
ly := ctx.Layer(clnm).(AxonLayer).AsAxon()
ctx.SetFloat32(ly.LayerValues(uint32(ctx.Di)).ActAvg.AvgMaxGeM)
}}})
lg.AddStdAggs(itm, mode, times[:ntimes-1])
lg.AddStdAggs(itm, mode, times[:ntimes-1]...)

Check failure on line 173 in axon/logging.go (GitHub Actions / build): cannot use ... in call to non-variadic lg.AddStdAggs

itm = lg.AddItem(&elog.Item{
Name: clnm + "_CorDiff",
@@ -181,7 +181,7 @@ func LogAddDiagnosticItems(lg *elog.Logs, layerNames []string, mode etime.Modes,
ly := ctx.Layer(clnm).(AxonLayer).AsAxon()
ctx.SetFloat32(1.0 - ly.LayerValues(uint32(ctx.Di)).CorSim.Cor)
}}})
lg.AddStdAggs(itm, mode, times)
lg.AddStdAggs(itm, mode, times...)

Check failure on line 184 in axon/logging.go (GitHub Actions / build): cannot use ... in call to non-variadic lg.AddStdAggs

itm = lg.AddItem(&elog.Item{
Name: clnm + "_GiMult",
@@ -192,7 +192,7 @@ func LogAddDiagnosticItems(lg *elog.Logs, layerNames []string, mode etime.Modes,
ly := ctx.Layer(clnm).(AxonLayer).AsAxon()
ctx.SetFloat32(ly.LayerValues(uint32(ctx.Di)).ActAvg.GiMult)
}}})
lg.AddStdAggs(itm, mode, times)
lg.AddStdAggs(itm, mode, times...)

Check failure on line 195 in axon/logging.go (GitHub Actions / build): cannot use ... in call to non-variadic lg.AddStdAggs
}
}

@@ -242,7 +242,7 @@ func LogAddPCAItems(lg *elog.Logs, net *Network, mode etime.Modes, times ...etim
etime.Scope(etime.Train, times[ntimes-2]): func(ctx *elog.Context) {
ctx.SetStatFloat(ctx.Item.Name)
}}})
lg.AddStdAggs(itm, mode, times[:ntimes-1])
lg.AddStdAggs(itm, mode, times[:ntimes-1]...)

Check failure on line 245 in axon/logging.go (GitHub Actions / build): cannot use ... in call to non-variadic lg.AddStdAggs

itm = lg.AddItem(&elog.Item{
Name: clnm + "_PCA_Top5",
@@ -251,7 +251,7 @@ func LogAddPCAItems(lg *elog.Logs, net *Network, mode etime.Modes, times ...etim
etime.Scope(etime.Train, times[ntimes-2]): func(ctx *elog.Context) {
ctx.SetStatFloat(ctx.Item.Name)
}}})
lg.AddStdAggs(itm, mode, times[:ntimes-1])
lg.AddStdAggs(itm, mode, times[:ntimes-1]...)

itm = lg.AddItem(&elog.Item{
Name: clnm + "_PCA_Next5",
@@ -260,7 +260,7 @@ func LogAddPCAItems(lg *elog.Logs, net *Network, mode etime.Modes, times ...etim
etime.Scope(etime.Train, times[ntimes-2]): func(ctx *elog.Context) {
ctx.SetStatFloat(ctx.Item.Name)
}}})
lg.AddStdAggs(itm, mode, times[:ntimes-1])
lg.AddStdAggs(itm, mode, times[:ntimes-1]...)

itm = lg.AddItem(&elog.Item{
Name: clnm + "_PCA_Rest",
@@ -269,7 +269,7 @@ func LogAddPCAItems(lg *elog.Logs, net *Network, mode etime.Modes, times ...etim
etime.Scope(etime.Train, times[ntimes-2]): func(ctx *elog.Context) {
ctx.SetStatFloat(ctx.Item.Name)
}}})
lg.AddStdAggs(itm, mode, times[:ntimes-1])
lg.AddStdAggs(itm, mode, times[:ntimes-1]...)
}
}

@@ -319,7 +319,7 @@ func LogAddExtraDiagnosticItems(lg *elog.Logs, mode etime.Modes, net *Network, t
ly := ctx.Layer(clnm).(AxonLayer).AsAxon()
ctx.SetFloat32(ly.Pool(0, uint32(ctx.Di)).AvgMax.CaSpkP.Minus.Avg)
}}})
lg.AddStdAggs(itm, mode, times)
lg.AddStdAggs(itm, mode, times...)

itm = lg.AddItem(&elog.Item{
Name: clnm + "_CaSpkPMinusMax",
@@ -331,7 +331,7 @@ func LogAddExtraDiagnosticItems(lg *elog.Logs, mode etime.Modes, net *Network, t
ly := ctx.Layer(clnm).(AxonLayer).AsAxon()
ctx.SetFloat32(ly.Pool(0, uint32(ctx.Di)).AvgMax.CaSpkP.Minus.Max)
}}})
lg.AddStdAggs(itm, mode, times)
lg.AddStdAggs(itm, mode, times...)

lg.AddItem(&elog.Item{
Name: clnm + "_AvgDifAvg",
@@ -390,7 +390,7 @@ func LogAddCaLrnDiagnosticItems(lg *elog.Logs, mode etime.Modes, net *Network, t
tsr := ctx.GetLayerRepTensor(clnm, "NmdaCa")
ctx.SetFloat64(stats.MeanTensor(tsr))
}}})
lg.AddStdAggs(itm, mode, times)
lg.AddStdAggs(itm, mode, times...)

itm = lg.AddItem(&elog.Item{
Name: clnm + "_MaxNmdaCa",
@@ -402,7 +402,7 @@ func LogAddCaLrnDiagnosticItems(lg *elog.Logs, mode etime.Modes, net *Network, t
tsr := ctx.GetLayerRepTensor(clnm, "NmdaCa")
ctx.SetFloat64(stats.MaxTensor(tsr))
}}})
lg.AddStdAggs(itm, mode, times)
lg.AddStdAggs(itm, mode, times...)

itm = lg.AddItem(&elog.Item{
Name: clnm + "_AvgVgccCa",
@@ -414,7 +414,7 @@ func LogAddCaLrnDiagnosticItems(lg *elog.Logs, mode etime.Modes, net *Network, t
tsr := ctx.GetLayerRepTensor(clnm, "VgccCaInt")
ctx.SetFloat64(stats.MeanTensor(tsr))
}}})
lg.AddStdAggs(itm, mode, times)
lg.AddStdAggs(itm, mode, times...)

itm = lg.AddItem(&elog.Item{
Name: clnm + "_MaxVgccCa",
@@ -426,7 +426,7 @@ func LogAddCaLrnDiagnosticItems(lg *elog.Logs, mode etime.Modes, net *Network, t
tsr := ctx.GetLayerRepTensor(clnm, "VgccCaInt")
ctx.SetFloat64(stats.MaxTensor(tsr))
}}})
lg.AddStdAggs(itm, mode, times)
lg.AddStdAggs(itm, mode, times...)

itm = lg.AddItem(&elog.Item{
Name: clnm + "_AvgCaLrn",
@@ -438,7 +438,7 @@ func LogAddCaLrnDiagnosticItems(lg *elog.Logs, mode etime.Modes, net *Network, t
tsr := ctx.GetLayerRepTensor(clnm, "CaLrn")
ctx.SetFloat64(stats.MeanTensor(tsr))
}}})
lg.AddStdAggs(itm, mode, times)
lg.AddStdAggs(itm, mode, times...)

itm = lg.AddItem(&elog.Item{
Name: clnm + "_MaxCaLrn",
@@ -450,7 +450,7 @@ func LogAddCaLrnDiagnosticItems(lg *elog.Logs, mode etime.Modes, net *Network, t
tsr := ctx.GetLayerRepTensor(clnm, "CaLrn")
ctx.SetFloat64(stats.MaxTensor(tsr))
}}})
lg.AddStdAggs(itm, mode, times)
lg.AddStdAggs(itm, mode, times...)

itm = lg.AddItem(&elog.Item{
Name: clnm + "_AvgAbsCaDiff",
@@ -463,7 +463,7 @@ func LogAddCaLrnDiagnosticItems(lg *elog.Logs, mode etime.Modes, net *Network, t
norm.AbsTensor(tsr)
ctx.SetFloat64(stats.MeanTensor(tsr))
}}})
lg.AddStdAggs(itm, mode, times)
lg.AddStdAggs(itm, mode, times...)

itm = lg.AddItem(&elog.Item{
Name: clnm + "_MaxAbsCaDiff",
@@ -476,7 +476,7 @@ func LogAddCaLrnDiagnosticItems(lg *elog.Logs, mode etime.Modes, net *Network, t
norm.AbsTensor(tsr)
ctx.SetFloat64(stats.MaxTensor(tsr))
}}})
lg.AddStdAggs(itm, mode, times)
lg.AddStdAggs(itm, mode, times...)

itm = lg.AddItem(&elog.Item{
Name: clnm + "_AvgCaD",
Expand All @@ -488,7 +488,7 @@ func LogAddCaLrnDiagnosticItems(lg *elog.Logs, mode etime.Modes, net *Network, t
tsr := ctx.GetLayerRepTensor(clnm, "CaD")
ctx.SetFloat64(stats.MeanTensor(tsr))
}}})
lg.AddStdAggs(itm, mode, times)
lg.AddStdAggs(itm, mode, times...)

itm = lg.AddItem(&elog.Item{
Name: clnm + "_AvgCaSpkD",
Expand All @@ -501,7 +501,7 @@ func LogAddCaLrnDiagnosticItems(lg *elog.Logs, mode etime.Modes, net *Network, t
avg := stats.MeanTensor(tsr)
ctx.SetFloat64(avg)
}}})
lg.AddStdAggs(itm, mode, times)
lg.AddStdAggs(itm, mode, times...)

itm = lg.AddItem(&elog.Item{
Name: clnm + "_AvgCaDiff",
Expand All @@ -513,7 +513,7 @@ func LogAddCaLrnDiagnosticItems(lg *elog.Logs, mode etime.Modes, net *Network, t
avg := stats.MeanTensor(tsr)
ctx.SetFloat64(avg)
}}})
lg.AddStdAggs(itm, mode, times)
lg.AddStdAggs(itm, mode, times...)

lg.AddItem(&elog.Item{
Name: clnm + "_CaDiffCorrel",
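
Note on the build failures annotated above: each one attaches to a new call that expands the slice with times..., while the elog version checked out by CI evidently still declares lg.AddStdAggs with a plain slice parameter, so the expansion does not compile. A minimal sketch of the mismatch, using plain string arguments rather than the real elog/etime types (the actual AddStdAggs signature lives in the elog package and differs from this):

package main

import "fmt"

type Logs struct{}

// Non-variadic form: the final parameter is a slice, so callers must pass
// the slice value itself, e.g. AddStdAggs(itm, mode, times).
func (lg *Logs) AddStdAggs(itm, mode string, times []string) {
	fmt.Println("slice form:", itm, mode, times)
}

// Variadic form: the final parameter gathers its arguments into a slice, so
// both AddStdAggsV(itm, mode, "Run", "Epoch") and
// AddStdAggsV(itm, mode, times...) compile.
func (lg *Logs) AddStdAggsV(itm, mode string, times ...string) {
	fmt.Println("variadic form:", itm, mode, times)
}

func main() {
	lg := &Logs{}
	times := []string{"Run", "Epoch", "Trial"}
	lg.AddStdAggs("item", "train", times) // ok
	// lg.AddStdAggs("item", "train", times...) // compile error:
	// cannot use ... in call to non-variadic lg.AddStdAggs
	lg.AddStdAggsV("item", "train", times...) // ok once the method is variadic
}
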
1 change: 1 addition & 0 deletions examples/choose/armaze/paradigms.go
@@ -38,6 +38,7 @@ func (ev *Env) ConfigGroupGoodBad() {
for gi := 0; gi < 2; gi++ {
var eff, mag, prob float32
var length int
// todo: with BLANovelInhib lr=0.01, this bias has reversed!?
if gi == 0 { // bad case: there is a small but significant left side bias, so make this on bad
length = cfg.LengthRange.Max
eff = cfg.EffortRange.Max
2 changes: 1 addition & 1 deletion examples/choose/configs/03_distance.toml
@@ -3,5 +3,5 @@
NDrives = 3
Paradigm = "GroupGoodBad"
LengthRange.Min = 4
LengthRange.Max = 8
LengthRange.Max = 12

140 changes: 140 additions & 0 deletions examples/kinaseq/config.go
@@ -0,0 +1,140 @@
// Copyright (c) 2023, The Emergent Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package main

// ParamConfig has config parameters related to sim params
type ParamConfig struct {

// gain multiplier on spike for computing CaSpk: increasing this directly affects the magnitude of the trace values, learning rate in Target layers, and other factors that depend on CaSpk values: RLRate, UpdateThr. Path.KinaseCa.SpikeG provides an additional gain factor specific to the synapse-level trace factors, without affecting neuron-level CaSpk values. Larger networks require higher gain factors at the neuron level -- 12, vs 8 for smaller.
SpikeG float32 `default:"8"`

// time constant for integrating spike-driven calcium trace at sender and recv neurons, CaSyn, which then drives synapse-level integration of the joint pre * post synapse-level activity, in cycles (msec). Note: if this param is changed, then there will be a change in effective learning rate that can be compensated for by multiplying PathParams.Learn.KinaseCa.SpikeG by sqrt(30 / sqrt(SynTau))
SynTau float32 `default:"30" min:"1"`

// rate = 1 / tau
SynDt float32 `view:"-" json:"-" xml:"-" edit:"-"`

// network parameters
Network map[string]any

// Extra Param Sheet name(s) to use (space separated if multiple) -- must be valid name as listed in compiled-in params or loaded params
Sheet string

// extra tag to add to file names and logs saved from this run
Tag string

// user note -- describe the run params etc -- like a git commit message for the run
Note string

// Name of the JSON file to input saved parameters from.
File string `nest:"+"`

// Save a snapshot of all current param and config settings in a directory named params_<datestamp> (or _good if Good is true), then quit -- useful for comparing to later changes and seeing multiple views of current params
SaveAll bool `nest:"+"`

// for SaveAll, save to params_good for a known good params state. This can be done prior to making a new release after all tests are passing -- add results to git to provide a full diff record of all params over time.
Good bool `nest:"+"`
}

func (pc *ParamConfig) Update() {
pc.SynDt = 1.0 / pc.SynTau
}
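
The SynDt = 1/SynTau rate computed here is what integrates the spike-driven CaSyn trace described in the field comments above. A minimal sketch of that integration, assuming the standard first-order exponential form with the SpikeG gain (the actual axon kinase update may differ in detail):

package main

import "fmt"

func main() {
	spikeG := float32(8.0)  // ParamConfig.SpikeG default
	synTau := float32(30.0) // ParamConfig.SynTau default
	synDt := 1.0 / synTau   // what ParamConfig.Update computes

	caSyn := float32(0)
	for cyc := 0; cyc < 200; cyc++ {
		spike := float32(0)
		if cyc%20 == 0 { // one spike every 20 cycles (ms), i.e. 50 Hz
			spike = 1
		}
		// exponential approach toward spikeG*spike each 1 ms cycle
		caSyn += synDt * (spikeG*spike - caSyn)
	}
	fmt.Printf("CaSyn after 200 cycles at 50 Hz: %.3f\n", caSyn)
}
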

// RunConfig has config parameters related to running the sim
type RunConfig struct {
// Neuron runs the standard Neuron update equations, vs. Kinase experimental equations
Neuron bool `default:"false"`

// use the GPU for computation -- only for testing in this model -- not faster
GPU bool `default:"false"`

// number of parallel threads for CPU computation -- 0 = use default
NThreads int `default:"2"`

// starting run number -- determines the random seed -- runs count up from there -- can do all runs in parallel by launching separate jobs with each run, NRuns = 1
Run int `default:"0"`

// total number of runs to do when running Train
NRuns int `default:"1" min:"1"`

// total number of epochs per run
NEpochs int `default:"1"`

// total number of trials per epoch
NTrials int `default:"10"` // 1000

// number of milliseconds (cycles) in the minus phase of each trial
MinusMSec int `default:"150"`

// number of milliseconds (cycles) in the plus phase of each trial
PlusMSec int `default:"50"`

// number of milliseconds (cycles) in the inter-stimulus interval (ISI) between trials
ISIMSec int `default:"200"`

// firing rates (Hz) for the minus and plus phases, for testing
MinusHz, PlusHz float32 `default:"50"`

// additive difference in the sending neuron's firing frequency relative to the recv neuron (recv uses the basic minus and plus rates)
SendDiffHz float32
}
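
For orientation, the timing fields above presumably define one trial as MinusMSec + PlusMSec cycles of stimulation followed by ISIMSec of inter-stimulus interval, with the sender's firing rate offset additively from the receiver's by SendDiffHz. A small illustrative calculation with these defaults (not the sim's actual loop code; the 10 Hz offset is made up for the example):

package main

import "fmt"

func main() {
	minusMSec, plusMSec, isiMSec := 150, 50, 200
	minusHz, plusHz := float32(50), float32(50)
	sendDiffHz := float32(10) // illustrative offset; the default is 0

	fmt.Println("cycles per trial:", minusMSec+plusMSec+isiMSec) // 400

	// with 1 ms cycles, a 50 Hz rate means one spike every ~20 cycles
	fmt.Println("recv minus inter-spike interval (cycles):", 1000/minusHz)

	// sender rates are offset additively from the recv rates
	fmt.Println("send minus/plus Hz:", minusHz+sendDiffHz, plusHz+sendDiffHz)
}
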

// LogConfig has config parameters related to logging data
type LogConfig struct {

// if true, save final weights after each run
SaveWts bool

// if true, save cycle log to file, as .cyc.tsv typically
Cycle bool `default:"true" nest:"+"`

// if true, save network activation etc data from testing trials, for later viewing in netview
NetData bool
}

// Config is a standard Sim config -- use as a starting point.
type Config struct {

// clamp constant Ge value -- otherwise drive discrete spiking input
GeClamp bool `default:"false"`

// frequency of input spiking for !GeClamp mode
SpikeHz float32 `default:"50"`

// Raw synaptic excitatory conductance
Ge float32 `min:"0" step:"0.01" default:"2.0"`

// Inhibitory conductance
Gi float32 `min:"0" step:"0.01" default:"0.1"`

// total number of cycles to run
NCycles int `min:"10" default:"200"`

// when does excitatory input into neuron come on?
OnCycle int `min:"0" default:"10"`

// when does excitatory input into neuron go off?
OffCycle int `min:"0" default:"190"`

// how often to update display (in cycles)
UpdateInterval int `min:"1" default:"10"`

// specify include files here, and after configuration, it contains list of include files added
Includes []string

// open the GUI -- does not automatically run -- if false, then runs automatically and quits
GUI bool `default:"true"`

// log debugging information
Debug bool

// parameter related configuration options
Params ParamConfig `view:"add-fields"`

// sim running related configuration options
Run RunConfig `view:"add-fields"`

// data logging related configuration options
Log LogConfig `view:"add-fields"`
}

func (cfg *Config) IncludesPtr() *[]string { return &cfg.Includes }
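
Finally, a rough sketch of how the top-level Config fields above could gate input to the neuron: between OnCycle and OffCycle, either clamp a constant Ge (GeClamp) or deliver discrete input events at SpikeHz. This is illustrative only, not the sim's actual update code:

package main

import (
	"fmt"
	"math/rand"
)

func main() {
	geClamp := false       // Config.GeClamp
	spikeHz := float32(50) // Config.SpikeHz
	ge := float32(2.0)     // Config.Ge
	nCycles, on, off := 200, 10, 190

	pSpike := spikeHz / 1000 // per-cycle (1 ms) spike probability
	nIn := 0
	for cyc := 0; cyc < nCycles; cyc++ {
		geIn := float32(0)
		if cyc >= on && cyc < off {
			if geClamp {
				geIn = ge // constant clamped conductance
			} else if rand.Float32() < pSpike {
				geIn = ge // discrete synaptic input event this cycle
			}
		}
		if geIn > 0 {
			nIn++
		}
	}
	fmt.Println("cycles with input:", nIn, "of", nCycles)
}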