From e60c1f49e76a49f5bd4fd6ac23632861a14bb0d7 Mon Sep 17 00:00:00 2001
From: "Randall C. O'Reilly"
Date: Mon, 10 Jun 2024 03:19:04 -0700
Subject: [PATCH] kinaseq: updated and working for standard equations; will try
 to optimize the summary function to save compute cost
---
axon/logging.go | 52 +-
examples/choose/armaze/paradigms.go | 1 +
examples/choose/configs/03_distance.toml | 2 +-
examples/kinaseq/config.go | 140 +++++
examples/kinaseq/kinaseq.go | 470 +++++-----------
examples/kinaseq/neuron.go | 655 ++++++-----------------
examples/kinaseq/sim.go | 373 +++++++++++++
examples/neuron/neuron.go | 10 +-
8 files changed, 861 insertions(+), 842 deletions(-)
create mode 100644 examples/kinaseq/config.go
create mode 100644 examples/kinaseq/sim.go
diff --git a/axon/logging.go b/axon/logging.go
index b23a717db..33f81b31c 100644
--- a/axon/logging.go
+++ b/axon/logging.go
@@ -67,7 +67,7 @@ func LogAddGlobals(lg *elog.Logs, ctx *Context, mode etime.Modes, times ...etime
di := uint32(lctx.Di)
lctx.SetFloat32(GlbV(ctx, di, gv))
}}})
- lg.AddStdAggs(itm, mode, times)
+ lg.AddStdAggs(itm, mode, times...)
if gv == GvDA || gv == GvRew || gv == GvRewPred {
itm := lg.AddItem(&elog.Item{
@@ -86,7 +86,7 @@ func LogAddGlobals(lg *elog.Logs, ctx *Context, mode etime.Modes, times ...etime
}
lctx.SetFloat32(v)
}}})
- lg.AddStdAggs(itm, mode, times)
+ lg.AddStdAggs(itm, mode, times...)
itm = lg.AddItem(&elog.Item{
Name: gnm + "_R",
@@ -103,7 +103,7 @@ func LogAddGlobals(lg *elog.Logs, ctx *Context, mode etime.Modes, times ...etime
}
lctx.SetFloat32(v)
}}})
- lg.AddStdAggs(itm, mode, times)
+ lg.AddStdAggs(itm, mode, times...)
if gv == GvDA {
itm = lg.AddItem(&elog.Item{
Name: gnm + "_Neg",
@@ -121,7 +121,7 @@ func LogAddGlobals(lg *elog.Logs, ctx *Context, mode etime.Modes, times ...etime
}
lctx.SetFloat32(v)
}}})
- lg.AddStdAggs(itm, mode, times)
+ lg.AddStdAggs(itm, mode, times...)
}
}
}
@@ -144,7 +144,7 @@ func LogAddDiagnosticItems(lg *elog.Logs, layerNames []string, mode etime.Modes,
ly := ctx.Layer(clnm).(AxonLayer).AsAxon()
ctx.SetFloat32(ly.Pool(0, uint32(ctx.Di)).AvgMax.Act.Minus.Avg)
}}})
- lg.AddStdAggs(itm, mode, times)
+ lg.AddStdAggs(itm, mode, times...)
itm = lg.AddItem(&elog.Item{
Name: clnm + "_ActMMax",
@@ -156,7 +156,7 @@ func LogAddDiagnosticItems(lg *elog.Logs, layerNames []string, mode etime.Modes,
ly := ctx.Layer(clnm).(AxonLayer).AsAxon()
ctx.SetFloat32(ly.Pool(0, uint32(ctx.Di)).AvgMax.Act.Minus.Max)
}}})
- lg.AddStdAggs(itm, mode, times)
+ lg.AddStdAggs(itm, mode, times...)
itm = lg.AddItem(&elog.Item{
Name: clnm + "_MaxGeM",
@@ -170,7 +170,7 @@ func LogAddDiagnosticItems(lg *elog.Logs, layerNames []string, mode etime.Modes,
ly := ctx.Layer(clnm).(AxonLayer).AsAxon()
ctx.SetFloat32(ly.LayerValues(uint32(ctx.Di)).ActAvg.AvgMaxGeM)
}}})
- lg.AddStdAggs(itm, mode, times[:ntimes-1])
+ lg.AddStdAggs(itm, mode, times[:ntimes-1]...)
itm = lg.AddItem(&elog.Item{
Name: clnm + "_CorDiff",
@@ -181,7 +181,7 @@ func LogAddDiagnosticItems(lg *elog.Logs, layerNames []string, mode etime.Modes,
ly := ctx.Layer(clnm).(AxonLayer).AsAxon()
ctx.SetFloat32(1.0 - ly.LayerValues(uint32(ctx.Di)).CorSim.Cor)
}}})
- lg.AddStdAggs(itm, mode, times)
+ lg.AddStdAggs(itm, mode, times...)
itm = lg.AddItem(&elog.Item{
Name: clnm + "_GiMult",
@@ -192,7 +192,7 @@ func LogAddDiagnosticItems(lg *elog.Logs, layerNames []string, mode etime.Modes,
ly := ctx.Layer(clnm).(AxonLayer).AsAxon()
ctx.SetFloat32(ly.LayerValues(uint32(ctx.Di)).ActAvg.GiMult)
}}})
- lg.AddStdAggs(itm, mode, times)
+ lg.AddStdAggs(itm, mode, times...)
}
}
@@ -242,7 +242,7 @@ func LogAddPCAItems(lg *elog.Logs, net *Network, mode etime.Modes, times ...etim
etime.Scope(etime.Train, times[ntimes-2]): func(ctx *elog.Context) {
ctx.SetStatFloat(ctx.Item.Name)
}}})
- lg.AddStdAggs(itm, mode, times[:ntimes-1])
+ lg.AddStdAggs(itm, mode, times[:ntimes-1]...)
itm = lg.AddItem(&elog.Item{
Name: clnm + "_PCA_Top5",
@@ -251,7 +251,7 @@ func LogAddPCAItems(lg *elog.Logs, net *Network, mode etime.Modes, times ...etim
etime.Scope(etime.Train, times[ntimes-2]): func(ctx *elog.Context) {
ctx.SetStatFloat(ctx.Item.Name)
}}})
- lg.AddStdAggs(itm, mode, times[:ntimes-1])
+ lg.AddStdAggs(itm, mode, times[:ntimes-1]...)
itm = lg.AddItem(&elog.Item{
Name: clnm + "_PCA_Next5",
@@ -260,7 +260,7 @@ func LogAddPCAItems(lg *elog.Logs, net *Network, mode etime.Modes, times ...etim
etime.Scope(etime.Train, times[ntimes-2]): func(ctx *elog.Context) {
ctx.SetStatFloat(ctx.Item.Name)
}}})
- lg.AddStdAggs(itm, mode, times[:ntimes-1])
+ lg.AddStdAggs(itm, mode, times[:ntimes-1]...)
itm = lg.AddItem(&elog.Item{
Name: clnm + "_PCA_Rest",
@@ -269,7 +269,7 @@ func LogAddPCAItems(lg *elog.Logs, net *Network, mode etime.Modes, times ...etim
etime.Scope(etime.Train, times[ntimes-2]): func(ctx *elog.Context) {
ctx.SetStatFloat(ctx.Item.Name)
}}})
- lg.AddStdAggs(itm, mode, times[:ntimes-1])
+ lg.AddStdAggs(itm, mode, times[:ntimes-1]...)
}
}
@@ -319,7 +319,7 @@ func LogAddExtraDiagnosticItems(lg *elog.Logs, mode etime.Modes, net *Network, t
ly := ctx.Layer(clnm).(AxonLayer).AsAxon()
ctx.SetFloat32(ly.Pool(0, uint32(ctx.Di)).AvgMax.CaSpkP.Minus.Avg)
}}})
- lg.AddStdAggs(itm, mode, times)
+ lg.AddStdAggs(itm, mode, times...)
itm = lg.AddItem(&elog.Item{
Name: clnm + "_CaSpkPMinusMax",
@@ -331,7 +331,7 @@ func LogAddExtraDiagnosticItems(lg *elog.Logs, mode etime.Modes, net *Network, t
ly := ctx.Layer(clnm).(AxonLayer).AsAxon()
ctx.SetFloat32(ly.Pool(0, uint32(ctx.Di)).AvgMax.CaSpkP.Minus.Max)
}}})
- lg.AddStdAggs(itm, mode, times)
+ lg.AddStdAggs(itm, mode, times...)
lg.AddItem(&elog.Item{
Name: clnm + "_AvgDifAvg",
@@ -390,7 +390,7 @@ func LogAddCaLrnDiagnosticItems(lg *elog.Logs, mode etime.Modes, net *Network, t
tsr := ctx.GetLayerRepTensor(clnm, "NmdaCa")
ctx.SetFloat64(stats.MeanTensor(tsr))
}}})
- lg.AddStdAggs(itm, mode, times)
+ lg.AddStdAggs(itm, mode, times...)
itm = lg.AddItem(&elog.Item{
Name: clnm + "_MaxNmdaCa",
@@ -402,7 +402,7 @@ func LogAddCaLrnDiagnosticItems(lg *elog.Logs, mode etime.Modes, net *Network, t
tsr := ctx.GetLayerRepTensor(clnm, "NmdaCa")
ctx.SetFloat64(stats.MeanTensor(tsr))
}}})
- lg.AddStdAggs(itm, mode, times)
+ lg.AddStdAggs(itm, mode, times...)
itm = lg.AddItem(&elog.Item{
Name: clnm + "_AvgVgccCa",
@@ -414,7 +414,7 @@ func LogAddCaLrnDiagnosticItems(lg *elog.Logs, mode etime.Modes, net *Network, t
tsr := ctx.GetLayerRepTensor(clnm, "VgccCaInt")
ctx.SetFloat64(stats.MeanTensor(tsr))
}}})
- lg.AddStdAggs(itm, mode, times)
+ lg.AddStdAggs(itm, mode, times...)
itm = lg.AddItem(&elog.Item{
Name: clnm + "_MaxVgccCa",
@@ -426,7 +426,7 @@ func LogAddCaLrnDiagnosticItems(lg *elog.Logs, mode etime.Modes, net *Network, t
tsr := ctx.GetLayerRepTensor(clnm, "VgccCaInt")
ctx.SetFloat64(stats.MaxTensor(tsr))
}}})
- lg.AddStdAggs(itm, mode, times)
+ lg.AddStdAggs(itm, mode, times...)
itm = lg.AddItem(&elog.Item{
Name: clnm + "_AvgCaLrn",
@@ -438,7 +438,7 @@ func LogAddCaLrnDiagnosticItems(lg *elog.Logs, mode etime.Modes, net *Network, t
tsr := ctx.GetLayerRepTensor(clnm, "CaLrn")
ctx.SetFloat64(stats.MeanTensor(tsr))
}}})
- lg.AddStdAggs(itm, mode, times)
+ lg.AddStdAggs(itm, mode, times...)
itm = lg.AddItem(&elog.Item{
Name: clnm + "_MaxCaLrn",
@@ -450,7 +450,7 @@ func LogAddCaLrnDiagnosticItems(lg *elog.Logs, mode etime.Modes, net *Network, t
tsr := ctx.GetLayerRepTensor(clnm, "CaLrn")
ctx.SetFloat64(stats.MaxTensor(tsr))
}}})
- lg.AddStdAggs(itm, mode, times)
+ lg.AddStdAggs(itm, mode, times...)
itm = lg.AddItem(&elog.Item{
Name: clnm + "_AvgAbsCaDiff",
@@ -463,7 +463,7 @@ func LogAddCaLrnDiagnosticItems(lg *elog.Logs, mode etime.Modes, net *Network, t
norm.AbsTensor(tsr)
ctx.SetFloat64(stats.MeanTensor(tsr))
}}})
- lg.AddStdAggs(itm, mode, times)
+ lg.AddStdAggs(itm, mode, times...)
itm = lg.AddItem(&elog.Item{
Name: clnm + "_MaxAbsCaDiff",
@@ -476,7 +476,7 @@ func LogAddCaLrnDiagnosticItems(lg *elog.Logs, mode etime.Modes, net *Network, t
norm.AbsTensor(tsr)
ctx.SetFloat64(stats.MaxTensor(tsr))
}}})
- lg.AddStdAggs(itm, mode, times)
+ lg.AddStdAggs(itm, mode, times...)
itm = lg.AddItem(&elog.Item{
Name: clnm + "_AvgCaD",
@@ -488,7 +488,7 @@ func LogAddCaLrnDiagnosticItems(lg *elog.Logs, mode etime.Modes, net *Network, t
tsr := ctx.GetLayerRepTensor(clnm, "CaD")
ctx.SetFloat64(stats.MeanTensor(tsr))
}}})
- lg.AddStdAggs(itm, mode, times)
+ lg.AddStdAggs(itm, mode, times...)
itm = lg.AddItem(&elog.Item{
Name: clnm + "_AvgCaSpkD",
@@ -501,7 +501,7 @@ func LogAddCaLrnDiagnosticItems(lg *elog.Logs, mode etime.Modes, net *Network, t
avg := stats.MeanTensor(tsr)
ctx.SetFloat64(avg)
}}})
- lg.AddStdAggs(itm, mode, times)
+ lg.AddStdAggs(itm, mode, times...)
itm = lg.AddItem(&elog.Item{
Name: clnm + "_AvgCaDiff",
@@ -513,7 +513,7 @@ func LogAddCaLrnDiagnosticItems(lg *elog.Logs, mode etime.Modes, net *Network, t
avg := stats.MeanTensor(tsr)
ctx.SetFloat64(avg)
}}})
- lg.AddStdAggs(itm, mode, times)
+ lg.AddStdAggs(itm, mode, times...)
lg.AddItem(&elog.Item{
Name: clnm + "_CaDiffCorrel",
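NOTE: the logging.go changes above are purely mechanical: AddStdAggs now takes
a variadic ...etime.Times parameter, so every call site must expand its times
slice with the ... suffix. A minimal standalone sketch of the Go rule at work
(hypothetical names, not the elog API):

    package main

    import "fmt"

    // sum takes a variadic list of ints, analogous to the new
    // AddStdAggs(itm, mode, times ...etime.Times) signature.
    func sum(nums ...int) int {
        total := 0
        for _, n := range nums {
            total += n
        }
        return total
    }

    func main() {
        times := []int{1, 2, 3}
        // A []int cannot be passed where ...int is expected without the
        // expansion suffix -- hence the times -> times... change above.
        fmt.Println(sum(times...))     // expands the slice: prints 6
        fmt.Println(sum(times[:2]...)) // slicing also works, as in times[:ntimes-1]...
    }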
diff --git a/examples/choose/armaze/paradigms.go b/examples/choose/armaze/paradigms.go
index a95f1ffdd..3d6f5812b 100644
--- a/examples/choose/armaze/paradigms.go
+++ b/examples/choose/armaze/paradigms.go
@@ -38,6 +38,7 @@ func (ev *Env) ConfigGroupGoodBad() {
for gi := 0; gi < 2; gi++ {
var eff, mag, prob float32
var length int
+ // todo: with BLANovelInhib lr=0.01, this bias has reversed!?
if gi == 0 { // bad case: there is a small but significant left side bias, so make this on bad
length = cfg.LengthRange.Max
eff = cfg.EffortRange.Max
diff --git a/examples/choose/configs/03_distance.toml b/examples/choose/configs/03_distance.toml
index 4c95cae7b..ff41741c8 100644
--- a/examples/choose/configs/03_distance.toml
+++ b/examples/choose/configs/03_distance.toml
@@ -3,5 +3,5 @@
NDrives = 3
Paradigm = "GroupGoodBad"
LengthRange.Min = 4
-LengthRange.Max = 8
+LengthRange.Max = 12
diff --git a/examples/kinaseq/config.go b/examples/kinaseq/config.go
new file mode 100644
index 000000000..c4d24aca4
--- /dev/null
+++ b/examples/kinaseq/config.go
@@ -0,0 +1,140 @@
+// Copyright (c) 2023, The Emergent Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package main
+
+// ParamConfig has config parameters related to sim params
+type ParamConfig struct {
+
+ // gain multiplier on spike for computing CaSpk: increasing this directly affects the magnitude of the trace values, learning rate in Target layers, and other factors that depend on CaSpk values: RLRate, UpdateThr. Path.KinaseCa.SpikeG provides an additional gain factor specific to the synapse-level trace factors, without affecting neuron-level CaSpk values. Larger networks require higher gain factors at the neuron level -- 12, vs 8 for smaller.
+ SpikeG float32 `default:"8"`
+
+ // time constant for integrating spike-driven calcium trace at sender and recv neurons, CaSyn, which then drives synapse-level integration of the joint pre * post synapse-level activity, in cycles (msec). Note: if this param is changed, then there will be a change in effective learning rate that can be compensated for by multiplying PathParams.Learn.KinaseCa.SpikeG by sqrt(30 / SynTau)
+ SynTau float32 `default:"30" min:"1"`
+
+ // rate = 1 / tau
+ SynDt float32 `view:"-" json:"-" xml:"-" edit:"-"`
+
+ // network parameters
+ Network map[string]any
+
+ // Extra Param Sheet name(s) to use (space separated if multiple) -- must be valid name as listed in compiled-in params or loaded params
+ Sheet string
+
+ // extra tag to add to file names and logs saved from this run
+ Tag string
+
+ // user note -- describe the run params etc -- like a git commit message for the run
+ Note string
+
+ // Name of the JSON file to input saved parameters from.
+ File string `nest:"+"`
+
+ // Save a snapshot of all current param and config settings in a directory named params_ (or params_good if Good is true), then quit -- useful for comparing to later changes and seeing multiple views of current params
+ SaveAll bool `nest:"+"`
+
+ // for SaveAll, save to params_good for a known good params state. This can be done prior to making a new release after all tests are passing -- add results to git to provide a full diff record of all params over time.
+ Good bool `nest:"+"`
+}
+
+func (pc *ParamConfig) Update() {
+ pc.SynDt = 1.0 / pc.SynTau
+}
+
+// RunConfig has config parameters related to running the sim
+type RunConfig struct {
+ // Neuron runs the standard Neuron update equations, vs. Kinase experimental equations
+ Neuron bool `default:"false"`
+
+ // use the GPU for computation -- only for testing in this model -- not faster
+ GPU bool `default:"false"`
+
+ // number of parallel threads for CPU computation -- 0 = use default
+ NThreads int `default:"2"`
+
+ // starting run number -- determines the random seed -- run numbering counts up from there -- can do all runs in parallel by launching separate jobs with each run, setting NRuns = 1
+ Run int `default:"0"`
+
+ // total number of runs to do when running Train
+ NRuns int `default:"1" min:"1"`
+
+ // total number of epochs per run
+ NEpochs int `default:"1"`
+
+ // total number of trials per epoch
+ NTrials int `default:"10"` // 1000
+
+ // number of msec in the minus phase
+ MinusMSec int `default:"150"`
+
+ // number of msec in the plus phase
+ PlusMSec int `default:"50"`
+
+ // quiet space between spiking phases, in msec
+ ISIMSec int `default:"200"`
+
+ // minus and plus phase firing rates (Hz), for testing
+ MinusHz, PlusHz float32 `default:"50"`
+
+ // additive difference in sending firing frequency relative to recv (recv has basic minus, plus)
+ SendDiffHz float32
+}
+
+// LogConfig has config parameters related to logging data
+type LogConfig struct {
+
+ // if true, save final weights after each run
+ SaveWts bool
+
+ // if true, save cycle log to file, as .cyc.tsv typically
+ Cycle bool `default:"true" nest:"+"`
+
+ // if true, save network activation etc data from testing trials, for later viewing in netview
+ NetData bool
+}
+
+// Config is a standard Sim config -- use as a starting point.
+type Config struct {
+
+ // clamp constant Ge value -- otherwise drive discrete spiking input
+ GeClamp bool `default:"false"`
+
+ // frequency of input spiking for !GeClamp mode
+ SpikeHz float32 `default:"50"`
+
+ // Raw synaptic excitatory conductance
+ Ge float32 `min:"0" step:"0.01" default:"2.0"`
+
+ // Inhibitory conductance
+ Gi float32 `min:"0" step:"0.01" default:"0.1"`
+
+ // total number of cycles to run
+ NCycles int `min:"10" default:"200"`
+
+ // when does excitatory input into neuron come on?
+ OnCycle int `min:"0" default:"10"`
+
+ // when does excitatory input into neuron go off?
+ OffCycle int `min:"0" default:"190"`
+
+ // how often to update display (in cycles)
+ UpdateInterval int `min:"1" default:"10"`
+
+ // specify include files here, and after configuration, it contains list of include files added
+ Includes []string
+
+ // open the GUI -- does not automatically run -- if false, then runs automatically and quits
+ GUI bool `default:"true"`
+
+ // log debugging information
+ Debug bool
+
+ // parameter related configuration options
+ Params ParamConfig `view:"add-fields"`
+
+ // sim running related configuration options
+ Run RunConfig `view:"add-fields"`
+
+ // data logging related configuration options
+ Log LogConfig `view:"add-fields"`
+}
+
+func (cfg *Config) IncludesPtr() *[]string { return &cfg.Includes }
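NOTE: ParamConfig stores the integration rate as SynDt = 1/SynTau, which
TrialImpl (below) uses to exponentially integrate the spike-driven CaSyn
traces. A minimal sketch of that update step, assuming the same Euler form as
the kinaseq.go code below:

    // caSynStep advances a CaSyn trace by one cycle (msec): exponential
    // approach toward SpikeG*spike at rate synDt = 1/SynTau.
    func caSynStep(caSyn, spike, spikeG, synDt float32) float32 {
        return caSyn + synDt*(spikeG*spike-caSyn)
    }

With the defaults SpikeG = 8 and SynTau = 30, a single spike bumps CaSyn up by
8/30 ~= 0.27, and the trace then decays back toward zero with a 30 msec time
constant.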
diff --git a/examples/kinaseq/kinaseq.go b/examples/kinaseq/kinaseq.go
index 31cfcbcbb..bdce2777f 100644
--- a/examples/kinaseq/kinaseq.go
+++ b/examples/kinaseq/kinaseq.go
@@ -1,300 +1,145 @@
-// Copyright (c) 2020, The Emergent Authors. All rights reserved.
+// Copyright (c) 2019, The Emergent Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
-//go:build this_is_broken_we_should_fix_or_delete
-
-// kinaseq plots kinase learning simulation over time
package main
-/*
import (
"fmt"
"math/rand"
+ "reflect"
- "cogentcore.org/core/icons"
- "cogentcore.org/core/ki"
"cogentcore.org/core/math32"
- "github.com/emer/axon/v2/axon"
- "github.com/emer/emergent/v2/emer"
- "cogentcore.org/core/tensor/stats/stats"
- "cogentcore.org/core/plot/plotview"
- "cogentcore.org/core/tensor/table"
+ "cogentcore.org/core/math32/minmax"
+ "github.com/emer/emergent/v2/elog"
+ "github.com/emer/emergent/v2/etime"
)
-func main() {
- TheSim.Config()
- win := TheSim.ConfigGUI()
- win.StartEventLoop()
-}
-
-// LogPrec is precision for saving float values in logs
-const LogPrec = 4
-
-// Sim holds the params, table, etc
-type Sim struct {
-
- // the network -- click to view / edit parameters for layers, paths, etc
- Net *axon.Network `view:"no-inline"`
-
- // the sending neuron
- SendNeur *axon.Neuron `view:"no-inline"`
-
- // the receiving neuron
- RecvNeur *axon.Neuron `view:"no-inline"`
-
- // path-level parameters -- for intializing synapse -- other params not used
- Path *axon.Path `view:"no-inline"`
-
- // extra neuron state
- NeuronEx NeuronEx `view:"no-inline"`
-
- // all parameter management
- Params emer.Params `view:"inline"`
-
- // multiplier on product factor to equate to SynC
- PGain float32
-
- // spike multiplier for display purposes
- SpikeDisp float32
-
- // use current Ge clamping for recv neuron -- otherwise spikes driven externally
- RGeClamp bool
-
- // gain multiplier for RGe clamp
- RGeGain float32
-
- // baseline recv Ge level
- RGeBase float32
-
- // baseline recv Gi level
- RGiBase float32
-
- // number of repetitions -- if > 1 then only final @ end of Dur shown
- NTrials int
-
- // number of msec in minus phase
- MinusMsec int
+// KinaseState is basic Kinase equation state
+type KinaseState struct {
- // number of msec in plus phase
- PlusMsec int
+ // Condition counter
+ Condition int
- // quiet space between spiking
- ISIMsec int
+ // Condition description
+ Cond string
- // total trial msec: minus, plus isi
- TrialMsec int `view:"-"`
+ // Trial counter
+ Trial int
- // minus phase firing frequency
- MinusHz int
+ // Cycle counter
+ Cycle int
- // plus phase firing frequency
- PlusHz int
+ // phase-based firing rates
+ MinusHz, PlusHz float32
- // additive difference in sending firing frequency relative to recv (recv has basic minus, plus)
- SendDiffHz int
+ // Neuron spiking
+ SendSpike, RecvSpike float32
- // synapse state values, NST_ in log
- SynNeurTheta axon.Synapse `view:"no-inline"`
+ // Neuron probability of spiking
+ SendP, RecvP float32
- // synapse state values, SST_ in log
- SynSpkTheta axon.Synapse `view:"no-inline"`
+ // CaSyn is spike-driven calcium trace for synapse-level Ca-driven learning: exponential integration of SpikeG * Spike at SynTau time constant (typically 30). Synapses integrate send.CaSyn * recv.CaSyn across M, P, D time integrals for the synaptic trace driving credit assignment in learning. Time constant reflects binding time of Glu to NMDA and Ca buffering postsynaptically, and determines time window where pre * post spiking must overlap to drive learning.
+ SendCaSyn, RecvCaSyn float32
- // synapse state values, SSC_ in log
- SynSpkCont axon.Synapse `view:"no-inline"`
+ // CaM is first stage running average (mean) Ca calcium level (like CaM = calmodulin), feeds into CaP
+ CaM float32
- // synapse state values, SNC_ in log
- SynNMDACont axon.Synapse `view:"no-inline"`
+ // CaP is shorter timescale integrated CaM value, representing the plus, LTP direction of weight change and capturing the function of CaMKII in the Kinase learning rule
+ CaP float32
- // axon time recording
- Context axon.Context
+ // CaD is longer timescale integrated CaP value, representing the minus, LTD direction of weight change and capturing the function of DAPK1 in the Kinase learning rule
+ CaD float32
- // all logs
- Logs map[string]*table.Table `view:"no-inline"`
+ // CaUpT is time in CyclesTotal of last updating of Ca values at the synapse level, for optimized synaptic-level Ca integration -- converted to / from uint32
+ CaUpT float32
- // all plots
- Plots map[string]*plotview.PlotView `view:"-"`
+ // DWt is the simulated weight change: CaP - CaD, recorded at the end of the plus phase
+ DWt float32
- // main GUI window
- Win *core.Window `view:"-"`
-
- // the master toolbar
- ToolBar *core.ToolBar `view:"-"`
-
- // stop button
- StopNow bool `view:"-"`
-}
-
-// TheSim is the overall state for this simulation
-var TheSim Sim
-
-// Config configures all the elements using the standard functions
-func (ss *Sim) Config() {
- ss.Net = &axon.Network{}
- ss.Params.Params = ParamSets
- ss.Params.AddNetwork(ss.Net)
- ss.Context.Defaults()
- ss.PGain = 1
- ss.SpikeDisp = 0.1
- ss.RGeClamp = true
- ss.RGeGain = 0.2
- ss.RGeBase = 0
- ss.RGiBase = 0
- ss.NTrials = 1000
- ss.MinusMsec = 150
- ss.PlusMsec = 50
- ss.ISIMsec = 200
- ss.MinusHz = 50
- ss.PlusHz = 25
- ss.Update()
- ss.ConfigNet(ss.Net)
- ss.ConfigTable(ss.Log("TrialLog"))
- ss.ConfigTable(ss.Log("RunLog"))
- ss.ConfigTable(ss.Log("DWtLog"))
- ss.ConfigTable(ss.Log("DWtVarLog"))
- ss.Init()
-}
-
-// Update updates computed values
-func (ss *Sim) Update() {
- ss.TrialMsec = ss.MinusMsec + ss.PlusMsec + ss.ISIMsec
-}
-
-// Init restarts the run and applies current parameters
-func (ss *Sim) Init() {
- ss.Params.SetAll()
- ss.Context.Reset()
- ss.Net.InitWts()
- ss.NeuronEx.Init()
- ss.InitSyn(&ss.SynNeurTheta)
- ss.InitSyn(&ss.SynSpkTheta)
- ss.InitSyn(&ss.SynSpkCont)
- ss.InitSyn(&ss.SynNMDACont)
-}
-
-// Log returns / makes log table of given name
-func (ss *Sim) Log(name string) *table.Table {
- if ss.Logs == nil {
- ss.Logs = make(map[string]*table.Table)
- }
- dt, ok := ss.Logs[name]
- if ok {
- return dt
- }
- dt = &table.Table{}
- ss.Logs[name] = dt
- return dt
+ // ErrDWt is the target error-driven dwt: (PlusHz - MinusHz) / 100
+ ErrDWt float32
}
-func (ss *Sim) Plot(name string) *plotview.PlotView {
- return ss.Plots[name]
-}
-
-func (ss *Sim) AddPlot(name string, plt *plotview.PlotView) {
- if ss.Plots == nil {
- ss.Plots = make(map[string]*plotview.PlotView)
- }
- ss.Plots[name] = plt
+func (ks *KinaseState) Init() {
+ ks.SendSpike = 0
+ ks.RecvSpike = 0
+ ks.SendP = 1
+ ks.RecvP = 1
+ ks.SendCaSyn = 0
+ ks.RecvCaSyn = 0
}
// Sweep runs a sweep through minus-plus ranges
func (ss *Sim) Sweep() {
- ss.Update()
- dt := ss.Log("DWtLog")
- dvt := ss.Log("DWtVarLog")
- rdt := ss.Log("RunLog")
-
- hz := []int{25, 50, 100}
+ hz := []float32{25, 50, 100}
nhz := len(hz)
- dt.SetNumRows(nhz * nhz)
- dvt.SetNumRows(nhz * nhz)
-
- row := 0
+ cond := 0
for mi := 0; mi < nhz; mi++ {
minusHz := hz[mi]
for pi := 0; pi < nhz; pi++ {
plusHz := hz[pi]
-
- ss.RunImpl(minusHz, plusHz, ss.NTrials)
-
- cond := fmt.Sprintf("%03d -> %03d", minusHz, plusHz)
- dwt := float64(plusHz-minusHz) / 100
- dt.SetFloat("ErrDWt", row, float64(dwt))
- dt.SetString("Cond", row, cond)
- dvt.SetFloat("ErrDWt", row, float64(dwt))
- dvt.SetString("Cond", row, cond)
-
- rix := table.NewIndexView(rdt)
- for ci := 2; ci < rdt.NumColumns(); ci++ {
- cnm := rdt.ColumnName(ci)
- mean := agg.Mean(rix, cnm)[0]
- sem := agg.Sem(rix, cnm)[0]
- dt.SetFloat(cnm, row, mean)
- dvt.SetFloat(cnm, row, sem)
- }
- row++
+ condStr := fmt.Sprintf("%03d -> %03d", minusHz, plusHz)
+ ss.Kinase.Condition = cond
+ ss.Kinase.Cond = condStr
+ ss.RunImpl(minusHz, plusHz, ss.Config.Run.NTrials)
+ cond++
}
}
- ss.Plot("DWtPlot").Update()
- ss.Plot("DWtVarPlot").Update()
+ // ss.Plot("DWtPlot").Update()
+ // ss.Plot("DWtVarPlot").Update()
}
// Run runs for given parameters
func (ss *Sim) Run() {
- ss.Update()
- ss.RunImpl(ss.MinusHz, ss.PlusHz, ss.NTrials)
+ cr := &ss.Config.Run
+ ss.RunImpl(cr.MinusHz, cr.PlusHz, cr.NTrials)
}
// RunImpl runs NTrials, recording to RunLog and TrialLog
-func (ss *Sim) RunImpl(minusHz, plusHz, ntrials int) {
- dt := ss.Log("RunLog")
- dt.SetNumRows(ntrials)
- ss.Context.Reset()
- for nr := 0; nr < ntrials; nr++ {
+func (ss *Sim) RunImpl(minusHz, plusHz float32, ntrials int) {
+ ss.Kinase.Init()
+ for trl := 0; trl < ntrials; trl++ {
+ ss.Kinase.Trial = trl
ss.TrialImpl(minusHz, plusHz)
- ss.LogState(dt, nr, nr, 0)
}
- ss.Plot("RunPlot").Update()
+ ss.Logs.LogRow(etime.Test, etime.Condition, ss.Kinase.Condition)
+ ss.GUI.UpdatePlot(etime.Test, etime.Condition)
}
func (ss *Sim) Trial() {
- ss.Update()
- ss.TrialImpl(ss.MinusHz, ss.PlusHz)
- ss.Plot("TrialPlot").Update()
+ cr := &ss.Config.Run
+ ss.Kinase.Init()
+ ss.TrialImpl(cr.MinusHz, cr.PlusHz)
}
// TrialImpl runs one trial for given parameters
-func (ss *Sim) TrialImpl(minusHz, plusHz int) {
- dt := ss.Log("TrialLog")
- dt.SetNumRows(ss.TrialMsec)
-
- nex := &ss.NeuronEx
- gi := ss.RGiBase
-
- ss.InitWts()
-
- ss.Context.NewState(true)
+func (ss *Sim) TrialImpl(minusHz, plusHz float32) {
+ cfg := &ss.Config
+ ks := &ss.Kinase
+ ks.MinusHz = minusHz
+ ks.PlusHz = plusHz
+ ks.Cycle = 0
for phs := 0; phs < 3; phs++ {
- var maxms, rhz int
+ var maxms int
+ var rhz float32
switch phs {
case 0:
rhz = minusHz
- maxms = ss.MinusMsec
+ maxms = cfg.Run.MinusMSec
case 1:
rhz = plusHz
- maxms = ss.PlusMsec
+ maxms = cfg.Run.PlusMSec
case 2:
rhz = 0
- maxms = ss.ISIMsec
+ maxms = cfg.Run.ISIMSec
}
- shz := rhz + ss.SendDiffHz
+ shz := rhz + cfg.Run.SendDiffHz
if shz < 0 {
shz = 0
}
- ge := ss.RGeBase + ss.RGeGain*RGeStimForHz(float32(rhz))
-
var Sint, Rint float32
if rhz > 0 {
Rint = math32.Exp(-1000.0 / float32(rhz))
@@ -303,118 +148,85 @@ func (ss *Sim) TrialImpl(minusHz, plusHz int) {
Sint = math32.Exp(-1000.0 / float32(shz))
}
for t := 0; t < maxms; t++ {
- cyc := ss.Context.Cycle
-
- sSpk := false
+ ks.SendSpike = 0
if Sint > 0 {
- nex.Sp *= rand.Float32()
- if nex.Sp <= Sint {
- sSpk = true
- nex.Sp = 1
+ ks.SendP *= rand.Float32()
+ if ks.SendP <= Sint {
+ ks.SendSpike = 1
+ ks.SendP = 1
}
}
+ ks.SendCaSyn += cfg.Params.SynDt * (cfg.Params.SpikeG*ks.SendSpike - ks.SendCaSyn)
- rSpk := false
+ ks.RecvSpike = 0
if Rint > 0 {
- nex.Rp *= rand.Float32()
- if nex.Rp <= Rint {
- rSpk = true
- nex.Rp = 1
+ ks.RecvP *= rand.Float32()
+ if ks.RecvP <= Rint {
+ ks.RecvSpike = 1
+ ks.RecvP = 1
}
}
+ ks.RecvCaSyn += cfg.Params.SynDt * (cfg.Params.SpikeG*ks.RecvSpike - ks.RecvCaSyn)
- ss.NeuronUpdate(sSpk, rSpk, ge, gi)
+ ca := ks.SendCaSyn * ks.RecvCaSyn
+ ss.CaParams.FromCa(ca, &ks.CaM, &ks.CaP, &ks.CaD)
- ss.LogState(dt, cyc, 0, cyc)
- ss.Context.CycleInc()
+ ss.Logs.LogRow(etime.Test, etime.Cycle, ks.Cycle)
+ ks.Cycle++
+ }
+ if phs == 1 {
+ ks.DWt = ks.CaP - ks.CaD
}
}
-}
-
-// ConfigGUI configures the Cogent Core GUI interface for this simulation.
-func (ss *Sim) ConfigGUI() *core.Window {
- width := 1600
- height := 1200
-
- // core.WinEventTrace = true
-
- core.SetAppName("kinaseq")
- core.SetAppAbout(`Exploration of kinase equations. See GitHub.
`)
-
- win := core.NewMainWindow("kinaseq", "Kinase Equation Exploration", width, height)
- ss.Win = win
-
- vp := win.WinViewport2D()
- updt := vp.UpdateStart()
-
- mfr := win.SetMainFrame()
-
- tbar := core.AddNewToolBar(mfr, "tbar")
- tbar.SetStretchMaxWidth()
- ss.ToolBar = tbar
-
- split := core.AddNewSplitView(mfr, "split")
- split.Dim = math32.X
- split.SetStretchMax()
-
- sv := views.AddNewStructView(split, "sv")
- sv.SetStruct(ss)
-
- tv := core.AddNewTabView(split, "tv")
- plt := tv.AddNewTab(plotview.KiT_PlotView, "RunPlot").(*plotview.PlotView)
- ss.AddPlot("RunPlot", ss.ConfigRunPlot(plt, ss.Log("RunLog")))
+ ks.ErrDWt = (plusHz - minusHz) / 100
- plt = tv.AddNewTab(plotview.KiT_PlotView, "TrialPlot").(*plotview.PlotView)
- ss.AddPlot("TrialPlot", ss.ConfigTrialPlot(plt, ss.Log("TrialLog")))
-
- plt = tv.AddNewTab(plotview.KiT_PlotView, "DWtPlot").(*plotview.PlotView)
- ss.AddPlot("DWtPlot", ss.ConfigDWtPlot(plt, ss.Log("DWtLog")))
-
- plt = tv.AddNewTab(plotview.KiT_PlotView, "DWtVarPlot").(*plotview.PlotView)
- ss.AddPlot("DWtVarPlot", ss.ConfigDWtPlot(plt, ss.Log("DWtVarLog")))
-
- split.SetSplits(.2, .8)
-
- tbar.AddAction(core.ActOpts{Label: "Init", Icon: icons.Update, Tooltip: "Run the equations and plot results."}, win.This(), func(recv, send tree.Node, sig int64, data interface{}) {
- ss.Init()
- vp.SetNeedsFullRender()
- })
-
- tbar.AddAction(core.ActOpts{Label: "Trial", Icon: "step-fwd", Tooltip: "Run one trial of the equations and plot results in TrialPlot."}, win.This(), func(recv, send tree.Node, sig int64, data interface{}) {
- ss.Trial()
- vp.SetNeedsFullRender()
- })
-
- tbar.AddAction(core.ActOpts{Label: "Run", Icon: "play", Tooltip: "Run NTrials of the equations and plot results at end of each trial in RunPlot."}, win.This(), func(recv, send tree.Node, sig int64, data interface{}) {
- ss.Run()
- vp.SetNeedsFullRender()
- })
-
- tbar.AddAction(core.ActOpts{Label: "Sweep", Icon: "fast-fwd", Tooltip: "Sweep through minus-plus combinations and plot in DWtLogs."}, win.This(), func(recv, send tree.Node, sig int64, data interface{}) {
- ss.Sweep()
- vp.SetNeedsFullRender()
- })
-
- tbar.AddAction(core.ActOpts{Label: "README", Icon: "file-markdown", Tooltip: "Opens your browser on the README file that contains instructions for how to run this model."}, win.This(),
- func(recv, send tree.Node, sig int64, data interface{}) {
- core.TheApp.OpenURL("https://github.com/emer/axon/blob/master/examples/kinaseq/README.md")
- })
-
- vp.UpdateEndNoSig(updt)
-
- // main menu
- appnm := core.AppName()
- mmen := win.MainMenu
- mmen.ConfigMenus([]string{appnm, "File", "Edit", "Window"})
-
- amen := win.MainMenu.ChildByName(appnm, 0).(*core.Action)
- amen.Menu.AddAppMenu(win)
-
- emen := win.MainMenu.ChildByName("Edit", 1).(*core.Action)
- emen.Menu.AddCopyCutPaste(win)
+ ss.GUI.UpdatePlot(etime.Test, etime.Cycle)
+ ss.Logs.LogRow(etime.Test, etime.Trial, ks.Trial)
+ ss.GUI.UpdatePlot(etime.Test, etime.Trial)
+}
- win.MainMenuUpdated()
- return win
+func (ss *Sim) ConfigKinaseLogItems() {
+ lg := &ss.Logs
+
+ ks := &ss.Kinase
+ typ := reflect.TypeOf(*ks)
+ val := reflect.ValueOf(ks).Elem()
+ nf := typ.NumField()
+ for i := 0; i < nf; i++ {
+ i := i // capture loop variable for the Write closures below (needed before Go 1.22)
+ field := typ.Field(i)
+ itm := lg.AddItem(&elog.Item{
+ Name: field.Name,
+ Type: field.Type.Kind(),
+ FixMax: false,
+ Range: minmax.F32{Max: 1},
+ Write: elog.WriteMap{
+ etime.Scope(etime.Test, etime.Cycle): func(ctx *elog.Context) {
+ switch field.Type.Kind() {
+ case reflect.Float32:
+ ctx.SetFloat32(val.Field(i).Interface().(float32))
+ case reflect.Int:
+ ctx.SetFloat32(float32(val.Field(i).Interface().(int)))
+ case reflect.String:
+ ctx.SetString(val.Field(i).Interface().(string))
+ }
+ },
+ }})
+ times := []etime.Times{etime.Condition, etime.Trial, etime.Cycle}
+ if field.Type.Kind() == reflect.Float32 {
+ lg.AddStdAggs(itm, etime.Test, times...)
+ } else {
+ tn := len(times)
+ for ti := 0; ti < tn-1; ti++ {
+ itm.Write[etime.Scope(etime.Test, times[ti])] = func(ctx *elog.Context) {
+ switch field.Type.Kind() {
+ case reflect.Int:
+ ctx.SetFloat32(float32(val.Field(i).Interface().(int)))
+ case reflect.String:
+ ctx.SetString(val.Field(i).Interface().(string))
+ }
+ }
+ }
+ }
+ }
}
-*/
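NOTE: TrialImpl above draws pre- and postsynaptic spikes with a multiplicative
probability accumulator: each cycle SendP (or RecvP) is multiplied by a uniform
random number, and a spike fires once the product drops below exp(-1000/hz),
after which the accumulator resets to 1. This is Knuth-style Poisson sampling:
the number of cycles needed for the product to fall below e^-lambda is
(approximately) Poisson-distributed with mean lambda = 1000/hz, so inter-spike
intervals average 1000/hz msec, i.e., the target rate in Hz. A standalone
sketch of the same generator (imports assumed: math/rand,
cogentcore.org/core/math32):

    // poissonSpikes reports which of n cycles contain a spike at the given
    // rate (Hz), using the accumulator scheme from TrialImpl.
    func poissonSpikes(hz float32, n int) []bool {
        spikes := make([]bool, n)
        if hz <= 0 {
            return spikes
        }
        thr := math32.Exp(-1000.0 / hz)
        p := float32(1)
        for t := 0; t < n; t++ {
            p *= rand.Float32()
            if p <= thr {
                spikes[t] = true
                p = 1 // reset the accumulator after each spike
            }
        }
        return spikes
    }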
diff --git a/examples/kinaseq/neuron.go b/examples/kinaseq/neuron.go
index 8df00006b..8f9da7c6c 100644
--- a/examples/kinaseq/neuron.go
+++ b/examples/kinaseq/neuron.go
@@ -1,534 +1,225 @@
-// Copyright (c) 2021, The Emergent Authors. All rights reserved.
+// Copyright (c) 2019, The Emergent Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
-//go:build this_is_broken_we_should_fix_or_delete
-
package main
import (
+ "fmt"
"log"
- "strconv"
- "strings"
- "cogentcore.org/core/plot/plotview"
- "cogentcore.org/core/tensor"
- "cogentcore.org/core/tensor/table"
"github.com/emer/axon/v2/axon"
- "github.com/emer/emergent/v2/emer"
+ "github.com/emer/emergent/v2/etime"
+ "github.com/emer/emergent/v2/netparams"
"github.com/emer/emergent/v2/params"
"github.com/emer/emergent/v2/paths"
)
-// ParamSets for basic parameters
-// Base is always applied, and others can be optionally selected to apply on top of that
-var ParamSets = params.Sets{
- {Name: "Base", Desc: "these are the best params", Sheets: params.Sheets{
- "Network": ¶ms.Sheet{
- {Sel: "Layer", Desc: "all defaults",
- Params: params.Params{
- "Layer.Acts.Decay.Glong": "0.6", // 0.6
- "Layer.Acts.Dend.GbarExp": "0.5", // 0.5 best
- "Layer.Acts.Dend.GbarR": "6", // 6 best
- "Layer.Acts.Dt.VmDendTau": "5", // 5 > 2.81 here but small effect
- "Layer.Acts.NMDA.Gbar": "0.15", // 0.15
- "Layer.Acts.NMDA.ITau": "100", // 1 = get rid of I -- 100, 100 1.5, 1.2 kinda works
- "Layer.Acts.NMDA.Tau": "100", // 30 not good
- "Layer.Acts.NMDA.MgC": "1.4", // 1.2 > for Snmda, no Snmda = 1.0 > 1.2
- "Layer.Acts.NMDA.Voff": "5", // 5 > 0 but need to reduce gbar -- too much
- "Layer.Acts.Noise.On": "true",
- "Layer.Acts.Noise.Ge": "0.02", // induces significant variability in Rn Ge clamp firing
- "Layer.Acts.Noise.Gi": "0.05",
- "Layer.Acts.VGCC.Gbar": "0.02",
- "Layer.Acts.AK.Gbar": "2",
- // "Layer.Acts.AK.Hf": "5.5",
- // "Layer.Acts.AK.Mf": "0.2",
- "Layer.Learn.NeurCa.SpikeG": "8",
- "Layer.Learn.NeurCa.SynTau": "40", // 40 best in larger models
- "Layer.Learn.NeurCa.MTau": "10",
- "Layer.Learn.NeurCa.PTau": "40",
- "Layer.Learn.NeurCa.DTau": "40",
- "Layer.Learn.NeurCa.CaMax": "200",
- "Layer.Learn.NeurCa.CaThr": "0.05",
- "Layer.Learn.LrnNMDA.ITau": "1", // urakubo = 100, does not work here..
- "Layer.Learn.LrnNMDA.Tau": "50", // urakubo = 30 > 20 but no major effect on PCA
- }},
- {Sel: "Path", Desc: "basic path params",
- Params: params.Params{
- "Path.Learn.LRate.Base": "0.1", // 0.1 for SynSpkCa even though dwt equated
- "Path.SWts.Adapt.LRate": "0.08", // .1 >= .2, but .2 is fast enough for DreamVar .01.. .1 = more minconstraint
- "Path.SWts.Init.SPct": "0.5", // .5 >= 1 here -- 0.5 more reliable, 1.0 faster..
- "Path.SWts.Init.Var": "0", // .5 >= 1 here -- 0.5 more reliable, 1.0 faster..
- "Path.Learn.KinaseCa.SpikeG": "12", // keep at 12 standard, adjust other things
- "Path.Learn.KinaseCa.NMDAG": "40", // just to match SynSpk..
- "Path.Learn.KinaseCa.Rule": "SynSpkTheta", // "SynNMDACa",
- "Path.Learn.KinaseCa.MTau": "5", // 5 > 10 test more
- "Path.Learn.KinaseCa.PTau": "40",
- "Path.Learn.KinaseCa.DTau": "40",
- "Path.Learn.KinaseDWt.TWindow": "10",
- "Path.Learn.KinaseDWt.DMaxPct": "0.5",
- "Path.Learn.KinaseDWt.TrlDecay": "0.0",
- "Path.Learn.KinaseDWt.DScale": "1",
- "Path.Learn.XCal.On": "false",
- "Path.Learn.XCal.PThrMin": "0.05", // 0.05 best for objrec, higher worse
- "Path.Learn.XCal.LrnThr": "0.01", // 0.05 best for objrec, higher worse
- }},
- },
- }},
+// ParamSets is the default set of parameters -- Base is always applied, and others can be optionally
+// selected to apply on top of that
+var ParamSets = netparams.Sets{
+ "Base": {
+ {Sel: "Path", Desc: "no learning",
+ Params: params.Params{
+ "Path.Learn.Learn": "false",
+ }},
+ {Sel: "Layer", Desc: "generic params for all layers: lower gain, slower, soft clamp",
+ Params: params.Params{
+ "Layer.Inhib.Layer.On": "false",
+ "Layer.Acts.Init.Vm": "0.3",
+ }},
+ },
+ "Testing": {
+ {Sel: "Layer", Desc: "",
+ Params: params.Params{
+ "Layer.Acts.NMDA.Gbar": "0.0",
+ "Layer.Acts.GabaB.Gbar": "0.0",
+ }},
+ },
}
// Extra state for neuron
type NeuronEx struct {
- // time of last sending spike
- SCaUpT int
-
- // time of last recv spike
- RCaUpT int
-
- // sending poisson firing probability accumulator
- Sp float32
-
- // recv poisson firing probability accumulator
- Rp float32
-
- // NMDA mg-based blocking conductance
- NMDAGmg float32
-
- // when 0, it is time to learn according to theta cycle, otherwise increments up unless still -1 from init
- LearnNow float32
+ // input ISI counter for spiking mode -- counts up until the next input spike is due
+ InISI float32
}
-func (nex *NeuronEx) Init() {
- nex.SCaUpT = -1
- nex.RCaUpT = -1
- nex.Sp = 1
- nex.Rp = 1
- nex.NMDAGmg = 0
- nex.LearnNow = -1
+func (nrn *NeuronEx) Init() {
+ nrn.InISI = 0
}
-////////////////////////////////////////////////////////////////////////
-
-// RGeStimForHzMap is the strength of GeStim G clamp to obtain a given R firing rate
-var RGeStimForHzMap = map[int]float32{
- 25: .09,
- 50: .12,
- 100: .15,
-}
-
-func RGeStimForHz(hz float32) float32 {
- var gel, geh, hzl, hzh float32
- switch {
- case hz <= 25:
- gel = 0
- geh = RGeStimForHzMap[25]
- hzl = 0
- hzh = 25
- case hz <= 50:
- gel = RGeStimForHzMap[25]
- geh = RGeStimForHzMap[50]
- hzl = 25
- hzh = 50
- case hz <= 100:
- gel = RGeStimForHzMap[50]
- geh = RGeStimForHzMap[100]
- hzl = 50
- hzh = 100
- default:
- gel = RGeStimForHzMap[100]
- geh = 2 * gel
- hzl = 100
- hzh = 200
- }
- return (gel + ((hz-hzl)/(hzh-hzl))*(geh-gel))
-}
+func (ss *Sim) ConfigNet(net *axon.Network) {
+ ctx := &ss.Context
-////////////////////////////////////////////////////////////////////////
-// Sim
+ net.InitName(net, "Neuron")
+ in := net.AddLayer2D("Input", 1, 1, axon.InputLayer)
+ hid := net.AddLayer2D("Neuron", 1, 1, axon.SuperLayer)
-func (ss *Sim) InitSyn(sy *axon.Synapse) {
- ss.Path.InitWtsSyn(sy, 0.5, 1)
-}
+ net.ConnectLayers(in, hid, paths.NewFull(), axon.ForwardPath)
-func (ss *Sim) ConfigNet(net *axon.Network) {
- net.InitName(net, "Neuron")
- sly := net.AddLayer2D("Send", 1, 1, axon.SuperLayer).(*axon.Layer)
- rly := net.AddLayer2D("Recv", 1, 1, axon.SuperLayer).(*axon.Layer)
- pj := net.ConnectLayers(sly, rly, paths.NewFull(), emer.Forward)
- err := net.Build()
+ err := net.Build(ctx)
if err != nil {
log.Println(err)
return
}
net.Defaults()
- ss.SendNeur = &sly.Neurons[0]
- ss.RecvNeur = &rly.Neurons[0]
- ss.Path = pj.(*axon.Path)
-}
-
-// NeuronUpdate updates the neuron with whether send or recv spiked
-func (ss *Sim) NeuronUpdate(sSpk, rSpk bool, ge, gi float32) {
- ly := ss.Net.LayByName("Recv")
- ac := &ly.Params.Act
- sn := ss.SendNeur
- rn := ss.RecvNeur
- nex := &ss.NeuronEx
-
- if sSpk {
- sn.Spike = 1
- sn.ISI = 0
- nex.SCaUpT = ss.Context.CycleTot
- } else {
- sn.Spike = 0
- sn.ISI += 1
- }
- ly.Params.Learn.LrnNMDA.SnmdaFromSpike(sn.Spike, &sn.SnmdaO, &sn.SnmdaI)
-
- // Recv
-
- ac.GeNoise(rn)
- ge += rn.GeNoise
- ac.GiNoise(rn)
- gi += rn.GiNoise
-
- if !ss.RGeClamp {
- if rSpk {
- rn.Spike = 1
- rn.ISI = 0
- nex.RCaUpT = ss.Context.CycleTot
- } else {
- rn.Spike = 0
- rn.ISI += 1
+ ss.SetParams("Network", false) // only set Network params
+ ss.InitWts(net)
+}
+
+// InitWts initializes the network weights
+func (ss *Sim) InitWts(net *axon.Network) {
+ net.InitWts(&ss.Context)
+}
+
+////////////////////////////////////////////////////////////////////////////////
+// Init, utils
+
+// Init restarts the run, and initializes everything, including network weights
+// and resets the epoch log table
+func (ss *Sim) Init() {
+ ss.Context.Reset()
+ ss.InitWts(ss.Net)
+ ss.NeuronEx.Init()
+ ss.GUI.StopNow = false
+ ss.SetParams("", false) // all sheets
+}
+
+// Counters returns a string of the current counter state,
+// using tabs to achieve reasonable formatting overall,
+// with a few tabs at the end to allow for expansion.
+func (ss *Sim) Counters() string {
+ return fmt.Sprintf("Cycle:\t%d\t\t\t", ss.Context.Cycle)
+}
+
+func (ss *Sim) UpdateView() {
+ ss.GUI.UpdatePlot(etime.Test, etime.Cycle)
+ ss.GUI.ViewUpdate.Text = ss.Counters()
+ ss.GUI.ViewUpdate.UpdateCycle(int(ss.Context.Cycle))
+}
+
+////////////////////////////////////////////////////////////////////////////////
+// Running the Network, starting bottom-up..
+
+// RunCycles updates neurons over specified number of cycles
+func (ss *Sim) RunCycles() {
+ ctx := &ss.Context
+ ss.Init()
+ ss.GUI.StopNow = false
+ ss.Net.InitActs(ctx)
+ ctx.NewState(etime.Train)
+ ss.SetParams("", false)
+ // ly := ss.Net.AxonLayerByName("Neuron")
+ // nrn := &(ly.Neurons[0])
+ inputOn := false
+ for cyc := 0; cyc < ss.Config.NCycles; cyc++ {
+ switch cyc {
+ case ss.Config.OnCycle:
+ inputOn = true
+ case ss.Config.OffCycle:
+ inputOn = false
+ }
+ ss.NeuronUpdate(ss.Net, inputOn)
+ ctx.Cycle = int32(cyc)
+ ss.Logs.LogRow(etime.Test, etime.Cycle, cyc)
+ ss.RecordValues(cyc)
+ if cyc%ss.Config.UpdateInterval == 0 {
+ ss.UpdateView()
+ }
+ ss.Context.CycleInc()
+ if ss.GUI.StopNow {
+ break
}
- rn.Ge = ge
- rn.GeSyn = ge
- rn.Gi = gi
- rn.GnmdaSyn = ge
- rn.Gnmda = ac.NMDA.Gnmda(rn.GnmdaSyn, rn.VmDend)
- rn.RnmdaSyn = ge
- mgg, cav := ac.NMDA.VFactors(rn.VmDend) // note: using Vm does NOT work well at all
- nex.NMDAGmg = mgg
- rn.RCa = rn.RnmdaSyn * mgg * cav
- rn.RCa = ly.Params.Learn.NeurCa.CaNorm(rn.RCa) // NOTE: RCa update from spike is 1 cycle behind Snmda
- } else {
- rn.GeRaw = ge
- ac.Dt.GeSynFromRaw(rn.GeRaw, &rn.GeSyn, ac.Init.GeBase)
- rn.Ge = rn.GeSyn
- rn.Gi = gi
- ac.NMDAFromRaw(rn, 0)
- nex.NMDAGmg = ac.NMDA.MgGFromV(rn.VmDend)
- }
- rn.GABAB, rn.GABABx = ac.GABAB.GABAB(rn.GABAB, rn.GABABx, rn.Gi)
- rn.GgabaB = ac.GABAB.GgabaB(rn.GABAB, rn.VmDend)
-
- rn.Ge += rn.Gvgcc + rn.Gnmda
- rn.Gi += rn.GgabaB
-
- ac.VmFromG(rn)
- if ss.RGeClamp {
- ac.ActFromG(rn)
}
- ly.Params.Learn.LrnNMDAFromRaw(rn, 0)
-
- ly.Params.Learn.CaFromSpike(rn)
- ly.Params.Learn.CaFromSpike(sn)
-
- ss.SynUpdate()
+ ss.UpdateView()
}
-// SynUpdate updates the synapses based on current neuron state
-func (ss *Sim) SynUpdate() {
- // ly := ss.Net.LayByName("Recv")
- pj := ss.Path
- kp := &pj.Params.Learn.KinaseCa
- twin := pj.Params.Learn.KinaseDWt.TWindow
- ctime := int32(ss.Context.CycleTot)
-
- pmsec := ss.MinusMsec + ss.PlusMsec
-
- sn := ss.SendNeur
- rn := ss.RecvNeur
-
- nst := &ss.SynNeurTheta
- sst := &ss.SynSpkTheta
- ssc := &ss.SynSpkCont
- snc := &ss.SynNMDACont
-
- //////////////////////////////
- // Theta
-
- // NeurSpkTheta continuous update: standard CHL s * r product form
- nst.CaM = ss.PGain * sn.CaM * rn.CaM
- nst.CaP = ss.PGain * sn.CaP * rn.CaP
- nst.CaD = ss.PGain * sn.CaD * rn.CaD
-
- synspk := false
- if sn.Spike > 0 || rn.Spike > 0 {
- synspk = true
- }
-
- // SynSpkTheta
- if synspk {
- sst.CaM, sst.CaP, sst.CaD = kp.CurCa(ctime-1, sst.CaUpT, sst.CaM, sst.CaP, sst.CaD)
- sst.Ca = kp.SpikeG * sn.CaSyn * rn.CaSyn
- kp.FromCa(sst.Ca, &sst.CaM, &sst.CaP, &sst.CaD)
- sst.CaUpT = ctime
+func (ss *Sim) RecordValues(cyc int) {
+ var vals []float32
+ ly := ss.Net.AxonLayerByName("Neuron")
+ key := fmt.Sprintf("cyc: %03d", cyc)
+ for _, vnm := range axon.NeuronVarNames {
+ ly.UnitValues(&vals, vnm, 0)
+ vkey := key + fmt.Sprintf("\t%s", vnm)
+ ss.ValMap[vkey] = vals[0]
}
+}
- if ss.Context.Cycle == pmsec {
- if pj.Params.Learn.XCal.On {
- nst.DWt = pj.Params.Learn.XCal.DWt(nst.CaP, nst.CaD)
- sst.DWt = pj.Params.Learn.XCal.DWt(sst.CaP, sst.CaD)
+// NeuronUpdate updates the neuron
+// this calls the relevant neuron-level code directly, bypassing the network-level update loop.
+func (ss *Sim) NeuronUpdate(nt *axon.Network, inputOn bool) {
+ ctx := &ss.Context
+ ly := ss.Net.AxonLayerByName("Neuron")
+ ni := ly.NeurStIndex
+ di := uint32(0)
+ ac := &ly.Params.Acts
+ nex := &ss.NeuronEx
+ // nrn.Noise = float32(ly.Params.Act.Noise.Gen(-1))
+ // nrn.Ge += nrn.Noise // GeNoise
+ // nrn.Gi = 0
+ if inputOn {
+ if ss.Config.GeClamp {
+ axon.SetNrnV(ctx, ni, di, axon.GeRaw, ss.Config.Ge)
+ axon.SetNrnV(ctx, ni, di, axon.GeSyn, ac.Dt.GeSynFromRawSteady(axon.NrnV(ctx, ni, di, axon.GeRaw)))
} else {
- nst.DWt = nst.CaP - nst.CaD
- sst.DWt = sst.CaP - sst.CaD
+ nex.InISI += 1
+ if nex.InISI > 1000/ss.Config.SpikeHz {
+ axon.SetNrnV(ctx, ni, di, axon.GeRaw, ss.Config.Ge)
+ nex.InISI = 0
+ } else {
+ axon.SetNrnV(ctx, ni, di, axon.GeRaw, 0)
+ }
+ axon.SetNrnV(ctx, ni, di, axon.GeSyn, ac.Dt.GeSynFromRaw(axon.NrnV(ctx, ni, di, axon.GeSyn), axon.NrnV(ctx, ni, di, axon.GeRaw)))
}
- }
-
- //////////////////////////////
- // Cont
-
- sisi := int(sn.ISI)
- tdw := (sisi == twin || (sn.Spike > 0 && sisi < twin))
- risi := int(rn.ISI)
- tdw = tdw || (risi == twin || (rn.Spike > 0 && risi < twin))
-
- // SynSpkCont: continuous synaptic updating
- if synspk {
- ssc.Ca = kp.SpikeG * sn.CaSyn * rn.CaSyn
- ssc.CaUpT = ctime
} else {
- ssc.Ca = 0
+ axon.SetNrnV(ctx, ni, di, axon.GeRaw, 0)
+ axon.SetNrnV(ctx, ni, di, axon.GeSyn, 0)
}
- kp.FromCa(ssc.Ca, &ssc.CaM, &ssc.CaP, &ssc.CaD)
-
- // SynNMDACont: NMDA driven synaptic updating
- snc.Ca = kp.NMDAG * sn.SnmdaO * rn.RCa
- kp.FromCa(snc.Ca, &snc.CaM, &snc.CaP, &snc.CaD)
-
- if tdw {
- pj.Params.Learn.KinaseTDWt(ssc)
- pj.Params.Learn.KinaseTDWt(snc)
- }
- pj.Params.Learn.CaDMax(ssc)
- pj.Params.Learn.CaDMax(snc)
-
- if ss.Context.Cycle == pmsec {
- axon.DecaySynCa(ssc, pj.Params.Learn.KinaseDWt.TrlDecay)
- axon.DecaySynCa(snc, pj.Params.Learn.KinaseDWt.TrlDecay)
+ axon.SetNrnV(ctx, ni, di, axon.GiRaw, ss.Config.Gi)
+ axon.SetNrnV(ctx, ni, di, axon.GiSyn, ac.Dt.GiSynFromRawSteady(axon.NrnV(ctx, ni, di, axon.GiRaw)))
+
+ if ss.Net.GPU.On {
+ ss.Net.GPU.SyncStateToGPU()
+ ss.Net.GPU.RunPipelineWait("Cycle", 2)
+ ss.Net.GPU.SyncStateFromGPU()
+ ctx.CycleInc() // why is this not working!?
+ } else {
+ lpl := ly.Pool(0, di)
+ ly.GInteg(ctx, ni, di, lpl, ly.LayerValues(0))
+ ly.SpikeFromG(ctx, ni, di, lpl)
}
- pj.Params.Learn.DWtFromTDWt(ssc, 1)
- pj.Params.Learn.DWtFromTDWt(snc, 1)
-}
-
-func (ss *Sim) InitWts() {
- nst := &ss.SynNeurTheta
- sst := &ss.SynSpkTheta
- ssc := &ss.SynSpkCont
- snc := &ss.SynNMDACont
- nst.DWt = 0
- sst.DWt = 0
- ssc.DWt = 0
- snc.DWt = 0
-}
-
-///////////////////////////////////////////////////////////////////
-// Logging
-
-func (ss *Sim) LogSyn(dt *table.Table, row int, pre string, sy *axon.Synapse) {
- dt.SetFloat(pre+"Ca", row, float64(sy.Ca))
- dt.SetFloat(pre+"CaM", row, float64(sy.CaM))
- dt.SetFloat(pre+"CaP", row, float64(sy.CaP))
- dt.SetFloat(pre+"CaD", row, float64(sy.CaD))
- dt.SetFloat(pre+"CaDMax", row, float64(sy.CaDMax))
- dt.SetFloat(pre+"TDWt", row, float64(sy.TDWt))
- dt.SetFloat(pre+"DWt", row, float64(sy.DWt))
- dt.SetFloat(pre+"Wt", row, float64(sy.Wt))
-}
-
-// LogState records data for given cycle
-func (ss *Sim) LogState(dt *table.Table, row, trl, cyc int) {
- sn := ss.SendNeur
- rn := ss.RecvNeur
- dt.SetFloat("Trial", row, float64(trl))
- dt.SetFloat("Cycle", row, float64(cyc))
- dt.SetFloat("SSpike", row, float64(ss.SpikeDisp*sn.Spike))
- dt.SetFloat("RSpike", row, float64(ss.SpikeDisp*rn.Spike))
+ sly := ss.Net.AxonLayerByName("Input")
+ sly.Params.Learn.CaFromSpike(ctx, 0, di)
- dt.SetFloat("SnmdaO", row, float64(sn.SnmdaO))
- dt.SetFloat("SnmdaI", row, float64(sn.SnmdaI))
+ updtThr := float32(0)
+ si := uint32(0)
+ ri := uint32(1)
+ syni := uint32(0)
+ pj := ly.RcvPaths[0]
- dt.SetFloat("Ge", row, float64(rn.Ge))
- dt.SetFloat("Inet", row, float64(rn.Inet))
- dt.SetFloat("Vm", row, float64(rn.Vm))
- dt.SetFloat("Act", row, float64(rn.Act))
- dt.SetFloat("Gk", row, float64(rn.Gk))
- dt.SetFloat("ISI", row, float64(rn.ISI))
- dt.SetFloat("VmDend", row, float64(rn.VmDend))
- dt.SetFloat("Gnmda", row, float64(rn.Gnmda))
- dt.SetFloat("RnmdaSyn", row, float64(rn.RnmdaSyn))
- dt.SetFloat("RCa", row, float64(rn.RCa))
- // dt.SetFloat("NMDAGmg", row, float64(nex.NMDAGmg))
- // dt.SetFloat("GABAB", row, float64(rn.GABAB))
- // dt.SetFloat("GgabaB", row, float64(rn.GgabaB))
- dt.SetFloat("Gvgcc", row, float64(rn.Gvgcc))
- dt.SetFloat("VgccM", row, float64(rn.VgccM))
- dt.SetFloat("VgccH", row, float64(rn.VgccH))
- dt.SetFloat("VgccCa", row, float64(rn.VgccCa))
- dt.SetFloat("Gak", row, float64(rn.Gak))
- // dt.SetFloat("LearnNow", row, float64(nex.LearnNow))
-
- nst := &ss.SynNeurTheta
- sst := &ss.SynSpkTheta
- ssc := &ss.SynSpkCont
- snc := &ss.SynNMDACont
-
- dt.SetFloat("R_CaM", row, float64(rn.CaM))
- dt.SetFloat("R_CaP", row, float64(rn.CaP))
- dt.SetFloat("R_CaD", row, float64(rn.CaD))
-
- dt.SetFloat("S_CaM", row, float64(sn.CaM))
- dt.SetFloat("S_CaP", row, float64(sn.CaP))
- dt.SetFloat("S_CaD", row, float64(sn.CaD))
-
- ss.LogSyn(dt, row, "NST_", nst)
- ss.LogSyn(dt, row, "SST_", sst)
- ss.LogSyn(dt, row, "SSC_", ssc)
- ss.LogSyn(dt, row, "SNC_", snc)
-}
+ snCaSyn := pj.Params.Learn.KinaseCa.SpikeG * axon.NrnV(ctx, ni, di, axon.CaSyn)
+ pj.Params.SynCaSyn(ctx, syni, ri, di, snCaSyn, updtThr)
-func (ss *Sim) ConfigTable(dt *table.Table) {
- dt.SetMetaData("name", "Kinase Equations Table")
- dt.SetMetaData("read-only", "true")
- dt.SetMetaData("precision", strconv.Itoa(LogPrec))
-
- dt.AddStringColumn(Cond")
- dt.AddFloat64Column("ErrDWt")
- dt.AddFloat64Column("Trial")
- dt.AddFloat64Column("Cycle")
- dt.AddFloat64Column("SSpike")
- dt.AddFloat64Column("RSpike")
-
- dt.AddFloat64Column("SnmdaO")
- dt.AddFloat64Column("SnmdaI")
-
- dt.AddFloat64Column("Ge")
- dt.AddFloat64Column("Inet")
- dt.AddFloat64Column("Vm")
- dt.AddFloat64Column("Act")
- dt.AddFloat64Column("Gk")
- dt.AddFloat64Column("ISI")
- dt.AddFloat64Column("VmDend")
- dt.AddFloat64Column("Gnmda")
- dt.AddFloat64Column("RnmdaSyn")
- dt.AddFloat64Column("RCa")
- // {"NMDAGmg")
- // {"GABAB")
- // {"GgabaB")
- dt.AddFloat64Column("Gvgcc")
- dt.AddFloat64Column("VgccM")
- dt.AddFloat64Column("VgccH")
- dt.AddFloat64Column("VgccCa")
- dt.AddFloat64Column("Gak")
- // {"LearnNow")
- dt.AddFloat64Column("R_CaM")
- dt.AddFloat64Column("R_CaP")
- dt.AddFloat64Column("R_CaD")
- dt.AddFloat64Column("S_CaM")
- dt.AddFloat64Column("S_CaP")
- dt.AddFloat64Column("S_CaD")
-
- ss.ConfigSynapse(dt, "NST_")
- ss.ConfigSynapse(dt, "SST_")
- ss.ConfigSynapse(dt, "SSC_")
- ss.ConfigSynapse(dt, "SNC_")
-
- dt.SetNumRows(0)
-}
-
-func (ss *Sim) ConfigSynapse(dt *table.Table, pre string) {
- dt.AddFloat64Column(pre + "Ca")
- dt.AddFloat64Column(pre + "CaM")
- dt.AddFloat64Column(pre + "CaP")
- dt.AddFloat64Column(pre + "CaD")
- dt.AddFloat64Column(pre + "CaDMax")
- dt.AddFloat64Column(pre + "TDWt")
- dt.AddFloat64Column(pre + "DWt")
- dt.AddFloat64Column(pre + "Wt")
-}
-
-func (ss *Sim) ConfigTrialPlot(plt *plotview.PlotView, dt *table.Table) *plotview.PlotView {
- plt.Params.Title = "Kinase Equations Trial Plot"
- plt.Params.XAxisColumn = "Cycle"
- plt.SetTable(dt)
-
- for _, cn := range dt.ColumnNames {
- if cn == "Cycle" {
- continue
- }
- switch {
- case strings.Contains(cn, "DWt"):
- plt.SetColParams(cn, plotview.Off, plotview.FloatMin, 0, plotview.FloatMax, 0)
- case cn == "SSC_CaP" || cn == "SSC_CaD":
- plt.SetColParams(cn, plotview.On, plotview.FloatMin, 0, plotview.FloatMax, 0)
- default:
- plt.SetColParams(cn, plotview.Off, plotview.FixMin, 0, plotview.FloatMax, 0)
- }
+ rnCaSyn := pj.Params.Learn.KinaseCa.SpikeG * axon.NrnV(ctx, ri, di, axon.CaSyn)
+ if axon.NrnV(ctx, si, di, axon.Spike) <= 0 { // NOT already handled in send version
+ pj.Params.SynCaSyn(ctx, syni, si, di, rnCaSyn, updtThr)
}
- // plt.SetColParams("SynCSpkCaM", plotview.On, plotview.FloatMin, 0, plotview.FloatMax, 0)
- // plt.SetColParams("SynOSpkCaM", plotview.On, plotview.FloatMin, 0, plotview.FloatMax, 0)
-
- plt.SetColParams("SSpike", plotview.On, plotview.FloatMin, 0, plotview.FloatMax, 0)
- plt.SetColParams("RSpike", plotview.On, plotview.FloatMin, 0, plotview.FloatMax, 0)
-
- return plt
}
-func (ss *Sim) ConfigRunPlot(plt *plotview.PlotView, dt *table.Table) *plotview.PlotView {
- plt.Params.Title = "Kinase Equations Run Plot"
- plt.Params.XAxisColumn = "Trial"
- // plt.Params.LegendCol = "Cond"
- plt.SetTable(dt)
- plt.Params.Points = true
- plt.Params.Lines = false
-
- for _, cn := range dt.ColumnNames {
- switch {
- case strings.Contains(cn, "DWt"):
- plt.SetColParams(cn, plotview.On, plotview.FloatMin, 0, plotview.FloatMax, 0)
- default:
- plt.SetColParams(cn, plotview.Off, plotview.FixMin, 0, plotview.FloatMax, 0)
- }
- }
-
- return plt
+// Stop tells the sim to stop running
+func (ss *Sim) Stop() {
+ ss.GUI.StopNow = true
}
-func (ss *Sim) ConfigDWtPlot(plt *plotview.PlotView, dt *table.Table) *plotview.PlotView {
- plt.Params.Title = "Kinase Equations DWt Plot"
- plt.Params.XAxisColumn = "ErrDWt"
- plt.Params.LegendCol = "Cond"
- plt.Params.Scale = 3
- plt.SetTable(dt)
- plt.Params.Points = true
- plt.Params.Lines = false
-
- for _, cn := range dt.ColumnNames {
- switch {
- case cn == "ErrDWt":
- plt.SetColParams(cn, plotview.Off, plotview.FixMin, -1, plotview.FixMax, 1.5)
- case cn == "SSC_DWt":
- plt.SetColParams(cn, plotview.On, plotview.FloatMin, 0, plotview.FloatMax, 0)
- case strings.Contains(cn, "_DWt"):
- plt.SetColParams(cn, plotview.On, plotview.FloatMin, 0, plotview.FloatMax, 0)
- // case strings.HasPrefix(cn, "X_"):
- // plt.SetColParams(cn, plotview.On, plotview.FloatMin, 0, plotview.FloatMax, 0)
- default:
- plt.SetColParams(cn, plotview.Off, plotview.FloatMin, 0, plotview.FloatMax, 0)
- }
- }
+/////////////////////////////////////////////////////////////////////////
+// Params setting
- return plt
+// SetParams sets the params for "Base" and then current ParamSet.
+// If sheet is empty, then it applies all available sheets (e.g., Network, Sim);
+// otherwise just the named sheet.
+// If setMsg = true, then a message is output for each param that was set.
+func (ss *Sim) SetParams(sheet string, setMsg bool) {
+ ss.Params.SetAll()
}
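NOTE: the synapse-level Ca integration invoked here (SynCaSyn) and in
kinaseq.go (CaParams.FromCa) cascades the joint CaSyn product through three
exponential integrators, per the KinaseState comments: CaM (calmodulin, MTau)
-> CaP (CaMKII-like, LTP direction, PTau) -> CaD (DAPK1-like, LTD direction,
DTau), with DWt = CaP - CaD recorded at the end of the plus phase and compared
against ErrDWt = (PlusHz - MinusHz) / 100. A minimal sketch of the cascade,
assuming simple Euler integration (the actual kinase package may differ in
details):

    // fromCa advances the CaM -> CaP -> CaD cascade one cycle, given the
    // synaptic calcium drive ca = SpikeG * send.CaSyn * recv.CaSyn.
    // mDt, pDt, dDt are 1/MTau, 1/PTau, 1/DTau respectively.
    func fromCa(ca float32, caM, caP, caD *float32, mDt, pDt, dDt float32) {
        *caM += mDt * (ca - *caM)   // fast first-stage average (calmodulin)
        *caP += pDt * (*caM - *caP) // LTP direction (CaMKII-like)
        *caD += dDt * (*caP - *caD) // slower LTD direction (DAPK1-like)
    }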
diff --git a/examples/kinaseq/sim.go b/examples/kinaseq/sim.go
new file mode 100644
index 000000000..6bf7eceb6
--- /dev/null
+++ b/examples/kinaseq/sim.go
@@ -0,0 +1,373 @@
+// Copyright (c) 2019, The Emergent Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+/*
+kinaseq: Explores calcium-based synaptic learning rules,
+specifically at the synaptic level.
+*/
+package main
+
+//go:generate core generate -add-types
+
+import (
+ "os"
+ "reflect"
+
+ "cogentcore.org/core/base/mpi"
+ "cogentcore.org/core/core"
+ "cogentcore.org/core/icons"
+ "cogentcore.org/core/math32/minmax"
+ "cogentcore.org/core/tensor/table"
+ "github.com/emer/axon/v2/axon"
+ "github.com/emer/axon/v2/kinase"
+ "github.com/emer/emergent/v2/ecmd"
+ "github.com/emer/emergent/v2/econfig"
+ "github.com/emer/emergent/v2/egui"
+ "github.com/emer/emergent/v2/elog"
+ "github.com/emer/emergent/v2/emer"
+ "github.com/emer/emergent/v2/estats"
+ "github.com/emer/emergent/v2/etime"
+ "github.com/emer/emergent/v2/netview"
+)
+
+func main() {
+ sim := &Sim{}
+ sim.New()
+ sim.ConfigAll()
+ if sim.Config.GUI {
+ sim.RunGUI()
+ } else {
+ sim.RunNoGUI()
+ }
+}
+
+// see config.go for Config
+
+// Sim encapsulates the entire simulation model, and we define all the
+// functionality as methods on this struct. This structure keeps all relevant
+// state information organized and available without having to pass everything around
+// as arguments to methods, and provides the core GUI interface (note the view tags
+// for the fields which provide hints to how things should be displayed).
+type Sim struct {
+
+ // simulation configuration parameters -- set by .toml config file and / or args
+ Config Config
+
+ // Kinase SynCa params
+ CaParams kinase.CaParams
+
+ // Kinase state
+ Kinase KinaseState
+
+ // the network -- click to view / edit parameters for layers, paths, etc
+ Net *axon.Network `view:"no-inline"`
+
+ // extra neuron state for additional channels: VGCC, AK
+ NeuronEx NeuronEx `view:"no-inline"`
+
+ // axon timing parameters and state
+ Context axon.Context
+
+ // contains computed statistic values
+ Stats estats.Stats
+
+ // logging
+ Logs elog.Logs `view:"no-inline"`
+
+ // all parameter management
+ Params emer.NetParams `view:"inline"`
+
+ // current cycle of updating
+ Cycle int `edit:"-"`
+
+ // netview update parameters
+ ViewUpdate netview.ViewUpdate `view:"inline"`
+
+ // manages all the gui elements
+ GUI egui.GUI `view:"-"`
+
+ // map of values for detailed debugging / testing
+ ValMap map[string]float32 `view:"-"`
+}
+
+// New creates new blank elements and initializes defaults
+func (ss *Sim) New() {
+ ss.Net = &axon.Network{}
+ econfig.Config(&ss.Config, "config.toml")
+ ss.Config.Params.Update()
+ ss.Params.Config(ParamSets, ss.Config.Params.Sheet, ss.Config.Params.Tag, ss.Net)
+ ss.CaParams.Defaults()
+ ss.Stats.Init()
+ ss.ValMap = make(map[string]float32)
+}
+
+func (ss *Sim) Defaults() {
+ ss.Params.Config(ParamSets, ss.Config.Params.Sheet, ss.Config.Params.Tag, ss.Net)
+}
+
+////////////////////////////////////////////////////////////////////////////////////////////
+// Configs
+
+// ConfigAll configures all the elements using the standard functions
+func (ss *Sim) ConfigAll() {
+ ss.ConfigNet(ss.Net)
+ ss.ConfigLogs()
+ if ss.Config.Params.SaveAll {
+ ss.Config.Params.SaveAll = false
+ ss.Net.SaveParamsSnapshot(&ss.Params.Params, &ss.Config, ss.Config.Params.Good)
+ os.Exit(0)
+ }
+}
+
+func (ss *Sim) ConfigLogs() {
+ if ss.Config.Run.Neuron {
+ ss.ConfigNeuronLogItems()
+ } else {
+ ss.ConfigKinaseLogItems()
+ }
+ ss.Logs.CreateTables()
+
+ if ss.Config.Run.Neuron {
+ ss.Logs.PlotItems("Vm", "Spike")
+ } else {
+ ss.Logs.PlotItems("SendSpike", "RecvSpike")
+ }
+
+ ss.Logs.SetContext(&ss.Stats, ss.Net)
+ ss.Logs.ResetLog(etime.Test, etime.Cycle)
+}
+
+func (ss *Sim) ConfigNeuronLogItems() {
+ ly := ss.Net.AxonLayerByName("Neuron")
+ // nex := &ss.NeuronEx
+ lg := &ss.Logs
+
+ lg.AddItem(&elog.Item{
+ Name: "Cycle",
+ Type: reflect.Int,
+ FixMax: false,
+ Range: minmax.F32{Max: 1},
+ Write: elog.WriteMap{
+ etime.Scope(etime.Test, etime.Cycle): func(ctx *elog.Context) {
+ ctx.SetInt(int(ss.Context.Cycle))
+ }}})
+
+ vars := []string{"GeSyn", "Ge", "Gi", "Inet", "Vm", "Act", "Spike", "Gk", "ISI", "ISIAvg", "VmDend", "GnmdaSyn", "Gnmda", "GABAB", "GgabaB", "Gvgcc", "VgccM", "VgccH", "Gak", "MahpN", "GknaMed", "GknaSlow", "GiSyn", "CaSyn"}
+
+ for _, vnm := range vars {
+ vnm := vnm // capture range variable for the closure below (needed before Go 1.22)
+ lg.AddItem(&elog.Item{
+ Name: vnm,
+ Type: reflect.Float64,
+ FixMax: false,
+ Range: minmax.F32{Max: 1},
+ Write: elog.WriteMap{
+ etime.Scope(etime.Test, etime.Cycle): func(ctx *elog.Context) {
+ vl := ly.UnitValue(vnm, []int{0, 0}, 0)
+ ctx.SetFloat32(vl)
+ }}})
+ }
+
+ pj := ly.RcvPaths[0]
+ pvars := []string{"CaM", "CaP", "CaD", "CaUpT"}
+ for _, vnm := range pvars {
+ vnm := vnm // capture range variable for the closure below (needed before Go 1.22)
+ lg.AddItem(&elog.Item{
+ Name: "Syn." + vnm,
+ Type: reflect.Float64,
+ FixMax: false,
+ Range: minmax.F32{Max: 1},
+ Write: elog.WriteMap{
+ etime.Scope(etime.Test, etime.Cycle): func(ctx *elog.Context) {
+ vl := pj.SynValue(vnm, 0, 0)
+ ctx.SetFloat32(vl)
+ }}})
+ }
+}
+
+func (ss *Sim) ResetTstCycPlot() {
+ ss.Logs.ResetLog(etime.Test, etime.Cycle)
+ ss.GUI.UpdatePlot(etime.Test, etime.Cycle)
+}
+
+////////////////////////////////////////////////////////////////////////////////////////////
+// Gui
+
+func (ss *Sim) ConfigNetView(nv *netview.NetView) {
+ nv.ViewDefaults()
+}
+
+// ConfigGUI configures the Cogent Core GUI interface for this simulation.
+func (ss *Sim) ConfigGUI() {
+ title := "Kinase Eq"
+ ss.GUI.MakeBody(ss, "kinaseq", title, `kinaseq: Explores calcium-based synaptic learning rules, specifically at the synaptic level. See README.md on GitHub.`)
+ ss.GUI.CycleUpdateInterval = 10
+
+ nv := ss.GUI.AddNetView("NetView")
+ nv.Var = "Act"
+ nv.SetNet(ss.Net)
+ ss.ConfigNetView(nv) // add labels etc
+ ss.ViewUpdate.Config(nv, etime.AlphaCycle, etime.AlphaCycle)
+ ss.GUI.ViewUpdate = &ss.ViewUpdate
+
+ ss.GUI.AddPlots(title, &ss.Logs)
+ // key := etime.Scope(etime.Test, etime.Cycle)
+ // plt := ss.GUI.NewPlot(key, ss.GUI.Tabs.NewTab("TstCycPlot"))
+ // plt.SetTable(ss.Logs.Table(etime.Test, etime.Cycle))
+ // egui.ConfigPlotFromLog("Neuron", plt, &ss.Logs, key)
+ // ss.TstCycPlot = plt
+
+ ss.GUI.Body.AddAppBar(func(tb *core.Toolbar) {
+ ss.GUI.AddToolbarItem(tb, egui.ToolbarItem{Label: "Init", Icon: icons.Update,
+ Tooltip: "Initialize everything including network weights, and start over. Also applies current params.",
+ Active: egui.ActiveStopped,
+ Func: func() {
+ ss.Init()
+ ss.GUI.UpdateWindow()
+ },
+ })
+ ss.GUI.AddToolbarItem(tb, egui.ToolbarItem{Label: "Stop", Icon: icons.Stop,
+ Tooltip: "Stops running.",
+ Active: egui.ActiveRunning,
+ Func: func() {
+ ss.Stop()
+ ss.GUI.UpdateWindow()
+ },
+ })
+ ss.GUI.AddToolbarItem(tb, egui.ToolbarItem{Label: "Sweep", Icon: icons.PlayArrow,
+ Tooltip: "Runs Kinase sweep over set of minus / plus spiking levels.",
+ Active: egui.ActiveStopped,
+ Func: func() {
+ if !ss.GUI.IsRunning {
+ go func() {
+ ss.GUI.IsRunning = true
+ ss.Sweep()
+ ss.GUI.IsRunning = false
+ ss.GUI.UpdateWindow()
+ }()
+ }
+ },
+ })
+ ss.GUI.AddToolbarItem(tb, egui.ToolbarItem{Label: "Run", Icon: icons.PlayArrow,
+ Tooltip: "Runs NTrials of Kinase updating.",
+ Active: egui.ActiveStopped,
+ Func: func() {
+ if !ss.GUI.IsRunning {
+ go func() {
+ ss.GUI.IsRunning = true
+ ss.Run()
+ ss.GUI.IsRunning = false
+ ss.GUI.UpdateWindow()
+ }()
+ }
+ },
+ })
+ ss.GUI.AddToolbarItem(tb, egui.ToolbarItem{Label: "Trial", Icon: icons.PlayArrow,
+ Tooltip: "Runs one Trial of Kinase updating.",
+ Active: egui.ActiveStopped,
+ Func: func() {
+ if !ss.GUI.IsRunning {
+ go func() {
+ ss.GUI.IsRunning = true
+ ss.Trial()
+ ss.GUI.IsRunning = false
+ ss.GUI.UpdateWindow()
+ }()
+ }
+ },
+ })
+ ss.GUI.AddToolbarItem(tb, egui.ToolbarItem{Label: "Run Neuron", Icon: icons.PlayArrow,
+ Tooltip: "Runs neuron updating over NCycles.",
+ Active: egui.ActiveStopped,
+ Func: func() {
+ if !ss.GUI.IsRunning {
+ go func() {
+ ss.GUI.IsRunning = true
+ ss.RunCycles()
+ ss.GUI.IsRunning = false
+ ss.GUI.UpdateWindow()
+ }()
+ }
+ },
+ })
+ core.NewSeparator(tb)
+ ss.GUI.AddToolbarItem(tb, egui.ToolbarItem{Label: "Reset Plot", Icon: icons.Update,
+ Tooltip: "Reset TstCycPlot.",
+ Active: egui.ActiveStopped,
+ Func: func() {
+ ss.ResetTstCycPlot()
+ ss.GUI.UpdateWindow()
+ },
+ })
+
+ ss.GUI.AddToolbarItem(tb, egui.ToolbarItem{Label: "Defaults", Icon: icons.Update,
+ Tooltip: "Restore initial default parameters.",
+ Active: egui.ActiveStopped,
+ Func: func() {
+ ss.Defaults()
+ ss.Init()
+ ss.GUI.UpdateWindow()
+ },
+ })
+ ss.GUI.AddToolbarItem(tb, egui.ToolbarItem{Label: "README",
+ Icon: "file-markdown",
+ Tooltip: "Opens your browser on the README file that contains instructions for how to run this model.",
+ Active: egui.ActiveAlways,
+ Func: func() {
+ core.TheApp.OpenURL("https://github.com/emer/axon/blob/master/examples/kinaseq/README.md")
+ },
+ })
+ })
+ ss.GUI.FinalizeGUI(false)
+
+ if ss.Config.Run.GPU {
+ ss.Net.ConfigGPUwithGUI(&ss.Context)
+ core.TheApp.AddQuitCleanFunc(func() {
+ ss.Net.GPU.Destroy()
+ })
+ }
+}
+
+func (ss *Sim) RunGUI() {
+ ss.Init()
+ ss.ConfigGUI()
+ ss.GUI.Body.RunMainWindow()
+}
+
+func (ss *Sim) RunNoGUI() {
+ if ss.Config.Params.Note != "" {
+ mpi.Printf("Note: %s\n", ss.Config.Params.Note)
+ }
+ if ss.Config.Log.SaveWts {
+ mpi.Printf("Saving final weights per run\n")
+ }
+ runName := ss.Params.RunName(ss.Config.Run.Run)
+ ss.Stats.SetString("RunName", runName) // used for naming logs, stats, etc
+ netName := ss.Net.Name()
+
+ // netdata := ss.Config.Log.NetData
+ // if netdata {
+ // mpi.Printf("Saving NetView data from testing\n")
+ // ss.GUI.InitNetData(ss.Net, 200)
+ // }
+
+ ss.Init()
+
+ if ss.Config.Run.GPU {
+ ss.Net.ConfigGPUnoGUI(&ss.Context)
+ }
+ mpi.Printf("Set NThreads to: %d\n", ss.Net.NThreads)
+
+ ss.RunCycles()
+
+ if ss.Config.Log.Cycle {
+ dt := ss.Logs.Table(etime.Test, etime.Cycle)
+ fnm := ecmd.LogFilename("cyc", netName, runName)
+ dt.SaveCSV(core.Filename(fnm), table.Tab, table.Headers)
+ }
+
+ // if netdata {
+ // ss.GUI.SaveNetData(ss.Stats.String("RunName"))
+ // }
+
+ ss.Net.GPU.Destroy() // safe even if no GPU
+}
diff --git a/examples/neuron/neuron.go b/examples/neuron/neuron.go
index 437b68bfd..4962467a1 100644
--- a/examples/neuron/neuron.go
+++ b/examples/neuron/neuron.go
@@ -420,10 +420,12 @@ func (ss *Sim) ConfigGUI() {
Active: egui.ActiveStopped,
Func: func() {
if !ss.GUI.IsRunning {
- ss.GUI.IsRunning = true
- ss.RunCycles()
- ss.GUI.IsRunning = false
- ss.GUI.UpdateWindow()
+ go func() {
+ ss.GUI.IsRunning = true
+ ss.RunCycles()
+ ss.GUI.IsRunning = false
+ ss.GUI.UpdateWindow()
+ }()
}
},
})
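NOTE: this last hunk brings examples/neuron in line with the kinaseq toolbar
actions above: the long-running RunCycles is moved off the GUI event loop into
a goroutine, guarded by the IsRunning flag, so the window stays responsive and
repeated clicks are ignored while a run is in progress. The shared pattern,
sketched as a helper (assumes egui's IsRunning flag and UpdateWindow method, as
used above; not a new API):

    // runAsync launches fn in the background, mirroring the toolbar actions.
    func runAsync(gui *egui.GUI, fn func()) {
        if gui.IsRunning {
            return // a run is already in progress
        }
        go func() {
            gui.IsRunning = true
            fn() // long-running work, off the GUI event loop
            gui.IsRunning = false
            gui.UpdateWindow() // refresh once the work completes
        }()
    }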