diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml
index ca0e35bc1..823d338ec 100644
--- a/.github/workflows/build.yml
+++ b/.github/workflows/build.yml
@@ -22,7 +22,7 @@ jobs:
sudo apt-get install libgl1-mesa-dev xorg-dev mesa-vulkan-drivers
- uses: actions/setup-go@v3
with:
- go-version: "1.19"
+ go-version: "1.21"
- run: go build ./...
- run: go test -tags=multinet -race ./...
@@ -38,7 +38,7 @@ jobs:
sudo apt-get install libgl1-mesa-dev xorg-dev mesa-vulkan-drivers
- uses: actions/setup-go@v3
with:
- go-version: "1.19"
+ go-version: "1.21"
- name: Generate coverage report
run: go test -tags=multinet ./... -coverprofile=coverage.txt -covermode=atomic -timeout=20m
- name: Upload coverage report
diff --git a/.goki/config.toml b/.goki/config.toml
new file mode 100644
index 000000000..61d20cfc3
--- /dev/null
+++ b/.goki/config.toml
@@ -0,0 +1,76 @@
+Name = "axon"
+Desc = ""
+Version = "v2.0.0-dev0.0.2"
+Type = "Library"
+
+[Build]
+ Package = "."
+ Output = ""
+ ID = "org.emer.axon"
+ Debug = false
+ Rebuild = false
+ Install = false
+ PrintOnly = false
+ Print = false
+ Trimpath = false
+ Work = false
+ IOSVersion = "13.0"
+ AndroidMinSDK = 23
+ AndroidTargetSDK = 29
+
+[Web]
+ Port = "8080"
+ RandomVersion = false
+ Gzip = false
+ BackgroundColor = "#2d2c2c"
+ ThemeColor = "#2d2c2c"
+ LoadingLabel = ""
+ Lang = "en"
+ Title = ""
+ Description = ""
+ Author = ""
+ Image = ""
+ AutoUpdateInterval = "10s"
+ WasmContentLengthHeader = ""
+ ServiceWorkerTemplate = ""
+
+[Setup]
+ [Setup.Platform]
+ OS = ""
+ Arch = ""
+
+[Log]
+ Target = "android"
+ Keep = false
+ All = "F"
+
+[Release]
+ VersionFile = "axon/version.go"
+ Package = "axon"
+
+[Generate]
+ Dir = "."
+ Output = "gokigen.go"
+ [Generate.Enumgen]
+ Dir = "."
+ Output = "enumgen.go"
+ Transform = ""
+ TrimPrefix = ""
+ AddPrefix = ""
+ LineComment = true
+ AcceptLower = true
+ Text = true
+ JSON = false
+ YAML = false
+ SQL = false
+ GQL = false
+ Extend = true
+ [Generate.Gtigen]
+ Dir = "."
+ Output = "gtigen.go"
+ AddTypes = false
+ AddMethods = false
+ AddFuncs = false
+ Instance = false
+ TypeVar = false
+ Setters = false
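The goki tool presumably unmarshals this file into its own config structs in the usual Go TOML fashion. A minimal, hedged sketch of reading the [Release] table (struct and field names here are hypothetical; the real loader lives in goki itself):

    package main

    import (
        "fmt"

        "github.com/BurntSushi/toml"
    )

    // releaseCfg mirrors the [Release] table; names are illustrative only.
    type releaseCfg struct {
        VersionFile string
        Package     string
    }

    type gokiCfg struct {
        Name    string
        Version string
        Release releaseCfg
    }

    func main() {
        var cfg gokiCfg
        if _, err := toml.DecodeFile(".goki/config.toml", &cfg); err != nil {
            panic(err)
        }
        fmt.Println(cfg.Name, cfg.Version, cfg.Release.VersionFile)
    }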
diff --git a/axon/act.go b/axon/act.go
index 803441deb..7844cdfb0 100644
--- a/axon/act.go
+++ b/axon/act.go
@@ -6,10 +6,10 @@ package axon
import (
"github.com/emer/axon/chans"
- "github.com/emer/emergent/erand"
- "github.com/emer/etable/minmax"
- "github.com/goki/gosl/slbool"
- "github.com/goki/mat32"
+ "github.com/emer/emergent/v2/erand"
+ "goki.dev/etable/v2/minmax"
+ "goki.dev/gosl/v2/slbool"
+ "goki.dev/mat32/v2"
)
///////////////////////////////////////////////////////////////////////
@@ -31,38 +31,38 @@ import (
// the AdEx adaptive exponential function (adapt is KNaAdapt)
type SpikeParams struct {
- // [def: 0.5] threshold value Theta (Q) for firing output activation (.5 is more accurate value based on AdEx biological parameters and normalization
- Thr float32 `def:"0.5" desc:"threshold value Theta (Q) for firing output activation (.5 is more accurate value based on AdEx biological parameters and normalization"`
+	// threshold value Theta (Q) for firing output activation (.5 is a more accurate value based on AdEx biological parameters and normalization)
+ Thr float32 `def:"0.5"`
- // [def: 0.3] post-spiking membrane potential to reset to, produces refractory effect if lower than VmInit -- 0.3 is apropriate biologically-based value for AdEx (Brette & Gurstner, 2005) parameters. See also RTau
- VmR float32 `def:"0.3" desc:"post-spiking membrane potential to reset to, produces refractory effect if lower than VmInit -- 0.3 is apropriate biologically-based value for AdEx (Brette & Gurstner, 2005) parameters. See also RTau"`
+	// post-spiking membrane potential to reset to, produces refractory effect if lower than VmInit -- 0.3 is an appropriate biologically-based value for AdEx (Brette & Gerstner, 2005) parameters. See also RTau
+ VmR float32 `def:"0.3"`
- // [def: 3] [min: 1] post-spiking explicit refractory period, in cycles -- prevents Vm updating for this number of cycles post firing -- Vm is reduced in exponential steps over this period according to RTau, being fixed at Tr to VmR exactly
- Tr int32 `min:"1" def:"3" desc:"post-spiking explicit refractory period, in cycles -- prevents Vm updating for this number of cycles post firing -- Vm is reduced in exponential steps over this period according to RTau, being fixed at Tr to VmR exactly"`
+ // post-spiking explicit refractory period, in cycles -- prevents Vm updating for this number of cycles post firing -- Vm is reduced in exponential steps over this period according to RTau, being fixed at Tr to VmR exactly
+ Tr int32 `min:"1" def:"3"`
- // [def: 1.6667] time constant for decaying Vm down to VmR -- at end of Tr it is set to VmR exactly -- this provides a more realistic shape of the post-spiking Vm which is only relevant for more realistic channels that key off of Vm -- does not otherwise affect standard computation
- RTau float32 `def:"1.6667" desc:"time constant for decaying Vm down to VmR -- at end of Tr it is set to VmR exactly -- this provides a more realistic shape of the post-spiking Vm which is only relevant for more realistic channels that key off of Vm -- does not otherwise affect standard computation"`
+ // time constant for decaying Vm down to VmR -- at end of Tr it is set to VmR exactly -- this provides a more realistic shape of the post-spiking Vm which is only relevant for more realistic channels that key off of Vm -- does not otherwise affect standard computation
+ RTau float32 `def:"1.6667"`
- // [def: true] if true, turn on exponential excitatory current that drives Vm rapidly upward for spiking as it gets past its nominal firing threshold (Thr) -- nicely captures the Hodgkin Huxley dynamics of Na and K channels -- uses Brette & Gurstner 2005 AdEx formulation
- Exp slbool.Bool `def:"true" desc:"if true, turn on exponential excitatory current that drives Vm rapidly upward for spiking as it gets past its nominal firing threshold (Thr) -- nicely captures the Hodgkin Huxley dynamics of Na and K channels -- uses Brette & Gurstner 2005 AdEx formulation"`
+	// if true, turn on exponential excitatory current that drives Vm rapidly upward for spiking as it gets past its nominal firing threshold (Thr) -- nicely captures the Hodgkin-Huxley dynamics of Na and K channels -- uses Brette & Gerstner 2005 AdEx formulation
+ Exp slbool.Bool `def:"true"`
- // [def: 0.02] [viewif: Exp] slope in Vm (2 mV = .02 in normalized units) for extra exponential excitatory current that drives Vm rapidly upward for spiking as it gets past its nominal firing threshold (Thr) -- nicely captures the Hodgkin Huxley dynamics of Na and K channels -- uses Brette & Gurstner 2005 AdEx formulation
- ExpSlope float32 `viewif:"Exp" def:"0.02" desc:"slope in Vm (2 mV = .02 in normalized units) for extra exponential excitatory current that drives Vm rapidly upward for spiking as it gets past its nominal firing threshold (Thr) -- nicely captures the Hodgkin Huxley dynamics of Na and K channels -- uses Brette & Gurstner 2005 AdEx formulation"`
+	// slope in Vm (2 mV = .02 in normalized units) for extra exponential excitatory current that drives Vm rapidly upward for spiking as it gets past its nominal firing threshold (Thr) -- nicely captures the Hodgkin-Huxley dynamics of Na and K channels -- uses Brette & Gerstner 2005 AdEx formulation
+ ExpSlope float32 `viewif:"Exp" def:"0.02"`
- // [def: 0.9] [viewif: Exp] membrane potential threshold for actually triggering a spike when using the exponential mechanism
- ExpThr float32 `viewif:"Exp" def:"0.9" desc:"membrane potential threshold for actually triggering a spike when using the exponential mechanism"`
+ // membrane potential threshold for actually triggering a spike when using the exponential mechanism
+ ExpThr float32 `viewif:"Exp" def:"0.9"`
- // [def: 180] [min: 1] for translating spiking interval (rate) into rate-code activation equivalent, what is the maximum firing rate associated with a maximum activation value of 1
- MaxHz float32 `def:"180" min:"1" desc:"for translating spiking interval (rate) into rate-code activation equivalent, what is the maximum firing rate associated with a maximum activation value of 1"`
+ // for translating spiking interval (rate) into rate-code activation equivalent, what is the maximum firing rate associated with a maximum activation value of 1
+ MaxHz float32 `def:"180" min:"1"`
- // [def: 5] [min: 1] constant for integrating the spiking interval in estimating spiking rate
- ISITau float32 `def:"5" min:"1" desc:"constant for integrating the spiking interval in estimating spiking rate"`
+	// time constant for integrating the spiking interval in estimating spiking rate
+ ISITau float32 `def:"5" min:"1"`
- // [view: -] rate = 1 / tau
- ISIDt float32 `view:"-" desc:"rate = 1 / tau"`
+ // rate = 1 / tau
+ ISIDt float32 `view:"-"`
- // [view: -] rate = 1 / tau
- RDt float32 `view:"-" desc:"rate = 1 / tau"`
+ // rate = 1 / tau
+ RDt float32 `view:"-"`
pad int32
}
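The ISITau/ISIDt/MaxHz fields translate spiking intervals into a rate-code activation: the inter-spike interval is integrated exponentially at rate ISIDt = 1/ISITau, and the resulting interval maps through MaxHz onto a 0-1 activation. A hedged sketch of that idea (the actual AvgFmISI method, named in the next hunk header, differs in details):

    // isiToAct integrates a new inter-spike interval (isi, msec) into a
    // running average, then converts it to a normalized rate-code act.
    func isiToAct(avgISI, isi, isiDt, maxHz float32) (newAvg, act float32) {
        newAvg = avgISI + isiDt*(isi-avgISI) // exponential integration
        hz := float32(1000) / newAvg         // msec interval -> Hz
        act = hz / maxHz                     // 1 at MaxHz
        if act > 1 {
            act = 1
        }
        return
    }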
@@ -124,23 +124,23 @@ func (sk *SpikeParams) AvgFmISI(avg float32, isi float32) float32 {
// DendParams are the parameters for updating dendrite-specific dynamics
type DendParams struct {
- // [def: 0.2,0.5] dendrite-specific strength multiplier of the exponential spiking drive on Vm -- e.g., .5 makes it half as strong as at the soma (which uses Gbar.L as a strength multiplier per the AdEx standard model)
- GbarExp float32 `def:"0.2,0.5" desc:"dendrite-specific strength multiplier of the exponential spiking drive on Vm -- e.g., .5 makes it half as strong as at the soma (which uses Gbar.L as a strength multiplier per the AdEx standard model)"`
+ // dendrite-specific strength multiplier of the exponential spiking drive on Vm -- e.g., .5 makes it half as strong as at the soma (which uses Gbar.L as a strength multiplier per the AdEx standard model)
+ GbarExp float32 `def:"0.2,0.5"`
- // [def: 3,6] dendrite-specific conductance of Kdr delayed rectifier currents, used to reset membrane potential for dendrite -- applied for Tr msec
- GbarR float32 `def:"3,6" desc:"dendrite-specific conductance of Kdr delayed rectifier currents, used to reset membrane potential for dendrite -- applied for Tr msec"`
+ // dendrite-specific conductance of Kdr delayed rectifier currents, used to reset membrane potential for dendrite -- applied for Tr msec
+ GbarR float32 `def:"3,6"`
- // [def: 0,2] SST+ somatostatin positive slow spiking inhibition level specifically affecting dendritic Vm (VmDend) -- this is important for countering a positive feedback loop from NMDA getting stronger over the course of learning -- also typically requires SubMean = 1 for TrgAvgAct and learning to fully counter this feedback loop.
- SSGi float32 `def:"0,2" desc:"SST+ somatostatin positive slow spiking inhibition level specifically affecting dendritic Vm (VmDend) -- this is important for countering a positive feedback loop from NMDA getting stronger over the course of learning -- also typically requires SubMean = 1 for TrgAvgAct and learning to fully counter this feedback loop."`
+ // SST+ somatostatin positive slow spiking inhibition level specifically affecting dendritic Vm (VmDend) -- this is important for countering a positive feedback loop from NMDA getting stronger over the course of learning -- also typically requires SubMean = 1 for TrgAvgAct and learning to fully counter this feedback loop.
+ SSGi float32 `def:"0,2"`
// set automatically based on whether this layer has any recv projections that have a GType conductance type of Modulatory -- if so, then multiply GeSyn etc by GModSyn
- HasMod slbool.Bool `inactive:"+" desc:"set automatically based on whether this layer has any recv projections that have a GType conductance type of Modulatory -- if so, then multiply GeSyn etc by GModSyn"`
+ HasMod slbool.Bool `inactive:"+"`
// multiplicative gain factor on the total modulatory input -- this can also be controlled by the PrjnScale.Abs factor on ModulatoryG inputs, but it is convenient to be able to control on the layer as well.
- ModGain float32 `desc:"multiplicative gain factor on the total modulatory input -- this can also be controlled by the PrjnScale.Abs factor on ModulatoryG inputs, but it is convenient to be able to control on the layer as well."`
+ ModGain float32
// baseline modulatory level for modulatory effects -- net modulation is ModBase + ModGain * GModSyn
- ModBase float32 `desc:"baseline modulatory level for modulatory effects -- net modulation is ModBase + ModGain * GModSyn"`
+ ModBase float32
pad, pad1 int32
}
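The modulation rule stated in the comments -- net modulation = ModBase + ModGain * GModSyn, multiplied into GeSyn when HasMod is set -- amounts to the following sketch (assumed shape, not the verbatim axon code):

    // modGe applies the layer-level modulatory gain to an excitatory
    // conductance: net modulation = ModBase + ModGain*GModSyn.
    func modGe(geSyn, gModSyn, modBase, modGain float32, hasMod bool) float32 {
        if !hasMod {
            return geSyn
        }
        return geSyn * (modBase + modGain*gModSyn)
    }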
@@ -163,23 +163,23 @@ func (dp *DendParams) Update() {
// Initialized in InitActs called by InitWts, and provides target values for DecayState.
type ActInitParams struct {
- // [def: 0.3] initial membrane potential -- see Erev.L for the resting potential (typically .3)
- Vm float32 `def:"0.3" desc:"initial membrane potential -- see Erev.L for the resting potential (typically .3)"`
+ // initial membrane potential -- see Erev.L for the resting potential (typically .3)
+ Vm float32 `def:"0.3"`
- // [def: 0] initial activation value -- typically 0
- Act float32 `def:"0" desc:"initial activation value -- typically 0"`
+ // initial activation value -- typically 0
+ Act float32 `def:"0"`
- // [def: 0] baseline level of excitatory conductance (net input) -- Ge is initialized to this value, and it is added in as a constant background level of excitatory input -- captures all the other inputs not represented in the model, and intrinsic excitability, etc
- GeBase float32 `def:"0" desc:"baseline level of excitatory conductance (net input) -- Ge is initialized to this value, and it is added in as a constant background level of excitatory input -- captures all the other inputs not represented in the model, and intrinsic excitability, etc"`
+ // baseline level of excitatory conductance (net input) -- Ge is initialized to this value, and it is added in as a constant background level of excitatory input -- captures all the other inputs not represented in the model, and intrinsic excitability, etc
+ GeBase float32 `def:"0"`
- // [def: 0] baseline level of inhibitory conductance (net input) -- Gi is initialized to this value, and it is added in as a constant background level of inhibitory input -- captures all the other inputs not represented in the model
- GiBase float32 `def:"0" desc:"baseline level of inhibitory conductance (net input) -- Gi is initialized to this value, and it is added in as a constant background level of inhibitory input -- captures all the other inputs not represented in the model"`
+ // baseline level of inhibitory conductance (net input) -- Gi is initialized to this value, and it is added in as a constant background level of inhibitory input -- captures all the other inputs not represented in the model
+ GiBase float32 `def:"0"`
- // [def: 0] variance (sigma) of gaussian distribution around baseline Ge values, per unit, to establish variability in intrinsic excitability. value never goes < 0
- GeVar float32 `def:"0" desc:"variance (sigma) of gaussian distribution around baseline Ge values, per unit, to establish variability in intrinsic excitability. value never goes < 0"`
+ // variance (sigma) of gaussian distribution around baseline Ge values, per unit, to establish variability in intrinsic excitability. value never goes < 0
+ GeVar float32 `def:"0"`
- // [def: 0] variance (sigma) of gaussian distribution around baseline Gi values, per unit, to establish variability in intrinsic excitability. value never goes < 0
- GiVar float32 `def:"0" desc:"variance (sigma) of gaussian distribution around baseline Gi values, per unit, to establish variability in intrinsic excitability. value never goes < 0"`
+ // variance (sigma) of gaussian distribution around baseline Gi values, per unit, to establish variability in intrinsic excitability. value never goes < 0
+ GiVar float32 `def:"0"`
pad, pad1 int32
}
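GeVar/GiVar describe gaussian jitter around the base conductances, clipped at zero. A self-contained sketch using math/rand in place of the erand.Rand interface that GetGeBase/GetGiBase actually take:

    package axonsketch

    import "math/rand"

    // sampleBase draws a per-neuron baseline: gaussian around base with
    // sigma, never below 0 (per the GeVar/GiVar comments above).
    func sampleBase(base, sigma float32, rnd *rand.Rand) float32 {
        if sigma <= 0 {
            return base
        }
        v := base + sigma*float32(rnd.NormFloat64())
        if v < 0 {
            v = 0
        }
        return v
    }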
@@ -231,20 +231,20 @@ func (ai *ActInitParams) GetGiBase(rnd erand.Rand) float32 {
// called in NewState when a new state is to be processed.
type DecayParams struct {
- // [def: 0,0.2,0.5,1] [min: 0] [max: 1] proportion to decay most activation state variables toward initial values at start of every ThetaCycle (except those controlled separately below) -- if 1 it is effectively equivalent to full clear, resetting other derived values. ISI is reset every AlphaCycle to get a fresh sample of activations (doesn't affect direct computation -- only readout).
- Act float32 `def:"0,0.2,0.5,1" max:"1" min:"0" desc:"proportion to decay most activation state variables toward initial values at start of every ThetaCycle (except those controlled separately below) -- if 1 it is effectively equivalent to full clear, resetting other derived values. ISI is reset every AlphaCycle to get a fresh sample of activations (doesn't affect direct computation -- only readout)."`
+ // proportion to decay most activation state variables toward initial values at start of every ThetaCycle (except those controlled separately below) -- if 1 it is effectively equivalent to full clear, resetting other derived values. ISI is reset every AlphaCycle to get a fresh sample of activations (doesn't affect direct computation -- only readout).
+ Act float32 `def:"0,0.2,0.5,1" max:"1" min:"0"`
- // [def: 0,0.6] [min: 0] [max: 1] proportion to decay long-lasting conductances, NMDA and GABA, and also the dendritic membrane potential -- when using random stimulus order, it is important to decay this significantly to allow a fresh start -- but set Act to 0 to enable ongoing activity to keep neurons in their sensitive regime.
- Glong float32 `def:"0,0.6" max:"1" min:"0" desc:"proportion to decay long-lasting conductances, NMDA and GABA, and also the dendritic membrane potential -- when using random stimulus order, it is important to decay this significantly to allow a fresh start -- but set Act to 0 to enable ongoing activity to keep neurons in their sensitive regime."`
+ // proportion to decay long-lasting conductances, NMDA and GABA, and also the dendritic membrane potential -- when using random stimulus order, it is important to decay this significantly to allow a fresh start -- but set Act to 0 to enable ongoing activity to keep neurons in their sensitive regime.
+ Glong float32 `def:"0,0.6" max:"1" min:"0"`
- // [def: 0] [min: 0] [max: 1] decay of afterhyperpolarization currents, including mAHP, sAHP, and KNa -- has a separate decay because often useful to have this not decay at all even if decay is on.
- AHP float32 `def:"0" max:"1" min:"0" desc:"decay of afterhyperpolarization currents, including mAHP, sAHP, and KNa -- has a separate decay because often useful to have this not decay at all even if decay is on."`
+ // decay of afterhyperpolarization currents, including mAHP, sAHP, and KNa -- has a separate decay because often useful to have this not decay at all even if decay is on.
+ AHP float32 `def:"0" max:"1" min:"0"`
- // [def: 0] [min: 0] [max: 1] decay of Ca variables driven by spiking activity used in learning: CaSpk* and Ca* variables. These are typically not decayed but may need to be in some situations.
- LearnCa float32 `def:"0" max:"1" min:"0" desc:"decay of Ca variables driven by spiking activity used in learning: CaSpk* and Ca* variables. These are typically not decayed but may need to be in some situations."`
+ // decay of Ca variables driven by spiking activity used in learning: CaSpk* and Ca* variables. These are typically not decayed but may need to be in some situations.
+ LearnCa float32 `def:"0" max:"1" min:"0"`
// decay layer at end of ThetaCycle when there is a global reward -- true by default for PTPred, PTMaint and PFC Super layers
- OnRew slbool.Bool `desc:"decay layer at end of ThetaCycle when there is a global reward -- true by default for PTPred, PTMaint and PFC Super layers"`
+ OnRew slbool.Bool
pad, pad1, pad2 float32
}
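All of these decay factors share one proportional-decay pattern: move a state variable part of the way back toward its initial value, so 1 is a full reset and 0 leaves ongoing activity intact. As a one-line sketch:

    // decayToward decays v proportionally toward its init value.
    func decayToward(v, init, decay float32) float32 {
        return v - decay*(v-init)
    }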
@@ -265,53 +265,53 @@ func (dp *DecayParams) Defaults() {
// DtParams are time and rate constants for temporal derivatives in Axon (Vm, G)
type DtParams struct {
- // [def: 1,0.5] [min: 0] overall rate constant for numerical integration, for all equations at the unit level -- all time constants are specified in millisecond units, with one cycle = 1 msec -- if you instead want to make one cycle = 2 msec, you can do this globally by setting this integ value to 2 (etc). However, stability issues will likely arise if you go too high. For improved numerical stability, you may even need to reduce this value to 0.5 or possibly even lower (typically however this is not necessary). MUST also coordinate this with network.time_inc variable to ensure that global network.time reflects simulated time accurately
- Integ float32 `def:"1,0.5" min:"0" desc:"overall rate constant for numerical integration, for all equations at the unit level -- all time constants are specified in millisecond units, with one cycle = 1 msec -- if you instead want to make one cycle = 2 msec, you can do this globally by setting this integ value to 2 (etc). However, stability issues will likely arise if you go too high. For improved numerical stability, you may even need to reduce this value to 0.5 or possibly even lower (typically however this is not necessary). MUST also coordinate this with network.time_inc variable to ensure that global network.time reflects simulated time accurately"`
+ // overall rate constant for numerical integration, for all equations at the unit level -- all time constants are specified in millisecond units, with one cycle = 1 msec -- if you instead want to make one cycle = 2 msec, you can do this globally by setting this integ value to 2 (etc). However, stability issues will likely arise if you go too high. For improved numerical stability, you may even need to reduce this value to 0.5 or possibly even lower (typically however this is not necessary). MUST also coordinate this with network.time_inc variable to ensure that global network.time reflects simulated time accurately
+ Integ float32 `def:"1,0.5" min:"0"`
- // [def: 2.81] [min: 1] membrane potential time constant in cycles, which should be milliseconds typically (tau is roughly how long it takes for value to change significantly -- 1.4x the half-life) -- reflects the capacitance of the neuron in principle -- biological default for AdEx spiking model C = 281 pF = 2.81 normalized
- VmTau float32 `def:"2.81" min:"1" desc:"membrane potential time constant in cycles, which should be milliseconds typically (tau is roughly how long it takes for value to change significantly -- 1.4x the half-life) -- reflects the capacitance of the neuron in principle -- biological default for AdEx spiking model C = 281 pF = 2.81 normalized"`
+ // membrane potential time constant in cycles, which should be milliseconds typically (tau is roughly how long it takes for value to change significantly -- 1.4x the half-life) -- reflects the capacitance of the neuron in principle -- biological default for AdEx spiking model C = 281 pF = 2.81 normalized
+ VmTau float32 `def:"2.81" min:"1"`
- // [def: 5] [min: 1] dendritic membrane potential time constant in cycles, which should be milliseconds typically (tau is roughly how long it takes for value to change significantly -- 1.4x the half-life) -- reflects the capacitance of the neuron in principle -- biological default for AdEx spiking model C = 281 pF = 2.81 normalized
- VmDendTau float32 `def:"5" min:"1" desc:"dendritic membrane potential time constant in cycles, which should be milliseconds typically (tau is roughly how long it takes for value to change significantly -- 1.4x the half-life) -- reflects the capacitance of the neuron in principle -- biological default for AdEx spiking model C = 281 pF = 2.81 normalized"`
+ // dendritic membrane potential time constant in cycles, which should be milliseconds typically (tau is roughly how long it takes for value to change significantly -- 1.4x the half-life) -- reflects the capacitance of the neuron in principle -- biological default for AdEx spiking model C = 281 pF = 2.81 normalized
+ VmDendTau float32 `def:"5" min:"1"`
- // [def: 2] [min: 1] number of integration steps to take in computing new Vm value -- this is the one computation that can be most numerically unstable so taking multiple steps with proportionally smaller dt is beneficial
- VmSteps int32 `def:"2" min:"1" desc:"number of integration steps to take in computing new Vm value -- this is the one computation that can be most numerically unstable so taking multiple steps with proportionally smaller dt is beneficial"`
+ // number of integration steps to take in computing new Vm value -- this is the one computation that can be most numerically unstable so taking multiple steps with proportionally smaller dt is beneficial
+ VmSteps int32 `def:"2" min:"1"`
- // [def: 5] [min: 1] time constant for decay of excitatory AMPA receptor conductance.
- GeTau float32 `def:"5" min:"1" desc:"time constant for decay of excitatory AMPA receptor conductance."`
+ // time constant for decay of excitatory AMPA receptor conductance.
+ GeTau float32 `def:"5" min:"1"`
- // [def: 7] [min: 1] time constant for decay of inhibitory GABAa receptor conductance.
- GiTau float32 `def:"7" min:"1" desc:"time constant for decay of inhibitory GABAa receptor conductance."`
+ // time constant for decay of inhibitory GABAa receptor conductance.
+ GiTau float32 `def:"7" min:"1"`
- // [def: 40] [min: 1] time constant for integrating values over timescale of an individual input state (e.g., roughly 200 msec -- theta cycle), used in computing ActInt, GeInt from Ge, and GiInt from GiSyn -- this is used for scoring performance, not for learning, in cycles, which should be milliseconds typically (tau is roughly how long it takes for value to change significantly -- 1.4x the half-life),
- IntTau float32 `def:"40" min:"1" desc:"time constant for integrating values over timescale of an individual input state (e.g., roughly 200 msec -- theta cycle), used in computing ActInt, GeInt from Ge, and GiInt from GiSyn -- this is used for scoring performance, not for learning, in cycles, which should be milliseconds typically (tau is roughly how long it takes for value to change significantly -- 1.4x the half-life), "`
+	// time constant for integrating values over timescale of an individual input state (e.g., roughly 200 msec -- theta cycle), used in computing ActInt, GeInt from Ge, and GiInt from GiSyn -- this is used for scoring performance, not for learning, in cycles, which should be milliseconds typically (tau is roughly how long it takes for value to change significantly -- 1.4x the half-life)
+ IntTau float32 `def:"40" min:"1"`
- // [def: 20] [min: 1] time constant for integrating slower long-time-scale averages, such as nrn.ActAvg, Pool.ActsMAvg, ActsPAvg -- computed in NewState when a new input state is present (i.e., not msec but in units of a theta cycle) (tau is roughly how long it takes for value to change significantly) -- set lower for smaller models
- LongAvgTau float32 `def:"20" min:"1" desc:"time constant for integrating slower long-time-scale averages, such as nrn.ActAvg, Pool.ActsMAvg, ActsPAvg -- computed in NewState when a new input state is present (i.e., not msec but in units of a theta cycle) (tau is roughly how long it takes for value to change significantly) -- set lower for smaller models"`
+ // time constant for integrating slower long-time-scale averages, such as nrn.ActAvg, Pool.ActsMAvg, ActsPAvg -- computed in NewState when a new input state is present (i.e., not msec but in units of a theta cycle) (tau is roughly how long it takes for value to change significantly) -- set lower for smaller models
+ LongAvgTau float32 `def:"20" min:"1"`
- // [def: 10] [min: 0] cycle to start updating the SpkMaxCa, SpkMax values within a theta cycle -- early cycles often reflect prior state
- MaxCycStart int32 `def:"10" min:"0" desc:"cycle to start updating the SpkMaxCa, SpkMax values within a theta cycle -- early cycles often reflect prior state"`
+ // cycle to start updating the SpkMaxCa, SpkMax values within a theta cycle -- early cycles often reflect prior state
+ MaxCycStart int32 `def:"10" min:"0"`
- // [view: -] nominal rate = Integ / tau
- VmDt float32 `view:"-" json:"-" xml:"-" desc:"nominal rate = Integ / tau"`
+ // nominal rate = Integ / tau
+ VmDt float32 `view:"-" json:"-" xml:"-"`
- // [view: -] nominal rate = Integ / tau
- VmDendDt float32 `view:"-" json:"-" xml:"-" desc:"nominal rate = Integ / tau"`
+ // nominal rate = Integ / tau
+ VmDendDt float32 `view:"-" json:"-" xml:"-"`
- // [view: -] 1 / VmSteps
- DtStep float32 `view:"-" json:"-" xml:"-" desc:"1 / VmSteps"`
+ // 1 / VmSteps
+ DtStep float32 `view:"-" json:"-" xml:"-"`
- // [view: -] rate = Integ / tau
- GeDt float32 `view:"-" json:"-" xml:"-" desc:"rate = Integ / tau"`
+ // rate = Integ / tau
+ GeDt float32 `view:"-" json:"-" xml:"-"`
- // [view: -] rate = Integ / tau
- GiDt float32 `view:"-" json:"-" xml:"-" desc:"rate = Integ / tau"`
+ // rate = Integ / tau
+ GiDt float32 `view:"-" json:"-" xml:"-"`
- // [view: -] rate = Integ / tau
- IntDt float32 `view:"-" json:"-" xml:"-" desc:"rate = Integ / tau"`
+ // rate = Integ / tau
+ IntDt float32 `view:"-" json:"-" xml:"-"`
- // [view: -] rate = 1 / tau
- LongAvgDt float32 `view:"-" json:"-" xml:"-" desc:"rate = 1 / tau"`
+ // rate = 1 / tau
+ LongAvgDt float32 `view:"-" json:"-" xml:"-"`
}
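Per the field comments, the hidden Dt values are derived as rate = Integ / tau (or 1 / tau for LongAvgDt, and 1 / VmSteps for DtStep); VmDendDt follows the same pattern with VmDendTau. A sketch of those relations, which the Update method just below computes on the real struct:

    // dtRates derives integration rate constants from time constants.
    func dtRates(integ, vmTau, geTau, giTau, intTau, longAvgTau float32, vmSteps int32) (vmDt, geDt, giDt, intDt, longAvgDt, dtStep float32) {
        vmDt = integ / vmTau
        geDt = integ / geTau
        giDt = integ / giTau
        intDt = integ / intTau
        longAvgDt = 1 / longAvgTau // rate = 1 / tau
        dtStep = 1 / float32(vmSteps)
        return
    }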
func (dp *DtParams) Update() {
@@ -393,25 +393,25 @@ func (dp *DtParams) AvgVarUpdt(avg, vr *float32, val float32) {
type SpikeNoiseParams struct {
// add noise simulating background spiking levels
- On slbool.Bool `desc:"add noise simulating background spiking levels"`
+ On slbool.Bool
- // [def: 100] [viewif: On] mean frequency of excitatory spikes -- typically 50Hz but multiple inputs increase rate -- poisson lambda parameter, also the variance
- GeHz float32 `viewif:"On" def:"100" desc:"mean frequency of excitatory spikes -- typically 50Hz but multiple inputs increase rate -- poisson lambda parameter, also the variance"`
+ // mean frequency of excitatory spikes -- typically 50Hz but multiple inputs increase rate -- poisson lambda parameter, also the variance
+ GeHz float32 `viewif:"On" def:"100"`
- // [viewif: On] [min: 0] excitatory conductance per spike -- .001 has minimal impact, .01 can be strong, and .15 is needed to influence timing of clamped inputs
- Ge float32 `viewif:"On" min:"0" desc:"excitatory conductance per spike -- .001 has minimal impact, .01 can be strong, and .15 is needed to influence timing of clamped inputs"`
+ // excitatory conductance per spike -- .001 has minimal impact, .01 can be strong, and .15 is needed to influence timing of clamped inputs
+ Ge float32 `viewif:"On" min:"0"`
- // [def: 200] [viewif: On] mean frequency of inhibitory spikes -- typically 100Hz fast spiking but multiple inputs increase rate -- poisson lambda parameter, also the variance
- GiHz float32 `viewif:"On" def:"200" desc:"mean frequency of inhibitory spikes -- typically 100Hz fast spiking but multiple inputs increase rate -- poisson lambda parameter, also the variance"`
+ // mean frequency of inhibitory spikes -- typically 100Hz fast spiking but multiple inputs increase rate -- poisson lambda parameter, also the variance
+ GiHz float32 `viewif:"On" def:"200"`
- // [viewif: On] [min: 0] excitatory conductance per spike -- .001 has minimal impact, .01 can be strong, and .15 is needed to influence timing of clamped inputs
- Gi float32 `viewif:"On" min:"0" desc:"excitatory conductance per spike -- .001 has minimal impact, .01 can be strong, and .15 is needed to influence timing of clamped inputs"`
+	// inhibitory conductance per spike -- .001 has minimal impact, .01 can be strong, and .15 is needed to influence timing of clamped inputs
+ Gi float32 `viewif:"On" min:"0"`
- // [view: -] Exp(-Interval) which is the threshold for GeNoiseP as it is updated
- GeExpInt float32 `view:"-" json:"-" xml:"-" desc:"Exp(-Interval) which is the threshold for GeNoiseP as it is updated"`
+ // Exp(-Interval) which is the threshold for GeNoiseP as it is updated
+ GeExpInt float32 `view:"-" json:"-" xml:"-"`
- // [view: -] Exp(-Interval) which is the threshold for GiNoiseP as it is updated
- GiExpInt float32 `view:"-" json:"-" xml:"-" desc:"Exp(-Interval) which is the threshold for GiNoiseP as it is updated"`
+ // Exp(-Interval) which is the threshold for GiNoiseP as it is updated
+ GiExpInt float32 `view:"-" json:"-" xml:"-"`
pad int32
}
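GeExpInt/GiExpInt support Poisson-distributed background spikes: with lambda = Hz/1000 events per 1-msec cycle, exp(-lambda) is the no-event probability, and multiplying a running uniform product each cycle until it crosses that threshold yields exponential inter-spike intervals. A hedged sketch of that scheme (the real code keeps GeNoiseP per neuron and uses the GPU-compatible slrand RNG):

    package axonsketch

    import "math/rand"

    // noiseSpike updates the running product p; returns true when a
    // background noise spike should occur this cycle.
    func noiseSpike(p *float32, expInt float32, rnd *rand.Rand) bool {
        *p *= float32(rnd.Float64())
        if *p <= expInt {
            *p = 1 // reset for the next interval
            return true
        }
        return false
    }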
@@ -460,19 +460,19 @@ func (an *SpikeNoiseParams) PGi(ctx *Context, p *float32, ni uint32) float32 {
type ClampParams struct {
// is this a clamped input layer? set automatically based on layer type at initialization
- IsInput slbool.Bool `inactive:"+" desc:"is this a clamped input layer? set automatically based on layer type at initialization"`
+ IsInput slbool.Bool `inactive:"+"`
// is this a target layer? set automatically based on layer type at initialization
- IsTarget slbool.Bool `inactive:"+" desc:"is this a target layer? set automatically based on layer type at initialization"`
+ IsTarget slbool.Bool `inactive:"+"`
- // [def: 0.8,1.5] amount of Ge driven for clamping -- generally use 0.8 for Target layers, 1.5 for Input layers
- Ge float32 `def:"0.8,1.5" desc:"amount of Ge driven for clamping -- generally use 0.8 for Target layers, 1.5 for Input layers"`
+ // amount of Ge driven for clamping -- generally use 0.8 for Target layers, 1.5 for Input layers
+ Ge float32 `def:"0.8,1.5"`
- // [def: false] [view: add external conductance on top of any existing -- generally this is not a good idea for target layers (creates a main effect that learning can never match), but may be ok for input layers]
+	// add external conductance on top of any existing -- generally this is not a good idea for target layers (creates a main effect that learning can never match), but may be ok for input layers
Add slbool.Bool `def:"false" view:"add external conductance on top of any existing -- generally this is not a good idea for target layers (creates a main effect that learning can never match), but may be ok for input layers"`
- // [def: 0.5] threshold on neuron Act activity to count as active for computing error relative to target in PctErr method
- ErrThr float32 `def:"0.5" desc:"threshold on neuron Act activity to count as active for computing error relative to target in PctErr method"`
+ // threshold on neuron Act activity to count as active for computing error relative to target in PctErr method
+ ErrThr float32 `def:"0.5"`
pad, pad1, pad2 float32
}
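The Ge and Add fields describe how external input drives conductance on clamped layers: scale the external value by Ge, and either replace or (if Add) stack on top of the existing synaptic drive. A minimal sketch of that logic (assumed shape, not the actual clamping code):

    // clampGe drives conductance from an external input ext in 0-1.
    func clampGe(geSyn, ext, ge float32, add bool) float32 {
        if add {
            return geSyn + ge*ext // generally discouraged for target layers
        }
        return ge * ext
    }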
@@ -492,13 +492,13 @@ func (cp *ClampParams) Defaults() {
type AttnParams struct {
// is attentional modulation active?
- On slbool.Bool `desc:"is attentional modulation active?"`
+ On slbool.Bool
- // [viewif: On] minimum act multiplier if attention is 0
- Min float32 `viewif:"On" desc:"minimum act multiplier if attention is 0"`
+ // minimum act multiplier if attention is 0
+ Min float32 `viewif:"On"`
// threshold on CaSpkP for determining the reaction time for the Layer -- starts after MaxCycStart to ensure that prior trial activity has had a chance to dissipate.
- RTThr float32 `desc:"threshold on CaSpkP for determining the reaction time for the Layer -- starts after MaxCycStart to ensure that prior trial activity has had a chance to dissipate."`
+ RTThr float32
pad int32
}
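Min bounds the attentional multiplier from below, so attn = 0 still passes Min of the signal through. A plausible sketch of the ModVal mapping named in the next hunk header (not necessarily the exact implementation):

    // attnMod scales val by attention attn in 0-1, bottoming out at min.
    func attnMod(val, attn, min float32) float32 {
        return val * (min + (1-min)*attn)
    }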
@@ -535,28 +535,28 @@ func (at *AttnParams) ModVal(val float32, attn float32) float32 {
type PopCodeParams struct {
// use popcode encoding of variable(s) that this layer represents
- On slbool.Bool `desc:"use popcode encoding of variable(s) that this layer represents"`
+ On slbool.Bool
- // [def: 0.1] [viewif: On] Ge multiplier for driving excitatory conductance based on PopCode -- multiplies normalized activation values
- Ge float32 `viewif:"On" def:"0.1" desc:"Ge multiplier for driving excitatory conductance based on PopCode -- multiplies normalized activation values"`
+ // Ge multiplier for driving excitatory conductance based on PopCode -- multiplies normalized activation values
+ Ge float32 `viewif:"On" def:"0.1"`
- // [def: -0.1] [viewif: On] minimum value representable -- for GaussBump, typically include extra to allow mean with activity on either side to represent the lowest value you want to encode
- Min float32 `viewif:"On" def:"-0.1" desc:"minimum value representable -- for GaussBump, typically include extra to allow mean with activity on either side to represent the lowest value you want to encode"`
+ // minimum value representable -- for GaussBump, typically include extra to allow mean with activity on either side to represent the lowest value you want to encode
+ Min float32 `viewif:"On" def:"-0.1"`
- // [def: 1.1] [viewif: On] maximum value representable -- for GaussBump, typically include extra to allow mean with activity on either side to represent the lowest value you want to encode
- Max float32 `viewif:"On" def:"1.1" desc:"maximum value representable -- for GaussBump, typically include extra to allow mean with activity on either side to represent the lowest value you want to encode"`
+	// maximum value representable -- for GaussBump, typically include extra to allow mean with activity on either side to represent the highest value you want to encode
+ Max float32 `viewif:"On" def:"1.1"`
- // [def: 1,0.5] [viewif: On] activation multiplier for values at Min end of range, where values at Max end have an activation of 1 -- if this is < 1, then there is a rate code proportional to the value in addition to the popcode pattern -- see also MinSigma, MaxSigma
- MinAct float32 `viewif:"On" def:"1,0.5" desc:"activation multiplier for values at Min end of range, where values at Max end have an activation of 1 -- if this is < 1, then there is a rate code proportional to the value in addition to the popcode pattern -- see also MinSigma, MaxSigma"`
+ // activation multiplier for values at Min end of range, where values at Max end have an activation of 1 -- if this is < 1, then there is a rate code proportional to the value in addition to the popcode pattern -- see also MinSigma, MaxSigma
+ MinAct float32 `viewif:"On" def:"1,0.5"`
- // [def: 0.1,0.08] [viewif: On] sigma parameter of a gaussian specifying the tuning width of the coarse-coded units, in normalized 0-1 range -- for Min value -- if MinSigma < MaxSigma then more units are activated for Max values vs. Min values, proportionally
- MinSigma float32 `viewif:"On" def:"0.1,0.08" desc:"sigma parameter of a gaussian specifying the tuning width of the coarse-coded units, in normalized 0-1 range -- for Min value -- if MinSigma < MaxSigma then more units are activated for Max values vs. Min values, proportionally"`
+ // sigma parameter of a gaussian specifying the tuning width of the coarse-coded units, in normalized 0-1 range -- for Min value -- if MinSigma < MaxSigma then more units are activated for Max values vs. Min values, proportionally
+ MinSigma float32 `viewif:"On" def:"0.1,0.08"`
- // [def: 0.1,0.12] [viewif: On] sigma parameter of a gaussian specifying the tuning width of the coarse-coded units, in normalized 0-1 range -- for Min value -- if MinSigma < MaxSigma then more units are activated for Max values vs. Min values, proportionally
- MaxSigma float32 `viewif:"On" def:"0.1,0.12" desc:"sigma parameter of a gaussian specifying the tuning width of the coarse-coded units, in normalized 0-1 range -- for Min value -- if MinSigma < MaxSigma then more units are activated for Max values vs. Min values, proportionally"`
+	// sigma parameter of a gaussian specifying the tuning width of the coarse-coded units, in normalized 0-1 range -- for Max value -- if MinSigma < MaxSigma then more units are activated for Max values vs. Min values, proportionally
+ MaxSigma float32 `viewif:"On" def:"0.1,0.12"`
- // [viewif: On] ensure that encoded and decoded value remains within specified range
- Clip slbool.Bool `viewif:"On" desc:"ensure that encoded and decoded value remains within specified range"`
+ // ensure that encoded and decoded value remains within specified range
+ Clip slbool.Bool `viewif:"On"`
}
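For GaussBump encoding, the n units tile the [Min, Max] range and each unit's activation is a gaussian of the encoded value around that unit's preferred value, with tuning width sigma (interpolated between MinSigma and MaxSigma in the real code). A hedged sketch of a single unit's activation, simpler than the actual EncodeGe named in the next hunk header:

    package axonsketch

    import "math"

    // encodeUnit returns unit i's activation for encoded value val.
    func encodeUnit(i, n uint32, val, lo, hi, sigma float32) float32 {
        rng := hi - lo
        pref := lo + rng*float32(i)/float32(n-1) // unit's preferred value
        d := (val - pref) / (sigma * rng)        // distance in sigma units
        return float32(math.Exp(float64(-0.5 * d * d)))
    }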
func (pc *PopCodeParams) Defaults() {
@@ -635,68 +635,68 @@ func (pc *PopCodeParams) EncodeGe(i, n uint32, val float32) float32 {
// This is included in axon.Layer to drive the computation.
type ActParams struct {
- // [view: inline] Spiking function parameters
- Spikes SpikeParams `view:"inline" desc:"Spiking function parameters"`
+ // Spiking function parameters
+ Spikes SpikeParams `view:"inline"`
- // [view: inline] dendrite-specific parameters
- Dend DendParams `view:"inline" desc:"dendrite-specific parameters"`
+ // dendrite-specific parameters
+ Dend DendParams `view:"inline"`
- // [view: inline] initial values for key network state variables -- initialized in InitActs called by InitWts, and provides target values for DecayState
- Init ActInitParams `view:"inline" desc:"initial values for key network state variables -- initialized in InitActs called by InitWts, and provides target values for DecayState"`
+ // initial values for key network state variables -- initialized in InitActs called by InitWts, and provides target values for DecayState
+ Init ActInitParams `view:"inline"`
- // [view: inline] amount to decay between AlphaCycles, simulating passage of time and effects of saccades etc, especially important for environments with random temporal structure (e.g., most standard neural net training corpora)
- Decay DecayParams `view:"inline" desc:"amount to decay between AlphaCycles, simulating passage of time and effects of saccades etc, especially important for environments with random temporal structure (e.g., most standard neural net training corpora) "`
+ // amount to decay between AlphaCycles, simulating passage of time and effects of saccades etc, especially important for environments with random temporal structure (e.g., most standard neural net training corpora)
+ Decay DecayParams `view:"inline"`
- // [view: inline] time and rate constants for temporal derivatives / updating of activation state
- Dt DtParams `view:"inline" desc:"time and rate constants for temporal derivatives / updating of activation state"`
+ // time and rate constants for temporal derivatives / updating of activation state
+ Dt DtParams `view:"inline"`
- // [view: inline] [Defaults: 1, .2, 1, 1] maximal conductances levels for channels
- Gbar chans.Chans `view:"inline" desc:"[Defaults: 1, .2, 1, 1] maximal conductances levels for channels"`
+	// maximal conductance levels for channels (defaults: 1, .2, 1, 1)
+ Gbar chans.Chans `view:"inline"`
- // [view: inline] [Defaults: 1, .3, .25, .1] reversal potentials for each channel
- Erev chans.Chans `view:"inline" desc:"[Defaults: 1, .3, .25, .1] reversal potentials for each channel"`
+	// reversal potentials for each channel (defaults: 1, .3, .25, .1)
+ Erev chans.Chans `view:"inline"`
- // [view: inline] how external inputs drive neural activations
- Clamp ClampParams `view:"inline" desc:"how external inputs drive neural activations"`
+ // how external inputs drive neural activations
+ Clamp ClampParams `view:"inline"`
- // [view: inline] how, where, when, and how much noise to add
- Noise SpikeNoiseParams `view:"inline" desc:"how, where, when, and how much noise to add"`
+ // how, where, when, and how much noise to add
+ Noise SpikeNoiseParams `view:"inline"`
- // [view: inline] range for Vm membrane potential -- [0.1, 1.0] -- important to keep just at extreme range of reversal potentials to prevent numerical instability
- VmRange minmax.F32 `view:"inline" desc:"range for Vm membrane potential -- [0.1, 1.0] -- important to keep just at extreme range of reversal potentials to prevent numerical instability"`
+	// range for Vm membrane potential -- [0.1, 1.0] -- important to keep just at extreme range of reversal potentials to prevent numerical instability
+ VmRange minmax.F32 `view:"inline"`
- // [view: inline] M-type medium time-scale afterhyperpolarization mAHP current -- this is the primary form of adaptation on the time scale of multiple sequences of spikes
- Mahp chans.MahpParams `view:"inline" desc:"M-type medium time-scale afterhyperpolarization mAHP current -- this is the primary form of adaptation on the time scale of multiple sequences of spikes"`
+ // M-type medium time-scale afterhyperpolarization mAHP current -- this is the primary form of adaptation on the time scale of multiple sequences of spikes
+ Mahp chans.MahpParams `view:"inline"`
- // [view: inline] slow time-scale afterhyperpolarization sAHP current -- integrates CaSpkD at theta cycle intervals and produces a hard cutoff on sustained activity for any neuron
- Sahp chans.SahpParams `view:"inline" desc:"slow time-scale afterhyperpolarization sAHP current -- integrates CaSpkD at theta cycle intervals and produces a hard cutoff on sustained activity for any neuron"`
+ // slow time-scale afterhyperpolarization sAHP current -- integrates CaSpkD at theta cycle intervals and produces a hard cutoff on sustained activity for any neuron
+ Sahp chans.SahpParams `view:"inline"`
- // [view: inline] sodium-gated potassium channel adaptation parameters -- activates a leak-like current as a function of neural activity (firing = Na influx) at two different time-scales (Slick = medium, Slack = slow)
- KNa chans.KNaMedSlow `view:"inline" desc:"sodium-gated potassium channel adaptation parameters -- activates a leak-like current as a function of neural activity (firing = Na influx) at two different time-scales (Slick = medium, Slack = slow)"`
+ // sodium-gated potassium channel adaptation parameters -- activates a leak-like current as a function of neural activity (firing = Na influx) at two different time-scales (Slick = medium, Slack = slow)
+ KNa chans.KNaMedSlow `view:"inline"`
- // [view: inline] NMDA channel parameters used in computing Gnmda conductance for bistability, and postsynaptic calcium flux used in learning. Note that Learn.Snmda has distinct parameters used in computing sending NMDA parameters used in learning.
- NMDA chans.NMDAParams `view:"inline" desc:"NMDA channel parameters used in computing Gnmda conductance for bistability, and postsynaptic calcium flux used in learning. Note that Learn.Snmda has distinct parameters used in computing sending NMDA parameters used in learning."`
+ // NMDA channel parameters used in computing Gnmda conductance for bistability, and postsynaptic calcium flux used in learning. Note that Learn.Snmda has distinct parameters used in computing sending NMDA parameters used in learning.
+ NMDA chans.NMDAParams `view:"inline"`
- // [view: inline] NMDA channel parameters used in computing Gnmda conductance for bistability, and postsynaptic calcium flux used in learning. Note that Learn.Snmda has distinct parameters used in computing sending NMDA parameters used in learning.
- MaintNMDA chans.NMDAParams `view:"inline" desc:"NMDA channel parameters used in computing Gnmda conductance for bistability, and postsynaptic calcium flux used in learning. Note that Learn.Snmda has distinct parameters used in computing sending NMDA parameters used in learning."`
+ // NMDA channel parameters used in computing Gnmda conductance for bistability, and postsynaptic calcium flux used in learning. Note that Learn.Snmda has distinct parameters used in computing sending NMDA parameters used in learning.
+ MaintNMDA chans.NMDAParams `view:"inline"`
- // [view: inline] GABA-B / GIRK channel parameters
- GabaB chans.GABABParams `view:"inline" desc:"GABA-B / GIRK channel parameters"`
+ // GABA-B / GIRK channel parameters
+ GabaB chans.GABABParams `view:"inline"`
- // [view: inline] voltage gated calcium channels -- provide a key additional source of Ca for learning and positive-feedback loop upstate for active neurons
- VGCC chans.VGCCParams `view:"inline" desc:"voltage gated calcium channels -- provide a key additional source of Ca for learning and positive-feedback loop upstate for active neurons"`
+ // voltage gated calcium channels -- provide a key additional source of Ca for learning and positive-feedback loop upstate for active neurons
+ VGCC chans.VGCCParams `view:"inline"`
- // [view: inline] A-type potassium (K) channel that is particularly important for limiting the runaway excitation from VGCC channels
- AK chans.AKsParams `view:"inline" desc:"A-type potassium (K) channel that is particularly important for limiting the runaway excitation from VGCC channels"`
+ // A-type potassium (K) channel that is particularly important for limiting the runaway excitation from VGCC channels
+ AK chans.AKsParams `view:"inline"`
- // [view: inline] small-conductance calcium-activated potassium channel produces the pausing function as a consequence of rapid bursting.
- SKCa chans.SKCaParams `view:"inline" desc:"small-conductance calcium-activated potassium channel produces the pausing function as a consequence of rapid bursting."`
+ // small-conductance calcium-activated potassium channel produces the pausing function as a consequence of rapid bursting.
+ SKCa chans.SKCaParams `view:"inline"`
- // [view: inline] Attentional modulation parameters: how Attn modulates Ge
- AttnMod AttnParams `view:"inline" desc:"Attentional modulation parameters: how Attn modulates Ge"`
+ // Attentional modulation parameters: how Attn modulates Ge
+ AttnMod AttnParams `view:"inline"`
- // [view: inline] provides encoding population codes, used to represent a single continuous (scalar) value, across a population of units / neurons (1 dimensional)
- PopCode PopCodeParams `view:"inline" desc:"provides encoding population codes, used to represent a single continuous (scalar) value, across a population of units / neurons (1 dimensional)"`
+ // provides encoding population codes, used to represent a single continuous (scalar) value, across a population of units / neurons (1 dimensional)
+ PopCode PopCodeParams `view:"inline"`
}
func (ac *ActParams) Defaults() {
diff --git a/axon/act_prjn.go b/axon/act_prjn.go
index 41fe2bb2b..236e1ebc5 100644
--- a/axon/act_prjn.go
+++ b/axon/act_prjn.go
@@ -7,25 +7,16 @@ package axon
import (
"log"
- "github.com/emer/emergent/erand"
- "github.com/goki/gosl/slbool"
- "github.com/goki/ki/ints"
- "github.com/goki/ki/kit"
- "github.com/goki/mat32"
+ "github.com/emer/emergent/v2/erand"
+ "goki.dev/gosl/v2/slbool"
+ "goki.dev/mat32/v2"
)
-//go:generate stringer -type=PrjnGTypes
-
-var KiT_PrjnGTypes = kit.Enums.AddEnum(PrjnGTypesN, kit.NotBitFlag, nil)
-
-func (ev PrjnGTypes) MarshalJSON() ([]byte, error) { return kit.EnumMarshalJSON(ev) }
-func (ev *PrjnGTypes) UnmarshalJSON(b []byte) error { return kit.EnumUnmarshalJSON(ev, b) }
-
//gosl: start act_prjn
// PrjnGTypes represents the conductance (G) effects of a given projection,
// including excitatory, inhibitory, and modulatory.
-type PrjnGTypes int32
+type PrjnGTypes int32 //enums:enum
// The projection conductance types
const (
@@ -49,8 +40,6 @@ const (
// Context projections are for inputs to CT layers, which update
// only at the end of the plus phase, and send to CtxtGe.
ContextG
-
- PrjnGTypesN
)
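The //enums:enum directive hands PrjnGTypes to goki's enumgen (configured under [Generate.Enumgen] in .goki/config.toml above), whose generated code replaces the deleted stringer and kit registrations -- including the PrjnGTypesN count, which is why the hand-written constant is removed from the block above. A hedged, in-package usage sketch (generated method names are assumptions based on the Enumgen config with Text = true):

    // examplePrjnGType shows assumed enumgen-generated methods.
    func examplePrjnGType() string {
        g := ContextG
        _ = g.SetString("ExcitatoryG") // const and method names assumed
        return g.String()              // e.g. "ExcitatoryG"
    }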
//////////////////////////////////////////////////////////////////////////////////////
@@ -63,22 +52,22 @@ const (
type SynComParams struct {
// type of conductance (G) communicated by this projection
- GType PrjnGTypes `desc:"type of conductance (G) communicated by this projection"`
+ GType PrjnGTypes
- // [def: 2] [min: 0] additional synaptic delay in msec for inputs arriving at this projection. Must be <= MaxDelay which is set during network building based on MaxDelay of any existing Prjn in the network. Delay = 0 means a spike reaches receivers in the next Cycle, which is the minimum time (1 msec). Biologically, subtract 1 from biological synaptic delay values to set corresponding Delay value.
- Delay uint32 `min:"0" def:"2" desc:"additional synaptic delay in msec for inputs arriving at this projection. Must be <= MaxDelay which is set during network building based on MaxDelay of any existing Prjn in the network. Delay = 0 means a spike reaches receivers in the next Cycle, which is the minimum time (1 msec). Biologically, subtract 1 from biological synaptic delay values to set corresponding Delay value."`
+ // additional synaptic delay in msec for inputs arriving at this projection. Must be <= MaxDelay which is set during network building based on MaxDelay of any existing Prjn in the network. Delay = 0 means a spike reaches receivers in the next Cycle, which is the minimum time (1 msec). Biologically, subtract 1 from biological synaptic delay values to set corresponding Delay value.
+ Delay uint32 `min:"0" def:"2"`
// maximum value of Delay -- based on MaxDelay values when the BuildGBuf function was called when the network was built -- cannot set it longer than this, except by calling BuildGBuf on network after changing MaxDelay to a larger value in any projection in the network.
- MaxDelay uint32 `inactive:"+" desc:"maximum value of Delay -- based on MaxDelay values when the BuildGBuf function was called when the network was built -- cannot set it longer than this, except by calling BuildGBuf on network after changing MaxDelay to a larger value in any projection in the network."`
+ MaxDelay uint32 `inactive:"+"`
// probability of synaptic transmission failure -- if > 0, then weights are turned off at random as a function of PFail (times 1-SWt if PFailSwt)
- PFail float32 `desc:"probability of synaptic transmission failure -- if > 0, then weights are turned off at random as a function of PFail (times 1-SWt if PFailSwt)"`
+ PFail float32
// if true, then probability of failure is inversely proportional to SWt structural / slow weight value (i.e., multiply PFail * (1-SWt)))
- PFailSWt slbool.Bool `desc:"if true, then probability of failure is inversely proportional to SWt structural / slow weight value (i.e., multiply PFail * (1-SWt)))"`
+ PFailSWt slbool.Bool
- // [view: -] delay length = actual length of the GBuf buffer per neuron = Delay+1 -- just for speed
- DelLen uint32 `view:"-" desc:"delay length = actual length of the GBuf buffer per neuron = Delay+1 -- just for speed"`
+ // delay length = actual length of the GBuf buffer per neuron = Delay+1 -- just for speed
+ DelLen uint32 `view:"-"`
pad, pad1 float32
}
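DelLen = Delay+1 sizes a per-neuron ring buffer so that a spike sent this cycle is delivered Delay+1 cycles later (Delay = 0 arriving the next cycle, per the comment above). A standalone sketch of such a buffer:

    // delayBuf delays values by its length: len(buf) = Delay + 1.
    type delayBuf struct {
        buf []float32
        idx int // current position
    }

    // step inserts this cycle's spike value and returns the one that
    // has aged Delay+1 cycles, i.e. is due for delivery now.
    func (d *delayBuf) step(spike float32) float32 {
        d.idx = (d.idx + 1) % len(d.buf)
        out := d.buf[d.idx]
        d.buf[d.idx] = spike
        return out
    }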
@@ -222,11 +211,11 @@ func (sc *SynComParams) Fail(ctx *Context, syni uint32, swt float32) {
// using both absolute and relative factors.
type PrjnScaleParams struct {
- // [min: 0] [Defaults: Forward=1, Back=0.2] relative scaling that shifts balance between different projections -- this is subject to normalization across all other projections into receiving neuron, and determines the GScale.Target for adapting scaling
- Rel float32 `min:"0" desc:"[Defaults: Forward=1, Back=0.2] relative scaling that shifts balance between different projections -- this is subject to normalization across all other projections into receiving neuron, and determines the GScale.Target for adapting scaling"`
+	// relative scaling that shifts balance between different projections -- this is subject to normalization across all other projections into receiving neuron, and determines the GScale.Target for adapting scaling (defaults: Forward=1, Back=0.2)
+ Rel float32 `min:"0"`
- // [def: 1] [min: 0] absolute multiplier adjustment factor for the prjn scaling -- can be used to adjust for idiosyncrasies not accommodated by the standard scaling based on initial target activation level and relative scaling factors -- any adaptation operates by directly adjusting scaling factor from the initially computed value
- Abs float32 `def:"1" min:"0" desc:"absolute multiplier adjustment factor for the prjn scaling -- can be used to adjust for idiosyncrasies not accommodated by the standard scaling based on initial target activation level and relative scaling factors -- any adaptation operates by directly adjusting scaling factor from the initially computed value"`
+ // absolute multiplier adjustment factor for the prjn scaling -- can be used to adjust for idiosyncrasies not accommodated by the standard scaling based on initial target activation level and relative scaling factors -- any adaptation operates by directly adjusting scaling factor from the initially computed value
+ Abs float32 `def:"1" min:"0"`
pad, pad1 float32
}
@@ -251,16 +240,16 @@ func (ws *PrjnScaleParams) SLayActScale(savg, snu, ncon float32) float32 {
}
semExtra := 2
slayActN := int(mat32.Round(savg * snu)) // sending layer actual # active
- slayActN = ints.MaxInt(slayActN, 1)
+ slayActN = max(slayActN, 1)
var sc float32
if ncon == snu {
sc = 1 / float32(slayActN)
} else {
maxActN := int(mat32.Min(ncon, float32(slayActN))) // max number we could get
avgActN := int(mat32.Round(savg * ncon)) // recv average actual # active if uniform
- avgActN = ints.MaxInt(avgActN, 1)
+ avgActN = max(avgActN, 1)
expActN := avgActN + semExtra // expected
- expActN = ints.MinInt(expActN, maxActN)
+ expActN = min(expActN, maxActN)
sc = 1 / float32(expActN)
}
return sc
diff --git a/axon/act_test.go b/axon/act_test.go
index 51395411f..9d233c97a 100644
--- a/axon/act_test.go
+++ b/axon/act_test.go
@@ -9,7 +9,7 @@ package axon
import (
"testing"
- "github.com/goki/mat32"
+ "goki.dev/mat32/v2"
)
// TOLERANCE is the numerical difference tolerance for comparing vs. target values
diff --git a/axon/avgmax.go b/axon/avgmax.go
index 3b66d7689..1e60718f0 100644
--- a/axon/avgmax.go
+++ b/axon/avgmax.go
@@ -37,19 +37,19 @@ func SetAvgMaxFloatFromIntErr(fun func()) {
type AvgMaxI32 struct {
// Average, from Calc when last computed as Sum / N
- Avg float32 `inactive:"+" desc:"Average, from Calc when last computed as Sum / N"`
+ Avg float32 `inactive:"+"`
// Maximum value, copied from CurMax in Calc
- Max float32 `inactive:"+" desc:"Maximum value, copied from CurMax in Calc"`
+ Max float32 `inactive:"+"`
// sum for computing average -- incremented in UpdateVal, reset in Calc
- Sum int32 `inactive:"+" desc:"sum for computing average -- incremented in UpdateVal, reset in Calc"`
+ Sum int32 `inactive:"+"`
// current maximum value, updated via UpdateVal, reset in Calc
- CurMax int32 `inactive:"+" desc:"current maximum value, updated via UpdateVal, reset in Calc"`
+ CurMax int32 `inactive:"+"`
// number of items in the sum -- this must be set in advance to a known value and it is used in computing the float <-> int conversion factor to maximize precision.
- N int32 `inactive:"+" desc:"number of items in the sum -- this must be set in advance to a known value and it is used in computing the float <-> int conversion factor to maximize precision."`
+ N int32 `inactive:"+"`
pad, pad1, pad2 int32
}
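The desc tags stripped above move their text into the doc comments, which also spell out the accumulate/compute pattern of AvgMaxI32: UpdateVal increments Sum and CurMax, and Calc derives Avg and Max and resets. A minimal standalone sketch of that pattern as the comments describe it -- not the library's actual implementation, which additionally uses N to set a float-to-int conversion factor for precision (omitted here):

// avgMax is a simplified stand-in illustrating the pattern described
// in the AvgMaxI32 comments; it is not the library's implementation.
type avgMax struct {
	Sum, CurMax, N int32
	Avg, Max       float32
}

func (am *avgMax) UpdateVal(v int32) {
	am.Sum += v
	if v > am.CurMax {
		am.CurMax = v
	}
}

func (am *avgMax) Calc() {
	if am.N > 0 {
		am.Avg = float32(am.Sum) / float32(am.N)
	}
	am.Max = float32(am.CurMax)
	am.Sum, am.CurMax = 0, 0 // reset for the next pass
}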
diff --git a/axon/axon.go b/axon/axon.go
index d96aef570..da5795926 100644
--- a/axon/axon.go
+++ b/axon/axon.go
@@ -4,8 +4,10 @@
package axon
+//go:generate goki generate -add-types
+
import (
- "github.com/emer/emergent/emer"
+ "github.com/emer/emergent/v2/emer"
)
// AxonNetwork defines the essential algorithmic API for Axon, at the network level.
diff --git a/axon/basic_test.go b/axon/basic_test.go
index 1fab1b8c2..682cec384 100644
--- a/axon/basic_test.go
+++ b/axon/basic_test.go
@@ -17,13 +17,13 @@ import (
"strings"
"testing"
- "github.com/emer/emergent/erand"
- "github.com/emer/emergent/etime"
- "github.com/emer/emergent/params"
- "github.com/emer/emergent/prjn"
- "github.com/emer/etable/etensor"
- "github.com/goki/ki/kit"
- "github.com/goki/mat32"
+ "github.com/emer/emergent/v2/erand"
+ "github.com/emer/emergent/v2/etime"
+ "github.com/emer/emergent/v2/params"
+ "github.com/emer/emergent/v2/prjn"
+ "goki.dev/etable/v2/etensor"
+ "goki.dev/laser"
+ "goki.dev/mat32/v2"
"golang.org/x/exp/maps"
)
@@ -260,7 +260,7 @@ func TestSpikeProp(t *testing.T) {
// StructVals adds field vals to given vals map
func StructVals(obj any, vals map[string]float32, key string) {
- v := kit.NonPtrValue(reflect.ValueOf(obj))
+ v := laser.NonPtrValue(reflect.ValueOf(obj))
typ := v.Type()
for i := 0; i < v.NumField(); i++ {
ft := typ.Field(i)
@@ -269,7 +269,7 @@ func StructVals(obj any, vals map[string]float32, key string) {
}
fv := v.Field(i)
kk := key + fmt.Sprintf("\t%s", ft.Name)
- vals[kk], _ = kit.ToFloat32(fv.Interface())
+ vals[kk], _ = laser.ToFloat32(fv.Interface())
}
}
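The kit-to-laser migration above is call-compatible as used here: laser.NonPtrValue dereferences a reflect.Value, and laser.ToFloat32 converts an arbitrary value, returning the float32 plus a second result (discarded in StructVals, as above). A small usage sketch relying only on the call shapes visible in this diff:

package main

import (
	"fmt"
	"reflect"

	"goki.dev/laser"
)

type point struct{ X, Y float32 }

func main() {
	p := &point{X: 1.5, Y: 2.5}
	v := laser.NonPtrValue(reflect.ValueOf(p)) // struct value, not *struct
	for i := 0; i < v.NumField(); i++ {
		f, _ := laser.ToFloat32(v.Field(i).Interface())
		fmt.Println(v.Type().Field(i).Name, f)
	}
}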
diff --git a/axon/context.go b/axon/context.go
index 85e6ea448..224400cad 100644
--- a/axon/context.go
+++ b/axon/context.go
@@ -7,10 +7,10 @@ package axon
import (
"math"
- "github.com/emer/emergent/etime"
- "github.com/goki/gosl/slbool"
- "github.com/goki/gosl/slrand"
- "github.com/goki/ki/bools"
+ "github.com/emer/emergent/v2/etime"
+ "goki.dev/glop/num"
+ "goki.dev/gosl/v2/slbool"
+ "goki.dev/gosl/v2/slrand"
)
var (
@@ -244,50 +244,50 @@ func (ctx *Context) CopyNetStridesFrom(srcCtx *Context) {
// NetIdxs are indexes and sizes for processing network
type NetIdxs struct {
- // [min: 1] number of data parallel items to process currently
- NData uint32 `min:"1" desc:"number of data parallel items to process currently"`
+ // number of data parallel items to process currently
+ NData uint32 `min:"1"`
// network index in global Networks list of networks -- needed for GPU shader kernel compatible network variable access functions (e.g., NrnV, SynV etc) in CPU mode
- NetIdx uint32 `inactive:"+" desc:"network index in global Networks list of networks -- needed for GPU shader kernel compatible network variable access functions (e.g., NrnV, SynV etc) in CPU mode"`
+ NetIdx uint32 `inactive:"+"`
// maximum amount of data parallel
- MaxData uint32 `inactive:"+" desc:"maximum amount of data parallel"`
+ MaxData uint32 `inactive:"+"`
// number of layers in the network
- NLayers uint32 `inactive:"+" desc:"number of layers in the network"`
+ NLayers uint32 `inactive:"+"`
// total number of neurons
- NNeurons uint32 `inactive:"+" desc:"total number of neurons"`
+ NNeurons uint32 `inactive:"+"`
// total number of pools excluding * MaxData factor
- NPools uint32 `inactive:"+" desc:"total number of pools excluding * MaxData factor"`
+ NPools uint32 `inactive:"+"`
// total number of synapses
- NSyns uint32 `inactive:"+" desc:"total number of synapses"`
+ NSyns uint32 `inactive:"+"`
// maximum size in float32 (4 bytes) of a GPU buffer -- needed for GPU access
- GPUMaxBuffFloats uint32 `inactive:"+" desc:"maximum size in float32 (4 bytes) of a GPU buffer -- needed for GPU access"`
+ GPUMaxBuffFloats uint32 `inactive:"+"`
// total number of SynCa banks of GPUMaxBufferBytes arrays in GPU
- GPUSynCaBanks uint32 `inactive:"+" desc:"total number of SynCa banks of GPUMaxBufferBytes arrays in GPU"`
+ GPUSynCaBanks uint32 `inactive:"+"`
// total number of PVLV Drives / positive USs
- PVLVNPosUSs uint32 `inactive:"+" desc:"total number of PVLV Drives / positive USs"`
+ PVLVNPosUSs uint32 `inactive:"+"`
// total number of PVLV Negative USs
- PVLVNNegUSs uint32 `inactive:"+" desc:"total number of PVLV Negative USs"`
+ PVLVNNegUSs uint32 `inactive:"+"`
// offset into GlobalVars for USneg values
- GvUSnegOff uint32 `inactive:"+" desc:"offset into GlobalVars for USneg values"`
+ GvUSnegOff uint32 `inactive:"+"`
// stride into GlobalVars for USneg values
- GvUSnegStride uint32 `inactive:"+" desc:"stride into GlobalVars for USneg values"`
+ GvUSnegStride uint32 `inactive:"+"`
// offset into GlobalVars for USpos, Drive, VSPatch values values
- GvUSposOff uint32 `inactive:"+" desc:"offset into GlobalVars for USpos, Drive, VSPatch values values"`
+ GvUSposOff uint32 `inactive:"+"`
// stride into GlobalVars for USpos, Drive, VSPatch values
- GvUSposStride uint32 `inactive:"+" desc:"stride into GlobalVars for USpos, Drive, VSPatch values"`
+ GvUSposStride uint32 `inactive:"+"`
pad uint32
}
@@ -349,72 +349,72 @@ func (ctx *NetIdxs) SynIdxIsValid(si uint32) bool {
type Context struct {
// current evaluation mode, e.g., Train, Test, etc
- Mode etime.Modes `desc:"current evaluation mode, e.g., Train, Test, etc"`
+ Mode etime.Modes
// if true, the model is being run in a testing mode, so no weight changes or other associated computations are needed. this flag should only affect learning-related behavior. Is automatically updated based on Mode != Train
- Testing slbool.Bool `inactive:"+" desc:"if true, the model is being run in a testing mode, so no weight changes or other associated computations are needed. this flag should only affect learning-related behavior. Is automatically updated based on Mode != Train"`
+ Testing slbool.Bool `inactive:"+"`
// phase counter: typically 0-1 for minus-plus but can be more phases for other algorithms
- Phase int32 `desc:"phase counter: typicaly 0-1 for minus-plus but can be more phases for other algorithms"`
+ Phase int32
// true if this is the plus phase, when the outcome / bursting is occurring, driving positive learning -- else minus phase
- PlusPhase slbool.Bool `desc:"true if this is the plus phase, when the outcome / bursting is occurring, driving positive learning -- else minus phase"`
+ PlusPhase slbool.Bool
// cycle within current phase -- minus or plus
- PhaseCycle int32 `desc:"cycle within current phase -- minus or plus"`
+ PhaseCycle int32
// cycle counter: number of iterations of activation updating (settling) on the current state -- this counts time sequentially until reset with NewState
- Cycle int32 `desc:"cycle counter: number of iterations of activation updating (settling) on the current state -- this counts time sequentially until reset with NewState"`
+ Cycle int32
- // [def: 200] length of the theta cycle in terms of 1 msec Cycles -- some network update steps depend on doing something at the end of the theta cycle (e.g., CTCtxtPrjn).
- ThetaCycles int32 `def:"200" desc:"length of the theta cycle in terms of 1 msec Cycles -- some network update steps depend on doing something at the end of the theta cycle (e.g., CTCtxtPrjn)."`
+ // length of the theta cycle in terms of 1 msec Cycles -- some network update steps depend on doing something at the end of the theta cycle (e.g., CTCtxtPrjn).
+ ThetaCycles int32 `def:"200"`
// total cycle count -- increments continuously from whenever it was last reset -- typically this is number of milliseconds in simulation time -- is int32 and not uint32 b/c used with Synapse CaUpT which needs to have a -1 case for expired update time
- CyclesTotal int32 `desc:"total cycle count -- increments continuously from whenever it was last reset -- typically this is number of milliseconds in simulation time -- is int32 and not uint32 b/c used with Synapse CaUpT which needs to have a -1 case for expired update time"`
+ CyclesTotal int32
// accumulated amount of time the network has been running, in simulation-time (not real world time), in seconds
- Time float32 `desc:"accumulated amount of time the network has been running, in simulation-time (not real world time), in seconds"`
+ Time float32
// total trial count -- increments continuously in NewState call *only in Train mode* from whenever it was last reset -- can be used for synchronizing weight updates across nodes
- TrialsTotal int32 `desc:"total trial count -- increments continuously in NewState call *only in Train mode* from whenever it was last reset -- can be used for synchronizing weight updates across nodes"`
+ TrialsTotal int32
- // [def: 0.001] amount of time to increment per cycle
- TimePerCycle float32 `def:"0.001" desc:"amount of time to increment per cycle"`
+ // amount of time to increment per cycle
+ TimePerCycle float32 `def:"0.001"`
- // [def: 100] how frequently to perform slow adaptive processes such as synaptic scaling, inhibition adaptation, associated in the brain with sleep, in the SlowAdapt method. This should be long enough for meaningful changes to accumulate -- 100 is default but could easily be longer in larger models. Because SlowCtr is incremented by NData, high NData cases (e.g. 16) likely need to increase this value -- e.g., 400 seems to produce overall consistent results in various models.
- SlowInterval int32 `def:"100" desc:"how frequently to perform slow adaptive processes such as synaptic scaling, inhibition adaptation, associated in the brain with sleep, in the SlowAdapt method. This should be long enough for meaningful changes to accumulate -- 100 is default but could easily be longer in larger models. Because SlowCtr is incremented by NData, high NData cases (e.g. 16) likely need to increase this value -- e.g., 400 seems to produce overall consistent results in various models."`
+ // how frequently to perform slow adaptive processes such as synaptic scaling, inhibition adaptation, associated in the brain with sleep, in the SlowAdapt method. This should be long enough for meaningful changes to accumulate -- 100 is default but could easily be longer in larger models. Because SlowCtr is incremented by NData, high NData cases (e.g. 16) likely need to increase this value -- e.g., 400 seems to produce overall consistent results in various models.
+ SlowInterval int32 `def:"100"`
// counter for how long it has been since last SlowAdapt step. Note that this is incremented by NData to maintain consistency across different values of this parameter.
- SlowCtr int32 `inactive:"+" desc:"counter for how long it has been since last SlowAdapt step. Note that this is incremented by NData to maintain consistency across different values of this parameter."`
+ SlowCtr int32 `inactive:"+"`
// synaptic calcium counter, which drives the CaUpT synaptic value to optimize updating of this computationally expensive factor. It is incremented by 1 for each cycle, and reset at the SlowInterval, at which point the synaptic calcium values are all reset.
- SynCaCtr float32 `inactive:"+" desc:"synaptic calcium counter, which drives the CaUpT synaptic value to optimize updating of this computationally expensive factor. It is incremented by 1 for each cycle, and reset at the SlowInterval, at which point the synaptic calcium values are all reset."`
+ SynCaCtr float32 `inactive:"+"`
pad, pad1 float32
- // [view: inline] indexes and sizes of current network
- NetIdxs NetIdxs `view:"inline" desc:"indexes and sizes of current network"`
+ // indexes and sizes of current network
+ NetIdxs NetIdxs `view:"inline"`
- // [view: -] stride offsets for accessing neuron variables
- NeuronVars NeuronVarStrides `view:"-" desc:"stride offsets for accessing neuron variables"`
+ // stride offsets for accessing neuron variables
+ NeuronVars NeuronVarStrides `view:"-"`
- // [view: -] stride offsets for accessing neuron average variables
- NeuronAvgVars NeuronAvgVarStrides `view:"-" desc:"stride offsets for accessing neuron average variables"`
+ // stride offsets for accessing neuron average variables
+ NeuronAvgVars NeuronAvgVarStrides `view:"-"`
- // [view: -] stride offsets for accessing neuron indexes
- NeuronIdxs NeuronIdxStrides `view:"-" desc:"stride offsets for accessing neuron indexes"`
+ // stride offsets for accessing neuron indexes
+ NeuronIdxs NeuronIdxStrides `view:"-"`
- // [view: -] stride offsets for accessing synapse variables
- SynapseVars SynapseVarStrides `view:"-" desc:"stride offsets for accessing synapse variables"`
+ // stride offsets for accessing synapse variables
+ SynapseVars SynapseVarStrides `view:"-"`
- // [view: -] stride offsets for accessing synapse Ca variables
- SynapseCaVars SynapseCaStrides `view:"-" desc:"stride offsets for accessing synapse Ca variables"`
+ // stride offsets for accessing synapse Ca variables
+ SynapseCaVars SynapseCaStrides `view:"-"`
- // [view: -] stride offsets for accessing synapse indexes
- SynapseIdxs SynapseIdxStrides `view:"-" desc:"stride offsets for accessing synapse indexes"`
+ // stride offsets for accessing synapse indexes
+ SynapseIdxs SynapseIdxStrides `view:"-"`
// random counter -- incremented by maximum number of possible random numbers generated per cycle, regardless of how many are actually used -- this is shared across all layers so must encompass all possible param settings.
- RandCtr slrand.Counter `desc:"random counter -- incremented by maximum number of possible random numbers generated per cycle, regardless of how many are actually used -- this is shared across all layers so must encompass all possible param settings."`
+ RandCtr slrand.Counter
}
// Defaults sets default values
@@ -742,7 +742,7 @@ func GlobalsReset(ctx *Context) {
// GlobalSetRew is a convenience function for setting the external reward
// state in Globals variables
func GlobalSetRew(ctx *Context, di uint32, rew float32, hasRew bool) {
- SetGlbV(ctx, di, GvHasRew, bools.ToFloat32(hasRew))
+ SetGlbV(ctx, di, GvHasRew, num.FromBool[float32](hasRew))
if hasRew {
SetGlbV(ctx, di, GvRew, rew)
} else {
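bools.ToFloat32 is replaced above by the generic num.FromBool from goki.dev/glop/num, with the numeric target type given explicitly at the call site. A hypothetical local stand-in sketching the same semantics (true maps to 1, false to 0) -- an illustration, not the actual num implementation:

// fromBool is a hypothetical local equivalent of the conversion used in
// GlobalSetRew above: true -> 1, false -> 0, in the requested numeric type.
func fromBool[T int32 | float32 | float64](b bool) T {
	if b {
		return 1
	}
	return 0
}

// usage analogous to the call above:
//   SetGlbV(ctx, di, GvHasRew, fromBool[float32](hasRew))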
diff --git a/axon/damodtypes_string.go b/axon/damodtypes_string.go
deleted file mode 100644
index 81604a5e5..000000000
--- a/axon/damodtypes_string.go
+++ /dev/null
@@ -1,57 +0,0 @@
-// Code generated by "stringer -type=DAModTypes"; DO NOT EDIT.
-
-package axon
-
-import (
- "errors"
- "strconv"
-)
-
-var _ = errors.New("dummy error")
-
-func _() {
- // An "invalid array index" compiler error signifies that the constant values have changed.
- // Re-run the stringer command to generate them again.
- var x [1]struct{}
- _ = x[NoDAMod-0]
- _ = x[D1Mod-1]
- _ = x[D2Mod-2]
- _ = x[D1AbsMod-3]
- _ = x[DAModTypesN-4]
-}
-
-const _DAModTypes_name = "NoDAModD1ModD2ModD1AbsModDAModTypesN"
-
-var _DAModTypes_index = [...]uint8{0, 7, 12, 17, 25, 36}
-
-func (i DAModTypes) String() string {
- if i < 0 || i >= DAModTypes(len(_DAModTypes_index)-1) {
- return "DAModTypes(" + strconv.FormatInt(int64(i), 10) + ")"
- }
- return _DAModTypes_name[_DAModTypes_index[i]:_DAModTypes_index[i+1]]
-}
-
-func (i *DAModTypes) FromString(s string) error {
- for j := 0; j < len(_DAModTypes_index)-1; j++ {
- if s == _DAModTypes_name[_DAModTypes_index[j]:_DAModTypes_index[j+1]] {
- *i = DAModTypes(j)
- return nil
- }
- }
- return errors.New("String: " + s + " is not a valid option for type: DAModTypes")
-}
-
-var _DAModTypes_descMap = map[DAModTypes]string{
- 0: `NoDAMod means there is no effect of dopamine on neural activity`,
- 1: `D1Mod is for neurons that primarily express dopamine D1 receptors, which are excitatory from DA bursts, inhibitory from dips. Cortical neurons can generally use this type, while subcortical populations are more diverse in having both D1 and D2 subtypes.`,
- 2: `D2Mod is for neurons that primarily express dopamine D2 receptors, which are excitatory from DA dips, inhibitory from bursts.`,
- 3: `D1AbsMod is like D1Mod, except the absolute value of DA is used instead of the signed value. There are a subset of DA neurons that send increased DA for both negative and positive outcomes, targeting frontal neurons.`,
- 4: ``,
-}
-
-func (i DAModTypes) Desc() string {
- if str, ok := _DAModTypes_descMap[i]; ok {
- return str
- }
- return "DAModTypes(" + strconv.FormatInt(int64(i), 10) + ")"
-}
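This hand-run stringer output is superseded by the enumgen code added below, whose types satisfy the goki.dev/enums interface (String, SetString, Desc, plus text marshaling). A short sketch, inside package axon, using only the generated DAModTypes methods shown in this diff:

package axon

import "fmt"

// exampleDAMod round-trips a DAModTypes value through the generated
// enums methods; lowercase input is accepted via the generated name map.
func exampleDAMod() {
	var m DAModTypes
	if err := m.SetString("d1mod"); err != nil {
		fmt.Println(err)
		return
	}
	fmt.Println(m.String()) // D1Mod
	fmt.Println(m.Desc())   // description text from _DAModTypesDescMap
}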
diff --git a/axon/deep_layers.go b/axon/deep_layers.go
index 93232381a..489efc93f 100644
--- a/axon/deep_layers.go
+++ b/axon/deep_layers.go
@@ -5,8 +5,8 @@
package axon
import (
- "github.com/emer/emergent/params"
- "github.com/goki/mat32"
+ "github.com/emer/emergent/v2/params"
+ "goki.dev/mat32/v2"
)
//gosl: start deep_layers
@@ -15,11 +15,11 @@ import (
// CaSpkP integrated spiking values in Super layers -- thresholded.
type BurstParams struct {
- // [def: 0.1] [max: 1] Relative component of threshold on superficial activation value, below which it does not drive Burst (and above which, Burst = CaSpkP). This is the distance between the average and maximum activation values within layer (e.g., 0 = average, 1 = max). Overall effective threshold is MAX of relative and absolute thresholds.
- ThrRel float32 `max:"1" def:"0.1" desc:"Relative component of threshold on superficial activation value, below which it does not drive Burst (and above which, Burst = CaSpkP). This is the distance between the average and maximum activation values within layer (e.g., 0 = average, 1 = max). Overall effective threshold is MAX of relative and absolute thresholds."`
+ // Relative component of threshold on superficial activation value, below which it does not drive Burst (and above which, Burst = CaSpkP). This is the distance between the average and maximum activation values within layer (e.g., 0 = average, 1 = max). Overall effective threshold is MAX of relative and absolute thresholds.
+ ThrRel float32 `max:"1" def:"0.1"`
- // [def: 0.1] [min: 0] [max: 1] Absolute component of threshold on superficial activation value, below which it does not drive Burst (and above which, Burst = CaSpkP). Overall effective threshold is MAX of relative and absolute thresholds.
- ThrAbs float32 `min:"0" max:"1" def:"0.1" desc:"Absolute component of threshold on superficial activation value, below which it does not drive Burst (and above which, Burst = CaSpkP). Overall effective threshold is MAX of relative and absolute thresholds."`
+ // Absolute component of threshold on superficial activation value, below which it does not drive Burst (and above which, Burst = CaSpkP). Overall effective threshold is MAX of relative and absolute thresholds.
+ ThrAbs float32 `min:"0" max:"1" def:"0.1"`
pad, pad1 float32
}
@@ -42,14 +42,14 @@ func (bp *BurstParams) ThrFmAvgMax(avg, mx float32) float32 {
// CTParams control the CT corticothalamic neuron special behavior
type CTParams struct {
- // [def: 0.05,0.1,1,2] gain factor for context excitatory input, which is constant as compared to the spiking input from other projections, so it must be downscaled accordingly. This can make a difference and may need to be scaled up or down.
- GeGain float32 `def:"0.05,0.1,1,2" desc:"gain factor for context excitatory input, which is constant as compared to the spiking input from other projections, so it must be downscaled accordingly. This can make a difference and may need to be scaled up or down."`
+ // gain factor for context excitatory input, which is constant as compared to the spiking input from other projections, so it must be downscaled accordingly. This can make a difference and may need to be scaled up or down.
+ GeGain float32 `def:"0.05,0.1,1,2"`
- // [def: 0,50] decay time constant for context Ge input -- if > 0, decays over time so intrinsic circuit dynamics have to take over. For single-step copy-based cases, set to 0, while longer-time-scale dynamics should use 50
- DecayTau float32 `def:"0,50" desc:"decay time constant for context Ge input -- if > 0, decays over time so intrinsic circuit dynamics have to take over. For single-step copy-based cases, set to 0, while longer-time-scale dynamics should use 50"`
+ // decay time constant for context Ge input -- if > 0, decays over time so intrinsic circuit dynamics have to take over. For single-step copy-based cases, set to 0, while longer-time-scale dynamics should use 50
+ DecayTau float32 `def:"0,50"`
- // [view: -] 1 / tau
- DecayDt float32 `view:"-" json:"-" xml:"-" desc:"1 / tau"`
+ // 1 / tau
+ DecayDt float32 `view:"-" json:"-" xml:"-"`
pad float32
}
@@ -73,14 +73,14 @@ func (cp *CTParams) Defaults() {
// the corresponding driver neuron Burst activation (or CaSpkP if not Super)
type PulvParams struct {
- // [def: 0.1] [min: 0.0] multiplier on driver input strength, multiplies CaSpkP from driver layer to produce Ge excitatory input to Pulv unit.
- DriveScale float32 `def:"0.1" min:"0.0" desc:"multiplier on driver input strength, multiplies CaSpkP from driver layer to produce Ge excitatory input to Pulv unit."`
+ // multiplier on driver input strength, multiplies CaSpkP from driver layer to produce Ge excitatory input to Pulv unit.
+ DriveScale float32 `def:"0.1" min:"0.0"`
- // [def: 0.6] [min: 0.01] Level of Max driver layer CaSpkP at which the drivers fully drive the burst phase activation. If there is weaker driver input, then (Max/FullDriveAct) proportion of the non-driver inputs remain and this critically prevents the network from learning to turn activation off, which is difficult and severely degrades learning.
- FullDriveAct float32 `def:"0.6" min:"0.01" desc:"Level of Max driver layer CaSpkP at which the drivers fully drive the burst phase activation. If there is weaker driver input, then (Max/FullDriveAct) proportion of the non-driver inputs remain and this critically prevents the network from learning to turn activation off, which is difficult and severely degrades learning."`
+ // Level of Max driver layer CaSpkP at which the drivers fully drive the burst phase activation. If there is weaker driver input, then (Max/FullDriveAct) proportion of the non-driver inputs remain and this critically prevents the network from learning to turn activation off, which is difficult and severely degrades learning.
+ FullDriveAct float32 `def:"0.6" min:"0.01"`
// index of layer that generates the driving activity into this one -- set via SetBuildConfig(DriveLayName) setting
- DriveLayIdx int32 `inactive:"+" desc:"index of layer that generates the driving activity into this one -- set via SetBuildConfig(DriveLayName) setting"`
+ DriveLayIdx int32 `inactive:"+"`
pad float32
}
diff --git a/axon/deep_net.go b/axon/deep_net.go
index 6baa5f15c..cb81fd49e 100644
--- a/axon/deep_net.go
+++ b/axon/deep_net.go
@@ -8,9 +8,9 @@ import (
"fmt"
"strings"
- "github.com/emer/emergent/params"
- "github.com/emer/emergent/prjn"
- "github.com/emer/emergent/relpos"
+ "github.com/emer/emergent/v2/params"
+ "github.com/emer/emergent/v2/prjn"
+ "github.com/emer/emergent/v2/relpos"
"golang.org/x/exp/maps"
)
diff --git a/axon/enumgen.go b/axon/enumgen.go
new file mode 100644
index 000000000..d441cf0cb
--- /dev/null
+++ b/axon/enumgen.go
@@ -0,0 +1,2478 @@
+// Code generated by "goki generate -add-types"; DO NOT EDIT.
+
+package axon
+
+import (
+ "errors"
+ "log"
+ "strconv"
+ "strings"
+
+ "goki.dev/enums"
+)
+
+var _PrjnGTypesValues = []PrjnGTypes{0, 1, 2, 3, 4}
+
+// PrjnGTypesN is the highest valid value
+// for type PrjnGTypes, plus one.
+const PrjnGTypesN PrjnGTypes = 5
+
+// An "invalid array index" compiler error signifies that the constant values have changed.
+// Re-run the enumgen command to generate them again.
+func _PrjnGTypesNoOp() {
+ var x [1]struct{}
+ _ = x[ExcitatoryG-(0)]
+ _ = x[InhibitoryG-(1)]
+ _ = x[ModulatoryG-(2)]
+ _ = x[MaintG-(3)]
+ _ = x[ContextG-(4)]
+}
+
+var _PrjnGTypesNameToValueMap = map[string]PrjnGTypes{
+ `ExcitatoryG`: 0,
+ `excitatoryg`: 0,
+ `InhibitoryG`: 1,
+ `inhibitoryg`: 1,
+ `ModulatoryG`: 2,
+ `modulatoryg`: 2,
+ `MaintG`: 3,
+ `maintg`: 3,
+ `ContextG`: 4,
+ `contextg`: 4,
+}
+
+var _PrjnGTypesDescMap = map[PrjnGTypes]string{
+ 0: `Excitatory projections drive Ge conductance on receiving neurons, which send to GiRaw and GiSyn neuron variables.`,
+ 1: `Inhibitory projections drive Gi inhibitory conductance, which send to GiRaw and GiSyn neuron variables.`,
+ 2: `Modulatory projections have a multiplicative effect on other inputs, which send to GModRaw and GModSyn neuron variables.`,
+ 3: `Maintenance projections drive unique set of NMDA channels that support strong active maintenance abilities. Send to GMaintRaw and GMaintSyn neuron variables.`,
+ 4: `Context projections are for inputs to CT layers, which update only at the end of the plus phase, and send to CtxtGe.`,
+}
+
+var _PrjnGTypesMap = map[PrjnGTypes]string{
+ 0: `ExcitatoryG`,
+ 1: `InhibitoryG`,
+ 2: `ModulatoryG`,
+ 3: `MaintG`,
+ 4: `ContextG`,
+}
+
+// String returns the string representation
+// of this PrjnGTypes value.
+func (i PrjnGTypes) String() string {
+ if str, ok := _PrjnGTypesMap[i]; ok {
+ return str
+ }
+ return strconv.FormatInt(int64(i), 10)
+}
+
+// SetString sets the PrjnGTypes value from its
+// string representation, and returns an
+// error if the string is invalid.
+func (i *PrjnGTypes) SetString(s string) error {
+ if val, ok := _PrjnGTypesNameToValueMap[s]; ok {
+ *i = val
+ return nil
+ }
+ if val, ok := _PrjnGTypesNameToValueMap[strings.ToLower(s)]; ok {
+ *i = val
+ return nil
+ }
+ return errors.New(s + " is not a valid value for type PrjnGTypes")
+}
+
+// Int64 returns the PrjnGTypes value as an int64.
+func (i PrjnGTypes) Int64() int64 {
+ return int64(i)
+}
+
+// SetInt64 sets the PrjnGTypes value from an int64.
+func (i *PrjnGTypes) SetInt64(in int64) {
+ *i = PrjnGTypes(in)
+}
+
+// Desc returns the description of the PrjnGTypes value.
+func (i PrjnGTypes) Desc() string {
+ if str, ok := _PrjnGTypesDescMap[i]; ok {
+ return str
+ }
+ return i.String()
+}
+
+// PrjnGTypesValues returns all possible values
+// for the type PrjnGTypes.
+func PrjnGTypesValues() []PrjnGTypes {
+ return _PrjnGTypesValues
+}
+
+// Values returns all possible values
+// for the type PrjnGTypes.
+func (i PrjnGTypes) Values() []enums.Enum {
+ res := make([]enums.Enum, len(_PrjnGTypesValues))
+ for i, d := range _PrjnGTypesValues {
+ res[i] = d
+ }
+ return res
+}
+
+// IsValid returns whether the value is a
+// valid option for type PrjnGTypes.
+func (i PrjnGTypes) IsValid() bool {
+ _, ok := _PrjnGTypesMap[i]
+ return ok
+}
+
+// MarshalText implements the [encoding.TextMarshaler] interface.
+func (i PrjnGTypes) MarshalText() ([]byte, error) {
+ return []byte(i.String()), nil
+}
+
+// UnmarshalText implements the [encoding.TextUnmarshaler] interface.
+func (i *PrjnGTypes) UnmarshalText(text []byte) error {
+ if err := i.SetString(string(text)); err != nil {
+ log.Println(err)
+ }
+ return nil
+}
+
+var _GlobalVarsValues = []GlobalVars{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49}
+
+// GlobalVarsN is the highest valid value
+// for type GlobalVars, plus one.
+const GlobalVarsN GlobalVars = 50
+
+// An "invalid array index" compiler error signifies that the constant values have changed.
+// Re-run the enumgen command to generate them again.
+func _GlobalVarsNoOp() {
+ var x [1]struct{}
+ _ = x[GvRew-(0)]
+ _ = x[GvHasRew-(1)]
+ _ = x[GvRewPred-(2)]
+ _ = x[GvPrevPred-(3)]
+ _ = x[GvHadRew-(4)]
+ _ = x[GvDA-(5)]
+ _ = x[GvACh-(6)]
+ _ = x[GvNE-(7)]
+ _ = x[GvSer-(8)]
+ _ = x[GvAChRaw-(9)]
+ _ = x[GvNotMaint-(10)]
+ _ = x[GvVSMatrixJustGated-(11)]
+ _ = x[GvVSMatrixHasGated-(12)]
+ _ = x[GvCuriosityPoolGated-(13)]
+ _ = x[GvTime-(14)]
+ _ = x[GvEffort-(15)]
+ _ = x[GvUrgencyRaw-(16)]
+ _ = x[GvUrgency-(17)]
+ _ = x[GvHasPosUS-(18)]
+ _ = x[GvHadPosUS-(19)]
+ _ = x[GvNegUSOutcome-(20)]
+ _ = x[GvHadNegUSOutcome-(21)]
+ _ = x[GvPVposSum-(22)]
+ _ = x[GvPVpos-(23)]
+ _ = x[GvPVnegSum-(24)]
+ _ = x[GvPVneg-(25)]
+ _ = x[GvPVposEst-(26)]
+ _ = x[GvPVposEstSum-(27)]
+ _ = x[GvPVposEstDisc-(28)]
+ _ = x[GvGiveUpDiff-(29)]
+ _ = x[GvGiveUpProb-(30)]
+ _ = x[GvGiveUp-(31)]
+ _ = x[GvGaveUp-(32)]
+ _ = x[GvVSPatchPos-(33)]
+ _ = x[GvVSPatchPosPrev-(34)]
+ _ = x[GvVSPatchPosSum-(35)]
+ _ = x[GvLHbDip-(36)]
+ _ = x[GvLHbBurst-(37)]
+ _ = x[GvLHbPVDA-(38)]
+ _ = x[GvCeMpos-(39)]
+ _ = x[GvCeMneg-(40)]
+ _ = x[GvVtaDA-(41)]
+ _ = x[GvUSneg-(42)]
+ _ = x[GvUSnegRaw-(43)]
+ _ = x[GvDrives-(44)]
+ _ = x[GvUSpos-(45)]
+ _ = x[GvVSPatch-(46)]
+ _ = x[GvVSPatchPrev-(47)]
+ _ = x[GvOFCposUSPTMaint-(48)]
+ _ = x[GvVSMatrixPoolGated-(49)]
+}
+
+var _GlobalVarsNameToValueMap = map[string]GlobalVars{
+ `GvRew`: 0,
+ `gvrew`: 0,
+ `GvHasRew`: 1,
+ `gvhasrew`: 1,
+ `GvRewPred`: 2,
+ `gvrewpred`: 2,
+ `GvPrevPred`: 3,
+ `gvprevpred`: 3,
+ `GvHadRew`: 4,
+ `gvhadrew`: 4,
+ `GvDA`: 5,
+ `gvda`: 5,
+ `GvACh`: 6,
+ `gvach`: 6,
+ `GvNE`: 7,
+ `gvne`: 7,
+ `GvSer`: 8,
+ `gvser`: 8,
+ `GvAChRaw`: 9,
+ `gvachraw`: 9,
+ `GvNotMaint`: 10,
+ `gvnotmaint`: 10,
+ `GvVSMatrixJustGated`: 11,
+ `gvvsmatrixjustgated`: 11,
+ `GvVSMatrixHasGated`: 12,
+ `gvvsmatrixhasgated`: 12,
+ `GvCuriosityPoolGated`: 13,
+ `gvcuriositypoolgated`: 13,
+ `GvTime`: 14,
+ `gvtime`: 14,
+ `GvEffort`: 15,
+ `gveffort`: 15,
+ `GvUrgencyRaw`: 16,
+ `gvurgencyraw`: 16,
+ `GvUrgency`: 17,
+ `gvurgency`: 17,
+ `GvHasPosUS`: 18,
+ `gvhasposus`: 18,
+ `GvHadPosUS`: 19,
+ `gvhadposus`: 19,
+ `GvNegUSOutcome`: 20,
+ `gvnegusoutcome`: 20,
+ `GvHadNegUSOutcome`: 21,
+ `gvhadnegusoutcome`: 21,
+ `GvPVposSum`: 22,
+ `gvpvpossum`: 22,
+ `GvPVpos`: 23,
+ `gvpvpos`: 23,
+ `GvPVnegSum`: 24,
+ `gvpvnegsum`: 24,
+ `GvPVneg`: 25,
+ `gvpvneg`: 25,
+ `GvPVposEst`: 26,
+ `gvpvposest`: 26,
+ `GvPVposEstSum`: 27,
+ `gvpvposestsum`: 27,
+ `GvPVposEstDisc`: 28,
+ `gvpvposestdisc`: 28,
+ `GvGiveUpDiff`: 29,
+ `gvgiveupdiff`: 29,
+ `GvGiveUpProb`: 30,
+ `gvgiveupprob`: 30,
+ `GvGiveUp`: 31,
+ `gvgiveup`: 31,
+ `GvGaveUp`: 32,
+ `gvgaveup`: 32,
+ `GvVSPatchPos`: 33,
+ `gvvspatchpos`: 33,
+ `GvVSPatchPosPrev`: 34,
+ `gvvspatchposprev`: 34,
+ `GvVSPatchPosSum`: 35,
+ `gvvspatchpossum`: 35,
+ `GvLHbDip`: 36,
+ `gvlhbdip`: 36,
+ `GvLHbBurst`: 37,
+ `gvlhbburst`: 37,
+ `GvLHbPVDA`: 38,
+ `gvlhbpvda`: 38,
+ `GvCeMpos`: 39,
+ `gvcempos`: 39,
+ `GvCeMneg`: 40,
+ `gvcemneg`: 40,
+ `GvVtaDA`: 41,
+ `gvvtada`: 41,
+ `GvUSneg`: 42,
+ `gvusneg`: 42,
+ `GvUSnegRaw`: 43,
+ `gvusnegraw`: 43,
+ `GvDrives`: 44,
+ `gvdrives`: 44,
+ `GvUSpos`: 45,
+ `gvuspos`: 45,
+ `GvVSPatch`: 46,
+ `gvvspatch`: 46,
+ `GvVSPatchPrev`: 47,
+ `gvvspatchprev`: 47,
+ `GvOFCposUSPTMaint`: 48,
+ `gvofcposusptmaint`: 48,
+ `GvVSMatrixPoolGated`: 49,
+ `gvvsmatrixpoolgated`: 49,
+}
+
+var _GlobalVarsDescMap = map[GlobalVars]string{
+ 0: `Rew is reward value -- this is set here in the Context struct, and the RL Rew layer grabs it from there -- must also set HasRew flag when rew is set -- otherwise is ignored.`,
+ 1: `HasRew must be set to true when a reward is present -- otherwise Rew is ignored. Also set when PVLV BOA model gives up. This drives ACh release in the PVLV model.`,
+ 2: `RewPred is reward prediction -- computed by a special reward prediction layer`,
+ 3: `PrevPred is previous time step reward prediction -- e.g., for TDPredLayer`,
+ 4: `HadRew is HasRew state from the previous trial -- copied from HasRew in NewState -- used for updating Effort, Urgency at start of new trial`,
+ 5: `DA is dopamine -- represents reward prediction error, signaled as phasic increases or decreases in activity relative to a tonic baseline, which is represented by a value of 0. Released by the VTA -- ventral tegmental area, or SNc -- substantia nigra pars compacta.`,
+ 6: `ACh is acetylcholine -- activated by salient events, particularly at the onset of a reward / punishment outcome (US), or onset of a conditioned stimulus (CS). Driven by BLA -> PPtg that detects changes in BLA activity, via LDTLayer type`,
+ 7: `NE is norepinephrine -- not yet in use`,
+ 8: `Ser is serotonin -- not yet in use`,
+ 9: `AChRaw is raw ACh value used in updating global ACh value by LDTLayer`,
+ 10: `NotMaint is activity of the PTNotMaintLayer -- drives top-down inhibition of LDT layer / ACh activity.`,
+ 11: `VSMatrixJustGated indicates that VSMatrix just gated (to engage goal maintenance in PFC areas), set at end of plus phase -- this excludes any gating happening at time of US`,
+ 12: `VSMatrixHasGated indicates that VSMatrix has gated since the last time HasRew was set (US outcome received, or an expected one failed to be received)`,
+ 13: `CuriosityPoolGated is true if VSMatrixJustGated and the first pool representing the curiosity / novelty drive gated -- this can change the giving up Effort.Max parameter.`,
+ 14: `Time is raw time counter, incrementing upward during goal engaged window. This is also copied directly into NegUS[0] which tracks time, but we maintain a separate effort value to make it clearer.`,
+ 15: `Effort is raw effort counter -- incrementing upward for each effort step during goal engaged window. This is also copied directly into NegUS[1] which tracks effort, but we maintain a separate effort value to make it clearer.`,
+ 16: `UrgencyRaw is raw effort for urgency -- incrementing upward from effort increments per step when _not_ goal engaged`,
+ 17: `Urgency is the overall urgency activity level (normalized 0-1), computed from logistic function of GvUrgencyRaw`,
+ 18: `HasPosUS indicates has positive US on this trial -- drives goal accomplishment logic and gating.`,
+ 19: `HadPosUS is state from the previous trial (copied from HasPosUS in NewState).`,
+ 20: `NegUSOutcome indicates that a strong negative US stimulus was experienced, driving phasic ACh, VSMatrix gating to reset current goal engaged plan (if any), and phasic dopamine based on the outcome.`,
+ 21: `HadNegUSOutcome is state from the previous trial (copied from NegUSOutcome in NewState)`,
+ 22: `PVposSum is total weighted positive valence primary value = sum of Weight * USpos * Drive`,
+ 23: `PVpos is normalized positive valence primary value = (1 - 1/(1+PVposGain * PVposSum))`,
+ 24: `PVnegSum is total weighted negative valence primary value = sum of Weight * USneg`,
+ 25: `PVneg is normalized negative valence primary value = (1 - 1/(1+PVnegGain * PVnegSum))`,
+ 26: `PVposEst is the estimated PVpos value based on OFCposUSPT and VSMatrix gating`,
+ 27: `PVposEstSum is the sum that goes into computing estimated PVpos value based on OFCposUSPT and VSMatrix gating`,
+ 28: `PVposEstDisc is the discounted version of PVposEst, subtracting VSPatchPosSum, which represents the accumulated expectation of PVpos to this point.`,
+ 29: `GiveUpDiff is the difference: PVposEstDisc - PVneg representing the expected positive outcome up to this point. When this turns negative, the chance of giving up goes up proportionally, as a logistic function of this difference.`,
+ 30: `GiveUpProb is the probability from the logistic function of GiveUpDiff`,
+ 31: `GiveUp is true if a reset was triggered probabilistically based on GiveUpProb`,
+ 32: `GaveUp is copy of GiveUp from previous trial`,
+ 33: `VSPatchPos is net shunting input from VSPatch (PosD1, named PVi in original PVLV) computed as the Max of US-specific VSPatch saved values. This is also stored as GvRewPred.`,
+ 34: `VSPatchPosPrev is the previous-trial version of VSPatchPos -- for adjusting the VSPatchThr threshold`,
+ 35: `VSPatchPosSum is the sum of VSPatchPos over goal engaged trials, representing the integrated prediction that the US is going to occur`,
+ 36: `LHbDip is computed LHb activity level that drives dipping / pausing of DA firing, when VSPatch pos prediction > actual PV reward drive or PVneg > PVpos`,
+ 37: `LHbBurst is computed LHb activity level that drives bursts of DA firing, when actual PV reward drive > VSPatch pos prediction`,
+ 38: `LHbPVDA is GvLHbBurst - GvLHbDip -- the LHb contribution to DA, reflecting PV and VSPatch (PVi), but not the CS (LV) contributions`,
+ 39: `CeMpos is positive valence central nucleus of the amygdala (CeM) LV (learned value) activity, reflecting |BLAPosAcqD1 - BLAPosExtD2|_+ positively rectified. CeM sets Raw directly. Note that a positive US onset even with no active Drive will be reflected here, enabling learning about unexpected outcomes`,
+ 40: `CeMneg is negative valence central nucleus of the amygdala (CeM) LV (learned value) activity, reflecting |BLANegAcqD2 - BLANegExtD1|_+ positively rectified. CeM sets Raw directly`,
+ 41: `VtaDA is overall dopamine value reflecting all of the different inputs`,
+ 42: `USneg are negative valence US outcomes -- normalized version of raw, NNegUSs of them`,
+ 43: `USnegRaw are raw, linearly incremented negative valence US outcomes; this value is also integrated together with all US vals for PVneg`,
+ 44: `Drives is current drive state -- updated with optional homeostatic exponential return to baseline values`,
+ 45: `USpos is current positive-valence drive-satisfying input(s) (unconditioned stimuli = US)`,
+ 46: `VSPatch is current reward predicting VSPatch (PosD1) values`,
+ 47: `VSPatchPrev is previous reward predicting VSPatch (PosD1) values`,
+ 48: `OFCposUSPTMaint is activity level of given OFCposUSPT maintenance pool used in anticipating potential USpos outcome value`,
+ 49: `VSMatrixPoolGated indicates whether a given VSMatrix pool has gated; this is reset after the last goal is accomplished -- records gating since then.`,
+}
+
+var _GlobalVarsMap = map[GlobalVars]string{
+ 0: `GvRew`,
+ 1: `GvHasRew`,
+ 2: `GvRewPred`,
+ 3: `GvPrevPred`,
+ 4: `GvHadRew`,
+ 5: `GvDA`,
+ 6: `GvACh`,
+ 7: `GvNE`,
+ 8: `GvSer`,
+ 9: `GvAChRaw`,
+ 10: `GvNotMaint`,
+ 11: `GvVSMatrixJustGated`,
+ 12: `GvVSMatrixHasGated`,
+ 13: `GvCuriosityPoolGated`,
+ 14: `GvTime`,
+ 15: `GvEffort`,
+ 16: `GvUrgencyRaw`,
+ 17: `GvUrgency`,
+ 18: `GvHasPosUS`,
+ 19: `GvHadPosUS`,
+ 20: `GvNegUSOutcome`,
+ 21: `GvHadNegUSOutcome`,
+ 22: `GvPVposSum`,
+ 23: `GvPVpos`,
+ 24: `GvPVnegSum`,
+ 25: `GvPVneg`,
+ 26: `GvPVposEst`,
+ 27: `GvPVposEstSum`,
+ 28: `GvPVposEstDisc`,
+ 29: `GvGiveUpDiff`,
+ 30: `GvGiveUpProb`,
+ 31: `GvGiveUp`,
+ 32: `GvGaveUp`,
+ 33: `GvVSPatchPos`,
+ 34: `GvVSPatchPosPrev`,
+ 35: `GvVSPatchPosSum`,
+ 36: `GvLHbDip`,
+ 37: `GvLHbBurst`,
+ 38: `GvLHbPVDA`,
+ 39: `GvCeMpos`,
+ 40: `GvCeMneg`,
+ 41: `GvVtaDA`,
+ 42: `GvUSneg`,
+ 43: `GvUSnegRaw`,
+ 44: `GvDrives`,
+ 45: `GvUSpos`,
+ 46: `GvVSPatch`,
+ 47: `GvVSPatchPrev`,
+ 48: `GvOFCposUSPTMaint`,
+ 49: `GvVSMatrixPoolGated`,
+}
+
+// String returns the string representation
+// of this GlobalVars value.
+func (i GlobalVars) String() string {
+ if str, ok := _GlobalVarsMap[i]; ok {
+ return str
+ }
+ return strconv.FormatInt(int64(i), 10)
+}
+
+// SetString sets the GlobalVars value from its
+// string representation, and returns an
+// error if the string is invalid.
+func (i *GlobalVars) SetString(s string) error {
+ if val, ok := _GlobalVarsNameToValueMap[s]; ok {
+ *i = val
+ return nil
+ }
+ if val, ok := _GlobalVarsNameToValueMap[strings.ToLower(s)]; ok {
+ *i = val
+ return nil
+ }
+ return errors.New(s + " is not a valid value for type GlobalVars")
+}
+
+// Int64 returns the GlobalVars value as an int64.
+func (i GlobalVars) Int64() int64 {
+ return int64(i)
+}
+
+// SetInt64 sets the GlobalVars value from an int64.
+func (i *GlobalVars) SetInt64(in int64) {
+ *i = GlobalVars(in)
+}
+
+// Desc returns the description of the GlobalVars value.
+func (i GlobalVars) Desc() string {
+ if str, ok := _GlobalVarsDescMap[i]; ok {
+ return str
+ }
+ return i.String()
+}
+
+// GlobalVarsValues returns all possible values
+// for the type GlobalVars.
+func GlobalVarsValues() []GlobalVars {
+ return _GlobalVarsValues
+}
+
+// Values returns all possible values
+// for the type GlobalVars.
+func (i GlobalVars) Values() []enums.Enum {
+ res := make([]enums.Enum, len(_GlobalVarsValues))
+ for i, d := range _GlobalVarsValues {
+ res[i] = d
+ }
+ return res
+}
+
+// IsValid returns whether the value is a
+// valid option for type GlobalVars.
+func (i GlobalVars) IsValid() bool {
+ _, ok := _GlobalVarsMap[i]
+ return ok
+}
+
+// MarshalText implements the [encoding.TextMarshaler] interface.
+func (i GlobalVars) MarshalText() ([]byte, error) {
+ return []byte(i.String()), nil
+}
+
+// UnmarshalText implements the [encoding.TextUnmarshaler] interface.
+func (i *GlobalVars) UnmarshalText(text []byte) error {
+ if err := i.SetString(string(text)); err != nil {
+ log.Println(err)
+ }
+ return nil
+}
+
+var _LayerTypesValues = []LayerTypes{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30}
+
+// LayerTypesN is the highest valid value
+// for type LayerTypes, plus one.
+const LayerTypesN LayerTypes = 31
+
+// An "invalid array index" compiler error signifies that the constant values have changed.
+// Re-run the enumgen command to generate them again.
+func _LayerTypesNoOp() {
+ var x [1]struct{}
+ _ = x[SuperLayer-(0)]
+ _ = x[InputLayer-(1)]
+ _ = x[TargetLayer-(2)]
+ _ = x[CompareLayer-(3)]
+ _ = x[CTLayer-(4)]
+ _ = x[PulvinarLayer-(5)]
+ _ = x[TRNLayer-(6)]
+ _ = x[PTMaintLayer-(7)]
+ _ = x[PTPredLayer-(8)]
+ _ = x[PTNotMaintLayer-(9)]
+ _ = x[MatrixLayer-(10)]
+ _ = x[STNLayer-(11)]
+ _ = x[GPLayer-(12)]
+ _ = x[BGThalLayer-(13)]
+ _ = x[VSGatedLayer-(14)]
+ _ = x[BLALayer-(15)]
+ _ = x[CeMLayer-(16)]
+ _ = x[VSPatchLayer-(17)]
+ _ = x[LHbLayer-(18)]
+ _ = x[DrivesLayer-(19)]
+ _ = x[UrgencyLayer-(20)]
+ _ = x[USLayer-(21)]
+ _ = x[PVLayer-(22)]
+ _ = x[LDTLayer-(23)]
+ _ = x[VTALayer-(24)]
+ _ = x[RewLayer-(25)]
+ _ = x[RWPredLayer-(26)]
+ _ = x[RWDaLayer-(27)]
+ _ = x[TDPredLayer-(28)]
+ _ = x[TDIntegLayer-(29)]
+ _ = x[TDDaLayer-(30)]
+}
+
+var _LayerTypesNameToValueMap = map[string]LayerTypes{
+ `SuperLayer`: 0,
+ `superlayer`: 0,
+ `InputLayer`: 1,
+ `inputlayer`: 1,
+ `TargetLayer`: 2,
+ `targetlayer`: 2,
+ `CompareLayer`: 3,
+ `comparelayer`: 3,
+ `CTLayer`: 4,
+ `ctlayer`: 4,
+ `PulvinarLayer`: 5,
+ `pulvinarlayer`: 5,
+ `TRNLayer`: 6,
+ `trnlayer`: 6,
+ `PTMaintLayer`: 7,
+ `ptmaintlayer`: 7,
+ `PTPredLayer`: 8,
+ `ptpredlayer`: 8,
+ `PTNotMaintLayer`: 9,
+ `ptnotmaintlayer`: 9,
+ `MatrixLayer`: 10,
+ `matrixlayer`: 10,
+ `STNLayer`: 11,
+ `stnlayer`: 11,
+ `GPLayer`: 12,
+ `gplayer`: 12,
+ `BGThalLayer`: 13,
+ `bgthallayer`: 13,
+ `VSGatedLayer`: 14,
+ `vsgatedlayer`: 14,
+ `BLALayer`: 15,
+ `blalayer`: 15,
+ `CeMLayer`: 16,
+ `cemlayer`: 16,
+ `VSPatchLayer`: 17,
+ `vspatchlayer`: 17,
+ `LHbLayer`: 18,
+ `lhblayer`: 18,
+ `DrivesLayer`: 19,
+ `driveslayer`: 19,
+ `UrgencyLayer`: 20,
+ `urgencylayer`: 20,
+ `USLayer`: 21,
+ `uslayer`: 21,
+ `PVLayer`: 22,
+ `pvlayer`: 22,
+ `LDTLayer`: 23,
+ `ldtlayer`: 23,
+ `VTALayer`: 24,
+ `vtalayer`: 24,
+ `RewLayer`: 25,
+ `rewlayer`: 25,
+ `RWPredLayer`: 26,
+ `rwpredlayer`: 26,
+ `RWDaLayer`: 27,
+ `rwdalayer`: 27,
+ `TDPredLayer`: 28,
+ `tdpredlayer`: 28,
+ `TDIntegLayer`: 29,
+ `tdinteglayer`: 29,
+ `TDDaLayer`: 30,
+ `tddalayer`: 30,
+}
+
+var _LayerTypesDescMap = map[LayerTypes]string{
+ 0: `Super is a superficial cortical layer (lamina 2-3-4) which does not receive direct input or targets. In more generic models, it should be used as a Hidden layer, and maps onto the Hidden type in emer.LayerType.`,
+ 1: `Input is a layer that receives direct external input in its Ext inputs. Biologically, it can be a primary sensory layer, or a thalamic layer.`,
+ 2: `Target is a layer that receives direct external target inputs used for driving plus-phase learning. Simple target layers are generally not used in more biological models, which instead use predictive learning via Pulvinar or related mechanisms.`,
+ 3: `Compare is a layer that receives external comparison inputs, which drive statistics but do NOT drive activation or learning directly. It is rarely used in axon.`,
+ 4: `CT are layer 6 corticothalamic projecting neurons, which drive "top down" predictions in Pulvinar layers. They maintain information over time via stronger NMDA channels and use maintained prior state information to generate predictions about current states forming on Super layers that then drive PT (5IB) bursting activity, which are the plus-phase drivers of Pulvinar activity.`,
+ 5: `Pulvinar are thalamic relay cell neurons in the higher-order Pulvinar nucleus of the thalamus, and functionally isomorphic neurons in the MD thalamus, and potentially other areas. These cells alternately reflect predictions driven by CT projections, and actual outcomes driven by 5IB Burst activity from corresponding PT or Super layer neurons that provide strong driving inputs.`,
+ 6: `TRNLayer is thalamic reticular nucleus layer for inhibitory competition within the thalamus.`,
+ 7: `PTMaintLayer implements the subset of pyramidal tract (PT) layer 5 intrinsic bursting (5IB) deep neurons that exhibit robust, stable maintenance of activity over the duration of a goal engaged window, modulated by basal ganglia (BG) disinhibitory gating, supported by strong MaintNMDA channels and recurrent excitation. The lateral PTSelfMaint projection uses MaintG to drive GMaintRaw input that feeds into the stronger, longer MaintNMDA channels, and the ThalToPT ModulatoryG projection from BGThalamus multiplicatively modulates the strength of other inputs, such that only at the time of BG gating are these strong enough to drive sustained active maintenance. Use Act.Dend.ModGain to parameterize.`,
+ 8: `PTPredLayer implements the subset of pyramidal tract (PT) layer 5 intrinsic bursting (5IB) deep neurons that combine modulatory input from PTMaintLayer sustained maintenance and CTLayer dynamic predictive learning that helps to predict state changes during the period of active goal maintenance. This layer provides the primary input to VSPatch US-timing prediction layers, and other layers that require predictive dynamics.`,
+ 9: `PTNotMaintLayer implements a tonically active layer that is inhibited by the PTMaintLayer, thereby providing an active representation of the *absence* of maintained PT activity, which is useful for driving appropriate actions (e.g., exploration) when not in goal-engaged mode.`,
+ 10: `MatrixLayer represents the matrisome medium spiny neurons (MSNs) that are the main Go / NoGo gating units in BG. These are strongly modulated by phasic dopamine: D1 = Go, D2 = NoGo.`,
+ 11: `STNLayer represents subthalamic nucleus neurons, with two subtypes: STNp are more strongly driven and get over bursting threshold, driving strong, rapid activation of the KCa channels, causing a long pause in firing, which creates a window during which GPe dynamics resolve Go vs. No balance. STNs are more weakly driven and thus more slowly activate KCa, resulting in a longer period of activation, during which the GPi is inhibited to prevent premature gating based only on MtxGo inhibition -- gating only occurs when the GPeIn signal has had a chance to integrate its MtxNo inputs.`,
+ 12: `GPLayer represents a globus pallidus layer in the BG, including: GPeOut, GPeIn, GPeTA (arkypallidal), and GPi. Typically just a single unit per Pool representing a given stripe.`,
+ 13: `BGThalLayer represents a BG gated thalamic layer, which receives BG gating in the form of an inhibitory projection from GPi. Located mainly in the Ventral thalamus: VA / VM / VL, and also parts of MD mediodorsal thalamus.`,
+ 14: `VSGated represents explicit coding of VS gating status: JustGated and HasGated (since last US or failed predicted US), for visualization and / or motor action signaling.`,
+ 15: `BLALayer represents a basolateral amygdala layer which learns to associate arbitrary stimuli (CSs) with behaviorally salient outcomes (USs)`,
+ 16: `CeMLayer represents a central nucleus of the amygdala layer.`,
+ 17: `VSPatchLayer represents a ventral striatum patch layer, which learns to represent the expected amount of dopamine reward and projects both directly with shunting inhibition to the VTA and indirectly via the LHb / RMTg to cancel phasic dopamine firing to expected rewards (i.e., reward prediction error).`,
+ 18: `LHbLayer represents the lateral habenula, which drives dipping in the VTA. It tracks the Global LHb values for visualization purposes -- updated by VTALayer.`,
+ 19: `DrivesLayer represents the Drives in PVLV framework. It tracks the Global Drives values for visualization and predictive learning purposes.`,
+ 20: `UrgencyLayer represents the Urgency factor in PVLV framework. It tracks the Global Urgency.Urge value for visualization and predictive learning purposes.`,
+ 21: `USLayer represents a US unconditioned stimulus layer (USpos or USneg). It tracks the Global USpos or USneg, for visualization and predictive learning purposes. Actual US inputs are set in PVLV.`,
+ 22: `PVLayer represents a PV primary value layer (PVpos or PVneg) representing the total primary value as a function of US inputs, drives, and effort. It tracks the Global VTA.PVpos, PVneg values for visualization and predictive learning purposes.`,
+ 23: `LDTLayer represents the laterodorsal tegmentum layer, which is the primary limbic ACh (acetylcholine) driver to other ACh: BG cholinergic interneurons (CIN) and nucleus basalis ACh areas. The phasic ACh release signals reward salient inputs from CS, US and US omission, and it drives widespread disinhibition of BG gating and VTA DA firing. It receives excitation from superior colliculus which computes a temporal derivative (stimulus specific adaptation, SSA) of sensory inputs, and inhibitory input from OFC, ACC driving suppression of distracting inputs during goal-engaged states.`,
+ 24: `VTALayer represents the ventral tegmental area, which releases dopamine. It computes final DA value from PVLV-computed LHb PVDA (primary value DA), updated at start of each trial from updated US, Effort, etc state, and cycle-by-cycle LV learned value state reflecting CS inputs, in the Amygdala (CeM). Its activity reflects this DA level, which is effectively broadcast via Global state values to all layers.`,
+ 25: `RewLayer represents positive or negative reward values across 2 units, showing spiking rates for each, and Act always represents signed value.`,
+ 26: `RWPredLayer computes reward prediction for a simple Rescorla-Wagner learning dynamic (i.e., PV learning in the PVLV framework). Activity is computed as linear function of excitatory conductance (which can be negative -- there are no constraints). Use with RWPrjn which does simple delta-rule learning on minus-plus.`,
+ 27: `RWDaLayer computes a dopamine (DA) signal based on a simple Rescorla-Wagner learning dynamic (i.e., PV learning in the PVLV framework). It computes difference between r(t) and RWPred values. r(t) is accessed directly from a Rew layer -- if no external input then no DA is computed -- critical for effective use of RW only for PV cases. RWPred prediction is also accessed directly from Rew layer to avoid any issues.`,
+ 28: `TDPredLayer is the temporal differences reward prediction layer. It represents estimated value V(t) in the minus phase, and computes estimated V(t+1) based on its learned weights in plus phase, using the TDPredPrjn projection type for DA modulated learning.`,
+ 29: `TDIntegLayer is the temporal differences reward integration layer. It represents estimated value V(t) from prior time step in the minus phase, and estimated discount * V(t+1) + r(t) in the plus phase. It gets Rew, PrevPred from Context.NeuroMod, and Special LayerVals from TDPredLayer.`,
+ 30: `TDDaLayer computes a dopamine (DA) signal as the temporal difference (TD) between the TDIntegLayer activations in the minus and plus phase. These are retrieved from Special LayerVals.`,
+}
+
+var _LayerTypesMap = map[LayerTypes]string{
+ 0: `SuperLayer`,
+ 1: `InputLayer`,
+ 2: `TargetLayer`,
+ 3: `CompareLayer`,
+ 4: `CTLayer`,
+ 5: `PulvinarLayer`,
+ 6: `TRNLayer`,
+ 7: `PTMaintLayer`,
+ 8: `PTPredLayer`,
+ 9: `PTNotMaintLayer`,
+ 10: `MatrixLayer`,
+ 11: `STNLayer`,
+ 12: `GPLayer`,
+ 13: `BGThalLayer`,
+ 14: `VSGatedLayer`,
+ 15: `BLALayer`,
+ 16: `CeMLayer`,
+ 17: `VSPatchLayer`,
+ 18: `LHbLayer`,
+ 19: `DrivesLayer`,
+ 20: `UrgencyLayer`,
+ 21: `USLayer`,
+ 22: `PVLayer`,
+ 23: `LDTLayer`,
+ 24: `VTALayer`,
+ 25: `RewLayer`,
+ 26: `RWPredLayer`,
+ 27: `RWDaLayer`,
+ 28: `TDPredLayer`,
+ 29: `TDIntegLayer`,
+ 30: `TDDaLayer`,
+}
+
+// String returns the string representation
+// of this LayerTypes value.
+func (i LayerTypes) String() string {
+ if str, ok := _LayerTypesMap[i]; ok {
+ return str
+ }
+ return strconv.FormatInt(int64(i), 10)
+}
+
+// SetString sets the LayerTypes value from its
+// string representation, and returns an
+// error if the string is invalid.
+func (i *LayerTypes) SetString(s string) error {
+ if val, ok := _LayerTypesNameToValueMap[s]; ok {
+ *i = val
+ return nil
+ }
+ if val, ok := _LayerTypesNameToValueMap[strings.ToLower(s)]; ok {
+ *i = val
+ return nil
+ }
+ return errors.New(s + " is not a valid value for type LayerTypes")
+}
+
+// Int64 returns the LayerTypes value as an int64.
+func (i LayerTypes) Int64() int64 {
+ return int64(i)
+}
+
+// SetInt64 sets the LayerTypes value from an int64.
+func (i *LayerTypes) SetInt64(in int64) {
+ *i = LayerTypes(in)
+}
+
+// Desc returns the description of the LayerTypes value.
+func (i LayerTypes) Desc() string {
+ if str, ok := _LayerTypesDescMap[i]; ok {
+ return str
+ }
+ return i.String()
+}
+
+// LayerTypesValues returns all possible values
+// for the type LayerTypes.
+func LayerTypesValues() []LayerTypes {
+ return _LayerTypesValues
+}
+
+// Values returns all possible values
+// for the type LayerTypes.
+func (i LayerTypes) Values() []enums.Enum {
+ res := make([]enums.Enum, len(_LayerTypesValues))
+ for i, d := range _LayerTypesValues {
+ res[i] = d
+ }
+ return res
+}
+
+// IsValid returns whether the value is a
+// valid option for type LayerTypes.
+func (i LayerTypes) IsValid() bool {
+ _, ok := _LayerTypesMap[i]
+ return ok
+}
+
+// MarshalText implements the [encoding.TextMarshaler] interface.
+func (i LayerTypes) MarshalText() ([]byte, error) {
+ return []byte(i.String()), nil
+}
+
+// UnmarshalText implements the [encoding.TextUnmarshaler] interface.
+func (i *LayerTypes) UnmarshalText(text []byte) error {
+ if err := i.SetString(string(text)); err != nil {
+ log.Println(err)
+ }
+ return nil
+}
+
+var _DAModTypesValues = []DAModTypes{0, 1, 2, 3}
+
+// DAModTypesN is the highest valid value
+// for type DAModTypes, plus one.
+const DAModTypesN DAModTypes = 4
+
+// An "invalid array index" compiler error signifies that the constant values have changed.
+// Re-run the enumgen command to generate them again.
+func _DAModTypesNoOp() {
+ var x [1]struct{}
+ _ = x[NoDAMod-(0)]
+ _ = x[D1Mod-(1)]
+ _ = x[D2Mod-(2)]
+ _ = x[D1AbsMod-(3)]
+}
+
+var _DAModTypesNameToValueMap = map[string]DAModTypes{
+ `NoDAMod`: 0,
+ `nodamod`: 0,
+ `D1Mod`: 1,
+ `d1mod`: 1,
+ `D2Mod`: 2,
+ `d2mod`: 2,
+ `D1AbsMod`: 3,
+ `d1absmod`: 3,
+}
+
+var _DAModTypesDescMap = map[DAModTypes]string{
+ 0: `NoDAMod means there is no effect of dopamine on neural activity`,
+ 1: `D1Mod is for neurons that primarily express dopamine D1 receptors, which are excitatory from DA bursts, inhibitory from dips. Cortical neurons can generally use this type, while subcortical populations are more diverse in having both D1 and D2 subtypes.`,
+ 2: `D2Mod is for neurons that primarily express dopamine D2 receptors, which are excitatory from DA dips, inhibitory from bursts.`,
+ 3: `D1AbsMod is like D1Mod, except the absolute value of DA is used instead of the signed value. There are a subset of DA neurons that send increased DA for both negative and positive outcomes, targeting frontal neurons.`,
+}
+
+var _DAModTypesMap = map[DAModTypes]string{
+ 0: `NoDAMod`,
+ 1: `D1Mod`,
+ 2: `D2Mod`,
+ 3: `D1AbsMod`,
+}
+
+// String returns the string representation
+// of this DAModTypes value.
+func (i DAModTypes) String() string {
+ if str, ok := _DAModTypesMap[i]; ok {
+ return str
+ }
+ return strconv.FormatInt(int64(i), 10)
+}
+
+// SetString sets the DAModTypes value from its
+// string representation, and returns an
+// error if the string is invalid.
+func (i *DAModTypes) SetString(s string) error {
+ if val, ok := _DAModTypesNameToValueMap[s]; ok {
+ *i = val
+ return nil
+ }
+ if val, ok := _DAModTypesNameToValueMap[strings.ToLower(s)]; ok {
+ *i = val
+ return nil
+ }
+ return errors.New(s + " is not a valid value for type DAModTypes")
+}
+
+// Int64 returns the DAModTypes value as an int64.
+func (i DAModTypes) Int64() int64 {
+ return int64(i)
+}
+
+// SetInt64 sets the DAModTypes value from an int64.
+func (i *DAModTypes) SetInt64(in int64) {
+ *i = DAModTypes(in)
+}
+
+// Desc returns the description of the DAModTypes value.
+func (i DAModTypes) Desc() string {
+ if str, ok := _DAModTypesDescMap[i]; ok {
+ return str
+ }
+ return i.String()
+}
+
+// DAModTypesValues returns all possible values
+// for the type DAModTypes.
+func DAModTypesValues() []DAModTypes {
+ return _DAModTypesValues
+}
+
+// Values returns all possible values
+// for the type DAModTypes.
+func (i DAModTypes) Values() []enums.Enum {
+ res := make([]enums.Enum, len(_DAModTypesValues))
+ for i, d := range _DAModTypesValues {
+ res[i] = d
+ }
+ return res
+}
+
+// IsValid returns whether the value is a
+// valid option for type DAModTypes.
+func (i DAModTypes) IsValid() bool {
+ _, ok := _DAModTypesMap[i]
+ return ok
+}
+
+// MarshalText implements the [encoding.TextMarshaler] interface.
+func (i DAModTypes) MarshalText() ([]byte, error) {
+ return []byte(i.String()), nil
+}
+
+// UnmarshalText implements the [encoding.TextUnmarshaler] interface.
+func (i *DAModTypes) UnmarshalText(text []byte) error {
+ if err := i.SetString(string(text)); err != nil {
+ log.Println(err)
+ }
+ return nil
+}
+
+var _ValenceTypesValues = []ValenceTypes{0, 1}
+
+// ValenceTypesN is the highest valid value
+// for type ValenceTypes, plus one.
+const ValenceTypesN ValenceTypes = 2
+
+// An "invalid array index" compiler error signifies that the constant values have changed.
+// Re-run the enumgen command to generate them again.
+func _ValenceTypesNoOp() {
+ var x [1]struct{}
+ _ = x[Positive-(0)]
+ _ = x[Negative-(1)]
+}
+
+var _ValenceTypesNameToValueMap = map[string]ValenceTypes{
+ `Positive`: 0,
+ `positive`: 0,
+ `Negative`: 1,
+ `negative`: 1,
+}
+
+var _ValenceTypesDescMap = map[ValenceTypes]string{
+ 0: `Positive valence codes for outcomes aligned with drives / goals.`,
+ 1: `Negative valence codes for harmful or aversive outcomes.`,
+}
+
+var _ValenceTypesMap = map[ValenceTypes]string{
+ 0: `Positive`,
+ 1: `Negative`,
+}
+
+// String returns the string representation
+// of this ValenceTypes value.
+func (i ValenceTypes) String() string {
+ if str, ok := _ValenceTypesMap[i]; ok {
+ return str
+ }
+ return strconv.FormatInt(int64(i), 10)
+}
+
+// SetString sets the ValenceTypes value from its
+// string representation, and returns an
+// error if the string is invalid.
+func (i *ValenceTypes) SetString(s string) error {
+ if val, ok := _ValenceTypesNameToValueMap[s]; ok {
+ *i = val
+ return nil
+ }
+ if val, ok := _ValenceTypesNameToValueMap[strings.ToLower(s)]; ok {
+ *i = val
+ return nil
+ }
+ return errors.New(s + " is not a valid value for type ValenceTypes")
+}
+
+// Int64 returns the ValenceTypes value as an int64.
+func (i ValenceTypes) Int64() int64 {
+ return int64(i)
+}
+
+// SetInt64 sets the ValenceTypes value from an int64.
+func (i *ValenceTypes) SetInt64(in int64) {
+ *i = ValenceTypes(in)
+}
+
+// Desc returns the description of the ValenceTypes value.
+func (i ValenceTypes) Desc() string {
+ if str, ok := _ValenceTypesDescMap[i]; ok {
+ return str
+ }
+ return i.String()
+}
+
+// ValenceTypesValues returns all possible values
+// for the type ValenceTypes.
+func ValenceTypesValues() []ValenceTypes {
+ return _ValenceTypesValues
+}
+
+// Values returns all possible values
+// for the type ValenceTypes.
+func (i ValenceTypes) Values() []enums.Enum {
+ res := make([]enums.Enum, len(_ValenceTypesValues))
+ for i, d := range _ValenceTypesValues {
+ res[i] = d
+ }
+ return res
+}
+
+// IsValid returns whether the value is a
+// valid option for type ValenceTypes.
+func (i ValenceTypes) IsValid() bool {
+ _, ok := _ValenceTypesMap[i]
+ return ok
+}
+
+// MarshalText implements the [encoding.TextMarshaler] interface.
+func (i ValenceTypes) MarshalText() ([]byte, error) {
+ return []byte(i.String()), nil
+}
+
+// UnmarshalText implements the [encoding.TextUnmarshaler] interface.
+func (i *ValenceTypes) UnmarshalText(text []byte) error {
+ if err := i.SetString(string(text)); err != nil {
+ log.Println(err)
+ }
+ return nil
+}
+
+var _NeuronFlagsValues = []NeuronFlags{1, 2, 4, 8}
+
+// NeuronFlagsN is the highest valid value
+// for type NeuronFlags, plus one.
+const NeuronFlagsN NeuronFlags = 9
+
+// An "invalid array index" compiler error signifies that the constant values have changed.
+// Re-run the enumgen command to generate them again.
+func _NeuronFlagsNoOp() {
+ var x [1]struct{}
+ _ = x[NeuronOff-(1)]
+ _ = x[NeuronHasExt-(2)]
+ _ = x[NeuronHasTarg-(4)]
+ _ = x[NeuronHasCmpr-(8)]
+}
+
+var _NeuronFlagsNameToValueMap = map[string]NeuronFlags{
+ `NeuronOff`: 1,
+ `neuronoff`: 1,
+ `NeuronHasExt`: 2,
+ `neuronhasext`: 2,
+ `NeuronHasTarg`: 4,
+ `neuronhastarg`: 4,
+ `NeuronHasCmpr`: 8,
+ `neuronhascmpr`: 8,
+}
+
+var _NeuronFlagsDescMap = map[NeuronFlags]string{
+ 1: `NeuronOff flag indicates that this neuron has been turned off (i.e., lesioned)`,
+ 2: `NeuronHasExt means the neuron has external input in its Ext field`,
+ 4: `NeuronHasTarg means the neuron has external target input in its Target field`,
+	8: `NeuronHasCmpr means the neuron has external comparison input in its Target field -- used for computing comparison statistics but does not ever drive neural activity`,
+}
+
+var _NeuronFlagsMap = map[NeuronFlags]string{
+ 1: `NeuronOff`,
+ 2: `NeuronHasExt`,
+ 4: `NeuronHasTarg`,
+ 8: `NeuronHasCmpr`,
+}
+
+// String returns the string representation
+// of this NeuronFlags value.
+func (i NeuronFlags) String() string {
+ if str, ok := _NeuronFlagsMap[i]; ok {
+ return str
+ }
+ return strconv.FormatInt(int64(i), 10)
+}
+
+// SetString sets the NeuronFlags value from its
+// string representation, and returns an
+// error if the string is invalid.
+func (i *NeuronFlags) SetString(s string) error {
+ if val, ok := _NeuronFlagsNameToValueMap[s]; ok {
+ *i = val
+ return nil
+ }
+ if val, ok := _NeuronFlagsNameToValueMap[strings.ToLower(s)]; ok {
+ *i = val
+ return nil
+ }
+ return errors.New(s + " is not a valid value for type NeuronFlags")
+}
+
+// Int64 returns the NeuronFlags value as an int64.
+func (i NeuronFlags) Int64() int64 {
+ return int64(i)
+}
+
+// SetInt64 sets the NeuronFlags value from an int64.
+func (i *NeuronFlags) SetInt64(in int64) {
+ *i = NeuronFlags(in)
+}
+
+// Desc returns the description of the NeuronFlags value.
+func (i NeuronFlags) Desc() string {
+ if str, ok := _NeuronFlagsDescMap[i]; ok {
+ return str
+ }
+ return i.String()
+}
+
+// NeuronFlagsValues returns all possible values
+// for the type NeuronFlags.
+func NeuronFlagsValues() []NeuronFlags {
+ return _NeuronFlagsValues
+}
+
+// Values returns all possible values
+// for the type NeuronFlags.
+func (i NeuronFlags) Values() []enums.Enum {
+ res := make([]enums.Enum, len(_NeuronFlagsValues))
+ for i, d := range _NeuronFlagsValues {
+ res[i] = d
+ }
+ return res
+}
+
+// IsValid returns whether the value is a
+// valid option for type NeuronFlags.
+func (i NeuronFlags) IsValid() bool {
+ _, ok := _NeuronFlagsMap[i]
+ return ok
+}
+
+// MarshalText implements the [encoding.TextMarshaler] interface.
+func (i NeuronFlags) MarshalText() ([]byte, error) {
+ return []byte(i.String()), nil
+}
+
+// UnmarshalText implements the [encoding.TextUnmarshaler] interface.
+func (i *NeuronFlags) UnmarshalText(text []byte) error {
+ if err := i.SetString(string(text)); err != nil {
+ log.Println(err)
+ }
+ return nil
+}
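+
+One caveat for this type: unlike the other enums in this file, the NeuronFlags values (1, 2, 4, 8) are bit masks, so flags combine with bitwise OR -- but the generated maps only name the four single-bit values, so a combined value falls back to its numeric string and reports IsValid() == false. A hedged sketch (same assumed import path as above):
+
+    package main
+
+    import (
+        "fmt"
+
+        "github.com/emer/axon/axon" // assumed import path
+    )
+
+    func main() {
+        flags := axon.NeuronOff | axon.NeuronHasExt // bitwise OR == 3
+        fmt.Println(flags.String())               // "3": not a key in _NeuronFlagsMap
+        fmt.Println(flags.IsValid())              // false, though each bit is valid
+        fmt.Println(flags&axon.NeuronHasExt != 0) // true: test a single bit
+    }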
+
+var _NeuronVarsValues = []NeuronVars{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79}
+
+// NeuronVarsN is the highest valid value
+// for type NeuronVars, plus one.
+const NeuronVarsN NeuronVars = 80
+
+// An "invalid array index" compiler error signifies that the constant values have changed.
+// Re-run the enumgen command to generate them again.
+func _NeuronVarsNoOp() {
+ var x [1]struct{}
+ _ = x[Spike-(0)]
+ _ = x[Spiked-(1)]
+ _ = x[Act-(2)]
+ _ = x[ActInt-(3)]
+ _ = x[ActM-(4)]
+ _ = x[ActP-(5)]
+ _ = x[Ext-(6)]
+ _ = x[Target-(7)]
+ _ = x[Ge-(8)]
+ _ = x[Gi-(9)]
+ _ = x[Gk-(10)]
+ _ = x[Inet-(11)]
+ _ = x[Vm-(12)]
+ _ = x[VmDend-(13)]
+ _ = x[ISI-(14)]
+ _ = x[ISIAvg-(15)]
+ _ = x[CaSpkP-(16)]
+ _ = x[CaSpkD-(17)]
+ _ = x[CaSyn-(18)]
+ _ = x[CaSpkM-(19)]
+ _ = x[CaSpkPM-(20)]
+ _ = x[CaLrn-(21)]
+ _ = x[NrnCaM-(22)]
+ _ = x[NrnCaP-(23)]
+ _ = x[NrnCaD-(24)]
+ _ = x[CaDiff-(25)]
+ _ = x[Attn-(26)]
+ _ = x[RLRate-(27)]
+ _ = x[SpkMaxCa-(28)]
+ _ = x[SpkMax-(29)]
+ _ = x[SpkPrv-(30)]
+ _ = x[SpkSt1-(31)]
+ _ = x[SpkSt2-(32)]
+ _ = x[GeNoiseP-(33)]
+ _ = x[GeNoise-(34)]
+ _ = x[GiNoiseP-(35)]
+ _ = x[GiNoise-(36)]
+ _ = x[GeExt-(37)]
+ _ = x[GeRaw-(38)]
+ _ = x[GeSyn-(39)]
+ _ = x[GiRaw-(40)]
+ _ = x[GiSyn-(41)]
+ _ = x[GeInt-(42)]
+ _ = x[GeIntNorm-(43)]
+ _ = x[GiInt-(44)]
+ _ = x[GModRaw-(45)]
+ _ = x[GModSyn-(46)]
+ _ = x[GMaintRaw-(47)]
+ _ = x[GMaintSyn-(48)]
+ _ = x[SSGi-(49)]
+ _ = x[SSGiDend-(50)]
+ _ = x[Gak-(51)]
+ _ = x[MahpN-(52)]
+ _ = x[SahpCa-(53)]
+ _ = x[SahpN-(54)]
+ _ = x[GknaMed-(55)]
+ _ = x[GknaSlow-(56)]
+ _ = x[GnmdaSyn-(57)]
+ _ = x[Gnmda-(58)]
+ _ = x[GnmdaMaint-(59)]
+ _ = x[GnmdaLrn-(60)]
+ _ = x[NmdaCa-(61)]
+ _ = x[GgabaB-(62)]
+ _ = x[GABAB-(63)]
+ _ = x[GABABx-(64)]
+ _ = x[Gvgcc-(65)]
+ _ = x[VgccM-(66)]
+ _ = x[VgccH-(67)]
+ _ = x[VgccCa-(68)]
+ _ = x[VgccCaInt-(69)]
+ _ = x[SKCaIn-(70)]
+ _ = x[SKCaR-(71)]
+ _ = x[SKCaM-(72)]
+ _ = x[Gsk-(73)]
+ _ = x[Burst-(74)]
+ _ = x[BurstPrv-(75)]
+ _ = x[CtxtGe-(76)]
+ _ = x[CtxtGeRaw-(77)]
+ _ = x[CtxtGeOrig-(78)]
+ _ = x[NrnFlags-(79)]
+}
+
+var _NeuronVarsNameToValueMap = map[string]NeuronVars{
+ `Spike`: 0,
+ `spike`: 0,
+ `Spiked`: 1,
+ `spiked`: 1,
+ `Act`: 2,
+ `act`: 2,
+ `ActInt`: 3,
+ `actint`: 3,
+ `ActM`: 4,
+ `actm`: 4,
+ `ActP`: 5,
+ `actp`: 5,
+ `Ext`: 6,
+ `ext`: 6,
+ `Target`: 7,
+ `target`: 7,
+ `Ge`: 8,
+ `ge`: 8,
+ `Gi`: 9,
+ `gi`: 9,
+ `Gk`: 10,
+ `gk`: 10,
+ `Inet`: 11,
+ `inet`: 11,
+ `Vm`: 12,
+ `vm`: 12,
+ `VmDend`: 13,
+ `vmdend`: 13,
+ `ISI`: 14,
+ `isi`: 14,
+ `ISIAvg`: 15,
+ `isiavg`: 15,
+ `CaSpkP`: 16,
+ `caspkp`: 16,
+ `CaSpkD`: 17,
+ `caspkd`: 17,
+ `CaSyn`: 18,
+ `casyn`: 18,
+ `CaSpkM`: 19,
+ `caspkm`: 19,
+ `CaSpkPM`: 20,
+ `caspkpm`: 20,
+ `CaLrn`: 21,
+ `calrn`: 21,
+ `NrnCaM`: 22,
+ `nrncam`: 22,
+ `NrnCaP`: 23,
+ `nrncap`: 23,
+ `NrnCaD`: 24,
+ `nrncad`: 24,
+ `CaDiff`: 25,
+ `cadiff`: 25,
+ `Attn`: 26,
+ `attn`: 26,
+ `RLRate`: 27,
+ `rlrate`: 27,
+ `SpkMaxCa`: 28,
+ `spkmaxca`: 28,
+ `SpkMax`: 29,
+ `spkmax`: 29,
+ `SpkPrv`: 30,
+ `spkprv`: 30,
+ `SpkSt1`: 31,
+ `spkst1`: 31,
+ `SpkSt2`: 32,
+ `spkst2`: 32,
+ `GeNoiseP`: 33,
+ `genoisep`: 33,
+ `GeNoise`: 34,
+ `genoise`: 34,
+ `GiNoiseP`: 35,
+ `ginoisep`: 35,
+ `GiNoise`: 36,
+ `ginoise`: 36,
+ `GeExt`: 37,
+ `geext`: 37,
+ `GeRaw`: 38,
+ `geraw`: 38,
+ `GeSyn`: 39,
+ `gesyn`: 39,
+ `GiRaw`: 40,
+ `giraw`: 40,
+ `GiSyn`: 41,
+ `gisyn`: 41,
+ `GeInt`: 42,
+ `geint`: 42,
+ `GeIntNorm`: 43,
+ `geintnorm`: 43,
+ `GiInt`: 44,
+ `giint`: 44,
+ `GModRaw`: 45,
+ `gmodraw`: 45,
+ `GModSyn`: 46,
+ `gmodsyn`: 46,
+ `GMaintRaw`: 47,
+ `gmaintraw`: 47,
+ `GMaintSyn`: 48,
+ `gmaintsyn`: 48,
+ `SSGi`: 49,
+ `ssgi`: 49,
+ `SSGiDend`: 50,
+ `ssgidend`: 50,
+ `Gak`: 51,
+ `gak`: 51,
+ `MahpN`: 52,
+ `mahpn`: 52,
+ `SahpCa`: 53,
+ `sahpca`: 53,
+ `SahpN`: 54,
+ `sahpn`: 54,
+ `GknaMed`: 55,
+ `gknamed`: 55,
+ `GknaSlow`: 56,
+ `gknaslow`: 56,
+ `GnmdaSyn`: 57,
+ `gnmdasyn`: 57,
+ `Gnmda`: 58,
+ `gnmda`: 58,
+ `GnmdaMaint`: 59,
+ `gnmdamaint`: 59,
+ `GnmdaLrn`: 60,
+ `gnmdalrn`: 60,
+ `NmdaCa`: 61,
+ `nmdaca`: 61,
+ `GgabaB`: 62,
+ `ggabab`: 62,
+ `GABAB`: 63,
+ `gabab`: 63,
+ `GABABx`: 64,
+ `gababx`: 64,
+ `Gvgcc`: 65,
+ `gvgcc`: 65,
+ `VgccM`: 66,
+ `vgccm`: 66,
+ `VgccH`: 67,
+ `vgcch`: 67,
+ `VgccCa`: 68,
+ `vgccca`: 68,
+ `VgccCaInt`: 69,
+ `vgcccaint`: 69,
+ `SKCaIn`: 70,
+ `skcain`: 70,
+ `SKCaR`: 71,
+ `skcar`: 71,
+ `SKCaM`: 72,
+ `skcam`: 72,
+ `Gsk`: 73,
+ `gsk`: 73,
+ `Burst`: 74,
+ `burst`: 74,
+ `BurstPrv`: 75,
+ `burstprv`: 75,
+ `CtxtGe`: 76,
+ `ctxtge`: 76,
+ `CtxtGeRaw`: 77,
+ `ctxtgeraw`: 77,
+ `CtxtGeOrig`: 78,
+ `ctxtgeorig`: 78,
+ `NrnFlags`: 79,
+ `nrnflags`: 79,
+}
+
+var _NeuronVarsDescMap = map[NeuronVars]string{
+ 0: `Spike is whether neuron has spiked or not on this cycle (0 or 1)`,
+ 1: `Spiked is 1 if neuron has spiked within the last 10 cycles (msecs), corresponding to a nominal max spiking rate of 100 Hz, 0 otherwise -- useful for visualization and computing activity levels in terms of average spiked levels.`,
+ 2: `Act is rate-coded activation value reflecting instantaneous estimated rate of spiking, based on 1 / ISIAvg. This drives feedback inhibition in the FFFB function (todo: this will change when better inhibition is implemented), and is integrated over time for ActInt which is then used for performance statistics and layer average activations, etc. Should not be used for learning or other computations.`,
+ 3: `ActInt is integrated running-average activation value computed from Act with time constant Act.Dt.IntTau, to produce a longer-term integrated value reflecting the overall activation state across the ThetaCycle time scale, as the overall response of network to current input state -- this is copied to ActM and ActP at the ends of the minus and plus phases, respectively, and used in computing performance-level statistics (which are typically based on ActM). Should not be used for learning or other computations.`,
+ 4: `ActM is ActInt activation state at end of third quarter, representing the posterior-cortical minus phase activation -- used for statistics and monitoring network performance. Should not be used for learning or other computations.`,
+	5: `ActP is ActInt activation state at end of fourth quarter, representing the posterior-cortical plus phase activation -- used for statistics and monitoring network performance. Should not be used for learning or other computations.`,
+ 6: `Ext is external input: drives activation of unit from outside influences (e.g., sensory input)`,
+ 7: `Target is the target value: drives learning to produce this activation value`,
+ 8: `Ge is total excitatory conductance, including all forms of excitation (e.g., NMDA) -- does *not* include Gbar.E`,
+ 9: `Gi is total inhibitory synaptic conductance -- the net inhibitory input to the neuron -- does *not* include Gbar.I`,
+ 10: `Gk is total potassium conductance, typically reflecting sodium-gated potassium currents involved in adaptation effects -- does *not* include Gbar.K`,
+ 11: `Inet is net current produced by all channels -- drives update of Vm`,
+ 12: `Vm is membrane potential -- integrates Inet current over time`,
+ 13: `VmDend is dendritic membrane potential -- has a slower time constant, is not subject to the VmR reset after spiking`,
+ 14: `ISI is current inter-spike-interval -- counts up since last spike. Starts at -1 when initialized.`,
+ 15: `ISIAvg is average inter-spike-interval -- average time interval between spikes, integrated with ISITau rate constant (relatively fast) to capture something close to an instantaneous spiking rate. Starts at -1 when initialized, and goes to -2 after first spike, and is only valid after the second spike post-initialization.`,
+ 16: `CaSpkP is continuous cascaded integration of CaSpkM at PTau time constant (typically 40), representing neuron-level purely spiking version of plus, LTP direction of weight change and capturing the function of CaMKII in the Kinase learning rule. Used for specialized learning and computational functions, statistics, instead of Act.`,
+	17: `CaSpkD is continuous cascaded integration of CaSpkP at DTau time constant (typically 40), representing neuron-level purely spiking version of minus, LTD direction of weight change and capturing the function of DAPK1 in the Kinase learning rule. Used for specialized learning and computational functions, statistics, instead of Act.`,
+ 18: `CaSyn is spike-driven calcium trace for synapse-level Ca-driven learning: exponential integration of SpikeG * Spike at SynTau time constant (typically 30). Synapses integrate send.CaSyn * recv.CaSyn across M, P, D time integrals for the synaptic trace driving credit assignment in learning. Time constant reflects binding time of Glu to NMDA and Ca buffering postsynaptically, and determines time window where pre * post spiking must overlap to drive learning.`,
+	19: `CaSpkM is spike-driven calcium trace used as a neuron-level proxy for synaptic credit assignment factor based on continuous time-integrated spiking: exponential integration of SpikeG * Spike at MTau time constant (typically 5). Simulates a calmodulin (CaM) like signal at the most abstract level.`,
+ 20: `CaSpkPM is minus-phase snapshot of the CaSpkP value -- similar to ActM but using a more directly spike-integrated value.`,
+ 21: `CaLrn is recv neuron calcium signal used to drive temporal error difference component of standard learning rule, combining NMDA (NmdaCa) and spiking-driven VGCC (VgccCaInt) calcium sources (vs. CaSpk* which only reflects spiking component). This is integrated into CaM, CaP, CaD, and temporal derivative is CaP - CaD (CaMKII - DAPK1). This approximates the backprop error derivative on net input, but VGCC component adds a proportion of recv activation delta as well -- a balance of both works best. The synaptic-level trace multiplier provides the credit assignment factor, reflecting coincident activity and potentially integrated over longer multi-trial timescales.`,
+ 22: `NrnCaM is integrated CaLrn at MTau timescale (typically 5), simulating a calmodulin (CaM) like signal, which then drives CaP, CaD for delta signal driving error-driven learning.`,
+ 23: `NrnCaP is cascaded integration of CaM at PTau time constant (typically 40), representing the plus, LTP direction of weight change and capturing the function of CaMKII in the Kinase learning rule.`,
+	24: `NrnCaD is cascaded integration of CaP at DTau time constant (typically 40), representing the minus, LTD direction of weight change and capturing the function of DAPK1 in the Kinase learning rule.`,
+ 25: `CaDiff is difference between CaP - CaD -- this is the error signal that drives error-driven learning.`,
+ 26: `Attn is Attentional modulation factor, which can be set by special layers such as the TRC -- multiplies Ge`,
+	27: `RLRate is recv-unit based learning rate multiplier, reflecting the sigmoid derivative computed from the CaSpkD of recv unit, and the normalized difference (CaSpkP - CaSpkD) / MAX(CaSpkP, CaSpkD).`,
+ 28: `SpkMaxCa is Ca integrated like CaSpkP but only starting at MaxCycStart cycle, to prevent inclusion of carryover spiking from prior theta cycle trial -- the PTau time constant otherwise results in significant carryover. This is the input to SpkMax`,
+ 29: `SpkMax is maximum CaSpkP across one theta cycle time window (max of SpkMaxCa) -- used for specialized algorithms that have more phasic behavior within a single trial, e.g., BG Matrix layer gating. Also useful for visualization of peak activity of neurons.`,
+	30: `SpkPrv is final CaSpkD activation state at end of previous theta cycle. Used for specialized learning mechanisms that operate on delayed sending activations.`,
+ 31: `SpkSt1 is the activation state at specific time point within current state processing window (e.g., 50 msec for beta cycle within standard theta cycle), as saved by SpkSt1() function. Used for example in hippocampus for CA3, CA1 learning`,
+ 32: `SpkSt2 is the activation state at specific time point within current state processing window (e.g., 100 msec for beta cycle within standard theta cycle), as saved by SpkSt2() function. Used for example in hippocampus for CA3, CA1 learning`,
+	33: `GeNoiseP is accumulating Poisson probability factor for driving excitatory noise spiking -- multiplied by a uniform random deviate at each time step, until it gets below the target threshold based on lambda.`,
+ 34: `GeNoise is integrated noise excitatory conductance, added into Ge`,
+	35: `GiNoiseP is accumulating Poisson probability factor for driving inhibitory noise spiking -- multiplied by a uniform random deviate at each time step, until it gets below the target threshold based on lambda.`,
+	36: `GiNoise is integrated noise inhibitory conductance, added into Gi`,
+ 37: `GeExt is extra excitatory conductance added to Ge -- from Ext input, GeCtxt etc`,
+ 38: `GeRaw is raw excitatory conductance (net input) received from senders = current raw spiking drive`,
+ 39: `GeSyn is time-integrated total excitatory synaptic conductance, with an instantaneous rise time from each spike (in GeRaw) and exponential decay with Dt.GeTau, aggregated over projections -- does *not* include Gbar.E`,
+ 40: `GiRaw is raw inhibitory conductance (net input) received from senders = current raw spiking drive`,
+ 41: `GiSyn is time-integrated total inhibitory synaptic conductance, with an instantaneous rise time from each spike (in GiRaw) and exponential decay with Dt.GiTau, aggregated over projections -- does *not* include Gbar.I. This is added with computed FFFB inhibition to get the full inhibition in Gi`,
+ 42: `GeInt is integrated running-average activation value computed from Ge with time constant Act.Dt.IntTau, to produce a longer-term integrated value reflecting the overall Ge level across the ThetaCycle time scale (Ge itself fluctuates considerably) -- useful for stats to set strength of connections etc to get neurons into right range of overall excitatory drive`,
+ 43: `GeIntNorm is normalized GeInt value (divided by the layer maximum) -- this is used for learning in layers that require learning on subthreshold activity`,
+ 44: `GiInt is integrated running-average activation value computed from GiSyn with time constant Act.Dt.IntTau, to produce a longer-term integrated value reflecting the overall synaptic Gi level across the ThetaCycle time scale (Gi itself fluctuates considerably) -- useful for stats to set strength of connections etc to get neurons into right range of overall inhibitory drive`,
+ 45: `GModRaw is raw modulatory conductance, received from GType = ModulatoryG projections`,
+ 46: `GModSyn is syn integrated modulatory conductance, received from GType = ModulatoryG projections`,
+ 47: `GMaintRaw is raw maintenance conductance, received from GType = MaintG projections`,
+ 48: `GMaintSyn is syn integrated maintenance conductance, integrated using MaintNMDA params.`,
+ 49: `SSGi is SST+ somatostatin positive slow spiking inhibition`,
+ 50: `SSGiDend is amount of SST+ somatostatin positive slow spiking inhibition applied to dendritic Vm (VmDend)`,
+ 51: `Gak is conductance of A-type K potassium channels`,
+ 52: `MahpN is accumulating voltage-gated gating value for the medium time scale AHP`,
+ 53: `SahpCa is slowly accumulating calcium value that drives the slow AHP`,
+ 54: `SahpN is sAHP gating value`,
+ 55: `GknaMed is conductance of sodium-gated potassium channel (KNa) medium dynamics (Slick) -- produces accommodation / adaptation of firing`,
+ 56: `GknaSlow is conductance of sodium-gated potassium channel (KNa) slow dynamics (Slack) -- produces accommodation / adaptation of firing`,
+ 57: `GnmdaSyn is integrated NMDA recv synaptic current -- adds GeRaw and decays with time constant`,
+ 58: `Gnmda is net postsynaptic (recv) NMDA conductance, after Mg V-gating and Gbar -- added directly to Ge as it has the same reversal potential`,
+ 59: `GnmdaMaint is net postsynaptic maintenance NMDA conductance, computed from GMaintSyn and GMaintRaw, after Mg V-gating and Gbar -- added directly to Ge as it has the same reversal potential`,
+ 60: `GnmdaLrn is learning version of integrated NMDA recv synaptic current -- adds GeRaw and decays with time constant -- drives NmdaCa that then drives CaM for learning`,
+ 61: `NmdaCa is NMDA calcium computed from GnmdaLrn, drives learning via CaM`,
+ 62: `GgabaB is net GABA-B conductance, after Vm gating and Gbar + Gbase -- applies to Gk, not Gi, for GIRK, with .1 reversal potential.`,
+ 63: `GABAB is GABA-B / GIRK activation -- time-integrated value with rise and decay time constants`,
+ 64: `GABABx is GABA-B / GIRK internal drive variable -- gets the raw activation and decays`,
+ 65: `Gvgcc is conductance (via Ca) for VGCC voltage gated calcium channels`,
+ 66: `VgccM is activation gate of VGCC channels`,
+	67: `VgccH is the inactivation gate of VGCC channels`,
+ 68: `VgccCa is instantaneous VGCC calcium flux -- can be driven by spiking or directly from Gvgcc`,
+	69: `VgccCaInt is time-integrated VGCC calcium flux -- this is actually what drives learning`,
+	70: `SKCaIn is intracellular calcium store level, available to be released with spiking as SKCaR, which can bind to SKCa receptors and drive K current. Replenishment is a function of spiking activity being below a threshold`,
+	71: `SKCaR is the released amount of intracellular calcium, from SKCaIn, as a function of spiking events. This can bind to SKCa channels and drive K currents.`,
+ 72: `SKCaM is Calcium-gated potassium channel gating factor, driven by SKCaR via a Hill equation as in chans.SKPCaParams.`,
+ 73: `Gsk is Calcium-gated potassium channel conductance as a function of Gbar * SKCaM.`,
+ 74: `Burst is 5IB bursting activation value, computed by thresholding regular CaSpkP value in Super superficial layers`,
+ 75: `BurstPrv is previous Burst bursting activation from prior time step -- used for context-based learning`,
+ 76: `CtxtGe is context (temporally delayed) excitatory conductance, driven by deep bursting at end of the plus phase, for CT layers.`,
+ 77: `CtxtGeRaw is raw update of context (temporally delayed) excitatory conductance, driven by deep bursting at end of the plus phase, for CT layers.`,
+ 78: `CtxtGeOrig is original CtxtGe value prior to any decay factor -- updates at end of plus phase.`,
+ 79: `NrnFlags are bit flags for binary state variables, which are converted to / from uint32. These need to be in Vars because they can be differential per data (for ext inputs) and are writable (indexes are read only).`,
+}
+
+var _NeuronVarsMap = map[NeuronVars]string{
+ 0: `Spike`,
+ 1: `Spiked`,
+ 2: `Act`,
+ 3: `ActInt`,
+ 4: `ActM`,
+ 5: `ActP`,
+ 6: `Ext`,
+ 7: `Target`,
+ 8: `Ge`,
+ 9: `Gi`,
+ 10: `Gk`,
+ 11: `Inet`,
+ 12: `Vm`,
+ 13: `VmDend`,
+ 14: `ISI`,
+ 15: `ISIAvg`,
+ 16: `CaSpkP`,
+ 17: `CaSpkD`,
+ 18: `CaSyn`,
+ 19: `CaSpkM`,
+ 20: `CaSpkPM`,
+ 21: `CaLrn`,
+ 22: `NrnCaM`,
+ 23: `NrnCaP`,
+ 24: `NrnCaD`,
+ 25: `CaDiff`,
+ 26: `Attn`,
+ 27: `RLRate`,
+ 28: `SpkMaxCa`,
+ 29: `SpkMax`,
+ 30: `SpkPrv`,
+ 31: `SpkSt1`,
+ 32: `SpkSt2`,
+ 33: `GeNoiseP`,
+ 34: `GeNoise`,
+ 35: `GiNoiseP`,
+ 36: `GiNoise`,
+ 37: `GeExt`,
+ 38: `GeRaw`,
+ 39: `GeSyn`,
+ 40: `GiRaw`,
+ 41: `GiSyn`,
+ 42: `GeInt`,
+ 43: `GeIntNorm`,
+ 44: `GiInt`,
+ 45: `GModRaw`,
+ 46: `GModSyn`,
+ 47: `GMaintRaw`,
+ 48: `GMaintSyn`,
+ 49: `SSGi`,
+ 50: `SSGiDend`,
+ 51: `Gak`,
+ 52: `MahpN`,
+ 53: `SahpCa`,
+ 54: `SahpN`,
+ 55: `GknaMed`,
+ 56: `GknaSlow`,
+ 57: `GnmdaSyn`,
+ 58: `Gnmda`,
+ 59: `GnmdaMaint`,
+ 60: `GnmdaLrn`,
+ 61: `NmdaCa`,
+ 62: `GgabaB`,
+ 63: `GABAB`,
+ 64: `GABABx`,
+ 65: `Gvgcc`,
+ 66: `VgccM`,
+ 67: `VgccH`,
+ 68: `VgccCa`,
+ 69: `VgccCaInt`,
+ 70: `SKCaIn`,
+ 71: `SKCaR`,
+ 72: `SKCaM`,
+ 73: `Gsk`,
+ 74: `Burst`,
+ 75: `BurstPrv`,
+ 76: `CtxtGe`,
+ 77: `CtxtGeRaw`,
+ 78: `CtxtGeOrig`,
+ 79: `NrnFlags`,
+}
+
+// String returns the string representation
+// of this NeuronVars value.
+func (i NeuronVars) String() string {
+ if str, ok := _NeuronVarsMap[i]; ok {
+ return str
+ }
+ return strconv.FormatInt(int64(i), 10)
+}
+
+// SetString sets the NeuronVars value from its
+// string representation, and returns an
+// error if the string is invalid.
+func (i *NeuronVars) SetString(s string) error {
+ if val, ok := _NeuronVarsNameToValueMap[s]; ok {
+ *i = val
+ return nil
+ }
+ if val, ok := _NeuronVarsNameToValueMap[strings.ToLower(s)]; ok {
+ *i = val
+ return nil
+ }
+ return errors.New(s + " is not a valid value for type NeuronVars")
+}
+
+// Int64 returns the NeuronVars value as an int64.
+func (i NeuronVars) Int64() int64 {
+ return int64(i)
+}
+
+// SetInt64 sets the NeuronVars value from an int64.
+func (i *NeuronVars) SetInt64(in int64) {
+ *i = NeuronVars(in)
+}
+
+// Desc returns the description of the NeuronVars value.
+func (i NeuronVars) Desc() string {
+ if str, ok := _NeuronVarsDescMap[i]; ok {
+ return str
+ }
+ return i.String()
+}
+
+// NeuronVarsValues returns all possible values
+// for the type NeuronVars.
+func NeuronVarsValues() []NeuronVars {
+ return _NeuronVarsValues
+}
+
+// Values returns all possible values
+// for the type NeuronVars.
+func (i NeuronVars) Values() []enums.Enum {
+ res := make([]enums.Enum, len(_NeuronVarsValues))
+ for i, d := range _NeuronVarsValues {
+ res[i] = d
+ }
+ return res
+}
+
+// IsValid returns whether the value is a
+// valid option for type NeuronVars.
+func (i NeuronVars) IsValid() bool {
+ _, ok := _NeuronVarsMap[i]
+ return ok
+}
+
+// MarshalText implements the [encoding.TextMarshaler] interface.
+func (i NeuronVars) MarshalText() ([]byte, error) {
+ return []byte(i.String()), nil
+}
+
+// UnmarshalText implements the [encoding.TextUnmarshaler] interface.
+func (i *NeuronVars) UnmarshalText(text []byte) error {
+ if err := i.SetString(string(text)); err != nil {
+ log.Println(err)
+ }
+ return nil
+}
+
+var _NeuronAvgVarsValues = []NeuronAvgVars{0, 1, 2, 3, 4, 5, 6}
+
+// NeuronAvgVarsN is the highest valid value
+// for type NeuronAvgVars, plus one.
+const NeuronAvgVarsN NeuronAvgVars = 7
+
+// An "invalid array index" compiler error signifies that the constant values have changed.
+// Re-run the enumgen command to generate them again.
+func _NeuronAvgVarsNoOp() {
+ var x [1]struct{}
+ _ = x[ActAvg-(0)]
+ _ = x[AvgPct-(1)]
+ _ = x[TrgAvg-(2)]
+ _ = x[DTrgAvg-(3)]
+ _ = x[AvgDif-(4)]
+ _ = x[GeBase-(5)]
+ _ = x[GiBase-(6)]
+}
+
+var _NeuronAvgVarsNameToValueMap = map[string]NeuronAvgVars{
+ `ActAvg`: 0,
+ `actavg`: 0,
+ `AvgPct`: 1,
+ `avgpct`: 1,
+ `TrgAvg`: 2,
+ `trgavg`: 2,
+ `DTrgAvg`: 3,
+ `dtrgavg`: 3,
+ `AvgDif`: 4,
+ `avgdif`: 4,
+ `GeBase`: 5,
+ `gebase`: 5,
+ `GiBase`: 6,
+ `gibase`: 6,
+}
+
+var _NeuronAvgVarsDescMap = map[NeuronAvgVars]string{
+ 0: `ActAvg is average activation (of minus phase activation state) over long time intervals (time constant = Dt.LongAvgTau) -- useful for finding hog units and seeing overall distribution of activation`,
+ 1: `AvgPct is ActAvg as a proportion of overall layer activation -- this is used for synaptic scaling to match TrgAvg activation -- updated at SlowInterval intervals`,
+ 2: `TrgAvg is neuron's target average activation as a proportion of overall layer activation, assigned during weight initialization, driving synaptic scaling relative to AvgPct`,
+ 3: `DTrgAvg is change in neuron's target average activation as a result of unit-wise error gradient -- acts like a bias weight. MPI needs to share these across processors.`,
+ 4: `AvgDif is AvgPct - TrgAvg -- i.e., the error in overall activity level relative to set point for this neuron, which drives synaptic scaling -- updated at SlowInterval intervals`,
+ 5: `GeBase is baseline level of Ge, added to GeRaw, for intrinsic excitability`,
+ 6: `GiBase is baseline level of Gi, added to GiRaw, for intrinsic excitability`,
+}
+
+var _NeuronAvgVarsMap = map[NeuronAvgVars]string{
+ 0: `ActAvg`,
+ 1: `AvgPct`,
+ 2: `TrgAvg`,
+ 3: `DTrgAvg`,
+ 4: `AvgDif`,
+ 5: `GeBase`,
+ 6: `GiBase`,
+}
+
+// String returns the string representation
+// of this NeuronAvgVars value.
+func (i NeuronAvgVars) String() string {
+ if str, ok := _NeuronAvgVarsMap[i]; ok {
+ return str
+ }
+ return strconv.FormatInt(int64(i), 10)
+}
+
+// SetString sets the NeuronAvgVars value from its
+// string representation, and returns an
+// error if the string is invalid.
+func (i *NeuronAvgVars) SetString(s string) error {
+ if val, ok := _NeuronAvgVarsNameToValueMap[s]; ok {
+ *i = val
+ return nil
+ }
+ if val, ok := _NeuronAvgVarsNameToValueMap[strings.ToLower(s)]; ok {
+ *i = val
+ return nil
+ }
+ return errors.New(s + " is not a valid value for type NeuronAvgVars")
+}
+
+// Int64 returns the NeuronAvgVars value as an int64.
+func (i NeuronAvgVars) Int64() int64 {
+ return int64(i)
+}
+
+// SetInt64 sets the NeuronAvgVars value from an int64.
+func (i *NeuronAvgVars) SetInt64(in int64) {
+ *i = NeuronAvgVars(in)
+}
+
+// Desc returns the description of the NeuronAvgVars value.
+func (i NeuronAvgVars) Desc() string {
+ if str, ok := _NeuronAvgVarsDescMap[i]; ok {
+ return str
+ }
+ return i.String()
+}
+
+// NeuronAvgVarsValues returns all possible values
+// for the type NeuronAvgVars.
+func NeuronAvgVarsValues() []NeuronAvgVars {
+ return _NeuronAvgVarsValues
+}
+
+// Values returns all possible values
+// for the type NeuronAvgVars.
+func (i NeuronAvgVars) Values() []enums.Enum {
+ res := make([]enums.Enum, len(_NeuronAvgVarsValues))
+ for i, d := range _NeuronAvgVarsValues {
+ res[i] = d
+ }
+ return res
+}
+
+// IsValid returns whether the value is a
+// valid option for type NeuronAvgVars.
+func (i NeuronAvgVars) IsValid() bool {
+ _, ok := _NeuronAvgVarsMap[i]
+ return ok
+}
+
+// MarshalText implements the [encoding.TextMarshaler] interface.
+func (i NeuronAvgVars) MarshalText() ([]byte, error) {
+ return []byte(i.String()), nil
+}
+
+// UnmarshalText implements the [encoding.TextUnmarshaler] interface.
+func (i *NeuronAvgVars) UnmarshalText(text []byte) error {
+ if err := i.SetString(string(text)); err != nil {
+ log.Println(err)
+ }
+ return nil
+}
+
+var _NeuronIdxsValues = []NeuronIdxs{0, 1, 2}
+
+// NeuronIdxsN is the highest valid value
+// for type NeuronIdxs, plus one.
+const NeuronIdxsN NeuronIdxs = 3
+
+// An "invalid array index" compiler error signifies that the constant values have changed.
+// Re-run the enumgen command to generate them again.
+func _NeuronIdxsNoOp() {
+ var x [1]struct{}
+ _ = x[NrnNeurIdx-(0)]
+ _ = x[NrnLayIdx-(1)]
+ _ = x[NrnSubPool-(2)]
+}
+
+var _NeuronIdxsNameToValueMap = map[string]NeuronIdxs{
+ `NrnNeurIdx`: 0,
+ `nrnneuridx`: 0,
+ `NrnLayIdx`: 1,
+ `nrnlayidx`: 1,
+ `NrnSubPool`: 2,
+ `nrnsubpool`: 2,
+}
+
+var _NeuronIdxsDescMap = map[NeuronIdxs]string{
+ 0: `NrnNeurIdx is the index of this neuron within its owning layer`,
+ 1: `NrnLayIdx is the index of the layer that this neuron belongs to, needed for neuron-level parallel code.`,
+	2: `NrnSubPool is the index of the sub-level inhibitory pool for this neuron (only for 4D shapes, the pool (unit-group / hypercolumn) structure level). Indices start at 1 -- 0 is layer-level pool (is 0 if no sub-pools).`,
+}
+
+var _NeuronIdxsMap = map[NeuronIdxs]string{
+ 0: `NrnNeurIdx`,
+ 1: `NrnLayIdx`,
+ 2: `NrnSubPool`,
+}
+
+// String returns the string representation
+// of this NeuronIdxs value.
+func (i NeuronIdxs) String() string {
+ if str, ok := _NeuronIdxsMap[i]; ok {
+ return str
+ }
+ return strconv.FormatInt(int64(i), 10)
+}
+
+// SetString sets the NeuronIdxs value from its
+// string representation, and returns an
+// error if the string is invalid.
+func (i *NeuronIdxs) SetString(s string) error {
+ if val, ok := _NeuronIdxsNameToValueMap[s]; ok {
+ *i = val
+ return nil
+ }
+ if val, ok := _NeuronIdxsNameToValueMap[strings.ToLower(s)]; ok {
+ *i = val
+ return nil
+ }
+ return errors.New(s + " is not a valid value for type NeuronIdxs")
+}
+
+// Int64 returns the NeuronIdxs value as an int64.
+func (i NeuronIdxs) Int64() int64 {
+ return int64(i)
+}
+
+// SetInt64 sets the NeuronIdxs value from an int64.
+func (i *NeuronIdxs) SetInt64(in int64) {
+ *i = NeuronIdxs(in)
+}
+
+// Desc returns the description of the NeuronIdxs value.
+func (i NeuronIdxs) Desc() string {
+ if str, ok := _NeuronIdxsDescMap[i]; ok {
+ return str
+ }
+ return i.String()
+}
+
+// NeuronIdxsValues returns all possible values
+// for the type NeuronIdxs.
+func NeuronIdxsValues() []NeuronIdxs {
+ return _NeuronIdxsValues
+}
+
+// Values returns all possible values
+// for the type NeuronIdxs.
+func (i NeuronIdxs) Values() []enums.Enum {
+ res := make([]enums.Enum, len(_NeuronIdxsValues))
+ for i, d := range _NeuronIdxsValues {
+ res[i] = d
+ }
+ return res
+}
+
+// IsValid returns whether the value is a
+// valid option for type NeuronIdxs.
+func (i NeuronIdxs) IsValid() bool {
+ _, ok := _NeuronIdxsMap[i]
+ return ok
+}
+
+// MarshalText implements the [encoding.TextMarshaler] interface.
+func (i NeuronIdxs) MarshalText() ([]byte, error) {
+ return []byte(i.String()), nil
+}
+
+// UnmarshalText implements the [encoding.TextUnmarshaler] interface.
+func (i *NeuronIdxs) UnmarshalText(text []byte) error {
+ if err := i.SetString(string(text)); err != nil {
+ log.Println(err)
+ }
+ return nil
+}
+
+var _GPLayerTypesValues = []GPLayerTypes{0, 1, 2, 3}
+
+// GPLayerTypesN is the highest valid value
+// for type GPLayerTypes, plus one.
+const GPLayerTypesN GPLayerTypes = 4
+
+// An "invalid array index" compiler error signifies that the constant values have changed.
+// Re-run the enumgen command to generate them again.
+func _GPLayerTypesNoOp() {
+ var x [1]struct{}
+ _ = x[GPeOut-(0)]
+ _ = x[GPeIn-(1)]
+ _ = x[GPeTA-(2)]
+ _ = x[GPi-(3)]
+}
+
+var _GPLayerTypesNameToValueMap = map[string]GPLayerTypes{
+ `GPeOut`: 0,
+ `gpeout`: 0,
+ `GPeIn`: 1,
+ `gpein`: 1,
+ `GPeTA`: 2,
+ `gpeta`: 2,
+ `GPi`: 3,
+ `gpi`: 3,
+}
+
+var _GPLayerTypesDescMap = map[GPLayerTypes]string{
+ 0: `GPeOut is Outer layer of GPe neurons, receiving inhibition from MtxGo`,
+ 1: `GPeIn is Inner layer of GPe neurons, receiving inhibition from GPeOut and MtxNo`,
+ 2: `GPeTA is arkypallidal layer of GPe neurons, receiving inhibition from GPeIn and projecting inhibition to Mtx`,
+ 3: `GPi is the inner globus pallidus, functionally equivalent to SNr, receiving from MtxGo and GPeIn, and sending inhibition to VThal`,
+}
+
+var _GPLayerTypesMap = map[GPLayerTypes]string{
+ 0: `GPeOut`,
+ 1: `GPeIn`,
+ 2: `GPeTA`,
+ 3: `GPi`,
+}
+
+// String returns the string representation
+// of this GPLayerTypes value.
+func (i GPLayerTypes) String() string {
+ if str, ok := _GPLayerTypesMap[i]; ok {
+ return str
+ }
+ return strconv.FormatInt(int64(i), 10)
+}
+
+// SetString sets the GPLayerTypes value from its
+// string representation, and returns an
+// error if the string is invalid.
+func (i *GPLayerTypes) SetString(s string) error {
+ if val, ok := _GPLayerTypesNameToValueMap[s]; ok {
+ *i = val
+ return nil
+ }
+ if val, ok := _GPLayerTypesNameToValueMap[strings.ToLower(s)]; ok {
+ *i = val
+ return nil
+ }
+ return errors.New(s + " is not a valid value for type GPLayerTypes")
+}
+
+// Int64 returns the GPLayerTypes value as an int64.
+func (i GPLayerTypes) Int64() int64 {
+ return int64(i)
+}
+
+// SetInt64 sets the GPLayerTypes value from an int64.
+func (i *GPLayerTypes) SetInt64(in int64) {
+ *i = GPLayerTypes(in)
+}
+
+// Desc returns the description of the GPLayerTypes value.
+func (i GPLayerTypes) Desc() string {
+ if str, ok := _GPLayerTypesDescMap[i]; ok {
+ return str
+ }
+ return i.String()
+}
+
+// GPLayerTypesValues returns all possible values
+// for the type GPLayerTypes.
+func GPLayerTypesValues() []GPLayerTypes {
+ return _GPLayerTypesValues
+}
+
+// Values returns all possible values
+// for the type GPLayerTypes.
+func (i GPLayerTypes) Values() []enums.Enum {
+ res := make([]enums.Enum, len(_GPLayerTypesValues))
+ for i, d := range _GPLayerTypesValues {
+ res[i] = d
+ }
+ return res
+}
+
+// IsValid returns whether the value is a
+// valid option for type GPLayerTypes.
+func (i GPLayerTypes) IsValid() bool {
+ _, ok := _GPLayerTypesMap[i]
+ return ok
+}
+
+// MarshalText implements the [encoding.TextMarshaler] interface.
+func (i GPLayerTypes) MarshalText() ([]byte, error) {
+ return []byte(i.String()), nil
+}
+
+// UnmarshalText implements the [encoding.TextUnmarshaler] interface.
+func (i *GPLayerTypes) UnmarshalText(text []byte) error {
+ if err := i.SetString(string(text)); err != nil {
+ log.Println(err)
+ }
+ return nil
+}
+
+var _PrjnTypesValues = []PrjnTypes{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10}
+
+// PrjnTypesN is the highest valid value
+// for type PrjnTypes, plus one.
+const PrjnTypesN PrjnTypes = 11
+
+// An "invalid array index" compiler error signifies that the constant values have changed.
+// Re-run the enumgen command to generate them again.
+func _PrjnTypesNoOp() {
+ var x [1]struct{}
+ _ = x[ForwardPrjn-(0)]
+ _ = x[BackPrjn-(1)]
+ _ = x[LateralPrjn-(2)]
+ _ = x[InhibPrjn-(3)]
+ _ = x[CTCtxtPrjn-(4)]
+ _ = x[RWPrjn-(5)]
+ _ = x[TDPredPrjn-(6)]
+ _ = x[BLAPrjn-(7)]
+ _ = x[HipPrjn-(8)]
+ _ = x[VSPatchPrjn-(9)]
+ _ = x[MatrixPrjn-(10)]
+}
+
+var _PrjnTypesNameToValueMap = map[string]PrjnTypes{
+ `ForwardPrjn`: 0,
+ `forwardprjn`: 0,
+ `BackPrjn`: 1,
+ `backprjn`: 1,
+ `LateralPrjn`: 2,
+ `lateralprjn`: 2,
+ `InhibPrjn`: 3,
+ `inhibprjn`: 3,
+ `CTCtxtPrjn`: 4,
+ `ctctxtprjn`: 4,
+ `RWPrjn`: 5,
+ `rwprjn`: 5,
+ `TDPredPrjn`: 6,
+ `tdpredprjn`: 6,
+ `BLAPrjn`: 7,
+ `blaprjn`: 7,
+ `HipPrjn`: 8,
+ `hipprjn`: 8,
+ `VSPatchPrjn`: 9,
+ `vspatchprjn`: 9,
+ `MatrixPrjn`: 10,
+ `matrixprjn`: 10,
+}
+
+var _PrjnTypesDescMap = map[PrjnTypes]string{
+ 0: `Forward is a feedforward, bottom-up projection from sensory inputs to higher layers`,
+ 1: `Back is a feedback, top-down projection from higher layers back to lower layers`,
+ 2: `Lateral is a lateral projection within the same layer / area`,
+ 3: `Inhib is an inhibitory projection that drives inhibitory synaptic conductances instead of the default excitatory ones.`,
+	4: `CTCtxt are projections from Superficial layers to CT layers that send Burst activations to drive updating of CtxtGe excitatory conductance, at end of plus (5IB Bursting) phase. Biologically, this projection comes from the PT layer 5IB neurons, but it is simpler to use the Super neurons directly, and PT are optional for most network types. These projections also use a special learning rule that takes into account the temporal delays in the activation states. Can also add self context from CT for deeper temporal context.`,
+ 5: `RWPrjn does dopamine-modulated learning for reward prediction: Da * Send.CaSpkP (integrated current spiking activity). Uses RLPredPrjn parameters. Use in RWPredLayer typically to generate reward predictions. If the Da sign is positive, the first recv unit learns fully; for negative, second one learns fully. Lower lrate applies for opposite cases. Weights are positive-only.`,
+ 6: `TDPredPrjn does dopamine-modulated learning for reward prediction: DWt = Da * Send.SpkPrv (activity on *previous* timestep) Uses RLPredPrjn parameters. Use in TDPredLayer typically to generate reward predictions. If the Da sign is positive, the first recv unit learns fully; for negative, second one learns fully. Lower lrate applies for opposite cases. Weights are positive-only.`,
+ 7: `BLAPrjn implements the PVLV BLA learning rule: dW = ACh * X_t-1 * (Y_t - Y_t-1) The recv delta is across trials, where the US should activate on trial boundary, to enable sufficient time for gating through to OFC, so BLA initially learns based on US present - US absent. It can also learn based on CS onset if there is a prior CS that predicts that.`,
+ 8: ``,
+ 9: `VSPatchPrjn implements the VSPatch learning rule: dW = ACh * DA * X * Y where DA is D1 vs. D2 modulated DA level, X = sending activity factor, Y = receiving activity factor, and ACh provides overall modulation.`,
+ 10: `MatrixPrjn supports trace-based learning, where an initial trace of synaptic co-activity is formed, and then modulated by subsequent phasic dopamine & ACh when an outcome occurs. This bridges the temporal gap between gating activity and subsequent outcomes, and is based biologically on synaptic tags. Trace is reset at time of reward based on ACh level (from CINs in biology).`,
+}
+
+var _PrjnTypesMap = map[PrjnTypes]string{
+ 0: `ForwardPrjn`,
+ 1: `BackPrjn`,
+ 2: `LateralPrjn`,
+ 3: `InhibPrjn`,
+ 4: `CTCtxtPrjn`,
+ 5: `RWPrjn`,
+ 6: `TDPredPrjn`,
+ 7: `BLAPrjn`,
+ 8: `HipPrjn`,
+ 9: `VSPatchPrjn`,
+ 10: `MatrixPrjn`,
+}
+
+// String returns the string representation
+// of this PrjnTypes value.
+func (i PrjnTypes) String() string {
+ if str, ok := _PrjnTypesMap[i]; ok {
+ return str
+ }
+ return strconv.FormatInt(int64(i), 10)
+}
+
+// SetString sets the PrjnTypes value from its
+// string representation, and returns an
+// error if the string is invalid.
+func (i *PrjnTypes) SetString(s string) error {
+ if val, ok := _PrjnTypesNameToValueMap[s]; ok {
+ *i = val
+ return nil
+ }
+ if val, ok := _PrjnTypesNameToValueMap[strings.ToLower(s)]; ok {
+ *i = val
+ return nil
+ }
+ return errors.New(s + " is not a valid value for type PrjnTypes")
+}
+
+// Int64 returns the PrjnTypes value as an int64.
+func (i PrjnTypes) Int64() int64 {
+ return int64(i)
+}
+
+// SetInt64 sets the PrjnTypes value from an int64.
+func (i *PrjnTypes) SetInt64(in int64) {
+ *i = PrjnTypes(in)
+}
+
+// Desc returns the description of the PrjnTypes value.
+func (i PrjnTypes) Desc() string {
+ if str, ok := _PrjnTypesDescMap[i]; ok {
+ return str
+ }
+ return i.String()
+}
+
+// PrjnTypesValues returns all possible values
+// for the type PrjnTypes.
+func PrjnTypesValues() []PrjnTypes {
+ return _PrjnTypesValues
+}
+
+// Values returns all possible values
+// for the type PrjnTypes.
+func (i PrjnTypes) Values() []enums.Enum {
+ res := make([]enums.Enum, len(_PrjnTypesValues))
+ for i, d := range _PrjnTypesValues {
+ res[i] = d
+ }
+ return res
+}
+
+// IsValid returns whether the value is a
+// valid option for type PrjnTypes.
+func (i PrjnTypes) IsValid() bool {
+ _, ok := _PrjnTypesMap[i]
+ return ok
+}
+
+// MarshalText implements the [encoding.TextMarshaler] interface.
+func (i PrjnTypes) MarshalText() ([]byte, error) {
+ return []byte(i.String()), nil
+}
+
+// UnmarshalText implements the [encoding.TextUnmarshaler] interface.
+func (i *PrjnTypes) UnmarshalText(text []byte) error {
+ if err := i.SetString(string(text)); err != nil {
+ log.Println(err)
+ }
+ return nil
+}
+
+var _SynapseVarsValues = []SynapseVars{0, 1, 2, 3, 4}
+
+// SynapseVarsN is the highest valid value
+// for type SynapseVars, plus one.
+const SynapseVarsN SynapseVars = 5
+
+// An "invalid array index" compiler error signifies that the constant values have changed.
+// Re-run the enumgen command to generate them again.
+func _SynapseVarsNoOp() {
+ var x [1]struct{}
+ _ = x[Wt-(0)]
+ _ = x[LWt-(1)]
+ _ = x[SWt-(2)]
+ _ = x[DWt-(3)]
+ _ = x[DSWt-(4)]
+}
+
+var _SynapseVarsNameToValueMap = map[string]SynapseVars{
+ `Wt`: 0,
+ `wt`: 0,
+ `LWt`: 1,
+ `lwt`: 1,
+ `SWt`: 2,
+ `swt`: 2,
+ `DWt`: 3,
+ `dwt`: 3,
+ `DSWt`: 4,
+ `dswt`: 4,
+}
+
+var _SynapseVarsDescMap = map[SynapseVars]string{
+ 0: `Wt is effective synaptic weight value, determining how much conductance one spike drives on the receiving neuron, representing the actual number of effective AMPA receptors in the synapse. Wt = SWt * WtSig(LWt), where WtSig produces values between 0-2 based on LWt, centered on 1.`,
+ 1: `LWt is rapidly learning, linear weight value -- learns according to the lrate specified in the connection spec. Biologically, this represents the internal biochemical processes that drive the trafficking of AMPA receptors in the synaptic density. Initially all LWt are .5, which gives 1 from WtSig function.`,
+ 2: `SWt is slowly adapting structural weight value, which acts as a multiplicative scaling factor on synaptic efficacy: biologically represents the physical size and efficacy of the dendritic spine. SWt values adapt in an outer loop along with synaptic scaling, with constraints to prevent runaway positive feedback loops and maintain variance and further capacity to learn. Initial variance is all in SWt, with LWt set to .5, and scaling absorbs some of LWt into SWt.`,
+ 3: `DWt is delta (change in) synaptic weight, from learning -- updates LWt which then updates Wt.`,
+ 4: `DSWt is change in SWt slow synaptic weight -- accumulates DWt`,
+}
+
+var _SynapseVarsMap = map[SynapseVars]string{
+ 0: `Wt`,
+ 1: `LWt`,
+ 2: `SWt`,
+ 3: `DWt`,
+ 4: `DSWt`,
+}
+
+// String returns the string representation
+// of this SynapseVars value.
+func (i SynapseVars) String() string {
+ if str, ok := _SynapseVarsMap[i]; ok {
+ return str
+ }
+ return strconv.FormatInt(int64(i), 10)
+}
+
+// SetString sets the SynapseVars value from its
+// string representation, and returns an
+// error if the string is invalid.
+func (i *SynapseVars) SetString(s string) error {
+ if val, ok := _SynapseVarsNameToValueMap[s]; ok {
+ *i = val
+ return nil
+ }
+ if val, ok := _SynapseVarsNameToValueMap[strings.ToLower(s)]; ok {
+ *i = val
+ return nil
+ }
+ return errors.New(s + " is not a valid value for type SynapseVars")
+}
+
+// Int64 returns the SynapseVars value as an int64.
+func (i SynapseVars) Int64() int64 {
+ return int64(i)
+}
+
+// SetInt64 sets the SynapseVars value from an int64.
+func (i *SynapseVars) SetInt64(in int64) {
+ *i = SynapseVars(in)
+}
+
+// Desc returns the description of the SynapseVars value.
+func (i SynapseVars) Desc() string {
+ if str, ok := _SynapseVarsDescMap[i]; ok {
+ return str
+ }
+ return i.String()
+}
+
+// SynapseVarsValues returns all possible values
+// for the type SynapseVars.
+func SynapseVarsValues() []SynapseVars {
+ return _SynapseVarsValues
+}
+
+// Values returns all possible values
+// for the type SynapseVars.
+func (i SynapseVars) Values() []enums.Enum {
+ res := make([]enums.Enum, len(_SynapseVarsValues))
+ for i, d := range _SynapseVarsValues {
+ res[i] = d
+ }
+ return res
+}
+
+// IsValid returns whether the value is a
+// valid option for type SynapseVars.
+func (i SynapseVars) IsValid() bool {
+ _, ok := _SynapseVarsMap[i]
+ return ok
+}
+
+// MarshalText implements the [encoding.TextMarshaler] interface.
+func (i SynapseVars) MarshalText() ([]byte, error) {
+ return []byte(i.String()), nil
+}
+
+// UnmarshalText implements the [encoding.TextUnmarshaler] interface.
+func (i *SynapseVars) UnmarshalText(text []byte) error {
+ if err := i.SetString(string(text)); err != nil {
+ log.Println(err)
+ }
+ return nil
+}
+
+var _SynapseCaVarsValues = []SynapseCaVars{0, 1, 2, 3, 4, 5, 6}
+
+// SynapseCaVarsN is the highest valid value
+// for type SynapseCaVars, plus one.
+const SynapseCaVarsN SynapseCaVars = 7
+
+// An "invalid array index" compiler error signifies that the constant values have changed.
+// Re-run the enumgen command to generate them again.
+func _SynapseCaVarsNoOp() {
+ var x [1]struct{}
+ _ = x[CaM-(0)]
+ _ = x[CaP-(1)]
+ _ = x[CaD-(2)]
+ _ = x[CaUpT-(3)]
+ _ = x[Tr-(4)]
+ _ = x[DTr-(5)]
+ _ = x[DiDWt-(6)]
+}
+
+var _SynapseCaVarsNameToValueMap = map[string]SynapseCaVars{
+ `CaM`: 0,
+ `cam`: 0,
+ `CaP`: 1,
+ `cap`: 1,
+ `CaD`: 2,
+ `cad`: 2,
+ `CaUpT`: 3,
+ `caupt`: 3,
+ `Tr`: 4,
+ `tr`: 4,
+ `DTr`: 5,
+ `dtr`: 5,
+ `DiDWt`: 6,
+ `didwt`: 6,
+}
+
+var _SynapseCaVarsDescMap = map[SynapseCaVars]string{
+ 0: `CaM is first stage running average (mean) Ca calcium level (like CaM = calmodulin), feeds into CaP`,
+ 1: `CaP is shorter timescale integrated CaM value, representing the plus, LTP direction of weight change and capturing the function of CaMKII in the Kinase learning rule`,
+ 2: `CaD is longer timescale integrated CaP value, representing the minus, LTD direction of weight change and capturing the function of DAPK1 in the Kinase learning rule`,
+ 3: `CaUpT is time in CyclesTotal of last updating of Ca values at the synapse level, for optimized synaptic-level Ca integration -- converted to / from uint32`,
+ 4: `Tr is trace of synaptic activity over time -- used for credit assignment in learning. In MatrixPrjn this is a tag that is then updated later when US occurs.`,
+ 5: `DTr is delta (change in) Tr trace of synaptic activity over time`,
+ 6: `DiDWt is delta weight for each data parallel index (Di) -- this is directly computed from the Ca values (in cortical version) and then aggregated into the overall DWt (which may be further integrated across MPI nodes), which then drives changes in Wt values`,
+}
+
+var _SynapseCaVarsMap = map[SynapseCaVars]string{
+ 0: `CaM`,
+ 1: `CaP`,
+ 2: `CaD`,
+ 3: `CaUpT`,
+ 4: `Tr`,
+ 5: `DTr`,
+ 6: `DiDWt`,
+}
+
+// String returns the string representation
+// of this SynapseCaVars value.
+func (i SynapseCaVars) String() string {
+ if str, ok := _SynapseCaVarsMap[i]; ok {
+ return str
+ }
+ return strconv.FormatInt(int64(i), 10)
+}
+
+// SetString sets the SynapseCaVars value from its
+// string representation, and returns an
+// error if the string is invalid.
+func (i *SynapseCaVars) SetString(s string) error {
+ if val, ok := _SynapseCaVarsNameToValueMap[s]; ok {
+ *i = val
+ return nil
+ }
+ if val, ok := _SynapseCaVarsNameToValueMap[strings.ToLower(s)]; ok {
+ *i = val
+ return nil
+ }
+ return errors.New(s + " is not a valid value for type SynapseCaVars")
+}
+
+// Int64 returns the SynapseCaVars value as an int64.
+func (i SynapseCaVars) Int64() int64 {
+ return int64(i)
+}
+
+// SetInt64 sets the SynapseCaVars value from an int64.
+func (i *SynapseCaVars) SetInt64(in int64) {
+ *i = SynapseCaVars(in)
+}
+
+// Desc returns the description of the SynapseCaVars value.
+func (i SynapseCaVars) Desc() string {
+ if str, ok := _SynapseCaVarsDescMap[i]; ok {
+ return str
+ }
+ return i.String()
+}
+
+// SynapseCaVarsValues returns all possible values
+// for the type SynapseCaVars.
+func SynapseCaVarsValues() []SynapseCaVars {
+ return _SynapseCaVarsValues
+}
+
+// Values returns all possible values
+// for the type SynapseCaVars.
+func (i SynapseCaVars) Values() []enums.Enum {
+ res := make([]enums.Enum, len(_SynapseCaVarsValues))
+ for i, d := range _SynapseCaVarsValues {
+ res[i] = d
+ }
+ return res
+}
+
+// IsValid returns whether the value is a
+// valid option for type SynapseCaVars.
+func (i SynapseCaVars) IsValid() bool {
+ _, ok := _SynapseCaVarsMap[i]
+ return ok
+}
+
+// MarshalText implements the [encoding.TextMarshaler] interface.
+func (i SynapseCaVars) MarshalText() ([]byte, error) {
+ return []byte(i.String()), nil
+}
+
+// UnmarshalText implements the [encoding.TextUnmarshaler] interface.
+func (i *SynapseCaVars) UnmarshalText(text []byte) error {
+ if err := i.SetString(string(text)); err != nil {
+ log.Println(err)
+ }
+ return nil
+}
+
+var _SynapseIdxsValues = []SynapseIdxs{0, 1, 2}
+
+// SynapseIdxsN is the highest valid value
+// for type SynapseIdxs, plus one.
+const SynapseIdxsN SynapseIdxs = 3
+
+// An "invalid array index" compiler error signifies that the constant values have changed.
+// Re-run the enumgen command to generate them again.
+func _SynapseIdxsNoOp() {
+ var x [1]struct{}
+ _ = x[SynRecvIdx-(0)]
+ _ = x[SynSendIdx-(1)]
+ _ = x[SynPrjnIdx-(2)]
+}
+
+var _SynapseIdxsNameToValueMap = map[string]SynapseIdxs{
+ `SynRecvIdx`: 0,
+ `synrecvidx`: 0,
+ `SynSendIdx`: 1,
+ `synsendidx`: 1,
+ `SynPrjnIdx`: 2,
+ `synprjnidx`: 2,
+}
+
+var _SynapseIdxsDescMap = map[SynapseIdxs]string{
+ 0: `SynRecvIdx is receiving neuron index in network's global list of neurons`,
+ 1: `SynSendIdx is sending neuron index in network's global list of neurons`,
+ 2: `SynPrjnIdx is projection index in global list of projections organized as [Layers][RecvPrjns]`,
+}
+
+var _SynapseIdxsMap = map[SynapseIdxs]string{
+ 0: `SynRecvIdx`,
+ 1: `SynSendIdx`,
+ 2: `SynPrjnIdx`,
+}
+
+// String returns the string representation
+// of this SynapseIdxs value.
+func (i SynapseIdxs) String() string {
+ if str, ok := _SynapseIdxsMap[i]; ok {
+ return str
+ }
+ return strconv.FormatInt(int64(i), 10)
+}
+
+// SetString sets the SynapseIdxs value from its
+// string representation, and returns an
+// error if the string is invalid.
+func (i *SynapseIdxs) SetString(s string) error {
+ if val, ok := _SynapseIdxsNameToValueMap[s]; ok {
+ *i = val
+ return nil
+ }
+ if val, ok := _SynapseIdxsNameToValueMap[strings.ToLower(s)]; ok {
+ *i = val
+ return nil
+ }
+ return errors.New(s + " is not a valid value for type SynapseIdxs")
+}
+
+// Int64 returns the SynapseIdxs value as an int64.
+func (i SynapseIdxs) Int64() int64 {
+ return int64(i)
+}
+
+// SetInt64 sets the SynapseIdxs value from an int64.
+func (i *SynapseIdxs) SetInt64(in int64) {
+ *i = SynapseIdxs(in)
+}
+
+// Desc returns the description of the SynapseIdxs value.
+func (i SynapseIdxs) Desc() string {
+ if str, ok := _SynapseIdxsDescMap[i]; ok {
+ return str
+ }
+ return i.String()
+}
+
+// SynapseIdxsValues returns all possible values
+// for the type SynapseIdxs.
+func SynapseIdxsValues() []SynapseIdxs {
+ return _SynapseIdxsValues
+}
+
+// Values returns all possible values
+// for the type SynapseIdxs.
+func (i SynapseIdxs) Values() []enums.Enum {
+ res := make([]enums.Enum, len(_SynapseIdxsValues))
+ for i, d := range _SynapseIdxsValues {
+ res[i] = d
+ }
+ return res
+}
+
+// IsValid returns whether the value is a
+// valid option for type SynapseIdxs.
+func (i SynapseIdxs) IsValid() bool {
+ _, ok := _SynapseIdxsMap[i]
+ return ok
+}
+
+// MarshalText implements the [encoding.TextMarshaler] interface.
+func (i SynapseIdxs) MarshalText() ([]byte, error) {
+ return []byte(i.String()), nil
+}
+
+// UnmarshalText implements the [encoding.TextUnmarshaler] interface.
+func (i *SynapseIdxs) UnmarshalText(text []byte) error {
+ if err := i.SetString(string(text)); err != nil {
+ log.Println(err)
+ }
+ return nil
+}
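
For reference, a minimal sketch of how the enums API generated above is used in practice (illustrative only, not part of this change; assumes the package imports as github.com/emer/axon/axon):

package main

import (
	"encoding/json"
	"fmt"

	"github.com/emer/axon/axon"
)

func main() {
	var idx axon.SynapseIdxs
	// SetString accepts exact or all-lowercase names, and errors otherwise.
	if err := idx.SetString("SynSendIdx"); err != nil {
		fmt.Println(err)
	}
	fmt.Println(idx.String(), idx.Int64(), idx.IsValid()) // SynSendIdx 1 true

	// Because SynapseIdxs implements encoding.TextMarshaler, JSON
	// round-trips by name rather than by number.
	b, _ := json.Marshal(idx)
	fmt.Println(string(b)) // "SynSendIdx"

	var back axon.SynapseIdxs
	_ = json.Unmarshal(b, &back) // UnmarshalText logs, rather than returns, bad names
	fmt.Println(back == idx)     // true
}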
diff --git a/axon/globals.go b/axon/globals.go
index 8e0bc05e3..50b38984b 100644
--- a/axon/globals.go
+++ b/axon/globals.go
@@ -4,20 +4,11 @@
package axon
-import "github.com/goki/ki/kit"
-
-//go:generate stringer -type=GlobalVars
-
-var KiT_GlobalVars = kit.Enums.AddEnum(GlobalVarsN, kit.NotBitFlag, nil)
-
-func (ev GlobalVars) MarshalJSON() ([]byte, error) { return kit.EnumMarshalJSON(ev) }
-func (ev *GlobalVars) UnmarshalJSON(b []byte) error { return kit.EnumUnmarshalJSON(ev, b) }
-
//gosl: start globals
// GlobalVars are network-wide variables, such as neuromodulators, reward, drives, etc
// including the state for the PVLV phasic dopamine model.
-type GlobalVars int32
+type GlobalVars int32 //enums:enum
const (
/////////////////////////////////////////
@@ -229,7 +220,13 @@ const (
// this is reset after last goal accomplished -- records gating since then.
GvVSMatrixPoolGated
- GlobalVarsN
+ // IMPORTANT: if GvVSMatrixPoolGated is not the last value, the gosl GlobalVarsN definition below must be updated to match
)
//gosl: end globals
+
+//gosl: hlsl globals
+/*
+static const GlobalVars GlobalVarsN = GvVSMatrixPoolGated + 1;
+*/
+//gosl: end globals
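
The //enums:enum directive replaces the deleted stringer + kit registration: goki generate now emits the String / SetString / MarshalText methods for GlobalVars (just as for SynapseIdxs above), including a Go-side GlobalVarsN constant. Since that generated Go file is not translated by gosl, the //gosl: hlsl block above supplies the same constant to the shader. A sketch of the expected Go-side output, by analogy with the enumgen code above (not the verbatim generated file):

// GlobalVarsN is the highest valid value for type GlobalVars, plus one.
const GlobalVarsN GlobalVars = GvVSMatrixPoolGated + 1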
diff --git a/axon/globalvars_string.go b/axon/globalvars_string.go
deleted file mode 100644
index b059e107d..000000000
--- a/axon/globalvars_string.go
+++ /dev/null
@@ -1,149 +0,0 @@
-// Code generated by "stringer -type=GlobalVars"; DO NOT EDIT.
-
-package axon
-
-import (
- "errors"
- "strconv"
-)
-
-var _ = errors.New("dummy error")
-
-func _() {
- // An "invalid array index" compiler error signifies that the constant values have changed.
- // Re-run the stringer command to generate them again.
- var x [1]struct{}
- _ = x[GvRew-0]
- _ = x[GvHasRew-1]
- _ = x[GvRewPred-2]
- _ = x[GvPrevPred-3]
- _ = x[GvHadRew-4]
- _ = x[GvDA-5]
- _ = x[GvACh-6]
- _ = x[GvNE-7]
- _ = x[GvSer-8]
- _ = x[GvAChRaw-9]
- _ = x[GvNotMaint-10]
- _ = x[GvVSMatrixJustGated-11]
- _ = x[GvVSMatrixHasGated-12]
- _ = x[GvCuriosityPoolGated-13]
- _ = x[GvTime-14]
- _ = x[GvEffort-15]
- _ = x[GvUrgencyRaw-16]
- _ = x[GvUrgency-17]
- _ = x[GvHasPosUS-18]
- _ = x[GvHadPosUS-19]
- _ = x[GvNegUSOutcome-20]
- _ = x[GvHadNegUSOutcome-21]
- _ = x[GvPVposSum-22]
- _ = x[GvPVpos-23]
- _ = x[GvPVnegSum-24]
- _ = x[GvPVneg-25]
- _ = x[GvPVposEst-26]
- _ = x[GvPVposEstSum-27]
- _ = x[GvPVposEstDisc-28]
- _ = x[GvGiveUpDiff-29]
- _ = x[GvGiveUpProb-30]
- _ = x[GvGiveUp-31]
- _ = x[GvGaveUp-32]
- _ = x[GvVSPatchPos-33]
- _ = x[GvVSPatchPosPrev-34]
- _ = x[GvVSPatchPosSum-35]
- _ = x[GvLHbDip-36]
- _ = x[GvLHbBurst-37]
- _ = x[GvLHbPVDA-38]
- _ = x[GvCeMpos-39]
- _ = x[GvCeMneg-40]
- _ = x[GvVtaDA-41]
- _ = x[GvUSneg-42]
- _ = x[GvUSnegRaw-43]
- _ = x[GvDrives-44]
- _ = x[GvUSpos-45]
- _ = x[GvVSPatch-46]
- _ = x[GvVSPatchPrev-47]
- _ = x[GvOFCposUSPTMaint-48]
- _ = x[GvVSMatrixPoolGated-49]
- _ = x[GlobalVarsN-50]
-}
-
-const _GlobalVars_name = "GvRewGvHasRewGvRewPredGvPrevPredGvHadRewGvDAGvAChGvNEGvSerGvAChRawGvNotMaintGvVSMatrixJustGatedGvVSMatrixHasGatedGvCuriosityPoolGatedGvTimeGvEffortGvUrgencyRawGvUrgencyGvHasPosUSGvHadPosUSGvNegUSOutcomeGvHadNegUSOutcomeGvPVposSumGvPVposGvPVnegSumGvPVnegGvPVposEstGvPVposEstSumGvPVposEstDiscGvGiveUpDiffGvGiveUpProbGvGiveUpGvGaveUpGvVSPatchPosGvVSPatchPosPrevGvVSPatchPosSumGvLHbDipGvLHbBurstGvLHbPVDAGvCeMposGvCeMnegGvVtaDAGvUSnegGvUSnegRawGvDrivesGvUSposGvVSPatchGvVSPatchPrevGvOFCposUSPTMaintGvVSMatrixPoolGatedGlobalVarsN"
-
-var _GlobalVars_index = [...]uint16{0, 5, 13, 22, 32, 40, 44, 49, 53, 58, 66, 76, 95, 113, 133, 139, 147, 159, 168, 178, 188, 202, 219, 229, 236, 246, 253, 263, 276, 290, 302, 314, 322, 330, 342, 358, 373, 381, 391, 400, 408, 416, 423, 430, 440, 448, 455, 464, 477, 494, 513, 524}
-
-func (i GlobalVars) String() string {
- if i < 0 || i >= GlobalVars(len(_GlobalVars_index)-1) {
- return "GlobalVars(" + strconv.FormatInt(int64(i), 10) + ")"
- }
- return _GlobalVars_name[_GlobalVars_index[i]:_GlobalVars_index[i+1]]
-}
-
-func (i *GlobalVars) FromString(s string) error {
- for j := 0; j < len(_GlobalVars_index)-1; j++ {
- if s == _GlobalVars_name[_GlobalVars_index[j]:_GlobalVars_index[j+1]] {
- *i = GlobalVars(j)
- return nil
- }
- }
- return errors.New("String: " + s + " is not a valid option for type: GlobalVars")
-}
-
-var _GlobalVars_descMap = map[GlobalVars]string{
- 0: `Rew is reward value -- this is set here in the Context struct, and the RL Rew layer grabs it from there -- must also set HasRew flag when rew is set -- otherwise is ignored.`,
- 1: `HasRew must be set to true when a reward is present -- otherwise Rew is ignored. Also set when PVLV BOA model gives up. This drives ACh release in the PVLV model.`,
- 2: `RewPred is reward prediction -- computed by a special reward prediction layer`,
- 3: `PrevPred is previous time step reward prediction -- e.g., for TDPredLayer`,
- 4: `HadRew is HasRew state from the previous trial -- copied from HasRew in NewState -- used for updating Effort, Urgency at start of new trial`,
- 5: `DA is dopamine -- represents reward prediction error, signaled as phasic increases or decreases in activity relative to a tonic baseline, which is represented by a value of 0. Released by the VTA -- ventral tegmental area, or SNc -- substantia nigra pars compacta.`,
- 6: `ACh is acetylcholine -- activated by salient events, particularly at the onset of a reward / punishment outcome (US), or onset of a conditioned stimulus (CS). Driven by BLA -> PPtg that detects changes in BLA activity, via LDTLayer type`,
- 7: `NE is norepinepherine -- not yet in use`,
- 8: `Ser is serotonin -- not yet in use`,
- 9: `AChRaw is raw ACh value used in updating global ACh value by LDTLayer`,
- 10: `NotMaint is activity of the PTNotMaintLayer -- drives top-down inhibition of LDT layer / ACh activity.`,
- 11: `VSMatrixJustGated is VSMatrix just gated (to engage goal maintenance in PFC areas), set at end of plus phase -- this excludes any gating happening at time of US`,
- 12: `VSMatrixHasGated is VSMatrix has gated since the last time HasRew was set (US outcome received or expected one failed to be received`,
- 13: `CuriosityPoolGated is true if VSMatrixJustGated and the first pool representing the curiosity / novelty drive gated -- this can change the giving up Effort.Max parameter.`,
- 14: `Time is raw time counter, incrementing upward during goal engaged window. This is also copied directly into NegUS[0] which tracks time, but we maintain a separate effort value to make it clearer.`,
- 15: `Effort is raw effort counter -- incrementing upward for each effort step during goal engaged window. This is also copied directly into NegUS[1] which tracks effort, but we maintain a separate effort value to make it clearer.`,
- 16: `UrgencyRaw is raw effort for urgency -- incrementing upward from effort increments per step when _not_ goal engaged`,
- 17: `Urgency is the overall urgency activity level (normalized 0-1), computed from logistic function of GvUrgencyRaw`,
- 18: `HasPosUS indicates has positive US on this trial -- drives goal accomplishment logic and gating.`,
- 19: `HadPosUS is state from the previous trial (copied from HasPosUS in NewState).`,
- 20: `NegUSOutcome indicates that a strong negative US stimulus was experienced, driving phasic ACh, VSMatrix gating to reset current goal engaged plan (if any), and phasic dopamine based on the outcome.`,
- 21: `HadNegUSOutcome is state from the previous trial (copied from NegUSOutcome in NewState)`,
- 22: `PVposSum is total weighted positive valence primary value = sum of Weight * USpos * Drive`,
- 23: `PVpos is normalized positive valence primary value = (1 - 1/(1+PVposGain * PVposSum))`,
- 24: `PVnegSum is total weighted negative valence primary value = sum of Weight * USneg`,
- 25: `PVpos is normalized negative valence primary value = (1 - 1/(1+PVnegGain * PVnegSum))`,
- 26: `PVposEst is the estimated PVpos value based on OFCposUSPT and VSMatrix gating`,
- 27: `PVposEstSum is the sum that goes into computing estimated PVpos value based on OFCposUSPT and VSMatrix gating`,
- 28: `PVposEstDisc is the discounted version of PVposEst, subtracting VSPatchPosSum, which represents the accumulated expectation of PVpos to this point.`,
- 29: `GiveUpDiff is the difference: PVposEstDisc - PVneg representing the expected positive outcome up to this point. When this turns negative, the chance of giving up goes up proportionally, as a logistic function of this difference.`,
- 30: `GiveUpProb is the probability from the logistic function of GiveUpDiff`,
- 31: `GiveUp is true if a reset was triggered probabilistically based on GiveUpProb`,
- 32: `GaveUp is copy of GiveUp from previous trial`,
- 33: `VSPatchPos is net shunting input from VSPatch (PosD1, named PVi in original PVLV) computed as the Max of US-specific VSPatch saved values. This is also stored as GvRewPred.`,
- 34: `VSPatchPosPrev is the previous-trial version of VSPatchPos -- for adjusting the VSPatchThr threshold`,
- 35: `VSPatchPosSum is the sum of VSPatchPos over goal engaged trials, representing the integrated prediction that the US is going to occur`,
- 36: `computed LHb activity level that drives dipping / pausing of DA firing, when VSPatch pos prediction > actual PV reward drive or PVneg > PVpos`,
- 37: `LHbBurst is computed LHb activity level that drives bursts of DA firing, when actual PV reward drive > VSPatch pos prediction`,
- 38: `LHbPVDA is GvLHbBurst - GvLHbDip -- the LHb contribution to DA, reflecting PV and VSPatch (PVi), but not the CS (LV) contributions`,
- 39: `CeMpos is positive valence central nucleus of the amygdala (CeM) LV (learned value) activity, reflecting |BLAPosAcqD1 - BLAPosExtD2|_+ positively rectified. CeM sets Raw directly. Note that a positive US onset even with no active Drive will be reflected here, enabling learning about unexpected outcomes`,
- 40: `CeMneg is negative valence central nucleus of the amygdala (CeM) LV (learned value) activity, reflecting |BLANegAcqD2 - BLANegExtD1|_+ positively rectified. CeM sets Raw directly`,
- 41: `VtaDA is overall dopamine value reflecting all of the different inputs`,
- 42: `USneg are negative valence US outcomes -- normalized version of raw, NNegUSs of them`,
- 43: `USnegRaw are raw, linearly incremented negative valence US outcomes, this value is also integrated together with all US vals for PVneg`,
- 44: `Drives is current drive state -- updated with optional homeostatic exponential return to baseline values`,
- 45: `USpos is current positive-valence drive-satisfying input(s) (unconditioned stimuli = US)`,
- 46: `VSPatch is current reward predicting VSPatch (PosD1) values`,
- 47: `VSPatch is previous reward predicting VSPatch (PosD1) values`,
- 48: `OFCposUSPTMaint is activity level of given OFCposUSPT maintenance pool used in anticipating potential USpos outcome value`,
- 49: `VSMatrixPoolGated indicates whether given VSMatrix pool gated this is reset after last goal accomplished -- records gating since then.`,
- 50: ``,
-}
-
-func (i GlobalVars) Desc() string {
- if str, ok := _GlobalVars_descMap[i]; ok {
- return str
- }
- return "GlobalVars(" + strconv.FormatInt(int64(i), 10) + ")"
-}
diff --git a/axon/gplayertypes_string.go b/axon/gplayertypes_string.go
deleted file mode 100644
index d713eacd4..000000000
--- a/axon/gplayertypes_string.go
+++ /dev/null
@@ -1,57 +0,0 @@
-// Code generated by "stringer -type=GPLayerTypes"; DO NOT EDIT.
-
-package axon
-
-import (
- "errors"
- "strconv"
-)
-
-var _ = errors.New("dummy error")
-
-func _() {
- // An "invalid array index" compiler error signifies that the constant values have changed.
- // Re-run the stringer command to generate them again.
- var x [1]struct{}
- _ = x[GPeOut-0]
- _ = x[GPeIn-1]
- _ = x[GPeTA-2]
- _ = x[GPi-3]
- _ = x[GPLayerTypesN-4]
-}
-
-const _GPLayerTypes_name = "GPeOutGPeInGPeTAGPiGPLayerTypesN"
-
-var _GPLayerTypes_index = [...]uint8{0, 6, 11, 16, 19, 32}
-
-func (i GPLayerTypes) String() string {
- if i < 0 || i >= GPLayerTypes(len(_GPLayerTypes_index)-1) {
- return "GPLayerTypes(" + strconv.FormatInt(int64(i), 10) + ")"
- }
- return _GPLayerTypes_name[_GPLayerTypes_index[i]:_GPLayerTypes_index[i+1]]
-}
-
-func (i *GPLayerTypes) FromString(s string) error {
- for j := 0; j < len(_GPLayerTypes_index)-1; j++ {
- if s == _GPLayerTypes_name[_GPLayerTypes_index[j]:_GPLayerTypes_index[j+1]] {
- *i = GPLayerTypes(j)
- return nil
- }
- }
- return errors.New("String: " + s + " is not a valid option for type: GPLayerTypes")
-}
-
-var _GPLayerTypes_descMap = map[GPLayerTypes]string{
- 0: `GPeOut is Outer layer of GPe neurons, receiving inhibition from MtxGo`,
- 1: `GPeIn is Inner layer of GPe neurons, receiving inhibition from GPeOut and MtxNo`,
- 2: `GPeTA is arkypallidal layer of GPe neurons, receiving inhibition from GPeIn and projecting inhibition to Mtx`,
- 3: `GPi is the inner globus pallidus, functionally equivalent to SNr, receiving from MtxGo and GPeIn, and sending inhibition to VThal`,
- 4: ``,
-}
-
-func (i GPLayerTypes) Desc() string {
- if str, ok := _GPLayerTypes_descMap[i]; ok {
- return str
- }
- return "GPLayerTypes(" + strconv.FormatInt(int64(i), 10) + ")"
-}
diff --git a/axon/gpu.go b/axon/gpu.go
index cd599dda0..69f5d632f 100644
--- a/axon/gpu.go
+++ b/axon/gpu.go
@@ -10,16 +10,16 @@ import (
"math"
"unsafe"
- "github.com/emer/empi/mpi"
- "github.com/goki/gi/oswin"
+ "github.com/emer/empi/v2/mpi"
"github.com/goki/vgpu/vgpu"
vk "github.com/goki/vulkan"
+ "goki.dev/goosi"
)
//go:embed shaders/*.spv
var content embed.FS
-//go:generate gosl -exclude=Update,UpdateParams,Defaults,AllParams github.com/goki/mat32/fastexp.go github.com/emer/etable/minmax ../chans/chans.go ../chans ../kinase ../fsfffb/inhib.go ../fsfffb github.com/emer/emergent/etime github.com/emer/emergent/ringidx rand.go avgmax.go neuromod.go globals.go context.go neuron.go synapse.go pool.go layervals.go act.go act_prjn.go inhib.go learn.go layertypes.go layerparams.go deep_layers.go rl_layers.go pvlv_layers.go pcore_layers.go prjntypes.go prjnparams.go deep_prjns.go rl_prjns.go pvlv_prjns.go pcore_prjns.go hip_prjns.go gpu_hlsl
+//go:generate gosl -exclude=Update,UpdateParams,Defaults,AllParams goki.dev/mat32/v2/fastexp.go goki.dev/etable/v2/minmax ../chans/chans.go ../chans ../kinase ../fsfffb/inhib.go ../fsfffb github.com/emer/emergent/v2/etime github.com/emer/emergent/v2/ringidx rand.go avgmax.go neuromod.go globals.go context.go neuron.go synapse.go pool.go layervals.go act.go act_prjn.go inhib.go learn.go layertypes.go layerparams.go deep_layers.go rl_layers.go pvlv_layers.go pcore_layers.go prjntypes.go prjnparams.go deep_prjns.go rl_prjns.go pvlv_prjns.go pcore_prjns.go hip_prjns.go gpu_hlsl
// Full vars code -- each gpu_*.hlsl uses a subset
@@ -116,7 +116,7 @@ const CyclesN = 10
type PushOff struct {
// offset
- Off uint32 `desc:"offset"`
+ Off uint32
pad, pad1, pad2 uint32
}
@@ -126,71 +126,71 @@ type PushOff struct {
type GPU struct {
// if true, actually use the GPU
- On bool `desc:"if true, actually use the GPU"`
+ On bool
 RecFunTimes bool `desc:"if true, slower separate shader pipeline runs are used, with a CPU-sync Wait at the end, to enable timing information about each individual shader to be collected using the network FunTimer system. Otherwise, only aggregate information is available about the entire Cycle call."`
// if true, process each cycle one at a time. Otherwise, 10 cycles at a time are processed in one batch.
- CycleByCycle bool `desc:"if true, process each cycle one at a time. Otherwise, 10 cycles at a time are processed in one batch."`
+ CycleByCycle bool
- // [view: -] the network we operate on -- we live under this net
- Net *Network `view:"-" desc:"the network we operate on -- we live under this net"`
+ // the network we operate on -- we live under this net
+ Net *Network `view:"-"`
- // [view: -] the context we use
- Ctx *Context `view:"-" desc:"the context we use"`
+ // the context we use
+ Ctx *Context `view:"-"`
- // [view: -] the vgpu compute system
- Sys *vgpu.System `view:"-" desc:"the vgpu compute system"`
+ // the vgpu compute system
+ Sys *vgpu.System `view:"-"`
- // [view: -] VarSet = 0: the uniform LayerParams
- Params *vgpu.VarSet `view:"-" desc:"VarSet = 0: the uniform LayerParams"`
+ // VarSet = 0: the uniform LayerParams
+ Params *vgpu.VarSet `view:"-"`
- // [view: -] VarSet = 1: the storage indexes and PrjnParams
- Idxs *vgpu.VarSet `view:"-" desc:"VarSet = 1: the storage indexes and PrjnParams"`
+ // VarSet = 1: the storage indexes and PrjnParams
+ Idxs *vgpu.VarSet `view:"-"`
- // [view: -] VarSet = 2: the Storage buffer for RW state structs and neuron floats
- Structs *vgpu.VarSet `view:"-" desc:"VarSet = 2: the Storage buffer for RW state structs and neuron floats"`
+ // VarSet = 2: the Storage buffer for RW state structs and neuron floats
+ Structs *vgpu.VarSet `view:"-"`
- // [view: -] Varset = 3: the Storage buffer for synapses
- Syns *vgpu.VarSet `view:"-" desc:"Varset = 3: the Storage buffer for synapses"`
+ // Varset = 3: the Storage buffer for synapses
+ Syns *vgpu.VarSet `view:"-"`
- // [view: -] Varset = 4: the Storage buffer for SynCa banks
- SynCas *vgpu.VarSet `view:"-" desc:"Varset = 4: the Storage buffer for SynCa banks"`
+ // Varset = 4: the Storage buffer for SynCa banks
+ SynCas *vgpu.VarSet `view:"-"`
- // [view: -] for sequencing commands
- Semaphores map[string]vk.Semaphore `view:"-" desc:"for sequencing commands"`
+ // for sequencing commands
+ Semaphores map[string]vk.Semaphore `view:"-"`
- // [def: 64] [view: -] number of warp threads -- typically 64 -- must update all hlsl files if changed!
- NThreads int `view:"-" inactive:"-" def:"64" desc:"number of warp threads -- typically 64 -- must update all hlsl files if changed!"`
+ // number of warp threads -- typically 64 -- must update all hlsl files if changed!
+ NThreads int `view:"-" inactive:"-" def:"64"`
- // [view: -] maximum number of bytes per individual storage buffer element, from GPUProps.Limits.MaxStorageBufferRange
- MaxBufferBytes uint32 `view:"-" desc:"maximum number of bytes per individual storage buffer element, from GPUProps.Limits.MaxStorageBufferRange"`
+ // maximum number of bytes per individual storage buffer element, from GPUProps.Limits.MaxStorageBufferRange
+ MaxBufferBytes uint32 `view:"-"`
- // [view: -] bank of floats for GPU access
- SynapseCas0 []float32 `view:"-" desc:"bank of floats for GPU access"`
+ // bank of floats for GPU access
+ SynapseCas0 []float32 `view:"-"`
- // [view: -] bank of floats for GPU access
- SynapseCas1 []float32 `view:"-" desc:"bank of floats for GPU access"`
+ // bank of floats for GPU access
+ SynapseCas1 []float32 `view:"-"`
- // [view: -] bank of floats for GPU access
- SynapseCas2 []float32 `view:"-" desc:"bank of floats for GPU access"`
+ // bank of floats for GPU access
+ SynapseCas2 []float32 `view:"-"`
- // [view: -] bank of floats for GPU access
- SynapseCas3 []float32 `view:"-" desc:"bank of floats for GPU access"`
+ // bank of floats for GPU access
+ SynapseCas3 []float32 `view:"-"`
- // [view: -] bank of floats for GPU access
- SynapseCas4 []float32 `view:"-" desc:"bank of floats for GPU access"`
+ // bank of floats for GPU access
+ SynapseCas4 []float32 `view:"-"`
- // [view: -] bank of floats for GPU access
- SynapseCas5 []float32 `view:"-" desc:"bank of floats for GPU access"`
+ // bank of floats for GPU access
+ SynapseCas5 []float32 `view:"-"`
- // [view: -] bank of floats for GPU access
- SynapseCas6 []float32 `view:"-" desc:"bank of floats for GPU access"`
+ // bank of floats for GPU access
+ SynapseCas6 []float32 `view:"-"`
- // [view: -] bank of floats for GPU access
- SynapseCas7 []float32 `view:"-" desc:"bank of floats for GPU access"`
+ // bank of floats for GPU access
+ SynapseCas7 []float32 `view:"-"`
- // [view: -] tracks var binding
- DidBind map[string]bool `view:"-" desc:"tracks var binding"`
+ // tracks var binding
+ DidBind map[string]bool `view:"-"`
}
// ConfigGPUwithGUI turns on GPU mode in context of an active GUI where Vulkan
@@ -198,7 +198,7 @@ type GPU struct {
// Configures the GPU -- call after Network is Built, initialized, params are set,
// and everything is ready to run.
func (nt *Network) ConfigGPUwithGUI(ctx *Context) {
- oswin.TheApp.RunOnMain(func() {
+ goosi.TheApp.RunOnMain(func() {
nt.GPU.Config(ctx, nt)
})
fmt.Printf("Running on GPU: %s\n", TheGPU.DeviceName)
diff --git a/axon/gtigen.go b/axon/gtigen.go
new file mode 100644
index 000000000..41a994c14
--- /dev/null
+++ b/axon/gtigen.go
@@ -0,0 +1,1900 @@
+// Code generated by "goki generate -add-types"; DO NOT EDIT.
+
+package axon
+
+import (
+ "goki.dev/gti"
+ "goki.dev/ordmap"
+)
+
+var _ = gti.AddType(&gti.Type{
+ Name: "github.com/emer/axon/axon.SpikeParams",
+ ShortName: "axon.SpikeParams",
+ IDName: "spike-params",
+ Doc: "SpikeParams contains spiking activation function params.\nImplements a basic thresholded Vm model, and optionally\nthe AdEx adaptive exponential function (adapt is KNaAdapt)",
+ Directives: gti.Directives{
+ >i.Directive{Tool: "gosl", Directive: "hlsl", Args: []string{"act"}},
+ >i.Directive{Tool: "gosl", Directive: "end", Args: []string{"act"}},
+ >i.Directive{Tool: "gosl", Directive: "start", Args: []string{"act"}},
+ },
+ Fields: ordmap.Make([]ordmap.KeyVal[string, *gti.Field]{
+ {"Thr", >i.Field{Name: "Thr", Type: "float32", LocalType: "float32", Doc: "threshold value Theta (Q) for firing output activation (.5 is more accurate value based on AdEx biological parameters and normalization", Directives: gti.Directives{}, Tag: "def:\"0.5\""}},
+ {"VmR", >i.Field{Name: "VmR", Type: "float32", LocalType: "float32", Doc: "post-spiking membrane potential to reset to, produces refractory effect if lower than VmInit -- 0.3 is apropriate biologically-based value for AdEx (Brette & Gurstner, 2005) parameters. See also RTau", Directives: gti.Directives{}, Tag: "def:\"0.3\""}},
+ {"Tr", >i.Field{Name: "Tr", Type: "int32", LocalType: "int32", Doc: "post-spiking explicit refractory period, in cycles -- prevents Vm updating for this number of cycles post firing -- Vm is reduced in exponential steps over this period according to RTau, being fixed at Tr to VmR exactly", Directives: gti.Directives{}, Tag: "min:\"1\" def:\"3\""}},
+ {"RTau", >i.Field{Name: "RTau", Type: "float32", LocalType: "float32", Doc: "time constant for decaying Vm down to VmR -- at end of Tr it is set to VmR exactly -- this provides a more realistic shape of the post-spiking Vm which is only relevant for more realistic channels that key off of Vm -- does not otherwise affect standard computation", Directives: gti.Directives{}, Tag: "def:\"1.6667\""}},
+ {"Exp", >i.Field{Name: "Exp", Type: "goki.dev/gosl/v2/slbool.Bool", LocalType: "slbool.Bool", Doc: "if true, turn on exponential excitatory current that drives Vm rapidly upward for spiking as it gets past its nominal firing threshold (Thr) -- nicely captures the Hodgkin Huxley dynamics of Na and K channels -- uses Brette & Gurstner 2005 AdEx formulation", Directives: gti.Directives{}, Tag: "def:\"true\""}},
+ {"ExpSlope", >i.Field{Name: "ExpSlope", Type: "float32", LocalType: "float32", Doc: "slope in Vm (2 mV = .02 in normalized units) for extra exponential excitatory current that drives Vm rapidly upward for spiking as it gets past its nominal firing threshold (Thr) -- nicely captures the Hodgkin Huxley dynamics of Na and K channels -- uses Brette & Gurstner 2005 AdEx formulation", Directives: gti.Directives{}, Tag: "viewif:\"Exp\" def:\"0.02\""}},
+ {"ExpThr", >i.Field{Name: "ExpThr", Type: "float32", LocalType: "float32", Doc: "membrane potential threshold for actually triggering a spike when using the exponential mechanism", Directives: gti.Directives{}, Tag: "viewif:\"Exp\" def:\"0.9\""}},
+ {"MaxHz", >i.Field{Name: "MaxHz", Type: "float32", LocalType: "float32", Doc: "for translating spiking interval (rate) into rate-code activation equivalent, what is the maximum firing rate associated with a maximum activation value of 1", Directives: gti.Directives{}, Tag: "def:\"180\" min:\"1\""}},
+ {"ISITau", >i.Field{Name: "ISITau", Type: "float32", LocalType: "float32", Doc: "constant for integrating the spiking interval in estimating spiking rate", Directives: gti.Directives{}, Tag: "def:\"5\" min:\"1\""}},
+ {"ISIDt", >i.Field{Name: "ISIDt", Type: "float32", LocalType: "float32", Doc: "rate = 1 / tau", Directives: gti.Directives{}, Tag: "view:\"-\""}},
+ {"RDt", >i.Field{Name: "RDt", Type: "float32", LocalType: "float32", Doc: "rate = 1 / tau", Directives: gti.Directives{}, Tag: "view:\"-\""}},
+ {"pad", >i.Field{Name: "pad", Type: "int32", LocalType: "int32", Doc: "", Directives: gti.Directives{}, Tag: ""}},
+ }),
+ Embeds: ordmap.Make([]ordmap.KeyVal[string, *gti.Field]{}),
+ Methods: ordmap.Make([]ordmap.KeyVal[string, *gti.Method]{}),
+})
+
+var _ = gti.AddType(&gti.Type{
+ Name: "github.com/emer/axon/axon.DendParams",
+ ShortName: "axon.DendParams",
+ IDName: "dend-params",
+ Doc: "DendParams are the parameters for updating dendrite-specific dynamics",
+ Directives: gti.Directives{},
+ Fields: ordmap.Make([]ordmap.KeyVal[string, *gti.Field]{
+ {"GbarExp", >i.Field{Name: "GbarExp", Type: "float32", LocalType: "float32", Doc: "dendrite-specific strength multiplier of the exponential spiking drive on Vm -- e.g., .5 makes it half as strong as at the soma (which uses Gbar.L as a strength multiplier per the AdEx standard model)", Directives: gti.Directives{}, Tag: "def:\"0.2,0.5\""}},
+ {"GbarR", >i.Field{Name: "GbarR", Type: "float32", LocalType: "float32", Doc: "dendrite-specific conductance of Kdr delayed rectifier currents, used to reset membrane potential for dendrite -- applied for Tr msec", Directives: gti.Directives{}, Tag: "def:\"3,6\""}},
+ {"SSGi", >i.Field{Name: "SSGi", Type: "float32", LocalType: "float32", Doc: "SST+ somatostatin positive slow spiking inhibition level specifically affecting dendritic Vm (VmDend) -- this is important for countering a positive feedback loop from NMDA getting stronger over the course of learning -- also typically requires SubMean = 1 for TrgAvgAct and learning to fully counter this feedback loop.", Directives: gti.Directives{}, Tag: "def:\"0,2\""}},
+ {"HasMod", >i.Field{Name: "HasMod", Type: "goki.dev/gosl/v2/slbool.Bool", LocalType: "slbool.Bool", Doc: "set automatically based on whether this layer has any recv projections that have a GType conductance type of Modulatory -- if so, then multiply GeSyn etc by GModSyn", Directives: gti.Directives{}, Tag: "inactive:\"+\""}},
+ {"ModGain", >i.Field{Name: "ModGain", Type: "float32", LocalType: "float32", Doc: "multiplicative gain factor on the total modulatory input -- this can also be controlled by the PrjnScale.Abs factor on ModulatoryG inputs, but it is convenient to be able to control on the layer as well.", Directives: gti.Directives{}, Tag: ""}},
+ {"ModBase", >i.Field{Name: "ModBase", Type: "float32", LocalType: "float32", Doc: "baseline modulatory level for modulatory effects -- net modulation is ModBase + ModGain * GModSyn", Directives: gti.Directives{}, Tag: ""}},
+ {"pad", >i.Field{Name: "pad", Type: "int32", LocalType: "int32", Doc: "", Directives: gti.Directives{}, Tag: ""}},
+ }),
+ Embeds: ordmap.Make([]ordmap.KeyVal[string, *gti.Field]{}),
+ Methods: ordmap.Make([]ordmap.KeyVal[string, *gti.Method]{}),
+})
+
+var _ = gti.AddType(&gti.Type{
+ Name: "github.com/emer/axon/axon.ActInitParams",
+ ShortName: "axon.ActInitParams",
+ IDName: "act-init-params",
+ Doc: "ActInitParams are initial values for key network state variables.\nInitialized in InitActs called by InitWts, and provides target values for DecayState.",
+ Directives: gti.Directives{},
+ Fields: ordmap.Make([]ordmap.KeyVal[string, *gti.Field]{
+ {"Vm", >i.Field{Name: "Vm", Type: "float32", LocalType: "float32", Doc: "initial membrane potential -- see Erev.L for the resting potential (typically .3)", Directives: gti.Directives{}, Tag: "def:\"0.3\""}},
+ {"Act", >i.Field{Name: "Act", Type: "float32", LocalType: "float32", Doc: "initial activation value -- typically 0", Directives: gti.Directives{}, Tag: "def:\"0\""}},
+ {"GeBase", >i.Field{Name: "GeBase", Type: "float32", LocalType: "float32", Doc: "baseline level of excitatory conductance (net input) -- Ge is initialized to this value, and it is added in as a constant background level of excitatory input -- captures all the other inputs not represented in the model, and intrinsic excitability, etc", Directives: gti.Directives{}, Tag: "def:\"0\""}},
+ {"GiBase", >i.Field{Name: "GiBase", Type: "float32", LocalType: "float32", Doc: "baseline level of inhibitory conductance (net input) -- Gi is initialized to this value, and it is added in as a constant background level of inhibitory input -- captures all the other inputs not represented in the model", Directives: gti.Directives{}, Tag: "def:\"0\""}},
+ {"GeVar", >i.Field{Name: "GeVar", Type: "float32", LocalType: "float32", Doc: "variance (sigma) of gaussian distribution around baseline Ge values, per unit, to establish variability in intrinsic excitability. value never goes < 0", Directives: gti.Directives{}, Tag: "def:\"0\""}},
+ {"GiVar", >i.Field{Name: "GiVar", Type: "float32", LocalType: "float32", Doc: "variance (sigma) of gaussian distribution around baseline Gi values, per unit, to establish variability in intrinsic excitability. value never goes < 0", Directives: gti.Directives{}, Tag: "def:\"0\""}},
+ {"pad", >i.Field{Name: "pad", Type: "int32", LocalType: "int32", Doc: "", Directives: gti.Directives{}, Tag: ""}},
+ }),
+ Embeds: ordmap.Make([]ordmap.KeyVal[string, *gti.Field]{}),
+ Methods: ordmap.Make([]ordmap.KeyVal[string, *gti.Method]{}),
+})
+
+var _ = gti.AddType(&gti.Type{
+ Name: "github.com/emer/axon/axon.DecayParams",
+ ShortName: "axon.DecayParams",
+ IDName: "decay-params",
+ Doc: "DecayParams control the decay of activation state in the DecayState function\ncalled in NewState when a new state is to be processed.",
+ Directives: gti.Directives{
+ >i.Directive{Tool: "gosl", Directive: "start", Args: []string{"act"}},
+ },
+ Fields: ordmap.Make([]ordmap.KeyVal[string, *gti.Field]{
+ {"Act", >i.Field{Name: "Act", Type: "float32", LocalType: "float32", Doc: "proportion to decay most activation state variables toward initial values at start of every ThetaCycle (except those controlled separately below) -- if 1 it is effectively equivalent to full clear, resetting other derived values. ISI is reset every AlphaCycle to get a fresh sample of activations (doesn't affect direct computation -- only readout).", Directives: gti.Directives{}, Tag: "def:\"0,0.2,0.5,1\" max:\"1\" min:\"0\""}},
+ {"Glong", >i.Field{Name: "Glong", Type: "float32", LocalType: "float32", Doc: "proportion to decay long-lasting conductances, NMDA and GABA, and also the dendritic membrane potential -- when using random stimulus order, it is important to decay this significantly to allow a fresh start -- but set Act to 0 to enable ongoing activity to keep neurons in their sensitive regime.", Directives: gti.Directives{}, Tag: "def:\"0,0.6\" max:\"1\" min:\"0\""}},
+ {"AHP", >i.Field{Name: "AHP", Type: "float32", LocalType: "float32", Doc: "decay of afterhyperpolarization currents, including mAHP, sAHP, and KNa -- has a separate decay because often useful to have this not decay at all even if decay is on.", Directives: gti.Directives{}, Tag: "def:\"0\" max:\"1\" min:\"0\""}},
+ {"LearnCa", >i.Field{Name: "LearnCa", Type: "float32", LocalType: "float32", Doc: "decay of Ca variables driven by spiking activity used in learning: CaSpk* and Ca* variables. These are typically not decayed but may need to be in some situations.", Directives: gti.Directives{}, Tag: "def:\"0\" max:\"1\" min:\"0\""}},
+ {"OnRew", >i.Field{Name: "OnRew", Type: "goki.dev/gosl/v2/slbool.Bool", LocalType: "slbool.Bool", Doc: "decay layer at end of ThetaCycle when there is a global reward -- true by default for PTPred, PTMaint and PFC Super layers", Directives: gti.Directives{}, Tag: ""}},
+ {"pad", >i.Field{Name: "pad", Type: "float32", LocalType: "float32", Doc: "", Directives: gti.Directives{}, Tag: ""}},
+ }),
+ Embeds: ordmap.Make([]ordmap.KeyVal[string, *gti.Field]{}),
+ Methods: ordmap.Make([]ordmap.KeyVal[string, *gti.Method]{}),
+})
+
+var _ = gti.AddType(&gti.Type{
+ Name: "github.com/emer/axon/axon.DtParams",
+ ShortName: "axon.DtParams",
+ IDName: "dt-params",
+ Doc: "DtParams are time and rate constants for temporal derivatives in Axon (Vm, G)",
+ Directives: gti.Directives{},
+ Fields: ordmap.Make([]ordmap.KeyVal[string, *gti.Field]{
+ {"Integ", >i.Field{Name: "Integ", Type: "float32", LocalType: "float32", Doc: "overall rate constant for numerical integration, for all equations at the unit level -- all time constants are specified in millisecond units, with one cycle = 1 msec -- if you instead want to make one cycle = 2 msec, you can do this globally by setting this integ value to 2 (etc). However, stability issues will likely arise if you go too high. For improved numerical stability, you may even need to reduce this value to 0.5 or possibly even lower (typically however this is not necessary). MUST also coordinate this with network.time_inc variable to ensure that global network.time reflects simulated time accurately", Directives: gti.Directives{}, Tag: "def:\"1,0.5\" min:\"0\""}},
+ {"VmTau", >i.Field{Name: "VmTau", Type: "float32", LocalType: "float32", Doc: "membrane potential time constant in cycles, which should be milliseconds typically (tau is roughly how long it takes for value to change significantly -- 1.4x the half-life) -- reflects the capacitance of the neuron in principle -- biological default for AdEx spiking model C = 281 pF = 2.81 normalized", Directives: gti.Directives{}, Tag: "def:\"2.81\" min:\"1\""}},
+ {"VmDendTau", >i.Field{Name: "VmDendTau", Type: "float32", LocalType: "float32", Doc: "dendritic membrane potential time constant in cycles, which should be milliseconds typically (tau is roughly how long it takes for value to change significantly -- 1.4x the half-life) -- reflects the capacitance of the neuron in principle -- biological default for AdEx spiking model C = 281 pF = 2.81 normalized", Directives: gti.Directives{}, Tag: "def:\"5\" min:\"1\""}},
+ {"VmSteps", >i.Field{Name: "VmSteps", Type: "int32", LocalType: "int32", Doc: "number of integration steps to take in computing new Vm value -- this is the one computation that can be most numerically unstable so taking multiple steps with proportionally smaller dt is beneficial", Directives: gti.Directives{}, Tag: "def:\"2\" min:\"1\""}},
+ {"GeTau", >i.Field{Name: "GeTau", Type: "float32", LocalType: "float32", Doc: "time constant for decay of excitatory AMPA receptor conductance.", Directives: gti.Directives{}, Tag: "def:\"5\" min:\"1\""}},
+ {"GiTau", >i.Field{Name: "GiTau", Type: "float32", LocalType: "float32", Doc: "time constant for decay of inhibitory GABAa receptor conductance.", Directives: gti.Directives{}, Tag: "def:\"7\" min:\"1\""}},
+ {"IntTau", >i.Field{Name: "IntTau", Type: "float32", LocalType: "float32", Doc: "time constant for integrating values over timescale of an individual input state (e.g., roughly 200 msec -- theta cycle), used in computing ActInt, GeInt from Ge, and GiInt from GiSyn -- this is used for scoring performance, not for learning, in cycles, which should be milliseconds typically (tau is roughly how long it takes for value to change significantly -- 1.4x the half-life),", Directives: gti.Directives{}, Tag: "def:\"40\" min:\"1\""}},
+ {"LongAvgTau", >i.Field{Name: "LongAvgTau", Type: "float32", LocalType: "float32", Doc: "time constant for integrating slower long-time-scale averages, such as nrn.ActAvg, Pool.ActsMAvg, ActsPAvg -- computed in NewState when a new input state is present (i.e., not msec but in units of a theta cycle) (tau is roughly how long it takes for value to change significantly) -- set lower for smaller models", Directives: gti.Directives{}, Tag: "def:\"20\" min:\"1\""}},
+ {"MaxCycStart", >i.Field{Name: "MaxCycStart", Type: "int32", LocalType: "int32", Doc: "cycle to start updating the SpkMaxCa, SpkMax values within a theta cycle -- early cycles often reflect prior state", Directives: gti.Directives{}, Tag: "def:\"10\" min:\"0\""}},
+ {"VmDt", >i.Field{Name: "VmDt", Type: "float32", LocalType: "float32", Doc: "nominal rate = Integ / tau", Directives: gti.Directives{}, Tag: "view:\"-\" json:\"-\" xml:\"-\""}},
+ {"VmDendDt", >i.Field{Name: "VmDendDt", Type: "float32", LocalType: "float32", Doc: "nominal rate = Integ / tau", Directives: gti.Directives{}, Tag: "view:\"-\" json:\"-\" xml:\"-\""}},
+ {"DtStep", >i.Field{Name: "DtStep", Type: "float32", LocalType: "float32", Doc: "1 / VmSteps", Directives: gti.Directives{}, Tag: "view:\"-\" json:\"-\" xml:\"-\""}},
+ {"GeDt", >i.Field{Name: "GeDt", Type: "float32", LocalType: "float32", Doc: "rate = Integ / tau", Directives: gti.Directives{}, Tag: "view:\"-\" json:\"-\" xml:\"-\""}},
+ {"GiDt", >i.Field{Name: "GiDt", Type: "float32", LocalType: "float32", Doc: "rate = Integ / tau", Directives: gti.Directives{}, Tag: "view:\"-\" json:\"-\" xml:\"-\""}},
+ {"IntDt", >i.Field{Name: "IntDt", Type: "float32", LocalType: "float32", Doc: "rate = Integ / tau", Directives: gti.Directives{}, Tag: "view:\"-\" json:\"-\" xml:\"-\""}},
+ {"LongAvgDt", >i.Field{Name: "LongAvgDt", Type: "float32", LocalType: "float32", Doc: "rate = 1 / tau", Directives: gti.Directives{}, Tag: "view:\"-\" json:\"-\" xml:\"-\""}},
+ }),
+ Embeds: ordmap.Make([]ordmap.KeyVal[string, *gti.Field]{}),
+ Methods: ordmap.Make([]ordmap.KeyVal[string, *gti.Method]{}),
+})
+
+var _ = gti.AddType(&gti.Type{
+ Name: "github.com/emer/axon/axon.SpikeNoiseParams",
+ ShortName: "axon.SpikeNoiseParams",
+ IDName: "spike-noise-params",
+ Doc: "SpikeNoiseParams parameterizes background spiking activity impinging on the neuron,\nsimulated using a poisson spiking process.",
+ Directives: gti.Directives{},
+ Fields: ordmap.Make([]ordmap.KeyVal[string, *gti.Field]{
+ {"On", >i.Field{Name: "On", Type: "goki.dev/gosl/v2/slbool.Bool", LocalType: "slbool.Bool", Doc: "add noise simulating background spiking levels", Directives: gti.Directives{}, Tag: ""}},
+ {"GeHz", >i.Field{Name: "GeHz", Type: "float32", LocalType: "float32", Doc: "mean frequency of excitatory spikes -- typically 50Hz but multiple inputs increase rate -- poisson lambda parameter, also the variance", Directives: gti.Directives{}, Tag: "viewif:\"On\" def:\"100\""}},
+ {"Ge", >i.Field{Name: "Ge", Type: "float32", LocalType: "float32", Doc: "excitatory conductance per spike -- .001 has minimal impact, .01 can be strong, and .15 is needed to influence timing of clamped inputs", Directives: gti.Directives{}, Tag: "viewif:\"On\" min:\"0\""}},
+ {"GiHz", >i.Field{Name: "GiHz", Type: "float32", LocalType: "float32", Doc: "mean frequency of inhibitory spikes -- typically 100Hz fast spiking but multiple inputs increase rate -- poisson lambda parameter, also the variance", Directives: gti.Directives{}, Tag: "viewif:\"On\" def:\"200\""}},
+ {"Gi", >i.Field{Name: "Gi", Type: "float32", LocalType: "float32", Doc: "excitatory conductance per spike -- .001 has minimal impact, .01 can be strong, and .15 is needed to influence timing of clamped inputs", Directives: gti.Directives{}, Tag: "viewif:\"On\" min:\"0\""}},
+ {"GeExpInt", >i.Field{Name: "GeExpInt", Type: "float32", LocalType: "float32", Doc: "Exp(-Interval) which is the threshold for GeNoiseP as it is updated", Directives: gti.Directives{}, Tag: "view:\"-\" json:\"-\" xml:\"-\""}},
+ {"GiExpInt", >i.Field{Name: "GiExpInt", Type: "float32", LocalType: "float32", Doc: "Exp(-Interval) which is the threshold for GiNoiseP as it is updated", Directives: gti.Directives{}, Tag: "view:\"-\" json:\"-\" xml:\"-\""}},
+ {"pad", >i.Field{Name: "pad", Type: "int32", LocalType: "int32", Doc: "", Directives: gti.Directives{}, Tag: ""}},
+ }),
+ Embeds: ordmap.Make([]ordmap.KeyVal[string, *gti.Field]{}),
+ Methods: ordmap.Make([]ordmap.KeyVal[string, *gti.Method]{}),
+})
+
+var _ = gti.AddType(&gti.Type{
+ Name: "github.com/emer/axon/axon.ClampParams",
+ ShortName: "axon.ClampParams",
+ IDName: "clamp-params",
+ Doc: "ClampParams specify how external inputs drive excitatory conductances\n(like a current clamp) -- either adds or overwrites existing conductances.\nNoise is added in either case.",
+ Directives: gti.Directives{},
+ Fields: ordmap.Make([]ordmap.KeyVal[string, *gti.Field]{
+ {"IsInput", >i.Field{Name: "IsInput", Type: "goki.dev/gosl/v2/slbool.Bool", LocalType: "slbool.Bool", Doc: "is this a clamped input layer? set automatically based on layer type at initialization", Directives: gti.Directives{}, Tag: "inactive:\"+\""}},
+ {"IsTarget", >i.Field{Name: "IsTarget", Type: "goki.dev/gosl/v2/slbool.Bool", LocalType: "slbool.Bool", Doc: "is this a target layer? set automatically based on layer type at initialization", Directives: gti.Directives{}, Tag: "inactive:\"+\""}},
+ {"Ge", >i.Field{Name: "Ge", Type: "float32", LocalType: "float32", Doc: "amount of Ge driven for clamping -- generally use 0.8 for Target layers, 1.5 for Input layers", Directives: gti.Directives{}, Tag: "def:\"0.8,1.5\""}},
+ {"Add", >i.Field{Name: "Add", Type: "goki.dev/gosl/v2/slbool.Bool", LocalType: "slbool.Bool", Doc: "", Directives: gti.Directives{}, Tag: "def:\"false\" view:\"add external conductance on top of any existing -- generally this is not a good idea for target layers (creates a main effect that learning can never match), but may be ok for input layers\""}},
+ {"ErrThr", >i.Field{Name: "ErrThr", Type: "float32", LocalType: "float32", Doc: "threshold on neuron Act activity to count as active for computing error relative to target in PctErr method", Directives: gti.Directives{}, Tag: "def:\"0.5\""}},
+ {"pad", >i.Field{Name: "pad", Type: "float32", LocalType: "float32", Doc: "", Directives: gti.Directives{}, Tag: ""}},
+ }),
+ Embeds: ordmap.Make([]ordmap.KeyVal[string, *gti.Field]{}),
+ Methods: ordmap.Make([]ordmap.KeyVal[string, *gti.Method]{}),
+})
+
+var _ = gti.AddType(&gti.Type{
+ Name: "github.com/emer/axon/axon.AttnParams",
+ ShortName: "axon.AttnParams",
+ IDName: "attn-params",
+ Doc: "AttnParams determine how the Attn modulates Ge",
+ Directives: gti.Directives{},
+ Fields: ordmap.Make([]ordmap.KeyVal[string, *gti.Field]{
+ {"On", >i.Field{Name: "On", Type: "goki.dev/gosl/v2/slbool.Bool", LocalType: "slbool.Bool", Doc: "is attentional modulation active?", Directives: gti.Directives{}, Tag: ""}},
+ {"Min", >i.Field{Name: "Min", Type: "float32", LocalType: "float32", Doc: "minimum act multiplier if attention is 0", Directives: gti.Directives{}, Tag: "viewif:\"On\""}},
+ {"RTThr", >i.Field{Name: "RTThr", Type: "float32", LocalType: "float32", Doc: "threshold on CaSpkP for determining the reaction time for the Layer -- starts after MaxCycStart to ensure that prior trial activity has had a chance to dissipate.", Directives: gti.Directives{}, Tag: ""}},
+ {"pad", >i.Field{Name: "pad", Type: "int32", LocalType: "int32", Doc: "", Directives: gti.Directives{}, Tag: ""}},
+ }),
+ Embeds: ordmap.Make([]ordmap.KeyVal[string, *gti.Field]{}),
+ Methods: ordmap.Make([]ordmap.KeyVal[string, *gti.Method]{}),
+})
+
+var _ = gti.AddType(&gti.Type{
+ Name: "github.com/emer/axon/axon.PopCodeParams",
+ ShortName: "axon.PopCodeParams",
+ IDName: "pop-code-params",
+ Doc: "PopCodeParams provides an encoding of scalar value using population code,\nwhere a single continuous (scalar) value is encoded as a gaussian bump\nacross a population of neurons (1 dimensional).\nIt can also modulate rate code and number of neurons active according to the value.\nThis is for layers that represent values as in the PVLV system (from Context.PVLV).\nBoth normalized activation values (1 max) and Ge conductance values can be generated.",
+ Directives: gti.Directives{},
+ Fields: ordmap.Make([]ordmap.KeyVal[string, *gti.Field]{
+ {"On", >i.Field{Name: "On", Type: "goki.dev/gosl/v2/slbool.Bool", LocalType: "slbool.Bool", Doc: "use popcode encoding of variable(s) that this layer represents", Directives: gti.Directives{}, Tag: ""}},
+ {"Ge", >i.Field{Name: "Ge", Type: "float32", LocalType: "float32", Doc: "Ge multiplier for driving excitatory conductance based on PopCode -- multiplies normalized activation values", Directives: gti.Directives{}, Tag: "viewif:\"On\" def:\"0.1\""}},
+ {"Min", >i.Field{Name: "Min", Type: "float32", LocalType: "float32", Doc: "minimum value representable -- for GaussBump, typically include extra to allow mean with activity on either side to represent the lowest value you want to encode", Directives: gti.Directives{}, Tag: "viewif:\"On\" def:\"-0.1\""}},
+ {"Max", >i.Field{Name: "Max", Type: "float32", LocalType: "float32", Doc: "maximum value representable -- for GaussBump, typically include extra to allow mean with activity on either side to represent the lowest value you want to encode", Directives: gti.Directives{}, Tag: "viewif:\"On\" def:\"1.1\""}},
+ {"MinAct", >i.Field{Name: "MinAct", Type: "float32", LocalType: "float32", Doc: "activation multiplier for values at Min end of range, where values at Max end have an activation of 1 -- if this is < 1, then there is a rate code proportional to the value in addition to the popcode pattern -- see also MinSigma, MaxSigma", Directives: gti.Directives{}, Tag: "viewif:\"On\" def:\"1,0.5\""}},
+ {"MinSigma", >i.Field{Name: "MinSigma", Type: "float32", LocalType: "float32", Doc: "sigma parameter of a gaussian specifying the tuning width of the coarse-coded units, in normalized 0-1 range -- for Min value -- if MinSigma < MaxSigma then more units are activated for Max values vs. Min values, proportionally", Directives: gti.Directives{}, Tag: "viewif:\"On\" def:\"0.1,0.08\""}},
+ {"MaxSigma", >i.Field{Name: "MaxSigma", Type: "float32", LocalType: "float32", Doc: "sigma parameter of a gaussian specifying the tuning width of the coarse-coded units, in normalized 0-1 range -- for Min value -- if MinSigma < MaxSigma then more units are activated for Max values vs. Min values, proportionally", Directives: gti.Directives{}, Tag: "viewif:\"On\" def:\"0.1,0.12\""}},
+ {"Clip", >i.Field{Name: "Clip", Type: "goki.dev/gosl/v2/slbool.Bool", LocalType: "slbool.Bool", Doc: "ensure that encoded and decoded value remains within specified range", Directives: gti.Directives{}, Tag: "viewif:\"On\""}},
+ }),
+ Embeds: ordmap.Make([]ordmap.KeyVal[string, *gti.Field]{}),
+ Methods: ordmap.Make([]ordmap.KeyVal[string, *gti.Method]{}),
+})
+
+var _ = gti.AddType(&gti.Type{
+ Name: "github.com/emer/axon/axon.ActParams",
+ ShortName: "axon.ActParams",
+ IDName: "act-params",
+ Doc: "axon.ActParams contains all the activation computation params and functions\nfor basic Axon, at the neuron level .\nThis is included in axon.Layer to drive the computation.",
+ Directives: gti.Directives{},
+ Fields: ordmap.Make([]ordmap.KeyVal[string, *gti.Field]{
+ {"Spikes", >i.Field{Name: "Spikes", Type: "github.com/emer/axon/axon.SpikeParams", LocalType: "SpikeParams", Doc: "Spiking function parameters", Directives: gti.Directives{}, Tag: "view:\"inline\""}},
+ {"Dend", >i.Field{Name: "Dend", Type: "github.com/emer/axon/axon.DendParams", LocalType: "DendParams", Doc: "dendrite-specific parameters", Directives: gti.Directives{}, Tag: "view:\"inline\""}},
+ {"Init", >i.Field{Name: "Init", Type: "github.com/emer/axon/axon.ActInitParams", LocalType: "ActInitParams", Doc: "initial values for key network state variables -- initialized in InitActs called by InitWts, and provides target values for DecayState", Directives: gti.Directives{}, Tag: "view:\"inline\""}},
+ {"Decay", >i.Field{Name: "Decay", Type: "github.com/emer/axon/axon.DecayParams", LocalType: "DecayParams", Doc: "amount to decay between AlphaCycles, simulating passage of time and effects of saccades etc, especially important for environments with random temporal structure (e.g., most standard neural net training corpora)", Directives: gti.Directives{}, Tag: "view:\"inline\""}},
+ {"Dt", >i.Field{Name: "Dt", Type: "github.com/emer/axon/axon.DtParams", LocalType: "DtParams", Doc: "time and rate constants for temporal derivatives / updating of activation state", Directives: gti.Directives{}, Tag: "view:\"inline\""}},
+ {"Gbar", >i.Field{Name: "Gbar", Type: "github.com/emer/axon/chans.Chans", LocalType: "chans.Chans", Doc: "maximal conductances levels for channels", Directives: gti.Directives{}, Tag: "view:\"inline\""}},
+ {"Erev", >i.Field{Name: "Erev", Type: "github.com/emer/axon/chans.Chans", LocalType: "chans.Chans", Doc: "reversal potentials for each channel", Directives: gti.Directives{}, Tag: "view:\"inline\""}},
+ {"Clamp", >i.Field{Name: "Clamp", Type: "github.com/emer/axon/axon.ClampParams", LocalType: "ClampParams", Doc: "how external inputs drive neural activations", Directives: gti.Directives{}, Tag: "view:\"inline\""}},
+ {"Noise", >i.Field{Name: "Noise", Type: "github.com/emer/axon/axon.SpikeNoiseParams", LocalType: "SpikeNoiseParams", Doc: "how, where, when, and how much noise to add", Directives: gti.Directives{}, Tag: "view:\"inline\""}},
+ {"VmRange", >i.Field{Name: "VmRange", Type: "goki.dev/etable/v2/minmax.F32", LocalType: "minmax.F32", Doc: "range for Vm membrane potential -- -- important to keep just at extreme range of reversal potentials to prevent numerical instability", Directives: gti.Directives{}, Tag: "view:\"inline\""}},
+ {"Mahp", >i.Field{Name: "Mahp", Type: "github.com/emer/axon/chans.MahpParams", LocalType: "chans.MahpParams", Doc: "M-type medium time-scale afterhyperpolarization mAHP current -- this is the primary form of adaptation on the time scale of multiple sequences of spikes", Directives: gti.Directives{}, Tag: "view:\"inline\""}},
+ {"Sahp", >i.Field{Name: "Sahp", Type: "github.com/emer/axon/chans.SahpParams", LocalType: "chans.SahpParams", Doc: "slow time-scale afterhyperpolarization sAHP current -- integrates CaSpkD at theta cycle intervals and produces a hard cutoff on sustained activity for any neuron", Directives: gti.Directives{}, Tag: "view:\"inline\""}},
+ {"KNa", >i.Field{Name: "KNa", Type: "github.com/emer/axon/chans.KNaMedSlow", LocalType: "chans.KNaMedSlow", Doc: "sodium-gated potassium channel adaptation parameters -- activates a leak-like current as a function of neural activity (firing = Na influx) at two different time-scales (Slick = medium, Slack = slow)", Directives: gti.Directives{}, Tag: "view:\"inline\""}},
+ {"NMDA", >i.Field{Name: "NMDA", Type: "github.com/emer/axon/chans.NMDAParams", LocalType: "chans.NMDAParams", Doc: "NMDA channel parameters used in computing Gnmda conductance for bistability, and postsynaptic calcium flux used in learning. Note that Learn.Snmda has distinct parameters used in computing sending NMDA parameters used in learning.", Directives: gti.Directives{}, Tag: "view:\"inline\""}},
+ {"MaintNMDA", >i.Field{Name: "MaintNMDA", Type: "github.com/emer/axon/chans.NMDAParams", LocalType: "chans.NMDAParams", Doc: "NMDA channel parameters used in computing Gnmda conductance for bistability, and postsynaptic calcium flux used in learning. Note that Learn.Snmda has distinct parameters used in computing sending NMDA parameters used in learning.", Directives: gti.Directives{}, Tag: "view:\"inline\""}},
+ {"GabaB", >i.Field{Name: "GabaB", Type: "github.com/emer/axon/chans.GABABParams", LocalType: "chans.GABABParams", Doc: "GABA-B / GIRK channel parameters", Directives: gti.Directives{}, Tag: "view:\"inline\""}},
+ {"VGCC", >i.Field{Name: "VGCC", Type: "github.com/emer/axon/chans.VGCCParams", LocalType: "chans.VGCCParams", Doc: "voltage gated calcium channels -- provide a key additional source of Ca for learning and positive-feedback loop upstate for active neurons", Directives: gti.Directives{}, Tag: "view:\"inline\""}},
+ {"AK", >i.Field{Name: "AK", Type: "github.com/emer/axon/chans.AKsParams", LocalType: "chans.AKsParams", Doc: "A-type potassium (K) channel that is particularly important for limiting the runaway excitation from VGCC channels", Directives: gti.Directives{}, Tag: "view:\"inline\""}},
+ {"SKCa", >i.Field{Name: "SKCa", Type: "github.com/emer/axon/chans.SKCaParams", LocalType: "chans.SKCaParams", Doc: "small-conductance calcium-activated potassium channel produces the pausing function as a consequence of rapid bursting.", Directives: gti.Directives{}, Tag: "view:\"inline\""}},
+ {"AttnMod", >i.Field{Name: "AttnMod", Type: "github.com/emer/axon/axon.AttnParams", LocalType: "AttnParams", Doc: "Attentional modulation parameters: how Attn modulates Ge", Directives: gti.Directives{}, Tag: "view:\"inline\""}},
+ {"PopCode", >i.Field{Name: "PopCode", Type: "github.com/emer/axon/axon.PopCodeParams", LocalType: "PopCodeParams", Doc: "provides encoding population codes, used to represent a single continuous (scalar) value, across a population of units / neurons (1 dimensional)", Directives: gti.Directives{}, Tag: "view:\"inline\""}},
+ }),
+ Embeds: ordmap.Make([]ordmap.KeyVal[string, *gti.Field]{}),
+ Methods: ordmap.Make([]ordmap.KeyVal[string, *gti.Method]{}),
+})
+
+var _ = gti.AddType(&gti.Type{
+ Name: "github.com/emer/axon/axon.PrjnGTypes",
+ ShortName: "axon.PrjnGTypes",
+ IDName: "prjn-g-types",
+ Doc: "PrjnGTypes represents the conductance (G) effects of a given projection,\nincluding excitatory, inhibitory, and modulatory.",
+ Directives: gti.Directives{
+ >i.Directive{Tool: "gosl", Directive: "start", Args: []string{"act_prjn"}},
+ >i.Directive{Tool: "enums", Directive: "enum", Args: []string{}},
+ },
+
+ Methods: ordmap.Make([]ordmap.KeyVal[string, *gti.Method]{}),
+})
+
+var _ = gti.AddType(&gti.Type{
+ Name: "github.com/emer/axon/axon.SynComParams",
+ ShortName: "axon.SynComParams",
+ IDName: "syn-com-params",
+ Doc: "SynComParams are synaptic communication parameters:\nused in the Prjn parameters. Includes delay and\nprobability of failure, and Inhib for inhibitory connections,\nand modulatory projections that have multiplicative-like effects.",
+ Directives: gti.Directives{},
+ Fields: ordmap.Make([]ordmap.KeyVal[string, *gti.Field]{
+ {"GType", >i.Field{Name: "GType", Type: "github.com/emer/axon/axon.PrjnGTypes", LocalType: "PrjnGTypes", Doc: "type of conductance (G) communicated by this projection", Directives: gti.Directives{}, Tag: ""}},
+ {"Delay", >i.Field{Name: "Delay", Type: "uint32", LocalType: "uint32", Doc: "additional synaptic delay in msec for inputs arriving at this projection. Must be <= MaxDelay which is set during network building based on MaxDelay of any existing Prjn in the network. Delay = 0 means a spike reaches receivers in the next Cycle, which is the minimum time (1 msec). Biologically, subtract 1 from biological synaptic delay values to set corresponding Delay value.", Directives: gti.Directives{}, Tag: "min:\"0\" def:\"2\""}},
+ {"MaxDelay", >i.Field{Name: "MaxDelay", Type: "uint32", LocalType: "uint32", Doc: "maximum value of Delay -- based on MaxDelay values when the BuildGBuf function was called when the network was built -- cannot set it longer than this, except by calling BuildGBuf on network after changing MaxDelay to a larger value in any projection in the network.", Directives: gti.Directives{}, Tag: "inactive:\"+\""}},
+ {"PFail", >i.Field{Name: "PFail", Type: "float32", LocalType: "float32", Doc: "probability of synaptic transmission failure -- if > 0, then weights are turned off at random as a function of PFail (times 1-SWt if PFailSwt)", Directives: gti.Directives{}, Tag: ""}},
+ {"PFailSWt", >i.Field{Name: "PFailSWt", Type: "goki.dev/gosl/v2/slbool.Bool", LocalType: "slbool.Bool", Doc: "if true, then probability of failure is inversely proportional to SWt structural / slow weight value (i.e., multiply PFail * (1-SWt)))", Directives: gti.Directives{}, Tag: ""}},
+ {"DelLen", >i.Field{Name: "DelLen", Type: "uint32", LocalType: "uint32", Doc: "delay length = actual length of the GBuf buffer per neuron = Delay+1 -- just for speed", Directives: gti.Directives{}, Tag: "view:\"-\""}},
+ {"pad", >i.Field{Name: "pad", Type: "float32", LocalType: "float32", Doc: "", Directives: gti.Directives{}, Tag: ""}},
+ }),
+ Embeds: ordmap.Make([]ordmap.KeyVal[string, *gti.Field]{}),
+ Methods: ordmap.Make([]ordmap.KeyVal[string, *gti.Method]{}),
+})
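The delay bookkeeping encoded in SynComParams above is easy to get wrong, so here is a minimal sketch of the two rules its field docs state: the per-neuron spike buffer has length DelLen = Delay+1, and a biological synaptic delay of d msec corresponds to Delay = d-1, since Delay = 0 already means arrival on the next 1 msec Cycle. The helper functions are hypothetical, for illustration only:

// delayRingLen mirrors the DelLen = Delay+1 rule in SynComParams:
// the per-neuron GBuf ring must hold the delayed spike plus the current slot.
func delayRingLen(delay uint32) uint32 {
	return delay + 1
}

// delayFromBiological converts a biological synaptic delay in msec to the
// Delay parameter value (hypothetical helper): Delay = 0 already means the
// spike arrives on the next 1 msec Cycle, so subtract 1.
func delayFromBiological(msec uint32) uint32 {
	if msec == 0 {
		return 0
	}
	return msec - 1
}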
+
+var _ = gti.AddType(&gti.Type{
+ Name: "github.com/emer/axon/axon.PrjnScaleParams",
+ ShortName: "axon.PrjnScaleParams",
+ IDName: "prjn-scale-params",
+ Doc: "PrjnScaleParams are projection scaling parameters: modulates overall strength of projection,\nusing both absolute and relative factors.",
+ Directives: gti.Directives{
+ >i.Directive{Tool: "gosl", Directive: "start", Args: []string{"act_prjn"}},
+ },
+ Fields: ordmap.Make([]ordmap.KeyVal[string, *gti.Field]{
+ {"Rel", >i.Field{Name: "Rel", Type: "float32", LocalType: "float32", Doc: "relative scaling that shifts balance between different projections -- this is subject to normalization across all other projections into receiving neuron, and determines the GScale.Target for adapting scaling", Directives: gti.Directives{}, Tag: "min:\"0\""}},
+ {"Abs", >i.Field{Name: "Abs", Type: "float32", LocalType: "float32", Doc: "absolute multiplier adjustment factor for the prjn scaling -- can be used to adjust for idiosyncrasies not accommodated by the standard scaling based on initial target activation level and relative scaling factors -- any adaptation operates by directly adjusting scaling factor from the initially computed value", Directives: gti.Directives{}, Tag: "def:\"1\" min:\"0\""}},
+ {"pad", >i.Field{Name: "pad", Type: "float32", LocalType: "float32", Doc: "", Directives: gti.Directives{}, Tag: ""}},
+ }),
+ Embeds: ordmap.Make([]ordmap.KeyVal[string, *gti.Field]{}),
+ Methods: ordmap.Make([]ordmap.KeyVal[string, *gti.Method]{}),
+})
+
+var _ = gti.AddType(&gti.Type{
+ Name: "github.com/emer/axon/axon.AvgMaxI32",
+ ShortName: "axon.AvgMaxI32",
+ IDName: "avg-max-i-32",
+ Doc: "AvgMaxI32 holds average and max statistics for float32,\nand values used for computing them incrementally,\nusing a fixed precision int32 based float representation\nthat can be used with GPU-based atomic add and max functions.\nThis ONLY works for positive values with averages around 1, and\nthe N must be set IN ADVANCE to the correct number of items.\nOnce Calc() is called, the incremental values are reset\nvia Init() so it is always ready for updating without a separate\nInit() pass.",
+ Directives: gti.Directives{
+ >i.Directive{Tool: "gosl", Directive: "start", Args: []string{"avgmaxi"}},
+ },
+ Fields: ordmap.Make([]ordmap.KeyVal[string, *gti.Field]{
+ {"Avg", >i.Field{Name: "Avg", Type: "float32", LocalType: "float32", Doc: "Average, from Calc when last computed as Sum / N", Directives: gti.Directives{}, Tag: "inactive:\"+\""}},
+ {"Max", >i.Field{Name: "Max", Type: "float32", LocalType: "float32", Doc: "Maximum value, copied from CurMax in Calc", Directives: gti.Directives{}, Tag: "inactive:\"+\""}},
+ {"Sum", >i.Field{Name: "Sum", Type: "int32", LocalType: "int32", Doc: "sum for computing average -- incremented in UpdateVal, reset in Calc", Directives: gti.Directives{}, Tag: "inactive:\"+\""}},
+ {"CurMax", >i.Field{Name: "CurMax", Type: "int32", LocalType: "int32", Doc: "current maximum value, updated via UpdateVal, reset in Calc", Directives: gti.Directives{}, Tag: "inactive:\"+\""}},
+ {"N", >i.Field{Name: "N", Type: "int32", LocalType: "int32", Doc: "number of items in the sum -- this must be set in advance to a known value and it is used in computing the float <-> int conversion factor to maximize precision.", Directives: gti.Directives{}, Tag: "inactive:\"+\""}},
+ {"pad", >i.Field{Name: "pad", Type: "int32", LocalType: "int32", Doc: "", Directives: gti.Directives{}, Tag: ""}},
+ }),
+ Embeds: ordmap.Make([]ordmap.KeyVal[string, *gti.Field]{}),
+ Methods: ordmap.Make([]ordmap.KeyVal[string, *gti.Method]{}),
+})
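To make the AvgMaxI32 contract concrete, here is a minimal sketch of the incremental pattern its doc describes: N is fixed in advance, values accumulate in int32 form (compatible with GPU atomic add/max), and Calc computes Avg and Max and then re-inits the increments so no separate Init pass is needed. The fixed-point scale factor and method bodies are illustrative assumptions, not the actual axon implementation:

const toInt = float32(1 << 20) // assumed fixed-point scale for values near 1

type avgMaxSketch struct {
	Avg, Max    float32 // results from Calc
	Sum, CurMax int32   // int32 increments, usable with atomic add / max
	N           int32   // must be set in advance
}

// UpdateVal accumulates one value in fixed-point int32 form.
func (am *avgMaxSketch) UpdateVal(v float32) {
	iv := int32(v * toInt)
	am.Sum += iv // atomic.AddInt32 in a parallel setting
	if iv > am.CurMax {
		am.CurMax = iv // atomic max on GPU
	}
}

// Calc finalizes Avg and Max, then re-inits the increments so the
// struct is immediately ready for the next update pass.
func (am *avgMaxSketch) Calc() {
	am.Avg = float32(am.Sum) / (toInt * float32(am.N))
	am.Max = float32(am.CurMax) / toInt
	am.Sum, am.CurMax = 0, 0
}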
+
+var _ = gti.AddType(&gti.Type{
+ Name: "github.com/emer/axon/axon.AxonNetwork",
+ ShortName: "axon.AxonNetwork",
+ IDName: "axon-network",
+ Doc: "AxonNetwork defines the essential algorithmic API for Axon, at the network level.\nThese are the methods that the user calls in their Sim code:\n* NewState\n* Cycle\n* NewPhase\n* DWt\n* WtFmDwt\nBecause we don't want to have to force the user to use the interface cast in calling\nthese methods, we provide Impl versions here that are the implementations\nwhich the user-facing method calls through the interface cast.\nSpecialized algorithms should thus only change the Impl version, which is what\nis exposed here in this interface.\n\nThere is now a strong constraint that all Cycle level computation takes place\nin one pass at the Layer level, which greatly improves threading efficiency.\n\nAll of the structural API is in emer.Network, which this interface also inherits for\nconvenience.",
+ Directives: gti.Directives{},
+
+ Methods: ordmap.Make([]ordmap.KeyVal[string, *gti.Method]{}),
+})
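The AxonNetwork doc above lists the methods a Sim calls once per theta cycle. Schematically, under assumed signatures (the real ones may differ), the loop looks like:

// netAPI is a schematic stand-in: the method set is taken from the
// AxonNetwork doc above, but the real signatures may differ.
type netAPI interface {
	NewState(ctx *Context)
	Cycle(ctx *Context)
	NewPhase(ctx *Context)
	DWt(ctx *Context)
	WtFmDwt(ctx *Context)
}

// thetaCycle runs one theta cycle of training, schematically.
func thetaCycle(net netAPI, ctx *Context, minusCycles, plusCycles int) {
	net.NewState(ctx)
	for cyc := 0; cyc < minusCycles+plusCycles; cyc++ {
		net.Cycle(ctx) // one msec of spiking updates
		if cyc == minusCycles-1 {
			net.NewPhase(ctx) // minus -> plus phase transition
		}
	}
	net.DWt(ctx)     // weight changes from minus vs. plus phase difference
	net.WtFmDwt(ctx) // apply the accumulated weight changes
}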
+
+var _ = gti.AddType(&gti.Type{
+ Name: "github.com/emer/axon/axon.AxonLayer",
+ ShortName: "axon.AxonLayer",
+ IDName: "axon-layer",
+ Doc: "AxonLayer defines the essential algorithmic API for Axon, at the layer level.\nThese are the methods that the axon.Network calls on its layers at each step\nof processing. Other Layer types can selectively re-implement (override) these methods\nto modify the computation, while inheriting the basic behavior for non-overridden methods.\n\nAll of the structural API is in emer.Layer, which this interface also inherits for\nconvenience.",
+ Directives: gti.Directives{},
+
+ Methods: ordmap.Make([]ordmap.KeyVal[string, *gti.Method]{}),
+})
+
+var _ = gti.AddType(&gti.Type{
+ Name: "github.com/emer/axon/axon.AxonPrjn",
+ ShortName: "axon.AxonPrjn",
+ IDName: "axon-prjn",
+ Doc: "AxonPrjn defines the essential algorithmic API for Axon, at the projection level.\nThese are the methods that the axon.Layer calls on its prjns at each step\nof processing. Other Prjn types can selectively re-implement (override) these methods\nto modify the computation, while inheriting the basic behavior for non-overridden methods.\n\nAll of the structural API is in emer.Prjn, which this interface also inherits for\nconvenience.",
+ Directives: gti.Directives{},
+
+ Methods: ordmap.Make([]ordmap.KeyVal[string, *gti.Method]{}),
+})
+
+var _ = gti.AddType(&gti.Type{
+ Name: "github.com/emer/axon/axon.AxonPrjns",
+ ShortName: "axon.AxonPrjns",
+ IDName: "axon-prjns",
+ Doc: "",
+ Directives: gti.Directives{},
+
+ Methods: ordmap.Make([]ordmap.KeyVal[string, *gti.Method]{}),
+})
+
+var _ = gti.AddType(&gti.Type{
+ Name: "github.com/emer/axon/axon.NetIdxs",
+ ShortName: "axon.NetIdxs",
+ IDName: "net-idxs",
+ Doc: "NetIdxs are indexes and sizes for processing network",
+ Directives: gti.Directives{
+ >i.Directive{Tool: "gosl", Directive: "end", Args: []string{"context"}},
+ >i.Directive{Tool: "gosl", Directive: "hlsl", Args: []string{"context"}},
+ >i.Directive{Tool: "gosl", Directive: "end", Args: []string{"context"}},
+ >i.Directive{Tool: "gosl", Directive: "start", Args: []string{"context"}},
+ },
+ Fields: ordmap.Make([]ordmap.KeyVal[string, *gti.Field]{
+ {"NData", >i.Field{Name: "NData", Type: "uint32", LocalType: "uint32", Doc: "number of data parallel items to process currently", Directives: gti.Directives{}, Tag: "min:\"1\""}},
+ {"NetIdx", >i.Field{Name: "NetIdx", Type: "uint32", LocalType: "uint32", Doc: "network index in global Networks list of networks -- needed for GPU shader kernel compatible network variable access functions (e.g., NrnV, SynV etc) in CPU mode", Directives: gti.Directives{}, Tag: "inactive:\"+\""}},
+ {"MaxData", >i.Field{Name: "MaxData", Type: "uint32", LocalType: "uint32", Doc: "maximum amount of data parallel", Directives: gti.Directives{}, Tag: "inactive:\"+\""}},
+ {"NLayers", >i.Field{Name: "NLayers", Type: "uint32", LocalType: "uint32", Doc: "number of layers in the network", Directives: gti.Directives{}, Tag: "inactive:\"+\""}},
+ {"NNeurons", >i.Field{Name: "NNeurons", Type: "uint32", LocalType: "uint32", Doc: "total number of neurons", Directives: gti.Directives{}, Tag: "inactive:\"+\""}},
+ {"NPools", >i.Field{Name: "NPools", Type: "uint32", LocalType: "uint32", Doc: "total number of pools excluding * MaxData factor", Directives: gti.Directives{}, Tag: "inactive:\"+\""}},
+ {"NSyns", >i.Field{Name: "NSyns", Type: "uint32", LocalType: "uint32", Doc: "total number of synapses", Directives: gti.Directives{}, Tag: "inactive:\"+\""}},
+ {"GPUMaxBuffFloats", >i.Field{Name: "GPUMaxBuffFloats", Type: "uint32", LocalType: "uint32", Doc: "maximum size in float32 (4 bytes) of a GPU buffer -- needed for GPU access", Directives: gti.Directives{}, Tag: "inactive:\"+\""}},
+ {"GPUSynCaBanks", >i.Field{Name: "GPUSynCaBanks", Type: "uint32", LocalType: "uint32", Doc: "total number of SynCa banks of GPUMaxBufferBytes arrays in GPU", Directives: gti.Directives{}, Tag: "inactive:\"+\""}},
+ {"PVLVNPosUSs", >i.Field{Name: "PVLVNPosUSs", Type: "uint32", LocalType: "uint32", Doc: "total number of PVLV Drives / positive USs", Directives: gti.Directives{}, Tag: "inactive:\"+\""}},
+ {"PVLVNNegUSs", >i.Field{Name: "PVLVNNegUSs", Type: "uint32", LocalType: "uint32", Doc: "total number of PVLV Negative USs", Directives: gti.Directives{}, Tag: "inactive:\"+\""}},
+ {"GvUSnegOff", >i.Field{Name: "GvUSnegOff", Type: "uint32", LocalType: "uint32", Doc: "offset into GlobalVars for USneg values", Directives: gti.Directives{}, Tag: "inactive:\"+\""}},
+ {"GvUSnegStride", >i.Field{Name: "GvUSnegStride", Type: "uint32", LocalType: "uint32", Doc: "stride into GlobalVars for USneg values", Directives: gti.Directives{}, Tag: "inactive:\"+\""}},
+ {"GvUSposOff", >i.Field{Name: "GvUSposOff", Type: "uint32", LocalType: "uint32", Doc: "offset into GlobalVars for USpos, Drive, VSPatch values values", Directives: gti.Directives{}, Tag: "inactive:\"+\""}},
+ {"GvUSposStride", >i.Field{Name: "GvUSposStride", Type: "uint32", LocalType: "uint32", Doc: "stride into GlobalVars for USpos, Drive, VSPatch values", Directives: gti.Directives{}, Tag: "inactive:\"+\""}},
+ {"pad", >i.Field{Name: "pad", Type: "uint32", LocalType: "uint32", Doc: "", Directives: gti.Directives{}, Tag: ""}},
+ }),
+ Embeds: ordmap.Make([]ordmap.KeyVal[string, *gti.Field]{}),
+ Methods: ordmap.Make([]ordmap.KeyVal[string, *gti.Method]{}),
+})
+
+var _ = gti.AddType(&gti.Type{
+ Name: "github.com/emer/axon/axon.Context",
+ ShortName: "axon.Context",
+ IDName: "context",
+ Doc: "Context contains all of the global context state info\nthat is shared across every step of the computation.\nIt is passed around to all relevant computational functions,\nand is updated on the CPU and synced to the GPU after every cycle.\nIt is the *only* mechanism for communication from CPU to GPU.\nIt contains timing, Testing vs. Training mode, random number context,\nglobal neuromodulation, etc.",
+ Directives: gti.Directives{},
+ Fields: ordmap.Make([]ordmap.KeyVal[string, *gti.Field]{
+ {"Mode", >i.Field{Name: "Mode", Type: "github.com/emer/emergent/v2/etime.Modes", LocalType: "etime.Modes", Doc: "current evaluation mode, e.g., Train, Test, etc", Directives: gti.Directives{}, Tag: ""}},
+ {"Testing", >i.Field{Name: "Testing", Type: "goki.dev/gosl/v2/slbool.Bool", LocalType: "slbool.Bool", Doc: "if true, the model is being run in a testing mode, so no weight changes or other associated computations are needed. this flag should only affect learning-related behavior. Is automatically updated based on Mode != Train", Directives: gti.Directives{}, Tag: "inactive:\"+\""}},
+ {"Phase", >i.Field{Name: "Phase", Type: "int32", LocalType: "int32", Doc: "phase counter: typicaly 0-1 for minus-plus but can be more phases for other algorithms", Directives: gti.Directives{}, Tag: ""}},
+ {"PlusPhase", >i.Field{Name: "PlusPhase", Type: "goki.dev/gosl/v2/slbool.Bool", LocalType: "slbool.Bool", Doc: "true if this is the plus phase, when the outcome / bursting is occurring, driving positive learning -- else minus phase", Directives: gti.Directives{}, Tag: ""}},
+ {"PhaseCycle", >i.Field{Name: "PhaseCycle", Type: "int32", LocalType: "int32", Doc: "cycle within current phase -- minus or plus", Directives: gti.Directives{}, Tag: ""}},
+ {"Cycle", >i.Field{Name: "Cycle", Type: "int32", LocalType: "int32", Doc: "cycle counter: number of iterations of activation updating (settling) on the current state -- this counts time sequentially until reset with NewState", Directives: gti.Directives{}, Tag: ""}},
+ {"ThetaCycles", >i.Field{Name: "ThetaCycles", Type: "int32", LocalType: "int32", Doc: "length of the theta cycle in terms of 1 msec Cycles -- some network update steps depend on doing something at the end of the theta cycle (e.g., CTCtxtPrjn).", Directives: gti.Directives{}, Tag: "def:\"200\""}},
+ {"CyclesTotal", >i.Field{Name: "CyclesTotal", Type: "int32", LocalType: "int32", Doc: "total cycle count -- increments continuously from whenever it was last reset -- typically this is number of milliseconds in simulation time -- is int32 and not uint32 b/c used with Synapse CaUpT which needs to have a -1 case for expired update time", Directives: gti.Directives{}, Tag: ""}},
+ {"Time", >i.Field{Name: "Time", Type: "float32", LocalType: "float32", Doc: "accumulated amount of time the network has been running, in simulation-time (not real world time), in seconds", Directives: gti.Directives{}, Tag: ""}},
+ {"TrialsTotal", >i.Field{Name: "TrialsTotal", Type: "int32", LocalType: "int32", Doc: "total trial count -- increments continuously in NewState call *only in Train mode* from whenever it was last reset -- can be used for synchronizing weight updates across nodes", Directives: gti.Directives{}, Tag: ""}},
+ {"TimePerCycle", >i.Field{Name: "TimePerCycle", Type: "float32", LocalType: "float32", Doc: "amount of time to increment per cycle", Directives: gti.Directives{}, Tag: "def:\"0.001\""}},
+ {"SlowInterval", >i.Field{Name: "SlowInterval", Type: "int32", LocalType: "int32", Doc: "how frequently to perform slow adaptive processes such as synaptic scaling, inhibition adaptation, associated in the brain with sleep, in the SlowAdapt method. This should be long enough for meaningful changes to accumulate -- 100 is default but could easily be longer in larger models. Because SlowCtr is incremented by NData, high NData cases (e.g. 16) likely need to increase this value -- e.g., 400 seems to produce overall consistent results in various models.", Directives: gti.Directives{}, Tag: "def:\"100\""}},
+ {"SlowCtr", >i.Field{Name: "SlowCtr", Type: "int32", LocalType: "int32", Doc: "counter for how long it has been since last SlowAdapt step. Note that this is incremented by NData to maintain consistency across different values of this parameter.", Directives: gti.Directives{}, Tag: "inactive:\"+\""}},
+ {"SynCaCtr", >i.Field{Name: "SynCaCtr", Type: "float32", LocalType: "float32", Doc: "synaptic calcium counter, which drives the CaUpT synaptic value to optimize updating of this computationally expensive factor. It is incremented by 1 for each cycle, and reset at the SlowInterval, at which point the synaptic calcium values are all reset.", Directives: gti.Directives{}, Tag: "inactive:\"+\""}},
+ {"pad", >i.Field{Name: "pad", Type: "float32", LocalType: "float32", Doc: "", Directives: gti.Directives{}, Tag: ""}},
+ {"NetIdxs", >i.Field{Name: "NetIdxs", Type: "github.com/emer/axon/axon.NetIdxs", LocalType: "NetIdxs", Doc: "indexes and sizes of current network", Directives: gti.Directives{}, Tag: "view:\"inline\""}},
+ {"NeuronVars", >i.Field{Name: "NeuronVars", Type: "github.com/emer/axon/axon.NeuronVarStrides", LocalType: "NeuronVarStrides", Doc: "stride offsets for accessing neuron variables", Directives: gti.Directives{}, Tag: "view:\"-\""}},
+ {"NeuronAvgVars", >i.Field{Name: "NeuronAvgVars", Type: "github.com/emer/axon/axon.NeuronAvgVarStrides", LocalType: "NeuronAvgVarStrides", Doc: "stride offsets for accessing neuron average variables", Directives: gti.Directives{}, Tag: "view:\"-\""}},
+ {"NeuronIdxs", >i.Field{Name: "NeuronIdxs", Type: "github.com/emer/axon/axon.NeuronIdxStrides", LocalType: "NeuronIdxStrides", Doc: "stride offsets for accessing neuron indexes", Directives: gti.Directives{}, Tag: "view:\"-\""}},
+ {"SynapseVars", >i.Field{Name: "SynapseVars", Type: "github.com/emer/axon/axon.SynapseVarStrides", LocalType: "SynapseVarStrides", Doc: "stride offsets for accessing synapse variables", Directives: gti.Directives{}, Tag: "view:\"-\""}},
+ {"SynapseCaVars", >i.Field{Name: "SynapseCaVars", Type: "github.com/emer/axon/axon.SynapseCaStrides", LocalType: "SynapseCaStrides", Doc: "stride offsets for accessing synapse Ca variables", Directives: gti.Directives{}, Tag: "view:\"-\""}},
+ {"SynapseIdxs", >i.Field{Name: "SynapseIdxs", Type: "github.com/emer/axon/axon.SynapseIdxStrides", LocalType: "SynapseIdxStrides", Doc: "stride offsets for accessing synapse indexes", Directives: gti.Directives{}, Tag: "view:\"-\""}},
+ {"RandCtr", >i.Field{Name: "RandCtr", Type: "goki.dev/gosl/v2/slrand.Counter", LocalType: "slrand.Counter", Doc: "random counter -- incremented by maximum number of possible random numbers generated per cycle, regardless of how many are actually used -- this is shared across all layers so must encompass all possible param settings.", Directives: gti.Directives{}, Tag: ""}},
+ }),
+ Embeds: ordmap.Make([]ordmap.KeyVal[string, *gti.Field]{}),
+ Methods: ordmap.Make([]ordmap.KeyVal[string, *gti.Method]{}),
+})
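Several Context fields above are simple per-cycle counters. Here is a sketch of the bookkeeping their docs imply, using a hypothetical reduced struct (the real Context update also handles phase and trial resets):

// ctxTimeSketch is a hypothetical reduction of Context to its counters.
type ctxTimeSketch struct {
	Cycle, PhaseCycle, CyclesTotal int32
	SynCaCtr, Time, TimePerCycle   float32
}

// cycleInc advances the per-cycle bookkeeping described in the field docs.
func (ct *ctxTimeSketch) cycleInc() {
	ct.Cycle++                 // resets with NewState
	ct.PhaseCycle++            // cycle within minus or plus phase
	ct.CyclesTotal++           // continuous msec counter
	ct.SynCaCtr++              // reset at the SlowInterval
	ct.Time += ct.TimePerCycle // default 0.001 sec per 1 msec cycle
}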
+
+var _ = gti.AddType(&gti.Type{
+ Name: "github.com/emer/axon/axon.BurstParams",
+ ShortName: "axon.BurstParams",
+ IDName: "burst-params",
+ Doc: "BurstParams determine how the 5IB Burst activation is computed from\nCaSpkP integrated spiking values in Super layers -- thresholded.",
+ Directives: gti.Directives{
+ >i.Directive{Tool: "gosl", Directive: "start", Args: []string{"deep_layers"}},
+ },
+ Fields: ordmap.Make([]ordmap.KeyVal[string, *gti.Field]{
+ {"ThrRel", >i.Field{Name: "ThrRel", Type: "float32", LocalType: "float32", Doc: "Relative component of threshold on superficial activation value, below which it does not drive Burst (and above which, Burst = CaSpkP). This is the distance between the average and maximum activation values within layer (e.g., 0 = average, 1 = max). Overall effective threshold is MAX of relative and absolute thresholds.", Directives: gti.Directives{}, Tag: "max:\"1\" def:\"0.1\""}},
+ {"ThrAbs", >i.Field{Name: "ThrAbs", Type: "float32", LocalType: "float32", Doc: "Absolute component of threshold on superficial activation value, below which it does not drive Burst (and above which, Burst = CaSpkP). Overall effective threshold is MAX of relative and absolute thresholds.", Directives: gti.Directives{}, Tag: "min:\"0\" max:\"1\" def:\"0.1\""}},
+ {"pad", >i.Field{Name: "pad", Type: "float32", LocalType: "float32", Doc: "", Directives: gti.Directives{}, Tag: ""}},
+ }),
+ Embeds: ordmap.Make([]ordmap.KeyVal[string, *gti.Field]{}),
+ Methods: ordmap.Make([]ordmap.KeyVal[string, *gti.Method]{}),
+})
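Per the ThrRel/ThrAbs docs above, the effective Burst threshold is the MAX of an absolute value and a relative value placed between the layer's average and maximum activation. A sketch of that rule (assumed form, not the actual axon code):

// burstFromCaSpkP applies the thresholding described for BurstParams.
func burstFromCaSpkP(caSpkP, layAvg, layMax, thrRel, thrAbs float32) float32 {
	thr := layAvg + thrRel*(layMax-layAvg) // relative: between avg and max
	if thrAbs > thr {
		thr = thrAbs // effective threshold = MAX(relative, absolute)
	}
	if caSpkP < thr {
		return 0 // below threshold: does not drive Burst
	}
	return caSpkP // above threshold: Burst = CaSpkP
}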
+
+var _ = gti.AddType(&gti.Type{
+ Name: "github.com/emer/axon/axon.CTParams",
+ ShortName: "axon.CTParams",
+ IDName: "ct-params",
+ Doc: "CTParams control the CT corticothalamic neuron special behavior",
+ Directives: gti.Directives{},
+ Fields: ordmap.Make([]ordmap.KeyVal[string, *gti.Field]{
+ {"GeGain", >i.Field{Name: "GeGain", Type: "float32", LocalType: "float32", Doc: "gain factor for context excitatory input, which is constant as compared to the spiking input from other projections, so it must be downscaled accordingly. This can make a difference and may need to be scaled up or down.", Directives: gti.Directives{}, Tag: "def:\"0.05,0.1,1,2\""}},
+ {"DecayTau", >i.Field{Name: "DecayTau", Type: "float32", LocalType: "float32", Doc: "decay time constant for context Ge input -- if > 0, decays over time so intrinsic circuit dynamics have to take over. For single-step copy-based cases, set to 0, while longer-time-scale dynamics should use 50", Directives: gti.Directives{}, Tag: "def:\"0,50\""}},
+ {"DecayDt", >i.Field{Name: "DecayDt", Type: "float32", LocalType: "float32", Doc: "1 / tau", Directives: gti.Directives{}, Tag: "view:\"-\" json:\"-\" xml:\"-\""}},
+ {"pad", >i.Field{Name: "pad", Type: "float32", LocalType: "float32", Doc: "", Directives: gti.Directives{}, Tag: ""}},
+ }),
+ Embeds: ordmap.Make([]ordmap.KeyVal[string, *gti.Field]{}),
+ Methods: ordmap.Make([]ordmap.KeyVal[string, *gti.Method]{}),
+})
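The CTParams docs imply DecayDt = 1/DecayTau, with DecayTau = 0 meaning no decay (the single-step copy-based context case). A sketch under those assumptions:

// ctDecayDt derives the DecayDt = 1/DecayTau rate, with 0 = no decay.
func ctDecayDt(decayTau float32) float32 {
	if decayTau <= 0 {
		return 0 // single-step copy-based context: no decay
	}
	return 1 / decayTau
}

// per-cycle context conductance update, schematically:
//   ctxtGe -= ctDecayDt(DecayTau) * ctxtGe
//   geTotal += GeGain * ctxtGe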
+
+var _ = gti.AddType(&gti.Type{
+ Name: "github.com/emer/axon/axon.PulvParams",
+ ShortName: "axon.PulvParams",
+ IDName: "pulv-params",
+ Doc: "PulvParams provides parameters for how the plus-phase (outcome)\nstate of Pulvinar thalamic relay cell neurons is computed from\nthe corresponding driver neuron Burst activation (or CaSpkP if not Super)",
+ Directives: gti.Directives{},
+ Fields: ordmap.Make([]ordmap.KeyVal[string, *gti.Field]{
+ {"DriveScale", >i.Field{Name: "DriveScale", Type: "float32", LocalType: "float32", Doc: "multiplier on driver input strength, multiplies CaSpkP from driver layer to produce Ge excitatory input to Pulv unit.", Directives: gti.Directives{}, Tag: "def:\"0.1\" min:\"0.0\""}},
+ {"FullDriveAct", >i.Field{Name: "FullDriveAct", Type: "float32", LocalType: "float32", Doc: "Level of Max driver layer CaSpkP at which the drivers fully drive the burst phase activation. If there is weaker driver input, then (Max/FullDriveAct) proportion of the non-driver inputs remain and this critically prevents the network from learning to turn activation off, which is difficult and severely degrades learning.", Directives: gti.Directives{}, Tag: "def:\"0.6\" min:\"0.01\""}},
+ {"DriveLayIdx", >i.Field{Name: "DriveLayIdx", Type: "int32", LocalType: "int32", Doc: "index of layer that generates the driving activity into this one -- set via SetBuildConfig(DriveLayName) setting", Directives: gti.Directives{}, Tag: "inactive:\"+\""}},
+ {"pad", >i.Field{Name: "pad", Type: "float32", LocalType: "float32", Doc: "", Directives: gti.Directives{}, Tag: ""}},
+ }),
+ Embeds: ordmap.Make([]ordmap.KeyVal[string, *gti.Field]{}),
+ Methods: ordmap.Make([]ordmap.KeyVal[string, *gti.Method]{}),
+})
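From the DriveScale and FullDriveAct docs above: driver input produces Ge = DriveScale * CaSpkP, and the proportion of non-driver input retained scales with how far the driver layer Max falls short of FullDriveAct, which prevents the network from learning to turn activation off. A sketch of that arithmetic (assumed form):

// pulvDrive computes the plus-phase driver mix described for PulvParams.
func pulvDrive(drvCaSpkP, drvMax, driveScale, fullDriveAct float32) (ge, nonDrivePct float32) {
	ge = driveScale * drvCaSpkP // driver excitatory input
	frac := drvMax / fullDriveAct
	if frac > 1 {
		frac = 1
	}
	nonDrivePct = 1 - frac // proportion of non-driver input retained
	return
}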
+
+var _ = gti.AddType(&gti.Type{
+ Name: "github.com/emer/axon/axon.GlobalVars",
+ ShortName: "axon.GlobalVars",
+ IDName: "global-vars",
+ Doc: "GlobalVars are network-wide variables, such as neuromodulators, reward, drives, etc\nincluding the state for the PVLV phasic dopamine model.",
+ Directives: gti.Directives{
+ >i.Directive{Tool: "gosl", Directive: "start", Args: []string{"globals"}},
+ >i.Directive{Tool: "enums", Directive: "enum", Args: []string{}},
+ },
+
+ Methods: ordmap.Make([]ordmap.KeyVal[string, *gti.Method]{}),
+})
+
+var _ = gti.AddType(&gti.Type{
+ Name: "github.com/emer/axon/axon.PushOff",
+ ShortName: "axon.PushOff",
+ IDName: "push-off",
+ Doc: "PushOff has push constants for setting offset into compute shader",
+ Directives: gti.Directives{},
+ Fields: ordmap.Make([]ordmap.KeyVal[string, *gti.Field]{
+ {"Off", >i.Field{Name: "Off", Type: "uint32", LocalType: "uint32", Doc: "offset", Directives: gti.Directives{}, Tag: ""}},
+ {"pad", >i.Field{Name: "pad", Type: "uint32", LocalType: "uint32", Doc: "", Directives: gti.Directives{}, Tag: ""}},
+ }),
+ Embeds: ordmap.Make([]ordmap.KeyVal[string, *gti.Field]{}),
+ Methods: ordmap.Make([]ordmap.KeyVal[string, *gti.Method]{}),
+})
+
+var _ = gti.AddType(&gti.Type{
+ Name: "github.com/emer/axon/axon.GPU",
+ ShortName: "axon.GPU",
+ IDName: "gpu",
+ Doc: "GPU manages all of the GPU-based computation for a given Network.\nLives within the network.",
+ Directives: gti.Directives{},
+ Fields: ordmap.Make([]ordmap.KeyVal[string, *gti.Field]{
+ {"On", >i.Field{Name: "On", Type: "bool", LocalType: "bool", Doc: "if true, actually use the GPU", Directives: gti.Directives{}, Tag: ""}},
+ {"RecFunTimes", >i.Field{Name: "RecFunTimes", Type: "bool", LocalType: "bool", Doc: "", Directives: gti.Directives{}, Tag: "desc:\"if true, slower separate shader pipeline runs are used, with a CPU-sync Wait at the end, to enable timing information about each individual shader to be collected using the network FunTimer system. otherwise, only aggregate information is available about the entire Cycle call."}},
+ {"CycleByCycle", >i.Field{Name: "CycleByCycle", Type: "bool", LocalType: "bool", Doc: "if true, process each cycle one at a time. Otherwise, 10 cycles at a time are processed in one batch.", Directives: gti.Directives{}, Tag: ""}},
+ {"Net", >i.Field{Name: "Net", Type: "*github.com/emer/axon/axon.Network", LocalType: "*Network", Doc: "the network we operate on -- we live under this net", Directives: gti.Directives{}, Tag: "view:\"-\""}},
+ {"Ctx", >i.Field{Name: "Ctx", Type: "*github.com/emer/axon/axon.Context", LocalType: "*Context", Doc: "the context we use", Directives: gti.Directives{}, Tag: "view:\"-\""}},
+ {"Sys", >i.Field{Name: "Sys", Type: "*github.com/goki/vgpu/vgpu.System", LocalType: "*vgpu.System", Doc: "the vgpu compute system", Directives: gti.Directives{}, Tag: "view:\"-\""}},
+ {"Params", >i.Field{Name: "Params", Type: "*github.com/goki/vgpu/vgpu.VarSet", LocalType: "*vgpu.VarSet", Doc: "VarSet = 0: the uniform LayerParams", Directives: gti.Directives{}, Tag: "view:\"-\""}},
+ {"Idxs", >i.Field{Name: "Idxs", Type: "*github.com/goki/vgpu/vgpu.VarSet", LocalType: "*vgpu.VarSet", Doc: "VarSet = 1: the storage indexes and PrjnParams", Directives: gti.Directives{}, Tag: "view:\"-\""}},
+ {"Structs", >i.Field{Name: "Structs", Type: "*github.com/goki/vgpu/vgpu.VarSet", LocalType: "*vgpu.VarSet", Doc: "VarSet = 2: the Storage buffer for RW state structs and neuron floats", Directives: gti.Directives{}, Tag: "view:\"-\""}},
+ {"Syns", >i.Field{Name: "Syns", Type: "*github.com/goki/vgpu/vgpu.VarSet", LocalType: "*vgpu.VarSet", Doc: "Varset = 3: the Storage buffer for synapses", Directives: gti.Directives{}, Tag: "view:\"-\""}},
+ {"SynCas", >i.Field{Name: "SynCas", Type: "*github.com/goki/vgpu/vgpu.VarSet", LocalType: "*vgpu.VarSet", Doc: "Varset = 4: the Storage buffer for SynCa banks", Directives: gti.Directives{}, Tag: "view:\"-\""}},
+ {"Semaphores", >i.Field{Name: "Semaphores", Type: "map[string]github.com/goki/vulkan.Semaphore", LocalType: "map[string]vk.Semaphore", Doc: "for sequencing commands", Directives: gti.Directives{}, Tag: "view:\"-\""}},
+ {"NThreads", >i.Field{Name: "NThreads", Type: "int", LocalType: "int", Doc: "number of warp threads -- typically 64 -- must update all hlsl files if changed!", Directives: gti.Directives{}, Tag: "view:\"-\" inactive:\"-\" def:\"64\""}},
+ {"MaxBufferBytes", >i.Field{Name: "MaxBufferBytes", Type: "uint32", LocalType: "uint32", Doc: "maximum number of bytes per individual storage buffer element, from GPUProps.Limits.MaxStorageBufferRange", Directives: gti.Directives{}, Tag: "view:\"-\""}},
+ {"SynapseCas0", >i.Field{Name: "SynapseCas0", Type: "[]float32", LocalType: "[]float32", Doc: "bank of floats for GPU access", Directives: gti.Directives{}, Tag: "view:\"-\""}},
+ {"SynapseCas1", >i.Field{Name: "SynapseCas1", Type: "[]float32", LocalType: "[]float32", Doc: "bank of floats for GPU access", Directives: gti.Directives{}, Tag: "view:\"-\""}},
+ {"SynapseCas2", >i.Field{Name: "SynapseCas2", Type: "[]float32", LocalType: "[]float32", Doc: "bank of floats for GPU access", Directives: gti.Directives{}, Tag: "view:\"-\""}},
+ {"SynapseCas3", >i.Field{Name: "SynapseCas3", Type: "[]float32", LocalType: "[]float32", Doc: "bank of floats for GPU access", Directives: gti.Directives{}, Tag: "view:\"-\""}},
+ {"SynapseCas4", >i.Field{Name: "SynapseCas4", Type: "[]float32", LocalType: "[]float32", Doc: "bank of floats for GPU access", Directives: gti.Directives{}, Tag: "view:\"-\""}},
+ {"SynapseCas5", >i.Field{Name: "SynapseCas5", Type: "[]float32", LocalType: "[]float32", Doc: "bank of floats for GPU access", Directives: gti.Directives{}, Tag: "view:\"-\""}},
+ {"SynapseCas6", >i.Field{Name: "SynapseCas6", Type: "[]float32", LocalType: "[]float32", Doc: "bank of floats for GPU access", Directives: gti.Directives{}, Tag: "view:\"-\""}},
+ {"SynapseCas7", >i.Field{Name: "SynapseCas7", Type: "[]float32", LocalType: "[]float32", Doc: "bank of floats for GPU access", Directives: gti.Directives{}, Tag: "view:\"-\""}},
+ {"DidBind", >i.Field{Name: "DidBind", Type: "map[string]bool", LocalType: "map[string]bool", Doc: "tracks var binding", Directives: gti.Directives{}, Tag: "view:\"-\""}},
+ }),
+ Embeds: ordmap.Make([]ordmap.KeyVal[string, *gti.Field]{}),
+ Methods: ordmap.Make([]ordmap.KeyVal[string, *gti.Method]{}),
+})
+
+var _ = gti.AddType(&gti.Type{
+ Name: "github.com/emer/axon/axon.HipConfig",
+ ShortName: "axon.HipConfig",
+ IDName: "hip-config",
+ Doc: "HipConfig have the hippocampus size and connectivity parameters",
+ Directives: gti.Directives{},
+ Fields: ordmap.Make([]ordmap.KeyVal[string, *gti.Field]{
+ {"EC2Size", >i.Field{Name: "EC2Size", Type: "github.com/emer/emergent/v2/evec.Vec2i", LocalType: "evec.Vec2i", Doc: "size of EC2", Directives: gti.Directives{}, Tag: "nest:\"+\""}},
+ {"EC3NPool", >i.Field{Name: "EC3NPool", Type: "github.com/emer/emergent/v2/evec.Vec2i", LocalType: "evec.Vec2i", Doc: "number of EC3 pools (outer dimension)", Directives: gti.Directives{}, Tag: "nest:\"+\""}},
+ {"EC3NNrn", >i.Field{Name: "EC3NNrn", Type: "github.com/emer/emergent/v2/evec.Vec2i", LocalType: "evec.Vec2i", Doc: "number of neurons in one EC3 pool", Directives: gti.Directives{}, Tag: "nest:\"+\""}},
+ {"CA1NNrn", >i.Field{Name: "CA1NNrn", Type: "github.com/emer/emergent/v2/evec.Vec2i", LocalType: "evec.Vec2i", Doc: "number of neurons in one CA1 pool", Directives: gti.Directives{}, Tag: "nest:\"+\""}},
+ {"CA3Size", >i.Field{Name: "CA3Size", Type: "github.com/emer/emergent/v2/evec.Vec2i", LocalType: "evec.Vec2i", Doc: "size of CA3", Directives: gti.Directives{}, Tag: "nest:\"+\""}},
+ {"DGRatio", >i.Field{Name: "DGRatio", Type: "float32", LocalType: "float32", Doc: "size of DG / CA3", Directives: gti.Directives{}, Tag: "def:\"2.236\""}},
+ {"EC3ToEC2PCon", >i.Field{Name: "EC3ToEC2PCon", Type: "float32", LocalType: "float32", Doc: "percent connectivity from EC3 to EC2", Directives: gti.Directives{}, Tag: "def:\"0.1\""}},
+ {"EC2ToDGPCon", >i.Field{Name: "EC2ToDGPCon", Type: "float32", LocalType: "float32", Doc: "percent connectivity from EC2 to DG", Directives: gti.Directives{}, Tag: "def:\"0.25\""}},
+ {"EC2ToCA3PCon", >i.Field{Name: "EC2ToCA3PCon", Type: "float32", LocalType: "float32", Doc: "percent connectivity from EC2 to CA3", Directives: gti.Directives{}, Tag: "def:\"0.25\""}},
+ {"CA3ToCA1PCon", >i.Field{Name: "CA3ToCA1PCon", Type: "float32", LocalType: "float32", Doc: "percent connectivity from CA3 to CA1", Directives: gti.Directives{}, Tag: "def:\"0.25\""}},
+ {"DGToCA3PCon", >i.Field{Name: "DGToCA3PCon", Type: "float32", LocalType: "float32", Doc: "percent connectivity into CA3 from DG", Directives: gti.Directives{}, Tag: "def:\"0.02\""}},
+ {"EC2LatRadius", >i.Field{Name: "EC2LatRadius", Type: "int", LocalType: "int", Doc: "lateral radius of connectivity in EC2", Directives: gti.Directives{}, Tag: ""}},
+ {"EC2LatSigma", >i.Field{Name: "EC2LatSigma", Type: "float32", LocalType: "float32", Doc: "lateral gaussian sigma in EC2 for how quickly weights fall off with distance", Directives: gti.Directives{}, Tag: ""}},
+ {"MossyDelta", >i.Field{Name: "MossyDelta", Type: "float32", LocalType: "float32", Doc: "proportion of full mossy fiber strength (PrjnScale.Rel) for CA3 EDL in training, applied at the start of a trial to reduce DG -> CA3 strength. 1 = fully reduce strength, .5 = 50% reduction, etc", Directives: gti.Directives{}, Tag: "def:\"1\""}},
+ {"MossyDeltaTest", >i.Field{Name: "MossyDeltaTest", Type: "float32", LocalType: "float32", Doc: "proportion of full mossy fiber strength (PrjnScale.Rel) for CA3 EDL in testing, applied during 2nd-3rd quarters to reduce DG -> CA3 strength. 1 = fully reduce strength, .5 = 50% reduction, etc", Directives: gti.Directives{}, Tag: "def:\"0.75\""}},
+ {"ThetaLow", >i.Field{Name: "ThetaLow", Type: "float32", LocalType: "float32", Doc: "low theta modulation value for temporal difference EDL -- sets PrjnScale.Rel on CA1 <-> EC prjns consistent with Theta phase model", Directives: gti.Directives{}, Tag: "def:\"0.9\""}},
+ {"ThetaHigh", >i.Field{Name: "ThetaHigh", Type: "float32", LocalType: "float32", Doc: "high theta modulation value for temporal difference EDL -- sets PrjnScale.Rel on CA1 <-> EC prjns consistent with Theta phase model", Directives: gti.Directives{}, Tag: "def:\"1\""}},
+ {"EC5Clamp", >i.Field{Name: "EC5Clamp", Type: "bool", LocalType: "bool", Doc: "flag for clamping the EC5 from EC5ClampSrc", Directives: gti.Directives{}, Tag: "def:\"true\""}},
+ {"EC5ClampSrc", >i.Field{Name: "EC5ClampSrc", Type: "string", LocalType: "string", Doc: "source layer for EC5 clamping activations in the plus phase -- biologically it is EC3 but can use an Input layer if available", Directives: gti.Directives{}, Tag: "def:\"EC3\""}},
+ {"EC5ClampTest", >i.Field{Name: "EC5ClampTest", Type: "bool", LocalType: "bool", Doc: "clamp the EC5 from EC5ClampSrc during testing as well as training -- this will overwrite any target values that might be used in stats (e.g., in the basic hip example), so it must be turned off there", Directives: gti.Directives{}, Tag: "def:\"true\""}},
+ {"EC5ClampThr", >i.Field{Name: "EC5ClampThr", Type: "float32", LocalType: "float32", Doc: "threshold for binarizing EC5 clamp values -- any value above this is clamped to 1, else 0 -- helps produce a cleaner learning signal. Set to 0 to not perform any binarization.", Directives: gti.Directives{}, Tag: "def:\"0.1\""}},
+ }),
+ Embeds: ordmap.Make([]ordmap.KeyVal[string, *gti.Field]{}),
+ Methods: ordmap.Make([]ordmap.KeyVal[string, *gti.Method]{}),
+})
+
+var _ = gti.AddType(&gti.Type{
+ Name: "github.com/emer/axon/axon.HipPrjnParams",
+ ShortName: "axon.HipPrjnParams",
+ IDName: "hip-prjn-params",
+ Doc: "HipPrjnParams define behavior of hippocampus prjns, which have special learning rules",
+ Directives: gti.Directives{
+ >i.Directive{Tool: "gosl", Directive: "start", Args: []string{"hip_prjns"}},
+ },
+ Fields: ordmap.Make([]ordmap.KeyVal[string, *gti.Field]{
+ {"Hebb", >i.Field{Name: "Hebb", Type: "float32", LocalType: "float32", Doc: "Hebbian learning proportion", Directives: gti.Directives{}, Tag: "def:\"0\""}},
+ {"Err", >i.Field{Name: "Err", Type: "float32", LocalType: "float32", Doc: "EDL proportion", Directives: gti.Directives{}, Tag: "def:\"1\""}},
+ {"SAvgCor", >i.Field{Name: "SAvgCor", Type: "float32", LocalType: "float32", Doc: "proportion of correction to apply to sending average activation for hebbian learning component (0=none, 1=all, .5=half, etc)", Directives: gti.Directives{}, Tag: "def:\"0.4:0.8\" min:\"0\" max:\"1\""}},
+ {"SAvgThr", >i.Field{Name: "SAvgThr", Type: "float32", LocalType: "float32", Doc: "threshold of sending average activation below which learning does not occur (prevents learning when there is no input)", Directives: gti.Directives{}, Tag: "def:\"0.01\" min:\"0\""}},
+ {"SNominal", >i.Field{Name: "SNominal", Type: "float32", LocalType: "float32", Doc: "sending layer Nominal (need to manually set it to be the same as the sending layer)", Directives: gti.Directives{}, Tag: "def:\"0.1\" min:\"0\""}},
+ {"pad", >i.Field{Name: "pad", Type: "float32", LocalType: "float32", Doc: "", Directives: gti.Directives{}, Tag: ""}},
+ }),
+ Embeds: ordmap.Make([]ordmap.KeyVal[string, *gti.Field]{}),
+ Methods: ordmap.Make([]ordmap.KeyVal[string, *gti.Method]{}),
+})
+
+var _ = gti.AddType(&gti.Type{
+ Name: "github.com/emer/axon/axon.ActAvgParams",
+ ShortName: "axon.ActAvgParams",
+ IDName: "act-avg-params",
+ Doc: "ActAvgParams represents the nominal average activity levels in the layer\nand parameters for adapting the computed Gi inhibition levels to maintain\naverage activity within a target range.",
+ Directives: gti.Directives{
+ >i.Directive{Tool: "gosl", Directive: "hlsl", Args: []string{"inhib"}},
+ >i.Directive{Tool: "gosl", Directive: "end", Args: []string{"inhib"}},
+ >i.Directive{Tool: "gosl", Directive: "start", Args: []string{"inhib"}},
+ },
+ Fields: ordmap.Make([]ordmap.KeyVal[string, *gti.Field]{
+ {"Nominal", >i.Field{Name: "Nominal", Type: "float32", LocalType: "float32", Doc: "nominal estimated average activity level in the layer, which is used in computing the scaling factor on sending projections from this layer. In general it should roughly match the layer ActAvg.ActMAvg value, which can be logged using the axon.LogAddDiagnosticItems function. If layers receiving from this layer are not getting enough Ge excitation, then this Nominal level can be lowered to increase projection strength (fewer active neurons means each one contributes more, so scaling factor goes as the inverse of activity level), or vice-versa if Ge is too high. It is also the basis for the target activity level used for the AdaptGi option -- see the Offset which is added to this value.", Directives: gti.Directives{}, Tag: "min:\"0\" step:\"0.01\""}},
+ {"AdaptGi", >i.Field{Name: "AdaptGi", Type: "goki.dev/gosl/v2/slbool.Bool", LocalType: "slbool.Bool", Doc: "enable adapting of layer inhibition Gi multiplier factor (stored in layer GiMult value) to maintain a Target layer level of ActAvg.ActMAvg. This generally works well and improves the long-term stability of the models. It is not enabled by default because it depends on having established a reasonable Nominal + Offset target activity level.", Directives: gti.Directives{}, Tag: ""}},
+ {"Offset", >i.Field{Name: "Offset", Type: "float32", LocalType: "float32", Doc: "offset to add to Nominal for the target average activity that drives adaptation of Gi for this layer. Typically the Nominal level is good, but sometimes Nominal must be adjusted up or down to achieve desired Ge scaling, so this Offset can compensate accordingly.", Directives: gti.Directives{}, Tag: "def:\"0\" min:\"0\" step:\"0.01\" viewif:\"AdaptGi\""}},
+ {"HiTol", >i.Field{Name: "HiTol", Type: "float32", LocalType: "float32", Doc: "tolerance for higher than Target target average activation as a proportion of that target value (0 = exactly the target, 0.2 = 20% higher than target) -- only once activations move outside this tolerance are inhibitory values adapted.", Directives: gti.Directives{}, Tag: "def:\"0\" viewif:\"AdaptGi\""}},
+ {"LoTol", >i.Field{Name: "LoTol", Type: "float32", LocalType: "float32", Doc: "tolerance for lower than Target target average activation as a proportion of that target value (0 = exactly the target, 0.5 = 50% lower than target) -- only once activations move outside this tolerance are inhibitory values adapted.", Directives: gti.Directives{}, Tag: "def:\"0.8\" viewif:\"AdaptGi\""}},
+ {"AdaptRate", >i.Field{Name: "AdaptRate", Type: "float32", LocalType: "float32", Doc: "rate of Gi adaptation as function of AdaptRate * (Target - ActMAvg) / Target -- occurs at spaced intervals determined by Network.SlowInterval value -- slower values such as 0.01 may be needed for large networks and sparse layers.", Directives: gti.Directives{}, Tag: "def:\"0.1\" viewif:\"AdaptGi\""}},
+ {"pad", >i.Field{Name: "pad", Type: "float32", LocalType: "float32", Doc: "", Directives: gti.Directives{}, Tag: ""}},
+ }),
+ Embeds: ordmap.Make([]ordmap.KeyVal[string, *gti.Field]{}),
+ Methods: ordmap.Make([]ordmap.KeyVal[string, *gti.Method]{}),
+})
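Putting the ActAvgParams docs together: the adaptation target is Nominal + Offset, and Gi adapts in proportion to the deviation of ActMAvg from that target, but only once the proportional deviation leaves the [-LoTol, HiTol] band. A sketch with the sign convention assumed (positive delta raises GiMult, i.e., more inhibition when too active):

// giAdaptDelta returns the change to the layer GiMult, or 0 within tolerance.
func giAdaptDelta(actMAvg, nominal, offset, loTol, hiTol, adaptRate float32) float32 {
	trg := nominal + offset      // target average activity
	del := (actMAvg - trg) / trg // proportional deviation from target
	if del > hiTol || del < -loTol {
		return adaptRate * del // assumed sign: too active -> more inhibition
	}
	return 0
}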
+
+var _ = gti.AddType(&gti.Type{
+ Name: "github.com/emer/axon/axon.TopoInhibParams",
+ ShortName: "axon.TopoInhibParams",
+ IDName: "topo-inhib-params",
+ Doc: "TopoInhibParams provides for topographic gaussian inhibition integrating over neighborhood.\nTODO: not currently being used",
+ Directives: gti.Directives{},
+ Fields: ordmap.Make([]ordmap.KeyVal[string, *gti.Field]{
+ {"On", >i.Field{Name: "On", Type: "goki.dev/gosl/v2/slbool.Bool", LocalType: "slbool.Bool", Doc: "use topographic inhibition", Directives: gti.Directives{}, Tag: ""}},
+ {"Width", >i.Field{Name: "Width", Type: "int32", LocalType: "int32", Doc: "half-width of topographic inhibition within layer", Directives: gti.Directives{}, Tag: "viewif:\"On\""}},
+ {"Sigma", >i.Field{Name: "Sigma", Type: "float32", LocalType: "float32", Doc: "normalized gaussian sigma as proportion of Width, for gaussian weighting", Directives: gti.Directives{}, Tag: "viewif:\"On\""}},
+ {"Wrap", >i.Field{Name: "Wrap", Type: "goki.dev/gosl/v2/slbool.Bool", LocalType: "slbool.Bool", Doc: "half-width of topographic inhibition within layer", Directives: gti.Directives{}, Tag: "viewif:\"On\""}},
+ {"Gi", >i.Field{Name: "Gi", Type: "float32", LocalType: "float32", Doc: "overall inhibition multiplier for topographic inhibition (generally <= 1)", Directives: gti.Directives{}, Tag: "viewif:\"On\""}},
+ {"FF", >i.Field{Name: "FF", Type: "float32", LocalType: "float32", Doc: "overall inhibitory contribution from feedforward inhibition -- multiplies average Ge from pools or Ge from neurons", Directives: gti.Directives{}, Tag: "viewif:\"On\""}},
+ {"FB", >i.Field{Name: "FB", Type: "float32", LocalType: "float32", Doc: "overall inhibitory contribution from feedback inhibition -- multiplies average activation from pools or Act from neurons", Directives: gti.Directives{}, Tag: "viewif:\"On\""}},
+ {"FF0", >i.Field{Name: "FF0", Type: "float32", LocalType: "float32", Doc: "feedforward zero point for Ge per neuron (summed Ge is compared to N * FF0) -- below this level, no FF inhibition is computed, above this it is FF * (Sum Ge - N * FF0)", Directives: gti.Directives{}, Tag: "viewif:\"On\""}},
+ {"WidthWt", >i.Field{Name: "WidthWt", Type: "float32", LocalType: "float32", Doc: "weight value at width -- to assess the value of Sigma", Directives: gti.Directives{}, Tag: "inactive:\"+\""}},
+ {"pad", >i.Field{Name: "pad", Type: "float32", LocalType: "float32", Doc: "", Directives: gti.Directives{}, Tag: ""}},
+ }),
+ Embeds: ordmap.Make([]ordmap.KeyVal[string, *gti.Field]{}),
+ Methods: ordmap.Make([]ordmap.KeyVal[string, *gti.Method]{}),
+})
+
+var _ = gti.AddType(&gti.Type{
+ Name: "github.com/emer/axon/axon.InhibParams",
+ ShortName: "axon.InhibParams",
+ IDName: "inhib-params",
+ Doc: "axon.InhibParams contains all the inhibition computation params and functions for basic Axon\nThis is included in axon.Layer to support computation.\nThis also includes other misc layer-level params such as expected average activation in the layer\nwhich is used for Ge rescaling and potentially for adapting inhibition over time",
+ Directives: gti.Directives{},
+ Fields: ordmap.Make([]ordmap.KeyVal[string, *gti.Field]{
+ {"ActAvg", >i.Field{Name: "ActAvg", Type: "github.com/emer/axon/axon.ActAvgParams", LocalType: "ActAvgParams", Doc: "layer-level and pool-level average activation initial values and updating / adaptation thereof -- initial values help determine initial scaling factors.", Directives: gti.Directives{}, Tag: "view:\"inline\""}},
+ {"Layer", >i.Field{Name: "Layer", Type: "github.com/emer/axon/fsfffb.GiParams", LocalType: "fsfffb.GiParams", Doc: "inhibition across the entire layer -- inputs generally use Gi = 0.8 or 0.9, 1.3 or higher for sparse layers. If the layer has sub-pools (4D shape) then this is effectively between-pool inhibition.", Directives: gti.Directives{}, Tag: "view:\"inline\""}},
+ {"Pool", >i.Field{Name: "Pool", Type: "github.com/emer/axon/fsfffb.GiParams", LocalType: "fsfffb.GiParams", Doc: "inhibition within sub-pools of units, for layers with 4D shape -- almost always need this if the layer has pools.", Directives: gti.Directives{}, Tag: "view:\"inline\""}},
+ }),
+ Embeds: ordmap.Make([]ordmap.KeyVal[string, *gti.Field]{}),
+ Methods: ordmap.Make([]ordmap.KeyVal[string, *gti.Method]{}),
+})
+
+var _ = gti.AddType(&gti.Type{
+ Name: "github.com/emer/axon/axon.Layer",
+ ShortName: "axon.Layer",
+ IDName: "layer",
+ Doc: "axon.Layer implements the basic Axon spiking activation function,\nand manages learning in the projections.",
+ Directives: gti.Directives{},
+ Fields: ordmap.Make([]ordmap.KeyVal[string, *gti.Field]{
+ {"Params", >i.Field{Name: "Params", Type: "*github.com/emer/axon/axon.LayerParams", LocalType: "*LayerParams", Doc: "all layer-level parameters -- these must remain constant once configured", Directives: gti.Directives{}, Tag: ""}},
+ }),
+ Embeds: ordmap.Make([]ordmap.KeyVal[string, *gti.Field]{
+ {"LayerBase", >i.Field{Name: "LayerBase", Type: "github.com/emer/axon/axon.LayerBase", LocalType: "LayerBase", Doc: "", Directives: gti.Directives{}, Tag: ""}},
+ }),
+ Methods: ordmap.Make([]ordmap.KeyVal[string, *gti.Method]{}),
+})
+
+var _ = gti.AddType(&gti.Type{
+ Name: "github.com/emer/axon/axon.LayerBase",
+ ShortName: "axon.LayerBase",
+ IDName: "layer-base",
+ Doc: "LayerBase manages the structural elements of the layer, which are common\nto any Layer type.\nThe Base does not have algorithm-specific methods and parameters, so it can be easily\nreused for different algorithms, and cleanly separates the algorithm-specific code.\nAny dependency on the algorithm-level Layer can be captured in the AxonLayer interface,\naccessed via the AxonLay field.",
+ Directives: gti.Directives{},
+ Fields: ordmap.Make([]ordmap.KeyVal[string, *gti.Field]{
+ {"AxonLay", >i.Field{Name: "AxonLay", Type: "github.com/emer/axon/axon.AxonLayer", LocalType: "AxonLayer", Doc: "we need a pointer to ourselves as an AxonLayer (which subsumes emer.Layer), which can always be used to extract the true underlying type of object when layer is embedded in other structs -- function receivers do not have this ability so this is necessary.", Directives: gti.Directives{}, Tag: "copy:\"-\" json:\"-\" xml:\"-\" view:\"-\""}},
+ {"Network", >i.Field{Name: "Network", Type: "*github.com/emer/axon/axon.Network", LocalType: "*Network", Doc: "our parent network, in case we need to use it to find other layers etc -- set when added by network", Directives: gti.Directives{}, Tag: "copy:\"-\" json:\"-\" xml:\"-\" view:\"-\""}},
+ {"Nm", >i.Field{Name: "Nm", Type: "string", LocalType: "string", Doc: "Name of the layer -- this must be unique within the network, which has a map for quick lookup and layers are typically accessed directly by name", Directives: gti.Directives{}, Tag: ""}},
+ {"Cls", >i.Field{Name: "Cls", Type: "string", LocalType: "string", Doc: "Class is for applying parameter styles, can be space separated multple tags", Directives: gti.Directives{}, Tag: ""}},
+ {"Off", >i.Field{Name: "Off", Type: "bool", LocalType: "bool", Doc: "inactivate this layer -- allows for easy experimentation", Directives: gti.Directives{}, Tag: ""}},
+ {"Shp", >i.Field{Name: "Shp", Type: "goki.dev/etable/v2/etensor.Shape", LocalType: "etensor.Shape", Doc: "shape of the layer -- can be 2D for basic layers and 4D for layers with sub-groups (hypercolumns) -- order is outer-to-inner (row major) so Y then X for 2D and for 4D: Y-X unit pools then Y-X neurons within pools", Directives: gti.Directives{}, Tag: ""}},
+ {"Typ", >i.Field{Name: "Typ", Type: "github.com/emer/axon/axon.LayerTypes", LocalType: "LayerTypes", Doc: "type of layer -- Hidden, Input, Target, Compare, or extended type in specialized algorithms -- matches against .Class parameter styles (e.g., .Hidden etc)", Directives: gti.Directives{}, Tag: ""}},
+ {"Rel", >i.Field{Name: "Rel", Type: "github.com/emer/emergent/v2/relpos.Rel", LocalType: "relpos.Rel", Doc: "Spatial relationship to other layer, determines positioning", Directives: gti.Directives{}, Tag: "tableview:\"-\" view:\"inline\""}},
+ {"Ps", >i.Field{Name: "Ps", Type: "goki.dev/mat32/v2.Vec3", LocalType: "mat32.Vec3", Doc: "position of lower-left-hand corner of layer in 3D space, computed from Rel. Layers are in X-Y width - height planes, stacked vertically in Z axis.", Directives: gti.Directives{}, Tag: "tableview:\"-\""}},
+ {"Idx", >i.Field{Name: "Idx", Type: "int", LocalType: "int", Doc: "a 0..n-1 index of the position of the layer within list of layers in the network. For Axon networks, it only has significance in determining who gets which weights for enforcing initial weight symmetry -- higher layers get weights from lower layers.", Directives: gti.Directives{}, Tag: "view:\"-\" inactive:\"-\""}},
+ {"NNeurons", >i.Field{Name: "NNeurons", Type: "uint32", LocalType: "uint32", Doc: "number of neurons in the layer", Directives: gti.Directives{}, Tag: "view:\"-\""}},
+ {"NeurStIdx", >i.Field{Name: "NeurStIdx", Type: "uint32", LocalType: "uint32", Doc: "starting index of neurons for this layer within the global Network list", Directives: gti.Directives{}, Tag: "view:\"-\" inactive:\"-\""}},
+ {"NPools", >i.Field{Name: "NPools", Type: "uint32", LocalType: "uint32", Doc: "number of pools based on layer shape -- at least 1 for layer pool + 4D subpools", Directives: gti.Directives{}, Tag: "view:\"-\""}},
+ {"MaxData", >i.Field{Name: "MaxData", Type: "uint32", LocalType: "uint32", Doc: "maximum amount of input data that can be processed in parallel in one pass of the network. Neuron, Pool, Vals storage is allocated to hold this amount.", Directives: gti.Directives{}, Tag: "view:\"-\""}},
+ {"RepIxs", >i.Field{Name: "RepIxs", Type: "[]int", LocalType: "[]int", Doc: "indexes of representative units in the layer, for computationally expensive stats or displays -- also set RepShp", Directives: gti.Directives{}, Tag: "view:\"-\""}},
+ {"RepShp", >i.Field{Name: "RepShp", Type: "goki.dev/etable/v2/etensor.Shape", LocalType: "etensor.Shape", Doc: "shape of representative units in the layer -- if RepIxs is empty or .Shp is nil, use overall layer shape", Directives: gti.Directives{}, Tag: "view:\"-\""}},
+ {"RcvPrjns", >i.Field{Name: "RcvPrjns", Type: "github.com/emer/axon/axon.AxonPrjns", LocalType: "AxonPrjns", Doc: "list of receiving projections into this layer from other layers", Directives: gti.Directives{}, Tag: ""}},
+ {"SndPrjns", >i.Field{Name: "SndPrjns", Type: "github.com/emer/axon/axon.AxonPrjns", LocalType: "AxonPrjns", Doc: "list of sending projections from this layer to other layers", Directives: gti.Directives{}, Tag: ""}},
+ {"Vals", >i.Field{Name: "Vals", Type: "[]github.com/emer/axon/axon.LayerVals", LocalType: "[]LayerVals", Doc: "layer-level state values that are updated during computation -- one for each data parallel -- is a sub-slice of network full set", Directives: gti.Directives{}, Tag: ""}},
+ {"Pools", >i.Field{Name: "Pools", Type: "[]github.com/emer/axon/axon.Pool", LocalType: "[]Pool", Doc: "computes FS-FFFB inhibition and other pooled, aggregate state variables -- has at least 1 for entire layer (lpl = layer pool), and one for each sub-pool if shape supports that (4D) * 1 per data parallel (inner loop). This is a sub-slice from overall Network Pools slice. You must iterate over index and use pointer to modify values.", Directives: gti.Directives{}, Tag: ""}},
+ {"Exts", >i.Field{Name: "Exts", Type: "[]float32", LocalType: "[]float32", Doc: "external input values for this layer, allocated from network global Exts slice", Directives: gti.Directives{}, Tag: "view:\"-\""}},
+ {"BuildConfig", >i.Field{Name: "BuildConfig", Type: "map[string]string", LocalType: "map[string]string", Doc: "configuration data set when the network is configured, that is used during the network Build() process via PostBuild method, after all the structure of the network has been fully constructed. In particular, the Params is nil until Build, so setting anything specific in there (e.g., an index to another layer) must be done as a second pass. Note that Params are all applied after Build and can set user-modifiable params, so this is for more special algorithm structural parameters set during ConfigNet() methods.,", Directives: gti.Directives{}, Tag: "tableview:\"-\""}},
+ {"DefParams", >i.Field{Name: "DefParams", Type: "github.com/emer/emergent/v2/params.Params", LocalType: "params.Params", Doc: "default parameters that are applied prior to user-set parameters -- these are useful for specific layer functionality in specialized brain areas (e.g., PVLV, BG etc) not associated with a layer type, which otherwise is used to hard-code initial default parameters -- typically just set to a literal map.", Directives: gti.Directives{}, Tag: "tableview:\"-\""}},
+ {"ParamsHistory", >i.Field{Name: "ParamsHistory", Type: "github.com/emer/emergent/v2/params.HistoryImpl", LocalType: "params.HistoryImpl", Doc: "provides a history of parameters applied to the layer", Directives: gti.Directives{}, Tag: "tableview:\"-\""}},
+ }),
+ Embeds: ordmap.Make([]ordmap.KeyVal[string, *gti.Field]{}),
+ Methods: ordmap.Make([]ordmap.KeyVal[string, *gti.Method]{}),
+})
+
+var _ = gti.AddType(&gti.Type{
+ Name: "github.com/emer/axon/axon.LayerIdxs",
+ ShortName: "axon.LayerIdxs",
+ IDName: "layer-idxs",
+ Doc: "LayerIdxs contains index access into network global arrays for GPU.",
+ Directives: gti.Directives{
+ >i.Directive{Tool: "gosl", Directive: "hlsl", Args: []string{"layerparams"}},
+ >i.Directive{Tool: "gosl", Directive: "end", Args: []string{"layerparams"}},
+ >i.Directive{Tool: "gosl", Directive: "start", Args: []string{"layerparams"}},
+ },
+ Fields: ordmap.Make([]ordmap.KeyVal[string, *gti.Field]{
+ {"LayIdx", >i.Field{Name: "LayIdx", Type: "uint32", LocalType: "uint32", Doc: "layer index", Directives: gti.Directives{}, Tag: "inactive:\"+\""}},
+ {"MaxData", >i.Field{Name: "MaxData", Type: "uint32", LocalType: "uint32", Doc: "maximum number of data parallel elements", Directives: gti.Directives{}, Tag: "inactive:\"+\""}},
+ {"PoolSt", >i.Field{Name: "PoolSt", Type: "uint32", LocalType: "uint32", Doc: "start of pools for this layer -- first one is always the layer-wide pool", Directives: gti.Directives{}, Tag: "inactive:\"+\""}},
+ {"NeurSt", >i.Field{Name: "NeurSt", Type: "uint32", LocalType: "uint32", Doc: "start of neurons for this layer in global array (same as Layer.NeurStIdx)", Directives: gti.Directives{}, Tag: "inactive:\"+\""}},
+ {"NeurN", >i.Field{Name: "NeurN", Type: "uint32", LocalType: "uint32", Doc: "number of neurons in layer", Directives: gti.Directives{}, Tag: "inactive:\"+\""}},
+ {"RecvSt", >i.Field{Name: "RecvSt", Type: "uint32", LocalType: "uint32", Doc: "start index into RecvPrjns global array", Directives: gti.Directives{}, Tag: "inactive:\"+\""}},
+ {"RecvN", >i.Field{Name: "RecvN", Type: "uint32", LocalType: "uint32", Doc: "number of recv projections", Directives: gti.Directives{}, Tag: "inactive:\"+\""}},
+ {"SendSt", >i.Field{Name: "SendSt", Type: "uint32", LocalType: "uint32", Doc: "start index into RecvPrjns global array", Directives: gti.Directives{}, Tag: "inactive:\"+\""}},
+ {"SendN", >i.Field{Name: "SendN", Type: "uint32", LocalType: "uint32", Doc: "number of recv projections", Directives: gti.Directives{}, Tag: "inactive:\"+\""}},
+ {"ExtsSt", >i.Field{Name: "ExtsSt", Type: "uint32", LocalType: "uint32", Doc: "starting index in network global Exts list of external input for this layer -- only for Input / Target / Compare layer types", Directives: gti.Directives{}, Tag: "inactive:\"+\""}},
+ {"ShpPlY", >i.Field{Name: "ShpPlY", Type: "int32", LocalType: "int32", Doc: "layer shape Pools Y dimension -- 1 for 2D", Directives: gti.Directives{}, Tag: "inactive:\"+\""}},
+ {"ShpPlX", >i.Field{Name: "ShpPlX", Type: "int32", LocalType: "int32", Doc: "layer shape Pools X dimension -- 1 for 2D", Directives: gti.Directives{}, Tag: "inactive:\"+\""}},
+ {"ShpUnY", >i.Field{Name: "ShpUnY", Type: "int32", LocalType: "int32", Doc: "layer shape Units Y dimension", Directives: gti.Directives{}, Tag: "inactive:\"+\""}},
+ {"ShpUnX", >i.Field{Name: "ShpUnX", Type: "int32", LocalType: "int32", Doc: "layer shape Units X dimension", Directives: gti.Directives{}, Tag: "inactive:\"+\""}},
+ {"pad", >i.Field{Name: "pad", Type: "uint32", LocalType: "uint32", Doc: "", Directives: gti.Directives{}, Tag: ""}},
+ }),
+ Embeds: ordmap.Make([]ordmap.KeyVal[string, *gti.Field]{}),
+ Methods: ordmap.Make([]ordmap.KeyVal[string, *gti.Method]{}),
+})
+
+var _ = gti.AddType(&gti.Type{
+ Name: "github.com/emer/axon/axon.LayerInhibIdxs",
+ ShortName: "axon.LayerInhibIdxs",
+ IDName: "layer-inhib-idxs",
+ Doc: "LayerInhibIdxs contains indexes of layers for between-layer inhibition",
+ Directives: gti.Directives{},
+ Fields: ordmap.Make([]ordmap.KeyVal[string, *gti.Field]{
+ {"Idx1", >i.Field{Name: "Idx1", Type: "int32", LocalType: "int32", Doc: "idx of Layer to get layer-level inhibition from -- set during Build from BuildConfig LayInhib1Name if present -- -1 if not used", Directives: gti.Directives{}, Tag: "inactive:\"+\""}},
+ {"Idx2", >i.Field{Name: "Idx2", Type: "int32", LocalType: "int32", Doc: "idx of Layer to get layer-level inhibition from -- set during Build from BuildConfig LayInhib2Name if present -- -1 if not used", Directives: gti.Directives{}, Tag: "inactive:\"+\""}},
+ {"Idx3", >i.Field{Name: "Idx3", Type: "int32", LocalType: "int32", Doc: "idx of Layer to get layer-level inhibition from -- set during Build from BuildConfig LayInhib3Name if present -- -1 if not used", Directives: gti.Directives{}, Tag: "inactive:\"+\""}},
+ {"Idx4", >i.Field{Name: "Idx4", Type: "int32", LocalType: "int32", Doc: "idx of Layer to geta layer-level inhibition from -- set during Build from BuildConfig LayInhib4Name if present -- -1 if not used", Directives: gti.Directives{}, Tag: "inactive:\"+\""}},
+ }),
+ Embeds: ordmap.Make([]ordmap.KeyVal[string, *gti.Field]{}),
+ Methods: ordmap.Make([]ordmap.KeyVal[string, *gti.Method]{}),
+})
+
+var _ = gti.AddType(>i.Type{
+ Name: "github.com/emer/axon/axon.LayerParams",
+ ShortName: "axon.LayerParams",
+ IDName: "layer-params",
+ Doc: "LayerParams contains all of the layer parameters.\nThese values must remain constant over the course of computation.\nOn the GPU, they are loaded into a uniform.",
+ Directives: gti.Directives{},
+ Fields: ordmap.Make([]ordmap.KeyVal[string, *gti.Field]{
+ {"LayType", >i.Field{Name: "LayType", Type: "github.com/emer/axon/axon.LayerTypes", LocalType: "LayerTypes", Doc: "functional type of layer -- determines functional code path for specialized layer types, and is synchronized with the Layer.Typ value", Directives: gti.Directives{}, Tag: ""}},
+ {"pad", >i.Field{Name: "pad", Type: "int32", LocalType: "int32", Doc: "", Directives: gti.Directives{}, Tag: ""}},
+ {"Acts", >i.Field{Name: "Acts", Type: "github.com/emer/axon/axon.ActParams", LocalType: "ActParams", Doc: "Activation parameters and methods for computing activations", Directives: gti.Directives{}, Tag: "view:\"add-fields\""}},
+ {"Inhib", >i.Field{Name: "Inhib", Type: "github.com/emer/axon/axon.InhibParams", LocalType: "InhibParams", Doc: "Inhibition parameters and methods for computing layer-level inhibition", Directives: gti.Directives{}, Tag: "view:\"add-fields\""}},
+ {"LayInhib", >i.Field{Name: "LayInhib", Type: "github.com/emer/axon/axon.LayerInhibIdxs", LocalType: "LayerInhibIdxs", Doc: "indexes of layers that contribute between-layer inhibition to this layer -- set these indexes via BuildConfig LayInhibXName (X = 1, 2...)", Directives: gti.Directives{}, Tag: "view:\"inline\""}},
+ {"Learn", >i.Field{Name: "Learn", Type: "github.com/emer/axon/axon.LearnNeurParams", LocalType: "LearnNeurParams", Doc: "Learning parameters and methods that operate at the neuron level", Directives: gti.Directives{}, Tag: "view:\"add-fields\""}},
+ {"Bursts", >i.Field{Name: "Bursts", Type: "github.com/emer/axon/axon.BurstParams", LocalType: "BurstParams", Doc: "BurstParams determine how the 5IB Burst activation is computed from CaSpkP integrated spiking values in Super layers -- thresholded.", Directives: gti.Directives{}, Tag: "viewif:\"LayType=SuperLayer\" view:\"inline\""}},
+ {"CT", >i.Field{Name: "CT", Type: "github.com/emer/axon/axon.CTParams", LocalType: "CTParams", Doc: "] params for the CT corticothalamic layer and PTPred layer that generates predictions over the Pulvinar using context -- uses the CtxtGe excitatory input plus stronger NMDA channels to maintain context trace", Directives: gti.Directives{}, Tag: "viewif:\"LayType=[CTLayer,PTPredLayer,PTNotMaintLayer,BLALayer]\" view:\"inline\""}},
+ {"Pulv", >i.Field{Name: "Pulv", Type: "github.com/emer/axon/axon.PulvParams", LocalType: "PulvParams", Doc: "provides parameters for how the plus-phase (outcome) state of Pulvinar thalamic relay cell neurons is computed from the corresponding driver neuron Burst activation (or CaSpkP if not Super)", Directives: gti.Directives{}, Tag: "viewif:\"LayType=PulvinarLayer\" view:\"inline\""}},
+ {"Matrix", >i.Field{Name: "Matrix", Type: "github.com/emer/axon/axon.MatrixParams", LocalType: "MatrixParams", Doc: "parameters for BG Striatum Matrix MSN layers, which are the main Go / NoGo gating units in BG.", Directives: gti.Directives{}, Tag: "viewif:\"LayType=MatrixLayer\" view:\"inline\""}},
+ {"GP", >i.Field{Name: "GP", Type: "github.com/emer/axon/axon.GPParams", LocalType: "GPParams", Doc: "type of GP Layer.", Directives: gti.Directives{}, Tag: "viewif:\"LayType=GPLayer\" view:\"inline\""}},
+ {"VSPatch", >i.Field{Name: "VSPatch", Type: "github.com/emer/axon/axon.VSPatchParams", LocalType: "VSPatchParams", Doc: "parameters for VSPatch learning", Directives: gti.Directives{}, Tag: "viewif:\"LayType=VSPatchLayer\" view:\"inline\""}},
+ {"LDT", >i.Field{Name: "LDT", Type: "github.com/emer/axon/axon.LDTParams", LocalType: "LDTParams", Doc: "parameterizes laterodorsal tegmentum ACh salience neuromodulatory signal, driven by superior colliculus stimulus novelty, US input / absence, and OFC / ACC inhibition", Directives: gti.Directives{}, Tag: "viewif:\"LayType=LDTLayer\" view:\"inline\""}},
+ {"VTA", >i.Field{Name: "VTA", Type: "github.com/emer/axon/axon.VTAParams", LocalType: "VTAParams", Doc: "parameterizes computing overall VTA DA based on LHb PVDA (primary value -- at US time, computed at start of each trial and stored in LHbPVDA global value) and Amygdala (CeM) CS / learned value (LV) activations, which update every cycle.", Directives: gti.Directives{}, Tag: "viewif:\"LayType=VTALayer\" view:\"inline\""}},
+ {"RWPred", >i.Field{Name: "RWPred", Type: "github.com/emer/axon/axon.RWPredParams", LocalType: "RWPredParams", Doc: "parameterizes reward prediction for a simple Rescorla-Wagner learning dynamic (i.e., PV learning in the PVLV framework).", Directives: gti.Directives{}, Tag: "viewif:\"LayType=RWPredLayer\" view:\"inline\""}},
+ {"RWDa", >i.Field{Name: "RWDa", Type: "github.com/emer/axon/axon.RWDaParams", LocalType: "RWDaParams", Doc: "parameterizes reward prediction dopamine for a simple Rescorla-Wagner learning dynamic (i.e., PV learning in the PVLV framework).", Directives: gti.Directives{}, Tag: "viewif:\"LayType=RWDaLayer\" view:\"inline\""}},
+ {"TDInteg", >i.Field{Name: "TDInteg", Type: "github.com/emer/axon/axon.TDIntegParams", LocalType: "TDIntegParams", Doc: "parameterizes TD reward integration layer", Directives: gti.Directives{}, Tag: "viewif:\"LayType=TDIntegLayer\" view:\"inline\""}},
+ {"TDDa", >i.Field{Name: "TDDa", Type: "github.com/emer/axon/axon.TDDaParams", LocalType: "TDDaParams", Doc: "parameterizes dopamine (DA) signal as the temporal difference (TD) between the TDIntegLayer activations in the minus and plus phase.", Directives: gti.Directives{}, Tag: "viewif:\"LayType=TDDaLayer\" view:\"inline\""}},
+ {"Idxs", >i.Field{Name: "Idxs", Type: "github.com/emer/axon/axon.LayerIdxs", LocalType: "LayerIdxs", Doc: "recv and send projection array access info", Directives: gti.Directives{}, Tag: ""}},
+ }),
+ Embeds: ordmap.Make([]ordmap.KeyVal[string, *gti.Field]{}),
+ Methods: ordmap.Make([]ordmap.KeyVal[string, *gti.Method]{}),
+})
+
+var _ = gti.AddType(>i.Type{
+ Name: "github.com/emer/axon/axon.LayerTypes",
+ ShortName: "axon.LayerTypes",
+ IDName: "layer-types",
+ Doc: "LayerTypes is an axon-specific layer type enum,\nthat encompasses all the different algorithm types supported.\nClass parameter styles automatically key off of these types.\nThe first entries must be kept synchronized with the emer.LayerType,\nalthough we replace Hidden -> Super.",
+ Directives: gti.Directives{
+ >i.Directive{Tool: "gosl", Directive: "start", Args: []string{"layertypes"}},
+ >i.Directive{Tool: "enums", Directive: "enum", Args: []string{}},
+ },
+
+ Methods: ordmap.Make([]ordmap.KeyVal[string, *gti.Method]{}),
+})
+
+var _ = gti.AddType(>i.Type{
+ Name: "github.com/emer/axon/axon.ActAvgVals",
+ ShortName: "axon.ActAvgVals",
+ IDName: "act-avg-vals",
+ Doc: "ActAvgVals are long-running-average activation levels stored in the LayerVals,\nfor monitoring and adapting inhibition and possibly scaling parameters.\nAll of these integrate over NData within a network, so are the same across them.",
+ Directives: gti.Directives{
+ >i.Directive{Tool: "gosl", Directive: "start", Args: []string{"layervals"}},
+ },
+ Fields: ordmap.Make([]ordmap.KeyVal[string, *gti.Field]{
+ {"ActMAvg", >i.Field{Name: "ActMAvg", Type: "float32", LocalType: "float32", Doc: "running-average minus-phase activity integrated at Dt.LongAvgTau -- used for adapting inhibition relative to target level", Directives: gti.Directives{}, Tag: "inactive:\"+\""}},
+ {"ActPAvg", >i.Field{Name: "ActPAvg", Type: "float32", LocalType: "float32", Doc: "running-average plus-phase activity integrated at Dt.LongAvgTau", Directives: gti.Directives{}, Tag: "inactive:\"+\""}},
+ {"AvgMaxGeM", >i.Field{Name: "AvgMaxGeM", Type: "float32", LocalType: "float32", Doc: "running-average max of minus-phase Ge value across the layer integrated at Dt.LongAvgTau", Directives: gti.Directives{}, Tag: "inactive:\"+\""}},
+ {"AvgMaxGiM", >i.Field{Name: "AvgMaxGiM", Type: "float32", LocalType: "float32", Doc: "running-average max of minus-phase Gi value across the layer integrated at Dt.LongAvgTau", Directives: gti.Directives{}, Tag: "inactive:\"+\""}},
+ {"GiMult", >i.Field{Name: "GiMult", Type: "float32", LocalType: "float32", Doc: "multiplier on inhibition -- adapted to maintain target activity level", Directives: gti.Directives{}, Tag: "inactive:\"+\""}},
+ {"AdaptThr", >i.Field{Name: "AdaptThr", Type: "float32", LocalType: "float32", Doc: "adaptive threshold -- only used for specialized layers, e.g., VSPatch", Directives: gti.Directives{}, Tag: "inactive:\"+\""}},
+ {"pad", >i.Field{Name: "pad", Type: "float32", LocalType: "float32", Doc: "", Directives: gti.Directives{}, Tag: ""}},
+ }),
+ Embeds: ordmap.Make([]ordmap.KeyVal[string, *gti.Field]{}),
+ Methods: ordmap.Make([]ordmap.KeyVal[string, *gti.Method]{}),
+})
+
+var _ = gti.AddType(>i.Type{
+ Name: "github.com/emer/axon/axon.CorSimStats",
+ ShortName: "axon.CorSimStats",
+ IDName: "cor-sim-stats",
+ Doc: "CorSimStats holds correlation similarity (centered cosine aka normalized dot product)\nstatistics at the layer level",
+ Directives: gti.Directives{},
+ Fields: ordmap.Make([]ordmap.KeyVal[string, *gti.Field]{
+ {"Cor", >i.Field{Name: "Cor", Type: "float32", LocalType: "float32", Doc: "correlation (centered cosine aka normalized dot product) activation difference between ActP and ActM on this alpha-cycle for this layer -- computed by CorSimFmActs called by PlusPhase", Directives: gti.Directives{}, Tag: "inactive:\"+\""}},
+ {"Avg", >i.Field{Name: "Avg", Type: "float32", LocalType: "float32", Doc: "running average of correlation similarity between ActP and ActM -- computed with CorSim.Tau time constant in PlusPhase", Directives: gti.Directives{}, Tag: "inactive:\"+\""}},
+ {"Var", >i.Field{Name: "Var", Type: "float32", LocalType: "float32", Doc: "running variance of correlation similarity between ActP and ActM -- computed with CorSim.Tau time constant in PlusPhase", Directives: gti.Directives{}, Tag: "inactive:\"+\""}},
+ {"pad", >i.Field{Name: "pad", Type: "float32", LocalType: "float32", Doc: "", Directives: gti.Directives{}, Tag: ""}},
+ }),
+ Embeds: ordmap.Make([]ordmap.KeyVal[string, *gti.Field]{}),
+ Methods: ordmap.Make([]ordmap.KeyVal[string, *gti.Method]{}),
+})
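+
+// NOTE: illustrative sketch, not generated code. The Cor statistic above is
+// the centered cosine (normalized dot product) between minus- and plus-phase
+// activation vectors; sub, mean, dot, and norm are hypothetical helpers:
+//
+//	am, bm := sub(actM, mean(actM)), sub(actP, mean(actP))
+//	cor := dot(am, bm) / (norm(am) * norm(bm))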
+
+var _ = gti.AddType(>i.Type{
+ Name: "github.com/emer/axon/axon.LaySpecialVals",
+ ShortName: "axon.LaySpecialVals",
+ IDName: "lay-special-vals",
+ Doc: "LaySpecialVals holds special values used to communicate to other layers\nbased on neural values, used for special algorithms such as RL where\nsome of the computation is done algorithmically.",
+ Directives: gti.Directives{},
+ Fields: ordmap.Make([]ordmap.KeyVal[string, *gti.Field]{
+ {"V1", >i.Field{Name: "V1", Type: "float32", LocalType: "float32", Doc: "one value", Directives: gti.Directives{}, Tag: "inactive:\"+\""}},
+ {"V2", >i.Field{Name: "V2", Type: "float32", LocalType: "float32", Doc: "one value", Directives: gti.Directives{}, Tag: "inactive:\"+\""}},
+ {"V3", >i.Field{Name: "V3", Type: "float32", LocalType: "float32", Doc: "one value", Directives: gti.Directives{}, Tag: "inactive:\"+\""}},
+ {"V4", >i.Field{Name: "V4", Type: "float32", LocalType: "float32", Doc: "one value", Directives: gti.Directives{}, Tag: "inactive:\"+\""}},
+ }),
+ Embeds: ordmap.Make([]ordmap.KeyVal[string, *gti.Field]{}),
+ Methods: ordmap.Make([]ordmap.KeyVal[string, *gti.Method]{}),
+})
+
+var _ = gti.AddType(>i.Type{
+ Name: "github.com/emer/axon/axon.LayerVals",
+ ShortName: "axon.LayerVals",
+ IDName: "layer-vals",
+ Doc: "LayerVals holds extra layer state that is updated per layer.\nIt is sync'd down from the GPU to the CPU after every Cycle.",
+ Directives: gti.Directives{},
+ Fields: ordmap.Make([]ordmap.KeyVal[string, *gti.Field]{
+ {"LayIdx", >i.Field{Name: "LayIdx", Type: "uint32", LocalType: "uint32", Doc: "layer index for these vals", Directives: gti.Directives{}, Tag: "view:\"-\""}},
+ {"DataIdx", >i.Field{Name: "DataIdx", Type: "uint32", LocalType: "uint32", Doc: "data index for these vals", Directives: gti.Directives{}, Tag: "view:\"-\""}},
+ {"RT", >i.Field{Name: "RT", Type: "float32", LocalType: "float32", Doc: "reaction time for this layer in cycles, which is -1 until the Max CaSpkP level (after MaxCycStart) exceeds the Act.Attn.RTThr threshold", Directives: gti.Directives{}, Tag: "inactive:\"-\""}},
+ {"pad", >i.Field{Name: "pad", Type: "uint32", LocalType: "uint32", Doc: "", Directives: gti.Directives{}, Tag: ""}},
+ {"ActAvg", >i.Field{Name: "ActAvg", Type: "github.com/emer/axon/axon.ActAvgVals", LocalType: "ActAvgVals", Doc: "running-average activation levels used for adaptive inhibition, and other adapting values", Directives: gti.Directives{}, Tag: "view:\"inline\""}},
+ {"CorSim", >i.Field{Name: "CorSim", Type: "github.com/emer/axon/axon.CorSimStats", LocalType: "CorSimStats", Doc: "correlation (centered cosine aka normalized dot product) similarity between ActM, ActP states", Directives: gti.Directives{}, Tag: ""}},
+ {"Special", >i.Field{Name: "Special", Type: "github.com/emer/axon/axon.LaySpecialVals", LocalType: "LaySpecialVals", Doc: "special values used to communicate to other layers based on neural values computed on the GPU -- special cross-layer computations happen CPU-side and are sent back into the network via Context on the next cycle -- used for special algorithms such as RL / DA etc", Directives: gti.Directives{}, Tag: "view:\"inline\""}},
+ }),
+ Embeds: ordmap.Make([]ordmap.KeyVal[string, *gti.Field]{}),
+ Methods: ordmap.Make([]ordmap.KeyVal[string, *gti.Method]{}),
+})
+
+var _ = gti.AddType(>i.Type{
+ Name: "github.com/emer/axon/axon.CaLrnParams",
+ ShortName: "axon.CaLrnParams",
+ IDName: "ca-lrn-params",
+ Doc: "CaLrnParams parameterizes the neuron-level calcium signals driving learning:\nCaLrn = NMDA + VGCC Ca sources, where VGCC can be simulated from spiking or\nuse the more complex and dynamic VGCC channel directly.\nCaLrn is then integrated in a cascading manner at multiple time scales:\nCaM (as in calmodulin), CaP (ltP, CaMKII, plus phase), CaD (ltD, DAPK1, minus phase).",
+ Directives: gti.Directives{
+ >i.Directive{Tool: "gosl", Directive: "hlsl", Args: []string{"learn_neur"}},
+ >i.Directive{Tool: "gosl", Directive: "end", Args: []string{"learn_neur"}},
+ >i.Directive{Tool: "gosl", Directive: "start", Args: []string{"learn_neur"}},
+ },
+ Fields: ordmap.Make([]ordmap.KeyVal[string, *gti.Field]{
+ {"Norm", >i.Field{Name: "Norm", Type: "float32", LocalType: "float32", Doc: "denomenator used for normalizing CaLrn, so the max is roughly 1 - 1.5 or so, which works best in terms of previous standard learning rules, and overall learning performance", Directives: gti.Directives{}, Tag: "def:\"80\""}},
+ {"SpkVGCC", >i.Field{Name: "SpkVGCC", Type: "goki.dev/gosl/v2/slbool.Bool", LocalType: "slbool.Bool", Doc: "use spikes to generate VGCC instead of actual VGCC current -- see SpkVGCCa for calcium contribution from each spike", Directives: gti.Directives{}, Tag: "def:\"true\""}},
+ {"SpkVgccCa", >i.Field{Name: "SpkVgccCa", Type: "float32", LocalType: "float32", Doc: "multiplier on spike for computing Ca contribution to CaLrn in SpkVGCC mode", Directives: gti.Directives{}, Tag: "def:\"35\""}},
+ {"VgccTau", >i.Field{Name: "VgccTau", Type: "float32", LocalType: "float32", Doc: "time constant of decay for VgccCa calcium -- it is highly transient around spikes, so decay and diffusion factors are more important than for long-lasting NMDA factor. VgccCa is integrated separately int VgccCaInt prior to adding into NMDA Ca in CaLrn", Directives: gti.Directives{}, Tag: "def:\"10\""}},
+ {"Dt", >i.Field{Name: "Dt", Type: "github.com/emer/axon/kinase.CaDtParams", LocalType: "kinase.CaDtParams", Doc: "time constants for integrating CaLrn across M, P and D cascading levels", Directives: gti.Directives{}, Tag: "view:\"inline\""}},
+ {"UpdtThr", >i.Field{Name: "UpdtThr", Type: "float32", LocalType: "float32", Doc: "Threshold on CaSpkP CaSpkD value for updating synapse-level Ca values (SynCa) -- this is purely a performance optimization that excludes random infrequent spikes -- 0.05 works well on larger networks but not smaller, which require the .01 default.", Directives: gti.Directives{}, Tag: "def:\"0.01,0.02,0.5\""}},
+ {"VgccDt", >i.Field{Name: "VgccDt", Type: "float32", LocalType: "float32", Doc: "rate = 1 / tau", Directives: gti.Directives{}, Tag: "view:\"-\" json:\"-\" xml:\"-\" inactive:\"+\""}},
+ {"NormInv", >i.Field{Name: "NormInv", Type: "float32", LocalType: "float32", Doc: "= 1 / Norm", Directives: gti.Directives{}, Tag: "view:\"-\" json:\"-\" xml:\"-\" inactive:\"+\""}},
+ {"pad", >i.Field{Name: "pad", Type: "int32", LocalType: "int32", Doc: "", Directives: gti.Directives{}, Tag: ""}},
+ }),
+ Embeds: ordmap.Make([]ordmap.KeyVal[string, *gti.Field]{}),
+ Methods: ordmap.Make([]ordmap.KeyVal[string, *gti.Method]{}),
+})
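+
+// NOTE: illustrative sketch, not generated code. The precomputed fields are
+// reciprocals per their docs, and CaLrn is normalized via NormInv so its max
+// is roughly 1 - 1.5:
+//
+//	VgccDt = 1 / VgccTau
+//	NormInv = 1 / Norm
+//	caLrn := NormInv * (nmdaCa + vgccCa) // combined Ca sources, roughly unit-scaled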
+
+var _ = gti.AddType(>i.Type{
+ Name: "github.com/emer/axon/axon.CaSpkParams",
+ ShortName: "axon.CaSpkParams",
+ IDName: "ca-spk-params",
+ Doc: "CaSpkParams parameterizes the neuron-level spike-driven calcium\nsignals, starting with CaSyn that is integrated at the neuron level\nand drives synapse-level, pre * post Ca integration, which provides the Tr\ntrace that multiplies error signals, and drives learning directly for Target layers.\nCaSpk* values are integrated separately at the Neuron level and used for UpdtThr\nand RLRate as a proxy for the activation (spiking) based learning signal.",
+ Directives: gti.Directives{},
+ Fields: ordmap.Make([]ordmap.KeyVal[string, *gti.Field]{
+ {"SpikeG", >i.Field{Name: "SpikeG", Type: "float32", LocalType: "float32", Doc: "gain multiplier on spike for computing CaSpk: increasing this directly affects the magnitude of the trace values, learning rate in Target layers, and other factors that depend on CaSpk values: RLRate, UpdtThr. Prjn.KinaseCa.SpikeG provides an additional gain factor specific to the synapse-level trace factors, without affecting neuron-level CaSpk values. Larger networks require higher gain factors at the neuron level -- 12, vs 8 for smaller.", Directives: gti.Directives{}, Tag: "def:\"8,12\""}},
+ {"SynTau", >i.Field{Name: "SynTau", Type: "float32", LocalType: "float32", Doc: "time constant for integrating spike-driven calcium trace at sender and recv neurons, CaSyn, which then drives synapse-level integration of the joint pre * post synapse-level activity, in cycles (msec). Note: if this param is changed, then there will be a change in effective learning rate that can be compensated for by multiplying PrjnParams.Learn.KinaseCa.SpikeG by sqrt(30 / sqrt(SynTau)", Directives: gti.Directives{}, Tag: "def:\"30\" min:\"1\""}},
+ {"SynDt", >i.Field{Name: "SynDt", Type: "float32", LocalType: "float32", Doc: "rate = 1 / tau", Directives: gti.Directives{}, Tag: "view:\"-\" json:\"-\" xml:\"-\" inactive:\"+\""}},
+ {"pad", >i.Field{Name: "pad", Type: "int32", LocalType: "int32", Doc: "", Directives: gti.Directives{}, Tag: ""}},
+ {"Dt", >i.Field{Name: "Dt", Type: "github.com/emer/axon/kinase.CaDtParams", LocalType: "kinase.CaDtParams", Doc: "time constants for integrating CaSpk across M, P and D cascading levels -- these are typically the same as in CaLrn and Prjn level for synaptic integration, except for the M factor.", Directives: gti.Directives{}, Tag: "view:\"inline\""}},
+ }),
+ Embeds: ordmap.Make([]ordmap.KeyVal[string, *gti.Field]{}),
+ Methods: ordmap.Make([]ordmap.KeyVal[string, *gti.Method]{}),
+})
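+
+// NOTE: illustrative sketch, not generated code. SynDt = 1 / SynTau, and
+// CaSyn integrates spike-driven Ca as a generic exponential trace of roughly
+// this form (the exact generated update may differ):
+//
+//	caSyn += SynDt * (SpikeG*spike - caSyn)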
+
+var _ = gti.AddType(>i.Type{
+ Name: "github.com/emer/axon/axon.TrgAvgActParams",
+ ShortName: "axon.TrgAvgActParams",
+ IDName: "trg-avg-act-params",
+ Doc: "TrgAvgActParams govern the target and actual long-term average activity in neurons.\nTarget value is adapted by neuron-wise error and difference in actual vs. target.\ndrives synaptic scaling at a slow timescale (Network.SlowInterval).",
+ Directives: gti.Directives{},
+ Fields: ordmap.Make([]ordmap.KeyVal[string, *gti.Field]{
+ {"On", >i.Field{Name: "On", Type: "goki.dev/gosl/v2/slbool.Bool", LocalType: "slbool.Bool", Doc: "whether to use target average activity mechanism to scale synaptic weights", Directives: gti.Directives{}, Tag: ""}},
+ {"GiBaseInit", >i.Field{Name: "GiBaseInit", Type: "float32", LocalType: "float32", Doc: "if this is > 0, then each neuron's GiBase is initialized as this proportion of TrgRange.Max - TrgAvg -- gives neurons differences in intrinsic inhibition / leak as a starting bias", Directives: gti.Directives{}, Tag: ""}},
+ {"ErrLRate", >i.Field{Name: "ErrLRate", Type: "float32", LocalType: "float32", Doc: "learning rate for adjustments to Trg value based on unit-level error signal. Population TrgAvg values are renormalized to fixed overall average in TrgRange. Generally, deviating from the default doesn't make much difference.", Directives: gti.Directives{}, Tag: "viewif:\"On\" def:\"0.02\""}},
+ {"SynScaleRate", >i.Field{Name: "SynScaleRate", Type: "float32", LocalType: "float32", Doc: "rate parameter for how much to scale synaptic weights in proportion to the AvgDif between target and actual proportion activity -- this determines the effective strength of the constraint, and larger models may need more than the weaker default value.", Directives: gti.Directives{}, Tag: "viewif:\"On\" def:\"0.005,0.0002\""}},
+ {"SubMean", >i.Field{Name: "SubMean", Type: "float32", LocalType: "float32", Doc: "amount of mean trg change to subtract -- 1 = full zero sum. 1 works best in general -- but in some cases it may be better to start with 0 and then increase using network SetSubMean method at a later point.", Directives: gti.Directives{}, Tag: "viewif:\"On\" def:\"0,1\""}},
+ {"Permute", >i.Field{Name: "Permute", Type: "goki.dev/gosl/v2/slbool.Bool", LocalType: "slbool.Bool", Doc: "permute the order of TrgAvg values within layer -- otherwise they are just assigned in order from highest to lowest for easy visualization -- generally must be true if any topographic weights are being used", Directives: gti.Directives{}, Tag: "viewif:\"On\" def:\"true\""}},
+ {"Pool", >i.Field{Name: "Pool", Type: "goki.dev/gosl/v2/slbool.Bool", LocalType: "slbool.Bool", Doc: "use pool-level target values if pool-level inhibition and 4D pooled layers are present -- if pool sizes are relatively small, then may not be useful to distribute targets just within pool", Directives: gti.Directives{}, Tag: "viewif:\"On\""}},
+ {"pad", >i.Field{Name: "pad", Type: "int32", LocalType: "int32", Doc: "", Directives: gti.Directives{}, Tag: ""}},
+ {"TrgRange", >i.Field{Name: "TrgRange", Type: "goki.dev/etable/v2/minmax.F32", LocalType: "minmax.F32", Doc: "range of target normalized average activations -- individual neurons are assigned values within this range to TrgAvg, and clamped within this range.", Directives: gti.Directives{}, Tag: "viewif:\"On\" def:\"{'Min':0.5,'Max':2}\""}},
+ }),
+ Embeds: ordmap.Make([]ordmap.KeyVal[string, *gti.Field]{}),
+ Methods: ordmap.Make([]ordmap.KeyVal[string, *gti.Method]{}),
+})
+
+var _ = gti.AddType(>i.Type{
+ Name: "github.com/emer/axon/axon.RLRateParams",
+ ShortName: "axon.RLRateParams",
+ IDName: "rl-rate-params",
+ Doc: "RLRateParams are recv neuron learning rate modulation parameters.\nHas two factors: the derivative of the sigmoid based on CaSpkD\nactivity levels, and based on the phase-wise differences in activity (Diff).",
+ Directives: gti.Directives{},
+ Fields: ordmap.Make([]ordmap.KeyVal[string, *gti.Field]{
+ {"On", >i.Field{Name: "On", Type: "goki.dev/gosl/v2/slbool.Bool", LocalType: "slbool.Bool", Doc: "use learning rate modulation", Directives: gti.Directives{}, Tag: "def:\"true\""}},
+ {"SigmoidMin", >i.Field{Name: "SigmoidMin", Type: "float32", LocalType: "float32", Doc: "minimum learning rate multiplier for sigmoidal act (1-act) factor -- prevents lrate from going too low for extreme values. Set to 1 to disable Sigmoid derivative factor, which is default for Target layers.", Directives: gti.Directives{}, Tag: "viewif:\"On\" def:\"0.05,1\""}},
+ {"Diff", >i.Field{Name: "Diff", Type: "goki.dev/gosl/v2/slbool.Bool", LocalType: "slbool.Bool", Doc: "modulate learning rate as a function of plus - minus differences", Directives: gti.Directives{}, Tag: "viewif:\"On\""}},
+ {"SpkThr", >i.Field{Name: "SpkThr", Type: "float32", LocalType: "float32", Doc: "threshold on Max(CaSpkP, CaSpkD) below which Min lrate applies -- must be > 0 to prevent div by zero", Directives: gti.Directives{}, Tag: "viewif:\"On&&Diff\" def:\"0.1\""}},
+ {"DiffThr", >i.Field{Name: "DiffThr", Type: "float32", LocalType: "float32", Doc: "threshold on recv neuron error delta, i.e., |CaSpkP - CaSpkD| below which lrate is at Min value", Directives: gti.Directives{}, Tag: "viewif:\"On&&Diff\" def:\"0.02\""}},
+ {"Min", >i.Field{Name: "Min", Type: "float32", LocalType: "float32", Doc: "for Diff component, minimum learning rate value when below ActDiffThr", Directives: gti.Directives{}, Tag: "viewif:\"On&&Diff\" def:\"0.001\""}},
+ {"pad", >i.Field{Name: "pad", Type: "int32", LocalType: "int32", Doc: "", Directives: gti.Directives{}, Tag: ""}},
+ }),
+ Embeds: ordmap.Make([]ordmap.KeyVal[string, *gti.Field]{}),
+ Methods: ordmap.Make([]ordmap.KeyVal[string, *gti.Method]{}),
+})
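+
+// NOTE: illustrative sketch, not generated code, combining the two factors
+// documented above: the sigmoid derivative of CaSpkD activity (floored at
+// SigmoidMin) and the phase-difference factor |CaSpkP-CaSpkD| / Max(CaSpkP,
+// CaSpkD), gated by SpkThr and DiffThr:
+//
+//	sig := max(SigmoidMin, caSpkD*(1-caSpkD))
+//	diff := Min
+//	if max(caSpkP, caSpkD) > SpkThr {
+//		if d := abs(caSpkP-caSpkD) / max(caSpkP, caSpkD); d > DiffThr {
+//			diff = d
+//		}
+//	}
+//	rlRate := sig * diff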
+
+var _ = gti.AddType(>i.Type{
+ Name: "github.com/emer/axon/axon.LearnNeurParams",
+ ShortName: "axon.LearnNeurParams",
+ IDName: "learn-neur-params",
+ Doc: "axon.LearnNeurParams manages learning-related parameters at the neuron-level.\nThis is mainly the running average activations that drive learning",
+ Directives: gti.Directives{},
+ Fields: ordmap.Make([]ordmap.KeyVal[string, *gti.Field]{
+ {"CaLearn", >i.Field{Name: "CaLearn", Type: "github.com/emer/axon/axon.CaLrnParams", LocalType: "CaLrnParams", Doc: "parameterizes the neuron-level calcium signals driving learning: CaLrn = NMDA + VGCC Ca sources, where VGCC can be simulated from spiking or use the more complex and dynamic VGCC channel directly. CaLrn is then integrated in a cascading manner at multiple time scales: CaM (as in calmodulin), CaP (ltP, CaMKII, plus phase), CaD (ltD, DAPK1, minus phase).", Directives: gti.Directives{}, Tag: "view:\"inline\""}},
+ {"CaSpk", >i.Field{Name: "CaSpk", Type: "github.com/emer/axon/axon.CaSpkParams", LocalType: "CaSpkParams", Doc: "parameterizes the neuron-level spike-driven calcium signals, starting with CaSyn that is integrated at the neuron level, and drives synapse-level, pre * post Ca integration, which provides the Tr trace that multiplies error signals, and drives learning directly for Target layers. CaSpk* values are integrated separately at the Neuron level and used for UpdtThr and RLRate as a proxy for the activation (spiking) based learning signal.", Directives: gti.Directives{}, Tag: "view:\"inline\""}},
+ {"LrnNMDA", >i.Field{Name: "LrnNMDA", Type: "github.com/emer/axon/chans.NMDAParams", LocalType: "chans.NMDAParams", Doc: "NMDA channel parameters used for learning, vs. the ones driving activation -- allows exploration of learning parameters independent of their effects on active maintenance contributions of NMDA, and may be supported by different receptor subtypes", Directives: gti.Directives{}, Tag: "view:\"inline\""}},
+ {"TrgAvgAct", >i.Field{Name: "TrgAvgAct", Type: "github.com/emer/axon/axon.TrgAvgActParams", LocalType: "TrgAvgActParams", Doc: "synaptic scaling parameters for regulating overall average activity compared to neuron's own target level", Directives: gti.Directives{}, Tag: "view:\"inline\""}},
+ {"RLRate", >i.Field{Name: "RLRate", Type: "github.com/emer/axon/axon.RLRateParams", LocalType: "RLRateParams", Doc: "recv neuron learning rate modulation params -- an additional error-based modulation of learning for receiver side: RLRate = |CaSpkP - CaSpkD| / Max(CaSpkP, CaSpkD)", Directives: gti.Directives{}, Tag: "view:\"inline\""}},
+ {"NeuroMod", >i.Field{Name: "NeuroMod", Type: "github.com/emer/axon/axon.NeuroModParams", LocalType: "NeuroModParams", Doc: "neuromodulation effects on learning rate and activity, as a function of layer-level DA and ACh values, which are updated from global Context values, and computed from reinforcement learning algorithms", Directives: gti.Directives{}, Tag: "view:\"inline\""}},
+ }),
+ Embeds: ordmap.Make([]ordmap.KeyVal[string, *gti.Field]{}),
+ Methods: ordmap.Make([]ordmap.KeyVal[string, *gti.Method]{}),
+})
+
+var _ = gti.AddType(>i.Type{
+ Name: "github.com/emer/axon/axon.SWtInitParams",
+ ShortName: "axon.SWtInitParams",
+ IDName: "s-wt-init-params",
+ Doc: "SWtInitParams for initial SWt values",
+ Directives: gti.Directives{},
+ Fields: ordmap.Make([]ordmap.KeyVal[string, *gti.Field]{
+ {"SPct", >i.Field{Name: "SPct", Type: "float32", LocalType: "float32", Doc: "how much of the initial random weights are captured in the SWt values -- rest goes into the LWt values. 1 gives the strongest initial biasing effect, for larger models that need more structural support. 0.5 should work for most models where stronger constraints are not needed.", Directives: gti.Directives{}, Tag: "min:\"0\" max:\"1\" def:\"0,1,0.5\""}},
+ {"Mean", >i.Field{Name: "Mean", Type: "float32", LocalType: "float32", Doc: "target mean weight values across receiving neuron's projection -- the mean SWt values are constrained to remain at this value. some projections may benefit from lower mean of .4", Directives: gti.Directives{}, Tag: "def:\"0.5,0.4\""}},
+ {"Var", >i.Field{Name: "Var", Type: "float32", LocalType: "float32", Doc: "initial variance in weight values, prior to constraints.", Directives: gti.Directives{}, Tag: "def:\"0.25\""}},
+ {"Sym", >i.Field{Name: "Sym", Type: "goki.dev/gosl/v2/slbool.Bool", LocalType: "slbool.Bool", Doc: "symmetrize the initial weight values with those in reciprocal projection -- typically true for bidirectional excitatory connections", Directives: gti.Directives{}, Tag: "def:\"true\""}},
+ }),
+ Embeds: ordmap.Make([]ordmap.KeyVal[string, *gti.Field]{}),
+ Methods: ordmap.Make([]ordmap.KeyVal[string, *gti.Method]{}),
+})
+
+var _ = gti.AddType(>i.Type{
+ Name: "github.com/emer/axon/axon.SWtAdaptParams",
+ ShortName: "axon.SWtAdaptParams",
+ IDName: "s-wt-adapt-params",
+ Doc: "SWtAdaptParams manages adaptation of SWt values",
+ Directives: gti.Directives{},
+ Fields: ordmap.Make([]ordmap.KeyVal[string, *gti.Field]{
+ {"On", >i.Field{Name: "On", Type: "goki.dev/gosl/v2/slbool.Bool", LocalType: "slbool.Bool", Doc: "if true, adaptation is active -- if false, SWt values are not updated, in which case it is generally good to have Init.SPct=0 too.", Directives: gti.Directives{}, Tag: ""}},
+ {"LRate", >i.Field{Name: "LRate", Type: "float32", LocalType: "float32", Doc: "learning rate multiplier on the accumulated DWt values (which already have fast LRate applied) to incorporate into SWt during slow outer loop updating -- lower values impose stronger constraints, for larger networks that need more structural support, e.g., 0.001 is better after 1,000 epochs in large models. 0.1 is fine for smaller models.", Directives: gti.Directives{}, Tag: "viewif:\"On\" def:\"0.1,0.01,0.001,0.0002\""}},
+ {"SubMean", >i.Field{Name: "SubMean", Type: "float32", LocalType: "float32", Doc: "amount of mean to subtract from SWt delta when updating -- generally best to set to 1", Directives: gti.Directives{}, Tag: "viewif:\"On\" def:\"1\""}},
+ {"SigGain", >i.Field{Name: "SigGain", Type: "float32", LocalType: "float32", Doc: "gain of sigmoidal constrast enhancement function used to transform learned, linear LWt values into Wt values", Directives: gti.Directives{}, Tag: "viewif:\"On\" def:\"6\""}},
+ }),
+ Embeds: ordmap.Make([]ordmap.KeyVal[string, *gti.Field]{}),
+ Methods: ordmap.Make([]ordmap.KeyVal[string, *gti.Method]{}),
+})
+
+var _ = gti.AddType(>i.Type{
+ Name: "github.com/emer/axon/axon.SWtParams",
+ ShortName: "axon.SWtParams",
+ IDName: "s-wt-params",
+ Doc: "SWtParams manages structural, slowly adapting weight values (SWt),\nin terms of initialization and updating over course of learning.\nSWts impose initial and slowly adapting constraints on neuron connectivity\nto encourage differentiation of neuron representations and overall good behavior\nin terms of not hogging the representational space.\nThe TrgAvg activity constraint is not enforced through SWt -- it needs to be\nmore dynamic and supported by the regular learned weights.",
+ Directives: gti.Directives{
+ >i.Directive{Tool: "gosl", Directive: "start", Args: []string{"learn"}},
+ },
+ Fields: ordmap.Make([]ordmap.KeyVal[string, *gti.Field]{
+ {"Init", >i.Field{Name: "Init", Type: "github.com/emer/axon/axon.SWtInitParams", LocalType: "SWtInitParams", Doc: "initialization of SWt values", Directives: gti.Directives{}, Tag: "view:\"inline\""}},
+ {"Adapt", >i.Field{Name: "Adapt", Type: "github.com/emer/axon/axon.SWtAdaptParams", LocalType: "SWtAdaptParams", Doc: "adaptation of SWt values in response to LWt learning", Directives: gti.Directives{}, Tag: "view:\"inline\""}},
+ {"Limit", >i.Field{Name: "Limit", Type: "goki.dev/etable/v2/minmax.F32", LocalType: "minmax.F32", Doc: "range limits for SWt values", Directives: gti.Directives{}, Tag: "def:\"{'Min':0.2,'Max':0.8}\" view:\"inline\""}},
+ }),
+ Embeds: ordmap.Make([]ordmap.KeyVal[string, *gti.Field]{}),
+ Methods: ordmap.Make([]ordmap.KeyVal[string, *gti.Method]{}),
+})
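+
+// NOTE: illustrative sketch, not generated code. The effective weight is the
+// slow structural SWt times a sigmoidal contrast-enhanced version of the
+// learned linear LWt (gain = Adapt.SigGain); one standard form of that
+// sigmoid, assuming lw in (0,1):
+//
+//	sig := 1 / (1 + pow((1-lw)/lw, SigGain))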
+
+var _ = gti.AddType(>i.Type{
+ Name: "github.com/emer/axon/axon.LRateParams",
+ ShortName: "axon.LRateParams",
+ IDName: "l-rate-params",
+ Doc: "LRateParams manages learning rate parameters",
+ Directives: gti.Directives{
+ >i.Directive{Tool: "gosl", Directive: "start", Args: []string{"learn"}},
+ },
+ Fields: ordmap.Make([]ordmap.KeyVal[string, *gti.Field]{
+ {"Base", >i.Field{Name: "Base", Type: "float32", LocalType: "float32", Doc: "base learning rate for this projection -- can be modulated by other factors below -- for larger networks, use slower rates such as 0.04, smaller networks can use faster 0.2.", Directives: gti.Directives{}, Tag: "def:\"0.04,0.1,0.2\""}},
+ {"Sched", >i.Field{Name: "Sched", Type: "float32", LocalType: "float32", Doc: "scheduled learning rate multiplier, simulating reduction in plasticity over aging", Directives: gti.Directives{}, Tag: ""}},
+ {"Mod", >i.Field{Name: "Mod", Type: "float32", LocalType: "float32", Doc: "dynamic learning rate modulation due to neuromodulatory or other such factors", Directives: gti.Directives{}, Tag: ""}},
+ {"Eff", >i.Field{Name: "Eff", Type: "float32", LocalType: "float32", Doc: "effective actual learning rate multiplier used in computing DWt: Eff = eMod * Sched * Base", Directives: gti.Directives{}, Tag: "inactive:\"+\""}},
+ }),
+ Embeds: ordmap.Make([]ordmap.KeyVal[string, *gti.Field]{}),
+ Methods: ordmap.Make([]ordmap.KeyVal[string, *gti.Method]{}),
+})
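+
+// NOTE: illustrative sketch, not generated code: per the Eff doc above, the
+// effective learning rate is just the product of the three factors:
+//
+//	Eff = Mod * Sched * Base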
+
+var _ = gti.AddType(>i.Type{
+ Name: "github.com/emer/axon/axon.TraceParams",
+ ShortName: "axon.TraceParams",
+ IDName: "trace-params",
+ Doc: "TraceParams manages learning rate parameters",
+ Directives: gti.Directives{},
+ Fields: ordmap.Make([]ordmap.KeyVal[string, *gti.Field]{
+ {"Tau", >i.Field{Name: "Tau", Type: "float32", LocalType: "float32", Doc: "time constant for integrating trace over theta cycle timescales -- governs the decay rate of syanptic trace", Directives: gti.Directives{}, Tag: "def:\"1,2,4\""}},
+ {"SubMean", >i.Field{Name: "SubMean", Type: "float32", LocalType: "float32", Doc: "amount of the mean dWt to subtract, producing a zero-sum effect -- 1.0 = full zero-sum dWt -- only on non-zero DWts. typically set to 0 for standard trace learning projections, although some require it for stability over the long haul. can use SetSubMean to set to 1 after significant early learning has occurred with 0. Some special prjn types (e.g., Hebb) benefit from SubMean = 1 always", Directives: gti.Directives{}, Tag: "def:\"0,1\""}},
+ {"LearnThr", >i.Field{Name: "LearnThr", Type: "float32", LocalType: "float32", Doc: "threshold for learning, depending on different algorithms -- in Matrix and VSPatch it applies to normalized GeIntNorm value -- setting this relatively high encourages sparser representations", Directives: gti.Directives{}, Tag: ""}},
+ {"Dt", >i.Field{Name: "Dt", Type: "float32", LocalType: "float32", Doc: "rate = 1 / tau", Directives: gti.Directives{}, Tag: "view:\"-\" json:\"-\" xml:\"-\" inactive:\"+\""}},
+ }),
+ Embeds: ordmap.Make([]ordmap.KeyVal[string, *gti.Field]{}),
+ Methods: ordmap.Make([]ordmap.KeyVal[string, *gti.Method]{}),
+})
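+
+// NOTE: illustrative sketch, not generated code. Dt = 1/Tau, and the synaptic
+// trace accumulates Ca credit while decaying at rate Dt; a generic
+// exponential-integrator form (the exact generated update may differ):
+//
+//	tr += Dt * (ca - tr)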
+
+var _ = gti.AddType(>i.Type{
+ Name: "github.com/emer/axon/axon.LRateMod",
+ ShortName: "axon.LRateMod",
+ IDName: "l-rate-mod",
+ Doc: "LRateMod implements global learning rate modulation, based on a performance-based\nfactor, for example error. Increasing levels of the factor = higher learning rate.\nThis can be added to a Sim and called prior to DWt() to dynamically change lrate\nbased on overall network performance.",
+ Directives: gti.Directives{},
+ Fields: ordmap.Make([]ordmap.KeyVal[string, *gti.Field]{
+ {"On", >i.Field{Name: "On", Type: "goki.dev/gosl/v2/slbool.Bool", LocalType: "slbool.Bool", Doc: "toggle use of this modulation factor", Directives: gti.Directives{}, Tag: ""}},
+ {"Base", >i.Field{Name: "Base", Type: "float32", LocalType: "float32", Doc: "baseline learning rate -- what you get for correct cases", Directives: gti.Directives{}, Tag: "viewif:\"On\" min:\"0\" max:\"1\""}},
+ {"pad", >i.Field{Name: "pad", Type: "int32", LocalType: "int32", Doc: "", Directives: gti.Directives{}, Tag: ""}},
+ {"Range", >i.Field{Name: "Range", Type: "goki.dev/etable/v2/minmax.F32", LocalType: "minmax.F32", Doc: "defines the range over which modulation occurs for the modulator factor -- Min and below get the Base level of learning rate modulation, Max and above get a modulation of 1", Directives: gti.Directives{}, Tag: "viewif:\"On\""}},
+ }),
+ Embeds: ordmap.Make([]ordmap.KeyVal[string, *gti.Field]{}),
+ Methods: ordmap.Make([]ordmap.KeyVal[string, *gti.Method]{}),
+})
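+
+// NOTE: illustrative sketch, not generated code. Per the Range doc above,
+// modulation ramps linearly from Base (at Range.Min and below) to 1 (at
+// Range.Max and above); clamp01 is a hypothetical helper:
+//
+//	frac := clamp01((fact - Range.Min) / (Range.Max - Range.Min))
+//	mod := Base + (1-Base)*frac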
+
+var _ = gti.AddType(>i.Type{
+ Name: "github.com/emer/axon/axon.LearnSynParams",
+ ShortName: "axon.LearnSynParams",
+ IDName: "learn-syn-params",
+ Doc: "LearnSynParams manages learning-related parameters at the synapse-level.",
+ Directives: gti.Directives{
+ >i.Directive{Tool: "gosl", Directive: "start", Args: []string{"learn"}},
+ },
+ Fields: ordmap.Make([]ordmap.KeyVal[string, *gti.Field]{
+ {"Learn", >i.Field{Name: "Learn", Type: "goki.dev/gosl/v2/slbool.Bool", LocalType: "slbool.Bool", Doc: "enable learning for this projection", Directives: gti.Directives{}, Tag: ""}},
+ {"pad", >i.Field{Name: "pad", Type: "int32", LocalType: "int32", Doc: "", Directives: gti.Directives{}, Tag: ""}},
+ {"LRate", >i.Field{Name: "LRate", Type: "github.com/emer/axon/axon.LRateParams", LocalType: "LRateParams", Doc: "learning rate parameters, supporting two levels of modulation on top of base learning rate.", Directives: gti.Directives{}, Tag: "viewif:\"Learn\""}},
+ {"Trace", >i.Field{Name: "Trace", Type: "github.com/emer/axon/axon.TraceParams", LocalType: "TraceParams", Doc: "trace-based learning parameters", Directives: gti.Directives{}, Tag: "viewif:\"Learn\""}},
+ {"KinaseCa", >i.Field{Name: "KinaseCa", Type: "github.com/emer/axon/kinase.CaParams", LocalType: "kinase.CaParams", Doc: "kinase calcium Ca integration parameters", Directives: gti.Directives{}, Tag: "viewif:\"Learn\" view:\"inline\""}},
+ }),
+ Embeds: ordmap.Make([]ordmap.KeyVal[string, *gti.Field]{}),
+ Methods: ordmap.Make([]ordmap.KeyVal[string, *gti.Method]{}),
+})
+
+var _ = gti.AddType(>i.Type{
+ Name: "github.com/emer/axon/axon.Network",
+ ShortName: "axon.Network",
+ IDName: "network",
+ Doc: "axon.Network implements the Axon spiking model,\nbuilding on the algorithm-independent NetworkBase that manages\nall the infrastructure.",
+ Directives: gti.Directives{},
+ Fields: ordmap.Make([]ordmap.KeyVal[string, *gti.Field]{}),
+ Embeds: ordmap.Make([]ordmap.KeyVal[string, *gti.Field]{
+ {"NetworkBase", >i.Field{Name: "NetworkBase", Type: "github.com/emer/axon/axon.NetworkBase", LocalType: "NetworkBase", Doc: "", Directives: gti.Directives{}, Tag: ""}},
+ }),
+ Methods: ordmap.Make([]ordmap.KeyVal[string, *gti.Method]{}),
+})
+
+var _ = gti.AddType(>i.Type{
+ Name: "github.com/emer/axon/axon.NetworkBase",
+ ShortName: "axon.NetworkBase",
+ IDName: "network-base",
+ Doc: "NetworkBase manages the basic structural components of a network (layers).\nThe main Network then can just have the algorithm-specific code.",
+ Directives: gti.Directives{},
+ Fields: ordmap.Make([]ordmap.KeyVal[string, *gti.Field]{
+ {"EmerNet", >i.Field{Name: "EmerNet", Type: "github.com/emer/emergent/v2/emer.Network", LocalType: "emer.Network", Doc: "we need a pointer to ourselves as an emer.Network, which can always be used to extract the true underlying type of object when network is embedded in other structs -- function receivers do not have this ability so this is necessary.", Directives: gti.Directives{}, Tag: "copy:\"-\" json:\"-\" xml:\"-\" view:\"-\""}},
+ {"Nm", >i.Field{Name: "Nm", Type: "string", LocalType: "string", Doc: "overall name of network -- helps discriminate if there are multiple", Directives: gti.Directives{}, Tag: ""}},
+ {"WtsFile", >i.Field{Name: "WtsFile", Type: "string", LocalType: "string", Doc: "filename of last weights file loaded or saved", Directives: gti.Directives{}, Tag: ""}},
+ {"PVLV", >i.Field{Name: "PVLV", Type: "github.com/emer/axon/axon.PVLV", LocalType: "PVLV", Doc: "PVLV system for phasic dopamine signaling, including internal drives, US outcomes. Core LHb (lateral habenula) and VTA (ventral tegmental area) dopamine are computed in equations using inputs from specialized network layers (LDTLayer driven by BLA, CeM layers, VSPatchLayer). Renders USLayer, PVLayer, DrivesLayer representations based on state updated here.", Directives: gti.Directives{}, Tag: ""}},
+ {"LayMap", >i.Field{Name: "LayMap", Type: "map[string]*github.com/emer/axon/axon.Layer", LocalType: "map[string]*Layer", Doc: "map of name to layers -- layer names must be unique", Directives: gti.Directives{}, Tag: "view:\"-\""}},
+ {"LayClassMap", >i.Field{Name: "LayClassMap", Type: "map[string][]string", LocalType: "map[string][]string", Doc: "map of layer classes -- made during Build", Directives: gti.Directives{}, Tag: "view:\"-\""}},
+ {"MinPos", >i.Field{Name: "MinPos", Type: "goki.dev/mat32/v2.Vec3", LocalType: "mat32.Vec3", Doc: "minimum display position in network", Directives: gti.Directives{}, Tag: "view:\"-\""}},
+ {"MaxPos", >i.Field{Name: "MaxPos", Type: "goki.dev/mat32/v2.Vec3", LocalType: "mat32.Vec3", Doc: "maximum display position in network", Directives: gti.Directives{}, Tag: "view:\"-\""}},
+ {"MetaData", >i.Field{Name: "MetaData", Type: "map[string]string", LocalType: "map[string]string", Doc: "optional metadata that is saved in network weights files -- e.g., can indicate number of epochs that were trained, or any other information about this network that would be useful to save", Directives: gti.Directives{}, Tag: ""}},
+ {"UseGPUOrder", >i.Field{Name: "UseGPUOrder", Type: "bool", LocalType: "bool", Doc: "if true, the neuron and synapse variables will be organized into a gpu-optimized memory order, otherwise cpu-optimized. This must be set before network Build() is called.", Directives: gti.Directives{}, Tag: "inactive:\"+\""}},
+ {"NetIdx", >i.Field{Name: "NetIdx", Type: "uint32", LocalType: "uint32", Doc: "network index in global Networks list of networks -- needed for GPU shader kernel compatible network variable access functions (e.g., NrnV, SynV etc) in CPU mode", Directives: gti.Directives{}, Tag: "view:\"-\""}},
+ {"MaxDelay", >i.Field{Name: "MaxDelay", Type: "uint32", LocalType: "uint32", Doc: "maximum synaptic delay across any projection in the network -- used for sizing the GBuf accumulation buffer.", Directives: gti.Directives{}, Tag: "inactive:\"+\" view:\"-\""}},
+ {"MaxData", >i.Field{Name: "MaxData", Type: "uint32", LocalType: "uint32", Doc: "maximum number of data inputs that can be processed in parallel in one pass of the network. Neuron storage is allocated to hold this amount during Build process, and this value reflects that.", Directives: gti.Directives{}, Tag: "inactive:\"+\""}},
+ {"NNeurons", >i.Field{Name: "NNeurons", Type: "uint32", LocalType: "uint32", Doc: "total number of neurons", Directives: gti.Directives{}, Tag: "inactive:\"+\""}},
+ {"NSyns", >i.Field{Name: "NSyns", Type: "uint32", LocalType: "uint32", Doc: "total number of synapses", Directives: gti.Directives{}, Tag: "inactive:\"+\""}},
+ {"Globals", >i.Field{Name: "Globals", Type: "[]float32", LocalType: "[]float32", Doc: "storage for global vars", Directives: gti.Directives{}, Tag: "view:\"-\""}},
+ {"Layers", >i.Field{Name: "Layers", Type: "[]*github.com/emer/axon/axon.Layer", LocalType: "[]*Layer", Doc: "array of layers", Directives: gti.Directives{}, Tag: ""}},
+ {"LayParams", >i.Field{Name: "LayParams", Type: "[]github.com/emer/axon/axon.LayerParams", LocalType: "[]LayerParams", Doc: "array of layer parameters, in 1-to-1 correspondence with Layers", Directives: gti.Directives{}, Tag: "view:\"-\""}},
+ {"LayVals", >i.Field{Name: "LayVals", Type: "[]github.com/emer/axon/axon.LayerVals", LocalType: "[]LayerVals", Doc: "array of layer values, with extra per data", Directives: gti.Directives{}, Tag: "view:\"-\""}},
+ {"Pools", >i.Field{Name: "Pools", Type: "[]github.com/emer/axon/axon.Pool", LocalType: "[]Pool", Doc: "array of inhibitory pools for all layers.", Directives: gti.Directives{}, Tag: "view:\"-\""}},
+ {"Neurons", >i.Field{Name: "Neurons", Type: "[]float32", LocalType: "[]float32", Doc: "entire network's allocation of neuron variables, accessed via NrnV function with flexible striding", Directives: gti.Directives{}, Tag: "view:\"-\""}},
+ {"NeuronAvgs", >i.Field{Name: "NeuronAvgs", Type: "[]float32", LocalType: "[]float32", Doc: "] entire network's allocation of neuron average avariables, accessed via NrnAvgV function with flexible striding", Directives: gti.Directives{}, Tag: "view:\"-\""}},
+ {"NeuronIxs", >i.Field{Name: "NeuronIxs", Type: "[]uint32", LocalType: "[]uint32", Doc: "entire network's allocation of neuron index variables, accessed via NrnI function with flexible striding", Directives: gti.Directives{}, Tag: "view:\"-\""}},
+ {"Prjns", >i.Field{Name: "Prjns", Type: "[]*github.com/emer/axon/axon.Prjn", LocalType: "[]*Prjn", Doc: "pointers to all projections in the network, sender-based", Directives: gti.Directives{}, Tag: "view:\"-\""}},
+ {"PrjnParams", >i.Field{Name: "PrjnParams", Type: "[]github.com/emer/axon/axon.PrjnParams", LocalType: "[]PrjnParams", Doc: "array of projection parameters, in 1-to-1 correspondence with Prjns, sender-based", Directives: gti.Directives{}, Tag: "view:\"-\""}},
+ {"SynapseIxs", >i.Field{Name: "SynapseIxs", Type: "[]uint32", LocalType: "[]uint32", Doc: "entire network's allocation of synapse idx vars, organized sender-based, with flexible striding, accessed via SynI function", Directives: gti.Directives{}, Tag: "view:\"-\""}},
+ {"Synapses", >i.Field{Name: "Synapses", Type: "[]float32", LocalType: "[]float32", Doc: "entire network's allocation of synapses, organized sender-based, with flexible striding, accessed via SynV function", Directives: gti.Directives{}, Tag: "view:\"-\""}},
+ {"SynapseCas", >i.Field{Name: "SynapseCas", Type: "[]float32", LocalType: "[]float32", Doc: "entire network's allocation of synapse Ca vars, organized sender-based, with flexible striding, accessed via SynCaV function", Directives: gti.Directives{}, Tag: "view:\"-\""}},
+ {"PrjnSendCon", >i.Field{Name: "PrjnSendCon", Type: "[]github.com/emer/axon/axon.StartN", LocalType: "[]StartN", Doc: "starting offset and N cons for each sending neuron, for indexing into the Syns synapses, which are organized sender-based.", Directives: gti.Directives{}, Tag: "view:\"-\""}},
+ {"PrjnRecvCon", >i.Field{Name: "PrjnRecvCon", Type: "[]github.com/emer/axon/axon.StartN", LocalType: "[]StartN", Doc: "starting offset and N cons for each recv neuron, for indexing into the RecvSynIdx array of indexes into the Syns synapses, which are organized sender-based.", Directives: gti.Directives{}, Tag: "view:\"-\""}},
+ {"PrjnGBuf", >i.Field{Name: "PrjnGBuf", Type: "[]int32", LocalType: "[]int32", Doc: "conductance buffer for accumulating spikes -- subslices are allocated to each projection -- uses int-encoded float values for faster GPU atomic integration", Directives: gti.Directives{}, Tag: "view:\"-\""}},
+ {"PrjnGSyns", >i.Field{Name: "PrjnGSyns", Type: "[]float32", LocalType: "[]float32", Doc: "synaptic conductance integrated over time per projection per recv neurons -- spikes come in via PrjnBuf -- subslices are allocated to each projection", Directives: gti.Directives{}, Tag: "view:\"-\""}},
+ {"RecvPrjnIdxs", >i.Field{Name: "RecvPrjnIdxs", Type: "[]uint32", LocalType: "[]uint32", Doc: "indexes into Prjns (organized by SendPrjn) organized by recv projections -- needed for iterating through recv prjns efficiently on GPU.", Directives: gti.Directives{}, Tag: "view:\"-\""}},
+ {"RecvSynIdxs", >i.Field{Name: "RecvSynIdxs", Type: "[]uint32", LocalType: "[]uint32", Doc: "indexes into Synapses for each recv neuron, organized into blocks according to PrjnRecvCon, for receiver-based access.", Directives: gti.Directives{}, Tag: "view:\"-\""}},
+ {"Exts", >i.Field{Name: "Exts", Type: "[]float32", LocalType: "[]float32", Doc: "external input values for all Input / Target / Compare layers in the network -- the ApplyExt methods write to this per layer, and it is then actually applied in one consistent method.", Directives: gti.Directives{}, Tag: ""}},
+ {"Ctx", >i.Field{Name: "Ctx", Type: "github.com/emer/axon/axon.Context", LocalType: "Context", Doc: "context used only for accessing neurons for display -- NetIdxs.NData in here is copied from active context in NewState", Directives: gti.Directives{}, Tag: "view:\"-\""}},
+ {"Rand", >i.Field{Name: "Rand", Type: "github.com/emer/emergent/v2/erand.SysRand", LocalType: "erand.SysRand", Doc: "random number generator for the network -- all random calls must use this -- set seed here for weight initialization values", Directives: gti.Directives{}, Tag: "view:\"-\""}},
+ {"RndSeed", >i.Field{Name: "RndSeed", Type: "int64", LocalType: "int64", Doc: "random seed to be set at the start of configuring the network and initializing the weights -- set this to get a different set of weights", Directives: gti.Directives{}, Tag: "inactive:\"+\""}},
+ {"NThreads", >i.Field{Name: "NThreads", Type: "int", LocalType: "int", Doc: "number of threads to use for parallel processing", Directives: gti.Directives{}, Tag: ""}},
+ {"GPU", >i.Field{Name: "GPU", Type: "github.com/emer/axon/axon.GPU", LocalType: "GPU", Doc: "GPU implementation", Directives: gti.Directives{}, Tag: "view:\"inline\""}},
+ {"RecFunTimes", >i.Field{Name: "RecFunTimes", Type: "bool", LocalType: "bool", Doc: "record function timer information", Directives: gti.Directives{}, Tag: "view:\"-\""}},
+ {"FunTimes", >i.Field{Name: "FunTimes", Type: "map[string]*github.com/emer/emergent/v2/timer.Time", LocalType: "map[string]*timer.Time", Doc: "timers for each major function (step of processing)", Directives: gti.Directives{}, Tag: "view:\"-\""}},
+ }),
+ Embeds: ordmap.Make([]ordmap.KeyVal[string, *gti.Field]{}),
+ Methods: ordmap.Make([]ordmap.KeyVal[string, *gti.Method]{}),
+})
+
+var _ = gti.AddType(>i.Type{
+ Name: "github.com/emer/axon/axon.DAModTypes",
+ ShortName: "axon.DAModTypes",
+ IDName: "da-mod-types",
+ Doc: "DAModTypes are types of dopamine modulation of neural activity.",
+ Directives: gti.Directives{
+ >i.Directive{Tool: "gosl", Directive: "start", Args: []string{"neuromod"}},
+ >i.Directive{Tool: "enums", Directive: "enum", Args: []string{}},
+ },
+
+ Methods: ordmap.Make([]ordmap.KeyVal[string, *gti.Method]{}),
+})
+
+var _ = gti.AddType(>i.Type{
+ Name: "github.com/emer/axon/axon.ValenceTypes",
+ ShortName: "axon.ValenceTypes",
+ IDName: "valence-types",
+ Doc: "ValenceTypes are types of valence coding: positive or negative.",
+ Directives: gti.Directives{
+ >i.Directive{Tool: "enums", Directive: "enum", Args: []string{}},
+ },
+
+ Methods: ordmap.Make([]ordmap.KeyVal[string, *gti.Method]{}),
+})
+
+var _ = gti.AddType(>i.Type{
+ Name: "github.com/emer/axon/axon.NeuroModParams",
+ ShortName: "axon.NeuroModParams",
+ IDName: "neuro-mod-params",
+ Doc: "NeuroModParams specifies the effects of neuromodulators on neural\nactivity and learning rate. These can apply to any neuron type,\nand are applied in the core cycle update equations.",
+ Directives: gti.Directives{},
+ Fields: ordmap.Make([]ordmap.KeyVal[string, *gti.Field]{
+ {"DAMod", >i.Field{Name: "DAMod", Type: "github.com/emer/axon/axon.DAModTypes", LocalType: "DAModTypes", Doc: "dopamine receptor-based effects of dopamine modulation on excitatory and inhibitory conductances: D1 is excitatory, D2 is inhibitory as a function of increasing dopamine", Directives: gti.Directives{}, Tag: ""}},
+ {"Valence", >i.Field{Name: "Valence", Type: "github.com/emer/axon/axon.ValenceTypes", LocalType: "ValenceTypes", Doc: "valence coding of this layer -- may affect specific layer types but does not directly affect neuromodulators currently", Directives: gti.Directives{}, Tag: ""}},
+ {"DAModGain", >i.Field{Name: "DAModGain", Type: "float32", LocalType: "float32", Doc: "multiplicative factor on overall DA modulation specified by DAMod -- resulting overall gain factor is: 1 + DAModGain * DA, where DA is appropriate DA-driven factor", Directives: gti.Directives{}, Tag: "viewif:\"DAMod!=NoDAMod\""}},
+ {"DALRateSign", >i.Field{Name: "DALRateSign", Type: "goki.dev/gosl/v2/slbool.Bool", LocalType: "slbool.Bool", Doc: "modulate the sign of the learning rate factor according to the DA sign, taking into account the DAMod sign reversal for D2Mod, also using BurstGain and DipGain to modulate DA value -- otherwise, only the magnitude of the learning rate is modulated as a function of raw DA magnitude according to DALRateMod (without additional gain factors)", Directives: gti.Directives{}, Tag: ""}},
+ {"DALRateMod", >i.Field{Name: "DALRateMod", Type: "float32", LocalType: "float32", Doc: "if not using DALRateSign, this is the proportion of maximum learning rate that Abs(DA) magnitude can modulate -- e.g., if 0.2, then DA = 0 = 80% of std learning rate, 1 = 100%", Directives: gti.Directives{}, Tag: "min:\"0\" max:\"1\" viewif:\"!DALRateSign\""}},
+ {"AChLRateMod", >i.Field{Name: "AChLRateMod", Type: "float32", LocalType: "float32", Doc: "proportion of maximum learning rate that ACh can modulate -- e.g., if 0.2, then ACh = 0 = 80% of std learning rate, 1 = 100%", Directives: gti.Directives{}, Tag: "min:\"0\" max:\"1\""}},
+ {"AChDisInhib", >i.Field{Name: "AChDisInhib", Type: "float32", LocalType: "float32", Doc: "amount of extra Gi inhibition added in proportion to 1 - ACh level -- makes ACh disinhibitory", Directives: gti.Directives{}, Tag: "min:\"0\" def:\"0,5\""}},
+ {"BurstGain", >i.Field{Name: "BurstGain", Type: "float32", LocalType: "float32", Doc: "multiplicative gain factor applied to positive dopamine signals -- this operates on the raw dopamine signal prior to any effect of D2 receptors in reversing its sign!", Directives: gti.Directives{}, Tag: "min:\"0\" def:\"1\""}},
+ {"DipGain", >i.Field{Name: "DipGain", Type: "float32", LocalType: "float32", Doc: "multiplicative gain factor applied to negative dopamine signals -- this operates on the raw dopamine signal prior to any effect of D2 receptors in reversing its sign! should be small for acq, but roughly equal to burst for ext", Directives: gti.Directives{}, Tag: "min:\"0\" def:\"1\""}},
+ {"pad", >i.Field{Name: "pad", Type: "float32", LocalType: "float32", Doc: "", Directives: gti.Directives{}, Tag: ""}},
+ }),
+ Embeds: ordmap.Make([]ordmap.KeyVal[string, *gti.Field]{}),
+ Methods: ordmap.Make([]ordmap.KeyVal[string, *gti.Method]{}),
+})
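+
+// NOTE: illustrative sketch, not generated code. Per the docs above, the
+// conductance gain from DAMod is:
+//
+//	gain := 1 + DAModGain*da // da pre-scaled by BurstGain (da > 0) or DipGain (da < 0)
+//
+// and when DALRateSign is off, DA modulates only learning-rate magnitude,
+// e.g., DALRateMod = 0.2 gives 80% of the standard rate at da = 0:
+//
+//	lrMod := (1 - DALRateMod) + DALRateMod*abs(da)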
+
+var _ = gti.AddType(>i.Type{
+ Name: "github.com/emer/axon/axon.NeuronFlags",
+ ShortName: "axon.NeuronFlags",
+ IDName: "neuron-flags",
+ Doc: "NeuronFlags are bit-flags encoding relevant binary state for neurons",
+ Directives: gti.Directives{
+ >i.Directive{Tool: "gosl", Directive: "start", Args: []string{"neuron"}},
+ >i.Directive{Tool: "enums", Directive: "enum", Args: []string{}},
+ },
+
+ Methods: ordmap.Make([]ordmap.KeyVal[string, *gti.Method]{}),
+})
+
+var _ = gti.AddType(&gti.Type{
+ Name: "github.com/emer/axon/axon.NeuronVars",
+ ShortName: "axon.NeuronVars",
+ IDName: "neuron-vars",
+ Doc: "NeuronVars are the neuron variables representing current active state,\nspecific to each input data state.\nSee NeuronAvgVars for vars shared across data.",
+ Directives: gti.Directives{
+ >i.Directive{Tool: "enums", Directive: "enum", Args: []string{}},
+ },
+
+ Methods: ordmap.Make([]ordmap.KeyVal[string, *gti.Method]{}),
+})
+
+var _ = gti.AddType(&gti.Type{
+ Name: "github.com/emer/axon/axon.NeuronAvgVars",
+ ShortName: "axon.NeuronAvgVars",
+ IDName: "neuron-avg-vars",
+ Doc: "NeuronAvgVars are mostly neuron variables involved in longer-term average activity\nwhich is aggregated over time and not specific to each input data state,\nalong with any other state that is not input data specific.",
+ Directives: gti.Directives{
+ >i.Directive{Tool: "enums", Directive: "enum", Args: []string{}},
+ },
+
+ Methods: ordmap.Make([]ordmap.KeyVal[string, *gti.Method]{}),
+})
+
+var _ = gti.AddType(&gti.Type{
+ Name: "github.com/emer/axon/axon.NeuronIdxs",
+ ShortName: "axon.NeuronIdxs",
+ IDName: "neuron-idxs",
+ Doc: "NeuronIdxs are the neuron indexes and other uint32 values.\nThere is only one of these per neuron -- not data parallel.\nnote: Flags are encoded in Vars because they are data parallel and\nwritable, whereas indexes are read-only.",
+ Directives: gti.Directives{
+ >i.Directive{Tool: "enums", Directive: "enum", Args: []string{}},
+ },
+
+ Methods: ordmap.Make([]ordmap.KeyVal[string, *gti.Method]{}),
+})
+
+var _ = gti.AddType(&gti.Type{
+ Name: "github.com/emer/axon/axon.NeuronVarStrides",
+ ShortName: "axon.NeuronVarStrides",
+ IDName: "neuron-var-strides",
+ Doc: "NeuronVarStrides encodes the stride offsets for neuron variable access\ninto network float32 array. Data is always the inner-most variable.",
+ Directives: gti.Directives{
+ >i.Directive{Tool: "gosl", Directive: "end", Args: []string{"neuron"}},
+ >i.Directive{Tool: "gosl", Directive: "hlsl", Args: []string{"neuron"}},
+ >i.Directive{Tool: "gosl", Directive: "end", Args: []string{"neuron"}},
+ >i.Directive{Tool: "gosl", Directive: "start", Args: []string{"neuron"}},
+ },
+ Fields: ordmap.Make([]ordmap.KeyVal[string, *gti.Field]{
+ {"Neuron", >i.Field{Name: "Neuron", Type: "uint32", LocalType: "uint32", Doc: "neuron level", Directives: gti.Directives{}, Tag: ""}},
+ {"Var", >i.Field{Name: "Var", Type: "uint32", LocalType: "uint32", Doc: "variable level", Directives: gti.Directives{}, Tag: ""}},
+ {"pad", >i.Field{Name: "pad", Type: "uint32", LocalType: "uint32", Doc: "", Directives: gti.Directives{}, Tag: ""}},
+ }),
+ Embeds: ordmap.Make([]ordmap.KeyVal[string, *gti.Field]{}),
+ Methods: ordmap.Make([]ordmap.KeyVal[string, *gti.Method]{}),
+})
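+
+// Editor's sketch (hypothetical helper, not generator output): assuming the
+// usual multiply-add stride indexing, with Data as the inner-most dimension
+// (stride 1) per the Doc above, a flat index into the network float32 array
+// would be computed roughly as follows.
+func exampleNeuronVarIdx(neuronStride, varStride, ni, vi, di uint32) uint32 {
+	return ni*neuronStride + vi*varStride + di // data index di is inner-most
+}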
+
+var _ = gti.AddType(&gti.Type{
+ Name: "github.com/emer/axon/axon.NeuronAvgVarStrides",
+ ShortName: "axon.NeuronAvgVarStrides",
+ IDName: "neuron-avg-var-strides",
+ Doc: "NeuronAvgVarStrides encodes the stride offsets for neuron variable access\ninto network float32 array. Data is always the inner-most variable.",
+ Directives: gti.Directives{},
+ Fields: ordmap.Make([]ordmap.KeyVal[string, *gti.Field]{
+ {"Neuron", >i.Field{Name: "Neuron", Type: "uint32", LocalType: "uint32", Doc: "neuron level", Directives: gti.Directives{}, Tag: ""}},
+ {"Var", >i.Field{Name: "Var", Type: "uint32", LocalType: "uint32", Doc: "variable level", Directives: gti.Directives{}, Tag: ""}},
+ {"pad", >i.Field{Name: "pad", Type: "uint32", LocalType: "uint32", Doc: "", Directives: gti.Directives{}, Tag: ""}},
+ }),
+ Embeds: ordmap.Make([]ordmap.KeyVal[string, *gti.Field]{}),
+ Methods: ordmap.Make([]ordmap.KeyVal[string, *gti.Method]{}),
+})
+
+var _ = gti.AddType(&gti.Type{
+ Name: "github.com/emer/axon/axon.NeuronIdxStrides",
+ ShortName: "axon.NeuronIdxStrides",
+ IDName: "neuron-idx-strides",
+ Doc: "NeuronIdxStrides encodes the stride offsets for neuron index access\ninto network uint32 array.",
+ Directives: gti.Directives{},
+ Fields: ordmap.Make([]ordmap.KeyVal[string, *gti.Field]{
+ {"Neuron", >i.Field{Name: "Neuron", Type: "uint32", LocalType: "uint32", Doc: "neuron level", Directives: gti.Directives{}, Tag: ""}},
+ {"Index", >i.Field{Name: "Index", Type: "uint32", LocalType: "uint32", Doc: "index value level", Directives: gti.Directives{}, Tag: ""}},
+ {"pad", >i.Field{Name: "pad", Type: "uint32", LocalType: "uint32", Doc: "", Directives: gti.Directives{}, Tag: ""}},
+ }),
+ Embeds: ordmap.Make([]ordmap.KeyVal[string, *gti.Field]{}),
+ Methods: ordmap.Make([]ordmap.KeyVal[string, *gti.Method]{}),
+})
+
+var _ = gti.AddType(&gti.Type{
+ Name: "github.com/emer/axon/axon.MatrixParams",
+ ShortName: "axon.MatrixParams",
+ IDName: "matrix-params",
+ Doc: "MatrixParams has parameters for BG Striatum Matrix MSN layers\nThese are the main Go / NoGo gating units in BG.\nDA, ACh learning rate modulation is pre-computed on the recv neuron\nRLRate variable via NeuroMod. Also uses Pool.Gated for InvertNoGate,\nupdated in PlusPhase prior to DWt call.\nMust set Learn.NeuroMod.DAMod = D1Mod or D2Mod via SetBuildConfig(\"DAMod\").",
+ Directives: gti.Directives{
+ >i.Directive{Tool: "gosl", Directive: "start", Args: []string{"pcore_layers"}},
+ },
+ Fields: ordmap.Make([]ordmap.KeyVal[string, *gti.Field]{
+ {"GateThr", >i.Field{Name: "GateThr", Type: "float32", LocalType: "float32", Doc: "threshold on layer Avg SpkMax for Matrix Go and VThal layers to count as having gated", Directives: gti.Directives{}, Tag: "def:\"0.05\""}},
+ {"IsVS", >i.Field{Name: "IsVS", Type: "goki.dev/gosl/v2/slbool.Bool", LocalType: "slbool.Bool", Doc: "is this a ventral striatum (VS) matrix layer? if true, the gating status of this layer is recorded in the Global state, and used for updating effort and other factors.", Directives: gti.Directives{}, Tag: ""}},
+ {"OtherMatrixIdx", >i.Field{Name: "OtherMatrixIdx", Type: "int32", LocalType: "int32", Doc: "index of other matrix (Go if we are NoGo and vice-versa). Set during Build from BuildConfig OtherMatrixName", Directives: gti.Directives{}, Tag: "inactive:\"+\""}},
+ {"ThalLay1Idx", >i.Field{Name: "ThalLay1Idx", Type: "int32", LocalType: "int32", Doc: "index of thalamus layer that we gate. needed to get gating information. Set during Build from BuildConfig ThalLay1Name if present -- -1 if not used", Directives: gti.Directives{}, Tag: "inactive:\"+\""}},
+ {"ThalLay2Idx", >i.Field{Name: "ThalLay2Idx", Type: "int32", LocalType: "int32", Doc: "index of thalamus layer that we gate. needed to get gating information. Set during Build from BuildConfig ThalLay2Name if present -- -1 if not used", Directives: gti.Directives{}, Tag: "inactive:\"+\""}},
+ {"ThalLay3Idx", >i.Field{Name: "ThalLay3Idx", Type: "int32", LocalType: "int32", Doc: "index of thalamus layer that we gate. needed to get gating information. Set during Build from BuildConfig ThalLay3Name if present -- -1 if not used", Directives: gti.Directives{}, Tag: "inactive:\"+\""}},
+ {"ThalLay4Idx", >i.Field{Name: "ThalLay4Idx", Type: "int32", LocalType: "int32", Doc: "index of thalamus layer that we gate. needed to get gating information. Set during Build from BuildConfig ThalLay4Name if present -- -1 if not used", Directives: gti.Directives{}, Tag: "inactive:\"+\""}},
+ {"ThalLay5Idx", >i.Field{Name: "ThalLay5Idx", Type: "int32", LocalType: "int32", Doc: "index of thalamus layer that we gate. needed to get gating information. Set during Build from BuildConfig ThalLay5Name if present -- -1 if not used", Directives: gti.Directives{}, Tag: "inactive:\"+\""}},
+ {"ThalLay6Idx", >i.Field{Name: "ThalLay6Idx", Type: "int32", LocalType: "int32", Doc: "index of thalamus layer that we gate. needed to get gating information. Set during Build from BuildConfig ThalLay6Name if present -- -1 if not used", Directives: gti.Directives{}, Tag: "inactive:\"+\""}},
+ {"pad", >i.Field{Name: "pad", Type: "int32", LocalType: "int32", Doc: "", Directives: gti.Directives{}, Tag: ""}},
+ }),
+ Embeds: ordmap.Make([]ordmap.KeyVal[string, *gti.Field]{}),
+ Methods: ordmap.Make([]ordmap.KeyVal[string, *gti.Method]{}),
+})
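+
+// Editor's sketch (hypothetical helper, not generator output): per the
+// GateThr Doc above, a Matrix / VThal pool counts as having gated when its
+// average SpkMax exceeds the threshold (default 0.05).
+func exampleHasGated(avgSpkMax, gateThr float32) bool {
+	return avgSpkMax > gateThr
+}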
+
+var _ = gti.AddType(&gti.Type{
+ Name: "github.com/emer/axon/axon.GPLayerTypes",
+ ShortName: "axon.GPLayerTypes",
+ IDName: "gp-layer-types",
+ Doc: "GPLayerTypes is a GPLayer axon-specific layer type enum.",
+ Directives: gti.Directives{
+ >i.Directive{Tool: "enums", Directive: "enum", Args: []string{}},
+ },
+
+ Methods: ordmap.Make([]ordmap.KeyVal[string, *gti.Method]{}),
+})
+
+var _ = gti.AddType(&gti.Type{
+ Name: "github.com/emer/axon/axon.GPParams",
+ ShortName: "axon.GPParams",
+ IDName: "gp-params",
+ Doc: "GPLayer represents a globus pallidus layer, including:\nGPeOut, GPeIn, GPeTA (arkypallidal), and GPi (see GPType for type).\nTypically just a single unit per Pool representing a given stripe.",
+ Directives: gti.Directives{},
+ Fields: ordmap.Make([]ordmap.KeyVal[string, *gti.Field]{
+ {"GPType", >i.Field{Name: "GPType", Type: "github.com/emer/axon/axon.GPLayerTypes", LocalType: "GPLayerTypes", Doc: "type of GP Layer -- must set during config using SetBuildConfig of GPType.", Directives: gti.Directives{}, Tag: "viewif:\"LayType=GPLayer\" view:\"inline\""}},
+ {"pad", >i.Field{Name: "pad", Type: "uint32", LocalType: "uint32", Doc: "", Directives: gti.Directives{}, Tag: ""}},
+ }),
+ Embeds: ordmap.Make([]ordmap.KeyVal[string, *gti.Field]{}),
+ Methods: ordmap.Make([]ordmap.KeyVal[string, *gti.Method]{}),
+})
+
+var _ = gti.AddType(&gti.Type{
+ Name: "github.com/emer/axon/axon.MatrixPrjnParams",
+ ShortName: "axon.MatrixPrjnParams",
+ IDName: "matrix-prjn-params",
+ Doc: "MatrixPrjnParams for trace-based learning in the MatrixPrjn.\nA trace of synaptic co-activity is formed, and then modulated by dopamine\nwhenever it occurs. This bridges the temporal gap between gating activity\nand subsequent activity, and is based biologically on synaptic tags.\nTrace is applied to DWt and reset at the time of reward.",
+ Directives: gti.Directives{
+ >i.Directive{Tool: "gosl", Directive: "start", Args: []string{"pcore_prjns"}},
+ },
+ Fields: ordmap.Make([]ordmap.KeyVal[string, *gti.Field]{
+ {"NoGateLRate", >i.Field{Name: "NoGateLRate", Type: "float32", LocalType: "float32", Doc: "learning rate for when ACh was elevated but no gating took place, in proportion to the level of ACh that indicates the salience of the event. A low level of this learning prevents the highly maladaptive situation where the BG is not gating and thus no learning can occur.", Directives: gti.Directives{}, Tag: "def:\"1\""}},
+ {"pad", >i.Field{Name: "pad", Type: "float32", LocalType: "float32", Doc: "", Directives: gti.Directives{}, Tag: ""}},
+ }),
+ Embeds: ordmap.Make([]ordmap.KeyVal[string, *gti.Field]{}),
+ Methods: ordmap.Make([]ordmap.KeyVal[string, *gti.Method]{}),
+})
+
+var _ = gti.AddType(&gti.Type{
+ Name: "github.com/emer/axon/axon.AvgMaxPhases",
+ ShortName: "axon.AvgMaxPhases",
+ IDName: "avg-max-phases",
+ Doc: "AvgMaxPhases contains the average and maximum values over a Pool of neurons,\nat different time scales within a standard ThetaCycle of updating.\nIt is much more efficient on the GPU to just grab everything in one pass at\nthe cycle level, and then take snapshots from there.\nAll of the cycle level values are updated at the *start* of the cycle\nbased on values from the prior cycle -- thus are 1 cycle behind in general.",
+ Directives: gti.Directives{
+ >i.Directive{Tool: "gosl", Directive: "hlsl", Args: []string{"pool"}},
+ >i.Directive{Tool: "gosl", Directive: "end", Args: []string{"pool"}},
+ >i.Directive{Tool: "gosl", Directive: "start", Args: []string{"pool"}},
+ },
+ Fields: ordmap.Make([]ordmap.KeyVal[string, *gti.Field]{
+ {"Cycle", >i.Field{Name: "Cycle", Type: "github.com/emer/axon/axon.AvgMaxI32", LocalType: "AvgMaxI32", Doc: "updated every cycle -- this is the source of all subsequent time scales", Directives: gti.Directives{}, Tag: "view:\"inline\""}},
+ {"Minus", >i.Field{Name: "Minus", Type: "github.com/emer/axon/axon.AvgMaxI32", LocalType: "AvgMaxI32", Doc: "at the end of the minus phase", Directives: gti.Directives{}, Tag: "view:\"inline\""}},
+ {"Plus", >i.Field{Name: "Plus", Type: "github.com/emer/axon/axon.AvgMaxI32", LocalType: "AvgMaxI32", Doc: "at the end of the plus phase", Directives: gti.Directives{}, Tag: "view:\"inline\""}},
+ {"Prev", >i.Field{Name: "Prev", Type: "github.com/emer/axon/axon.AvgMaxI32", LocalType: "AvgMaxI32", Doc: "at the end of the previous plus phase", Directives: gti.Directives{}, Tag: "view:\"inline\""}},
+ }),
+ Embeds: ordmap.Make([]ordmap.KeyVal[string, *gti.Field]{}),
+ Methods: ordmap.Make([]ordmap.KeyVal[string, *gti.Method]{}),
+})
+
+var _ = gti.AddType(&gti.Type{
+ Name: "github.com/emer/axon/axon.PoolAvgMax",
+ ShortName: "axon.PoolAvgMax",
+ IDName: "pool-avg-max",
+ Doc: "PoolAvgMax contains the average and maximum values over a Pool of neurons\nfor different variables of interest, at Cycle, Minus and Plus phase timescales.\nAll of the cycle level values are updated at the *start* of the cycle\nbased on values from the prior cycle -- thus are 1 cycle behind in general.",
+ Directives: gti.Directives{},
+ Fields: ordmap.Make([]ordmap.KeyVal[string, *gti.Field]{
+ {"CaSpkP", >i.Field{Name: "CaSpkP", Type: "github.com/emer/axon/axon.AvgMaxPhases", LocalType: "AvgMaxPhases", Doc: "avg and maximum CaSpkP (continuously updated at roughly 40 msec integration window timescale, ends up capturing potentiation, plus-phase signal) -- this is the primary variable to use for tracking overall pool activity", Directives: gti.Directives{}, Tag: "inactive:\"+\" view:\"inline\""}},
+ {"CaSpkD", >i.Field{Name: "CaSpkD", Type: "github.com/emer/axon/axon.AvgMaxPhases", LocalType: "AvgMaxPhases", Doc: "avg and maximum CaSpkD longer-term depression / DAPK1 signal in layer", Directives: gti.Directives{}, Tag: "inactive:\"+\" view:\"inline\""}},
+ {"SpkMax", >i.Field{Name: "SpkMax", Type: "github.com/emer/axon/axon.AvgMaxPhases", LocalType: "AvgMaxPhases", Doc: "avg and maximum SpkMax value (based on CaSpkP) -- reflects peak activity at any point across the cycle", Directives: gti.Directives{}, Tag: "inactive:\"+\" view:\"inline\""}},
+ {"Act", >i.Field{Name: "Act", Type: "github.com/emer/axon/axon.AvgMaxPhases", LocalType: "AvgMaxPhases", Doc: "avg and maximum Act firing rate value", Directives: gti.Directives{}, Tag: "inactive:\"+\" view:\"inline\""}},
+ {"GeInt", >i.Field{Name: "GeInt", Type: "github.com/emer/axon/axon.AvgMaxPhases", LocalType: "AvgMaxPhases", Doc: "avg and maximum GeInt integrated running-average excitatory conductance value", Directives: gti.Directives{}, Tag: "inactive:\"+\" view:\"inline\""}},
+ {"GiInt", >i.Field{Name: "GiInt", Type: "github.com/emer/axon/axon.AvgMaxPhases", LocalType: "AvgMaxPhases", Doc: "avg and maximum GiInt integrated running-average inhibitory conductance value", Directives: gti.Directives{}, Tag: "inactive:\"+\" view:\"inline\""}},
+ }),
+ Embeds: ordmap.Make([]ordmap.KeyVal[string, *gti.Field]{}),
+ Methods: ordmap.Make([]ordmap.KeyVal[string, *gti.Method]{}),
+})
+
+var _ = gti.AddType(&gti.Type{
+ Name: "github.com/emer/axon/axon.Pool",
+ ShortName: "axon.Pool",
+ IDName: "pool",
+ Doc: "Pool contains computed values for FS-FFFB inhibition,\nand various other state values for layers\nand pools (unit groups) that can be subject to inhibition",
+ Directives: gti.Directives{
+ >i.Directive{Tool: "gosl", Directive: "end", Args: []string{"pool"}},
+ >i.Directive{Tool: "gosl", Directive: "hlsl", Args: []string{"pool"}},
+ >i.Directive{Tool: "gosl", Directive: "end", Args: []string{"pool"}},
+ >i.Directive{Tool: "gosl", Directive: "start", Args: []string{"pool"}},
+ },
+ Fields: ordmap.Make([]ordmap.KeyVal[string, *gti.Field]{
+ {"StIdx", >i.Field{Name: "StIdx", Type: "uint32", LocalType: "uint32", Doc: "starting and ending (exlusive) layer-wise indexes for the list of neurons in this pool", Directives: gti.Directives{}, Tag: "inactive:\"+\""}},
+ {"LayIdx", >i.Field{Name: "LayIdx", Type: "uint32", LocalType: "uint32", Doc: "layer index in global layer list", Directives: gti.Directives{}, Tag: "view:\"-\""}},
+ {"DataIdx", >i.Field{Name: "DataIdx", Type: "uint32", LocalType: "uint32", Doc: "data parallel index (innermost index per layer)", Directives: gti.Directives{}, Tag: "view:\"-\""}},
+ {"PoolIdx", >i.Field{Name: "PoolIdx", Type: "uint32", LocalType: "uint32", Doc: "pool index in global pool list:", Directives: gti.Directives{}, Tag: "view:\"-\""}},
+ {"IsLayPool", >i.Field{Name: "IsLayPool", Type: "goki.dev/gosl/v2/slbool.Bool", LocalType: "slbool.Bool", Doc: "is this a layer-wide pool? if not, it represents a sub-pool of units within a 4D layer", Directives: gti.Directives{}, Tag: "inactive:\"+\""}},
+ {"Gated", >i.Field{Name: "Gated", Type: "goki.dev/gosl/v2/slbool.Bool", LocalType: "slbool.Bool", Doc: "for special types where relevant (e.g., MatrixLayer, BGThalLayer), indicates if the pool was gated", Directives: gti.Directives{}, Tag: "inactive:\"+\""}},
+ {"pad", >i.Field{Name: "pad", Type: "uint32", LocalType: "uint32", Doc: "", Directives: gti.Directives{}, Tag: ""}},
+ {"Inhib", >i.Field{Name: "Inhib", Type: "github.com/emer/axon/fsfffb.Inhib", LocalType: "fsfffb.Inhib", Doc: "fast-slow FFFB inhibition values", Directives: gti.Directives{}, Tag: "inactive:\"+\""}},
+ {"AvgMax", >i.Field{Name: "AvgMax", Type: "github.com/emer/axon/axon.PoolAvgMax", LocalType: "PoolAvgMax", Doc: "average and max values for relevant variables in this pool, at different time scales", Directives: gti.Directives{}, Tag: ""}},
+ {"AvgDif", >i.Field{Name: "AvgDif", Type: "github.com/emer/axon/axon.AvgMaxI32", LocalType: "AvgMaxI32", Doc: "absolute value of AvgDif differences from actual neuron ActPct relative to TrgAvg", Directives: gti.Directives{}, Tag: "inactive:\"+\" view:\"inline\""}},
+ }),
+ Embeds: ordmap.Make([]ordmap.KeyVal[string, *gti.Field]{}),
+ Methods: ordmap.Make([]ordmap.KeyVal[string, *gti.Method]{}),
+})
+
+var _ = gti.AddType(&gti.Type{
+ Name: "github.com/emer/axon/axon.Prjn",
+ ShortName: "axon.Prjn",
+ IDName: "prjn",
+ Doc: "axon.Prjn is a basic Axon projection with synaptic learning parameters",
+ Directives: gti.Directives{},
+ Fields: ordmap.Make([]ordmap.KeyVal[string, *gti.Field]{
+ {"Params", >i.Field{Name: "Params", Type: "*github.com/emer/axon/axon.PrjnParams", LocalType: "*PrjnParams", Doc: "all prjn-level parameters -- these must remain constant once configured", Directives: gti.Directives{}, Tag: ""}},
+ }),
+ Embeds: ordmap.Make([]ordmap.KeyVal[string, *gti.Field]{
+ {"PrjnBase", >i.Field{Name: "PrjnBase", Type: "github.com/emer/axon/axon.PrjnBase", LocalType: "PrjnBase", Doc: "", Directives: gti.Directives{}, Tag: ""}},
+ }),
+ Methods: ordmap.Make([]ordmap.KeyVal[string, *gti.Method]{}),
+})
+
+var _ = gti.AddType(&gti.Type{
+ Name: "github.com/emer/axon/axon.PrjnBase",
+ ShortName: "axon.PrjnBase",
+ IDName: "prjn-base",
+ Doc: "PrjnBase contains the basic structural information for specifying a projection of synaptic\nconnections between two layers, and maintaining all the synaptic connection-level data.\nThe same struct token is added to the Recv and Send layer prjn lists, and it manages everything\nabout the connectivity, and methods on the Prjn handle all the relevant computation.\nThe Base does not have algorithm-specific methods and parameters, so it can be easily\nreused for different algorithms, and cleanly separates the algorithm-specific code.\nAny dependency on the algorithm-level Prjn can be captured in the AxonPrjn interface,\naccessed via the AxonPrj field.",
+ Directives: gti.Directives{},
+ Fields: ordmap.Make([]ordmap.KeyVal[string, *gti.Field]{
+ {"AxonPrj", >i.Field{Name: "AxonPrj", Type: "github.com/emer/axon/axon.AxonPrjn", LocalType: "AxonPrjn", Doc: "we need a pointer to ourselves as an AxonPrjn, which can always be used to extract the true underlying type of object when prjn is embedded in other structs -- function receivers do not have this ability so this is necessary.", Directives: gti.Directives{}, Tag: "copy:\"-\" json:\"-\" xml:\"-\" view:\"-\""}},
+ {"Off", >i.Field{Name: "Off", Type: "bool", LocalType: "bool", Doc: "inactivate this projection -- allows for easy experimentation", Directives: gti.Directives{}, Tag: ""}},
+ {"Cls", >i.Field{Name: "Cls", Type: "string", LocalType: "string", Doc: "Class is for applying parameter styles, can be space separated multple tags", Directives: gti.Directives{}, Tag: ""}},
+ {"Notes", >i.Field{Name: "Notes", Type: "string", LocalType: "string", Doc: "can record notes about this projection here", Directives: gti.Directives{}, Tag: ""}},
+ {"Send", >i.Field{Name: "Send", Type: "*github.com/emer/axon/axon.Layer", LocalType: "*Layer", Doc: "sending layer for this projection", Directives: gti.Directives{}, Tag: ""}},
+ {"Recv", >i.Field{Name: "Recv", Type: "*github.com/emer/axon/axon.Layer", LocalType: "*Layer", Doc: "receiving layer for this projection", Directives: gti.Directives{}, Tag: ""}},
+ {"Pat", >i.Field{Name: "Pat", Type: "github.com/emer/emergent/v2/prjn.Pattern", LocalType: "prjn.Pattern", Doc: "pattern of connectivity", Directives: gti.Directives{}, Tag: "tableview:\"-\""}},
+ {"Typ", >i.Field{Name: "Typ", Type: "github.com/emer/axon/axon.PrjnTypes", LocalType: "PrjnTypes", Doc: "type of projection -- Forward, Back, Lateral, or extended type in specialized algorithms -- matches against .Cls parameter styles (e.g., .Back etc)", Directives: gti.Directives{}, Tag: ""}},
+ {"DefParams", >i.Field{Name: "DefParams", Type: "github.com/emer/emergent/v2/params.Params", LocalType: "params.Params", Doc: "default parameters that are applied prior to user-set parameters -- these are useful for specific functionality in specialized brain areas (e.g., PVLV, BG etc) not associated with a prjn type, which otherwise is used to hard-code initial default parameters -- typically just set to a literal map.", Directives: gti.Directives{}, Tag: "tableview:\"-\""}},
+ {"ParamsHistory", >i.Field{Name: "ParamsHistory", Type: "github.com/emer/emergent/v2/params.HistoryImpl", LocalType: "params.HistoryImpl", Doc: "provides a history of parameters applied to the layer", Directives: gti.Directives{}, Tag: "tableview:\"-\""}},
+ {"RecvConNAvgMax", >i.Field{Name: "RecvConNAvgMax", Type: "goki.dev/etable/v2/minmax.AvgMax32", LocalType: "minmax.AvgMax32", Doc: "average and maximum number of recv connections in the receiving layer", Directives: gti.Directives{}, Tag: "tableview:\"-\" inactive:\"+\" view:\"inline\""}},
+ {"SendConNAvgMax", >i.Field{Name: "SendConNAvgMax", Type: "goki.dev/etable/v2/minmax.AvgMax32", LocalType: "minmax.AvgMax32", Doc: "average and maximum number of sending connections in the sending layer", Directives: gti.Directives{}, Tag: "tableview:\"-\" inactive:\"+\" view:\"inline\""}},
+ {"SynStIdx", >i.Field{Name: "SynStIdx", Type: "uint32", LocalType: "uint32", Doc: "start index into global Synapse array:", Directives: gti.Directives{}, Tag: "view:\"-\""}},
+ {"NSyns", >i.Field{Name: "NSyns", Type: "uint32", LocalType: "uint32", Doc: "number of synapses in this projection", Directives: gti.Directives{}, Tag: "view:\"-\""}},
+ {"RecvCon", >i.Field{Name: "RecvCon", Type: "[]github.com/emer/axon/axon.StartN", LocalType: "[]StartN", Doc: "starting offset and N cons for each recv neuron, for indexing into the RecvSynIdx array of indexes into the Syns synapses, which are organized sender-based. This is locally-managed during build process, but also copied to network global PrjnRecvCons slice for GPU usage.", Directives: gti.Directives{}, Tag: "view:\"-\""}},
+ {"RecvSynIdx", >i.Field{Name: "RecvSynIdx", Type: "[]uint32", LocalType: "[]uint32", Doc: "index into Syns synaptic state for each sending unit and connection within that, for the sending projection which does not own the synapses, and instead indexes into recv-ordered list", Directives: gti.Directives{}, Tag: "view:\"-\""}},
+ {"RecvConIdx", >i.Field{Name: "RecvConIdx", Type: "[]uint32", LocalType: "[]uint32", Doc: "for each recv synapse, this is index of *sending* neuron It is generally preferable to use the Synapse SendIdx where needed, instead of this slice, because then the memory access will be close by other values on the synapse.", Directives: gti.Directives{}, Tag: "view:\"-\""}},
+ {"SendCon", >i.Field{Name: "SendCon", Type: "[]github.com/emer/axon/axon.StartN", LocalType: "[]StartN", Doc: "starting offset and N cons for each sending neuron, for indexing into the Syns synapses, which are organized sender-based. This is locally-managed during build process, but also copied to network global PrjnSendCons slice for GPU usage.", Directives: gti.Directives{}, Tag: "view:\"-\""}},
+ {"SendConIdx", >i.Field{Name: "SendConIdx", Type: "[]uint32", LocalType: "[]uint32", Doc: "index of other neuron that receives the sender's synaptic input, ordered by the sending layer's order of units as the outer loop, and SendCon.N receiving units within that. It is generally preferable to use the Synapse RecvIdx where needed, instead of this slice, because then the memory access will be close by other values on the synapse.", Directives: gti.Directives{}, Tag: "view:\"-\""}},
+ {"GBuf", >i.Field{Name: "GBuf", Type: "[]int32", LocalType: "[]int32", Doc: "Ge or Gi conductance ring buffer for each neuron, accessed through Params.Com.ReadIdx, WriteIdx -- scale * weight is added with Com delay offset -- a subslice from network PrjnGBuf. Uses int-encoded float values for faster GPU atomic integration", Directives: gti.Directives{}, Tag: "view:\"-\""}},
+ {"GSyns", >i.Field{Name: "GSyns", Type: "[]float32", LocalType: "[]float32", Doc: "projection-level synaptic conductance values, integrated by prjn before being integrated at the neuron level, which enables the neuron to perform non-linear integration as needed -- a subslice from network PrjnGSyn.", Directives: gti.Directives{}, Tag: "view:\"-\""}},
+ }),
+ Embeds: ordmap.Make([]ordmap.KeyVal[string, *gti.Field]{}),
+ Methods: ordmap.Make([]ordmap.KeyVal[string, *gti.Method]{}),
+})
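+
+// Editor's sketch (hypothetical helper, not generator output): GBuf above is
+// described as a delay-offset conductance ring buffer accessed via Com-based
+// read / write indexes. A minimal ring of length delay+1 would advance as
+// below; the actual axon Com.ReadIdx / WriteIdx logic may differ in detail.
+func exampleRingIdxs(cycle, delay uint32) (readIdx, writeIdx uint32) {
+	bufLen := delay + 1
+	readIdx = cycle % bufLen            // slot deposited delay cycles ago
+	writeIdx = (cycle + delay) % bufLen // slot read delay cycles from now
+	return
+}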
+
+var _ = gti.AddType(&gti.Type{
+ Name: "github.com/emer/axon/axon.StartN",
+ ShortName: "axon.StartN",
+ IDName: "start-n",
+ Doc: "StartN holds a starting offset index and a number of items\narranged from Start to Start+N (exclusive).\nThis is not 16 byte padded and only for use on CPU side.",
+ Directives: gti.Directives{
+ >i.Directive{Tool: "gosl", Directive: "hlsl", Args: []string{"prjnparams"}},
+ >i.Directive{Tool: "gosl", Directive: "end", Args: []string{"prjnparams"}},
+ >i.Directive{Tool: "gosl", Directive: "start", Args: []string{"prjnparams"}},
+ },
+ Fields: ordmap.Make([]ordmap.KeyVal[string, *gti.Field]{
+ {"Start", >i.Field{Name: "Start", Type: "uint32", LocalType: "uint32", Doc: "starting offset", Directives: gti.Directives{}, Tag: ""}},
+ {"N", >i.Field{Name: "N", Type: "uint32", LocalType: "uint32", Doc: "number of items --", Directives: gti.Directives{}, Tag: ""}},
+ {"pad", >i.Field{Name: "pad", Type: "uint32", LocalType: "uint32", Doc: "", Directives: gti.Directives{}, Tag: ""}},
+ }),
+ Embeds: ordmap.Make([]ordmap.KeyVal[string, *gti.Field]{}),
+ Methods: ordmap.Make([]ordmap.KeyVal[string, *gti.Method]{}),
+})
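+
+// Editor's sketch (hypothetical helper, not generator output): StartN spans
+// are iterated from Start to Start+N (exclusive), per the Doc above, e.g.,
+// to visit the synapses owned by one sending neuron.
+func exampleVisitStartN(sn StartN, visit func(i uint32)) {
+	for i := sn.Start; i < sn.Start+sn.N; i++ {
+		visit(i)
+	}
+}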
+
+var _ = gti.AddType(&gti.Type{
+ Name: "github.com/emer/axon/axon.PrjnIdxs",
+ ShortName: "axon.PrjnIdxs",
+ IDName: "prjn-idxs",
+ Doc: "PrjnIdxs contains prjn-level index information into global memory arrays",
+ Directives: gti.Directives{},
+ Fields: ordmap.Make([]ordmap.KeyVal[string, *gti.Field]{
+ {"PrjnIdx", >i.Field{Name: "PrjnIdx", Type: "uint32", LocalType: "uint32", Doc: "", Directives: gti.Directives{}, Tag: ""}},
+ {"RecvLay", >i.Field{Name: "RecvLay", Type: "uint32", LocalType: "uint32", Doc: "", Directives: gti.Directives{}, Tag: ""}},
+ {"RecvNeurSt", >i.Field{Name: "RecvNeurSt", Type: "uint32", LocalType: "uint32", Doc: "", Directives: gti.Directives{}, Tag: ""}},
+ {"RecvNeurN", >i.Field{Name: "RecvNeurN", Type: "uint32", LocalType: "uint32", Doc: "", Directives: gti.Directives{}, Tag: ""}},
+ {"SendLay", >i.Field{Name: "SendLay", Type: "uint32", LocalType: "uint32", Doc: "", Directives: gti.Directives{}, Tag: ""}},
+ {"SendNeurSt", >i.Field{Name: "SendNeurSt", Type: "uint32", LocalType: "uint32", Doc: "", Directives: gti.Directives{}, Tag: ""}},
+ {"SendNeurN", >i.Field{Name: "SendNeurN", Type: "uint32", LocalType: "uint32", Doc: "", Directives: gti.Directives{}, Tag: ""}},
+ {"SynapseSt", >i.Field{Name: "SynapseSt", Type: "uint32", LocalType: "uint32", Doc: "", Directives: gti.Directives{}, Tag: ""}},
+ {"SendConSt", >i.Field{Name: "SendConSt", Type: "uint32", LocalType: "uint32", Doc: "", Directives: gti.Directives{}, Tag: ""}},
+ {"RecvConSt", >i.Field{Name: "RecvConSt", Type: "uint32", LocalType: "uint32", Doc: "", Directives: gti.Directives{}, Tag: ""}},
+ {"RecvSynSt", >i.Field{Name: "RecvSynSt", Type: "uint32", LocalType: "uint32", Doc: "", Directives: gti.Directives{}, Tag: ""}},
+ {"GBufSt", >i.Field{Name: "GBufSt", Type: "uint32", LocalType: "uint32", Doc: "", Directives: gti.Directives{}, Tag: ""}},
+ {"GSynSt", >i.Field{Name: "GSynSt", Type: "uint32", LocalType: "uint32", Doc: "", Directives: gti.Directives{}, Tag: ""}},
+ {"pad", >i.Field{Name: "pad", Type: "uint32", LocalType: "uint32", Doc: "", Directives: gti.Directives{}, Tag: ""}},
+ }),
+ Embeds: ordmap.Make([]ordmap.KeyVal[string, *gti.Field]{}),
+ Methods: ordmap.Make([]ordmap.KeyVal[string, *gti.Method]{}),
+})
+
+var _ = gti.AddType(&gti.Type{
+ Name: "github.com/emer/axon/axon.GScaleVals",
+ ShortName: "axon.GScaleVals",
+ IDName: "g-scale-vals",
+ Doc: "GScaleVals holds the conductance scaling values.\nThese are computed once at start and remain constant thereafter,\nand therefore belong on Params and not on PrjnVals.",
+ Directives: gti.Directives{},
+ Fields: ordmap.Make([]ordmap.KeyVal[string, *gti.Field]{
+ {"Scale", >i.Field{Name: "Scale", Type: "float32", LocalType: "float32", Doc: "scaling factor for integrating synaptic input conductances (G's), originally computed as a function of sending layer activity and number of connections, and typically adapted from there -- see Prjn.PrjnScale adapt params", Directives: gti.Directives{}, Tag: "inactive:\"+\""}},
+ {"Rel", >i.Field{Name: "Rel", Type: "float32", LocalType: "float32", Doc: "normalized relative proportion of total receiving conductance for this projection: PrjnScale.Rel / sum(PrjnScale.Rel across relevant prjns)", Directives: gti.Directives{}, Tag: "inactive:\"+\""}},
+ {"pad", >i.Field{Name: "pad", Type: "float32", LocalType: "float32", Doc: "", Directives: gti.Directives{}, Tag: ""}},
+ }),
+ Embeds: ordmap.Make([]ordmap.KeyVal[string, *gti.Field]{}),
+ Methods: ordmap.Make([]ordmap.KeyVal[string, *gti.Method]{}),
+})
+
+var _ = gti.AddType(&gti.Type{
+ Name: "github.com/emer/axon/axon.PrjnParams",
+ ShortName: "axon.PrjnParams",
+ IDName: "prjn-params",
+ Doc: "PrjnParams contains all of the prjn parameters.\nThese values must remain constant over the course of computation.\nOn the GPU, they are loaded into a uniform.",
+ Directives: gti.Directives{},
+ Fields: ordmap.Make([]ordmap.KeyVal[string, *gti.Field]{
+ {"PrjnType", >i.Field{Name: "PrjnType", Type: "github.com/emer/axon/axon.PrjnTypes", LocalType: "PrjnTypes", Doc: "functional type of prjn -- determines functional code path for specialized layer types, and is synchronized with the Prjn.Typ value", Directives: gti.Directives{}, Tag: ""}},
+ {"pad", >i.Field{Name: "pad", Type: "int32", LocalType: "int32", Doc: "", Directives: gti.Directives{}, Tag: ""}},
+ {"Idxs", >i.Field{Name: "Idxs", Type: "github.com/emer/axon/axon.PrjnIdxs", LocalType: "PrjnIdxs", Doc: "recv and send neuron-level projection index array access info", Directives: gti.Directives{}, Tag: "view:\"-\""}},
+ {"Com", >i.Field{Name: "Com", Type: "github.com/emer/axon/axon.SynComParams", LocalType: "SynComParams", Doc: "synaptic communication parameters: delay, probability of failure", Directives: gti.Directives{}, Tag: "view:\"inline\""}},
+ {"PrjnScale", >i.Field{Name: "PrjnScale", Type: "github.com/emer/axon/axon.PrjnScaleParams", LocalType: "PrjnScaleParams", Doc: "projection scaling parameters for computing GScale: modulates overall strength of projection, using both absolute and relative factors, with adaptation option to maintain target max conductances", Directives: gti.Directives{}, Tag: "view:\"inline\""}},
+ {"SWts", >i.Field{Name: "SWts", Type: "github.com/emer/axon/axon.SWtParams", LocalType: "SWtParams", Doc: "slowly adapting, structural weight value parameters, which control initial weight values and slower outer-loop adjustments", Directives: gti.Directives{}, Tag: "view:\"add-fields\""}},
+ {"Learn", >i.Field{Name: "Learn", Type: "github.com/emer/axon/axon.LearnSynParams", LocalType: "LearnSynParams", Doc: "synaptic-level learning parameters for learning in the fast LWt values.", Directives: gti.Directives{}, Tag: "view:\"add-fields\""}},
+ {"GScale", >i.Field{Name: "GScale", Type: "github.com/emer/axon/axon.GScaleVals", LocalType: "GScaleVals", Doc: "conductance scaling values", Directives: gti.Directives{}, Tag: "view:\"inline\""}},
+ {"RLPred", >i.Field{Name: "RLPred", Type: "github.com/emer/axon/axon.RLPredPrjnParams", LocalType: "RLPredPrjnParams", Doc: "] Params for RWPrjn and TDPredPrjn for doing dopamine-modulated learning for reward prediction: Da * Send activity. Use in RWPredLayer or TDPredLayer typically to generate reward predictions. If the Da sign is positive, the first recv unit learns fully; for negative, second one learns fully. Lower lrate applies for opposite cases. Weights are positive-only.", Directives: gti.Directives{}, Tag: "viewif:\"PrjnType=[RWPrjn,TDPredPrjn]\" view:\"inline\""}},
+ {"Matrix", >i.Field{Name: "Matrix", Type: "github.com/emer/axon/axon.MatrixPrjnParams", LocalType: "MatrixPrjnParams", Doc: "for trace-based learning in the MatrixPrjn. A trace of synaptic co-activity is formed, and then modulated by dopamine whenever it occurs. This bridges the temporal gap between gating activity and subsequent activity, and is based biologically on synaptic tags. Trace is reset at time of reward based on ACh level from CINs.", Directives: gti.Directives{}, Tag: "viewif:\"PrjnType=MatrixPrjn\" view:\"inline\""}},
+ {"BLA", >i.Field{Name: "BLA", Type: "github.com/emer/axon/axon.BLAPrjnParams", LocalType: "BLAPrjnParams", Doc: "Basolateral Amygdala projection parameters.", Directives: gti.Directives{}, Tag: "viewif:\"PrjnType=BLAPrjn\" view:\"inline\""}},
+ {"Hip", >i.Field{Name: "Hip", Type: "github.com/emer/axon/axon.HipPrjnParams", LocalType: "HipPrjnParams", Doc: "Hip bench parameters.", Directives: gti.Directives{}, Tag: "viewif:\"PrjnType=HipPrjn\" view:\"inline\""}},
+ }),
+ Embeds: ordmap.Make([]ordmap.KeyVal[string, *gti.Field]{}),
+ Methods: ordmap.Make([]ordmap.KeyVal[string, *gti.Method]{}),
+})
+
+var _ = gti.AddType(&gti.Type{
+ Name: "github.com/emer/axon/axon.PrjnTypes",
+ ShortName: "axon.PrjnTypes",
+ IDName: "prjn-types",
+ Doc: "PrjnTypes is an axon-specific prjn type enum,\nthat encompasses all the different algorithm types supported.\nClass parameter styles automatically key off of these types.\nThe first entries must be kept synchronized with the emer.PrjnType.",
+ Directives: gti.Directives{
+ >i.Directive{Tool: "gosl", Directive: "start", Args: []string{"prjntypes"}},
+ >i.Directive{Tool: "enums", Directive: "enum", Args: []string{}},
+ },
+
+ Methods: ordmap.Make([]ordmap.KeyVal[string, *gti.Method]{}),
+})
+
+var _ = gti.AddType(&gti.Type{
+ Name: "github.com/emer/axon/axon.DriveParams",
+ ShortName: "axon.DriveParams",
+ IDName: "drive-params",
+ Doc: "DriveParams manages the drive parameters for computing and updating drive state.\nMost of the params are for optional case where drives are automatically\nupdated based on US consumption (which satisfies drives) and time passing\n(which increases drives).",
+ Directives: gti.Directives{},
+ Fields: ordmap.Make([]ordmap.KeyVal[string, *gti.Field]{
+ {"DriveMin", >i.Field{Name: "DriveMin", Type: "float32", LocalType: "float32", Doc: "minimum effective drive value -- this is an automatic baseline ensuring that a positive US results in at least some minimal level of reward. Unlike Base values, this is not reflected in the activity of the drive values -- applies at the time of reward calculation as a minimum baseline.", Directives: gti.Directives{}, Tag: ""}},
+ {"Base", >i.Field{Name: "Base", Type: "[]float32", LocalType: "[]float32", Doc: "baseline levels for each drive -- what they naturally trend toward in the absence of any input. Set inactive drives to 0 baseline, active ones typically elevated baseline (0-1 range).", Directives: gti.Directives{}, Tag: ""}},
+ {"Tau", >i.Field{Name: "Tau", Type: "[]float32", LocalType: "[]float32", Doc: "time constants in ThetaCycle (trial) units for natural update toward Base values -- 0 values means no natural update.", Directives: gti.Directives{}, Tag: ""}},
+ {"Satisfaction", >i.Field{Name: "Satisfaction", Type: "[]float32", LocalType: "[]float32", Doc: "decrement in drive value when US is consumed, thus partially satisfying the drive -- positive values are subtracted from current Drive value.", Directives: gti.Directives{}, Tag: ""}},
+ {"Dt", >i.Field{Name: "Dt", Type: "[]float32", LocalType: "[]float32", Doc: "1/Tau", Directives: gti.Directives{}, Tag: "view:\"-\""}},
+ }),
+ Embeds: ordmap.Make([]ordmap.KeyVal[string, *gti.Field]{}),
+ Methods: ordmap.Make([]ordmap.KeyVal[string, *gti.Method]{}),
+})
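+
+// Editor's sketch (hypothetical helper, not generator output): combining the
+// Doc strings above, each drive relaxes toward its Base at rate Dt = 1/Tau,
+// and US consumption subtracts the corresponding Satisfaction decrement.
+// The non-negativity clamp is an assumption added here for safety.
+func exampleDriveUpdt(drive, base, dt, satisfaction, usMag float32) float32 {
+	drive += dt * (base - drive)  // natural update toward baseline
+	drive -= satisfaction * usMag // US consumption partially satisfies drive
+	if drive < 0 {
+		drive = 0
+	}
+	return drive
+}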
+
+var _ = gti.AddType(&gti.Type{
+ Name: "github.com/emer/axon/axon.UrgencyParams",
+ ShortName: "axon.UrgencyParams",
+ IDName: "urgency-params",
+ Doc: "UrgencyParams has urgency (increasing pressure to do something)\nand parameters for updating it.\nRaw urgency integrates effort when _not_ goal engaged\nwhile effort (negative US 0) integrates when a goal _is_ engaged.",
+ Directives: gti.Directives{},
+ Fields: ordmap.Make([]ordmap.KeyVal[string, *gti.Field]{
+ {"U50", >i.Field{Name: "U50", Type: "float32", LocalType: "float32", Doc: "value of raw urgency where the urgency activation level is 50%", Directives: gti.Directives{}, Tag: ""}},
+ {"Power", >i.Field{Name: "Power", Type: "int32", LocalType: "int32", Doc: "exponent on the urge factor -- valid numbers are 1,2,4,6", Directives: gti.Directives{}, Tag: "def:\"4\""}},
+ {"Thr", >i.Field{Name: "Thr", Type: "float32", LocalType: "float32", Doc: "threshold for urge -- cuts off small baseline values", Directives: gti.Directives{}, Tag: "def:\"0.2\""}},
+ }),
+ Embeds: ordmap.Make([]ordmap.KeyVal[string, *gti.Field]{}),
+ Methods: ordmap.Make([]ordmap.KeyVal[string, *gti.Method]{}),
+})
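+
+// Editor's sketch (hypothetical helper, not generator output): one functional
+// form consistent with the Docs above -- U50 as the half-activation point and
+// an integer Power exponent -- is a Hill-style function; the exact function
+// used in axon may differ. Values below Thr are cut off to 0.
+func exampleUrgeFun(raw, u50, thr float32, power int32) float32 {
+	xp, up := raw, u50
+	for i := int32(1); i < power; i++ { // integer power; valid 1,2,4,6
+		xp *= raw
+		up *= u50
+	}
+	urge := xp / (xp + up) // equals 0.5 when raw == U50
+	if urge < thr {
+		urge = 0
+	}
+	return urge
+}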
+
+var _ = gti.AddType(&gti.Type{
+ Name: "github.com/emer/axon/axon.USParams",
+ ShortName: "axon.USParams",
+ IDName: "us-params",
+ Doc: "USParams control how positive and negative USs are\nweighted and integrated to compute an overall PV primary value.",
+ Directives: gti.Directives{},
+ Fields: ordmap.Make([]ordmap.KeyVal[string, *gti.Field]{
+ {"NegUSOutcomeThr", >i.Field{Name: "NegUSOutcomeThr", Type: "float32", LocalType: "float32", Doc: "threshold for a negative US increment, _after_ multiplying by the USnegGains factor for that US (to allow for normalized input magnitudes that may translate into different magnitude of effects), to drive a phasic ACh response and associated VSMatrix gating and dopamine firing -- i.e., a full negative US outcome event (global NegUSOutcome flag is set)", Directives: gti.Directives{}, Tag: "def:\"0.5\""}},
+ {"PVposGain", >i.Field{Name: "PVposGain", Type: "float32", LocalType: "float32", Doc: "gain factor applied to sum of weighted, drive-scaled positive USs to compute PVpos primary value summary -- multiplied prior to 1/(1+x) normalization. Use this to adjust the overall scaling of PVpos reward within 0-1 normalized range (see also PVnegGain). Each USpos is assumed to be in 0-1 range, default 1.", Directives: gti.Directives{}, Tag: "def:\"2\""}},
+ {"PVnegGain", >i.Field{Name: "PVnegGain", Type: "float32", LocalType: "float32", Doc: "gain factor applied to sum of weighted negative USs to compute PVneg primary value summary -- multiplied prior to 1/(1+x) normalization. Use this to adjust overall scaling of PVneg within 0-1 normalized range (see also PVposGain).", Directives: gti.Directives{}, Tag: "def:\"1\""}},
+ {"USnegGains", >i.Field{Name: "USnegGains", Type: "[]float32", LocalType: "[]float32", Doc: "gain factor for each individual negative US, multiplied prior to 1/(1+x) normalization of each term for activating the OFCnegUS pools. These gains are _not_ applied in computing summary PVneg value (see PVnegWts), and generally must be larger than the weights to leverage the dynamic range within each US pool.", Directives: gti.Directives{}, Tag: ""}},
+ {"PVposWts", >i.Field{Name: "PVposWts", Type: "[]float32", LocalType: "[]float32", Doc: "weight factor applied to each separate positive US on the way to computing the overall PVpos summary value, to control the weighting of each US relative to the others. Each pos US is also multiplied by its dynamic Drive factor as well. Use PVposGain to control the overall scaling of the PVpos value.", Directives: gti.Directives{}, Tag: ""}},
+ {"PVnegWts", >i.Field{Name: "PVnegWts", Type: "[]float32", LocalType: "[]float32", Doc: "weight factor applied to each separate negative US on the way to computing the overall PVneg summary value, to control the weighting of each US relative to the others. The first pool is Time, second is Effort, and these are typically weighted lower (.02) than salient simulation-specific USs (1).", Directives: gti.Directives{}, Tag: ""}},
+ {"USposEst", >i.Field{Name: "USposEst", Type: "[]float32", LocalType: "[]float32", Doc: "computed estimated US values, based on OFCposUSPT and VSMatrix gating, in PVposEst", Directives: gti.Directives{}, Tag: "inactive:\"+\""}},
+ }),
+ Embeds: ordmap.Make([]ordmap.KeyVal[string, *gti.Field]{}),
+ Methods: ordmap.Make([]ordmap.KeyVal[string, *gti.Method]{}),
+})
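+
+// Editor's sketch (hypothetical helper, not generator output): per the Docs
+// above, PVpos sums drive-scaled, weighted positive USs, multiplies by
+// PVposGain, and squashes into the 0-1 range. The squashing is written here
+// as x/(1+x), an assumed reading of the "1/(1+x) normalization" phrasing.
+// Assumes wts, drives, and uss all have the same length.
+func examplePVpos(gain float32, wts, drives, uss []float32) float32 {
+	var sum float32
+	for i := range uss {
+		sum += wts[i] * drives[i] * uss[i]
+	}
+	v := gain * sum
+	return v / (1 + v) // maps 0..inf into 0..1
+}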
+
+var _ = gti.AddType(&gti.Type{
+ Name: "github.com/emer/axon/axon.LHbParams",
+ ShortName: "axon.LHbParams",
+ IDName: "l-hb-params",
+ Doc: "LHbParams has values for computing LHb & RMTg which drives dips / pauses in DA firing.\nLHb handles all US-related (PV = primary value) processing.\nPositive net LHb activity drives dips / pauses in VTA DA activity,\ne.g., when predicted pos > actual or actual neg > predicted.\nNegative net LHb activity drives bursts in VTA DA activity,\ne.g., when actual pos > predicted (redundant with LV / Amygdala)\nor \"relief\" burst when actual neg < predicted.",
+ Directives: gti.Directives{},
+ Fields: ordmap.Make([]ordmap.KeyVal[string, *gti.Field]{
+ {"NegThr", >i.Field{Name: "NegThr", Type: "float32", LocalType: "float32", Doc: "threshold factor that multiplies integrated pvNeg value to establish a threshold for whether the integrated pvPos value is good enough to drive overall net positive reward", Directives: gti.Directives{}, Tag: "def:\"1\""}},
+ {"BurstGain", >i.Field{Name: "BurstGain", Type: "float32", LocalType: "float32", Doc: "gain multiplier on PVpos for purposes of generating bursts (not for discounting negative dips) -- 4 renormalizes for typical ~.5 values (.5 * .5 = .25)", Directives: gti.Directives{}, Tag: "def:\"1\""}},
+ {"DipGain", >i.Field{Name: "DipGain", Type: "float32", LocalType: "float32", Doc: "gain multiplier on PVneg for purposes of generating dips (not for discounting positive bursts) -- 4 renormalizes for typical ~.5 values (.5 * .5 = .25)", Directives: gti.Directives{}, Tag: "def:\"1\""}},
+ }),
+ Embeds: ordmap.Make([]ordmap.KeyVal[string, *gti.Field]{}),
+ Methods: ordmap.Make([]ordmap.KeyVal[string, *gti.Method]{}),
+})
+
+var _ = gti.AddType(&gti.Type{
+ Name: "github.com/emer/axon/axon.GiveUpParams",
+ ShortName: "axon.GiveUpParams",
+ IDName: "give-up-params",
+ Doc: "GiveUpParams are parameters for computing when to give up",
+ Directives: gti.Directives{},
+ Fields: ordmap.Make([]ordmap.KeyVal[string, *gti.Field]{
+ {"NegThr", >i.Field{Name: "NegThr", Type: "float32", LocalType: "float32", Doc: "threshold factor that multiplies integrated pvNeg value to establish a threshold for whether the integrated pvPos value is good enough to drive overall net positive reward", Directives: gti.Directives{}, Tag: "def:\"1\""}},
+ {"Gain", >i.Field{Name: "Gain", Type: "float32", LocalType: "float32", Doc: "multiplier on pos - neg for logistic probability function -- higher gain values produce more binary give up behavior and lower values produce more graded stochastic behavior around the threshold", Directives: gti.Directives{}, Tag: "def:\"10\""}},
+ {"MinPVposEst", >i.Field{Name: "MinPVposEst", Type: "float32", LocalType: "float32", Doc: "minimum estimated PVpos value -- deals with any errors in the estimation process to make sure that erroneous GiveUp doesn't happen.", Directives: gti.Directives{}, Tag: ""}},
+ }),
+ Embeds: ordmap.Make([]ordmap.KeyVal[string, *gti.Field]{}),
+ Methods: ordmap.Make([]ordmap.KeyVal[string, *gti.Method]{}),
+})
+
+var _ = gti.AddType(&gti.Type{
+ Name: "github.com/emer/axon/axon.PVLV",
+ ShortName: "axon.PVLV",
+ IDName: "pvlv",
+ Doc: "PVLV represents the core brainstem-level (hypothalamus) bodily drives\nand resulting dopamine from US (unconditioned stimulus) inputs,\nas computed by the PVLV model of primary value (PV)\nand learned value (LV), describing the functions of the Amygala,\nVentral Striatum, VTA and associated midbrain nuclei (LDT, LHb, RMTg).\nCore LHb (lateral habenula) and VTA (ventral tegmental area) dopamine\nare computed in equations using inputs from specialized network layers\n(LDTLayer driven by BLA, CeM layers, VSPatchLayer).\nThe Drives, Effort, US and resulting LHb PV dopamine computation all happens at the\nat the start of each trial (NewState, Step). The LV / CS dopamine is computed\ncycle-by-cycle by the VTA layer using parameters set by the VTA layer.\nRenders USLayer, PVLayer, DrivesLayer representations based on state updated here.",
+ Directives: gti.Directives{},
+ Fields: ordmap.Make([]ordmap.KeyVal[string, *gti.Field]{
+ {"NPosUSs", >i.Field{Name: "NPosUSs", Type: "uint32", LocalType: "uint32", Doc: "number of possible positive US states and corresponding drives -- the first is always reserved for novelty / curiosity. Must be set programmatically via SetNUSs method, which allocates corresponding parameters.", Directives: gti.Directives{}, Tag: "inactive:\"+\""}},
+ {"NNegUSs", >i.Field{Name: "NNegUSs", Type: "uint32", LocalType: "uint32", Doc: "number of possible negative US states -- is reserved for accumulated time, the accumulated effort cost. Must be set programmatically via SetNUSs method, which allocates corresponding parameters.", Directives: gti.Directives{}, Tag: "inactive:\"+\""}},
+ {"Drive", >i.Field{Name: "Drive", Type: "github.com/emer/axon/axon.DriveParams", LocalType: "DriveParams", Doc: "parameters and state for built-in drives that form the core motivations of agent, controlled by lateral hypothalamus and associated body state monitoring such as glucose levels and thirst.", Directives: gti.Directives{}, Tag: ""}},
+ {"Urgency", >i.Field{Name: "Urgency", Type: "github.com/emer/axon/axon.UrgencyParams", LocalType: "UrgencyParams", Doc: "urgency (increasing pressure to do something) and parameters for updating it. Raw urgency is incremented by same units as effort, but is only reset with a positive US.", Directives: gti.Directives{}, Tag: "view:\"inline\""}},
+ {"USs", >i.Field{Name: "USs", Type: "github.com/emer/axon/axon.USParams", LocalType: "USParams", Doc: "controls how positive and negative USs are weighted and integrated to compute an overall PV primary value.", Directives: gti.Directives{}, Tag: ""}},
+ {"LHb", >i.Field{Name: "LHb", Type: "github.com/emer/axon/axon.LHbParams", LocalType: "LHbParams", Doc: "lateral habenula (LHb) parameters and state, which drives dipping / pausing in dopamine when the predicted positive outcome > actual, or actual negative outcome > predicted. Can also drive bursting for the converse, and via matrix phasic firing", Directives: gti.Directives{}, Tag: "view:\"inline\""}},
+ {"GiveUp", >i.Field{Name: "GiveUp", Type: "github.com/emer/axon/axon.GiveUpParams", LocalType: "GiveUpParams", Doc: "parameters for giving up based on PV pos - neg difference", Directives: gti.Directives{}, Tag: ""}},
+ }),
+ Embeds: ordmap.Make([]ordmap.KeyVal[string, *gti.Field]{}),
+ Methods: ordmap.Make([]ordmap.KeyVal[string, *gti.Method]{}),
+})
+
+var _ = gti.AddType(&gti.Type{
+ Name: "github.com/emer/axon/axon.LDTParams",
+ ShortName: "axon.LDTParams",
+ IDName: "ldt-params",
+ Doc: "LDTParams compute reward salience as ACh global neuromodulatory signal\nas a function of the MAX activation of its inputs.",
+ Directives: gti.Directives{
+ >i.Directive{Tool: "gosl", Directive: "start", Args: []string{"pvlv_layers"}},
+ },
+ Fields: ordmap.Make([]ordmap.KeyVal[string, *gti.Field]{
+ {"SrcThr", >i.Field{Name: "SrcThr", Type: "float32", LocalType: "float32", Doc: "threshold per input source, on absolute value (magnitude), to count as a significant reward event, which then drives maximal ACh -- set to 0 to disable this nonlinear behavior", Directives: gti.Directives{}, Tag: "def:\"0.05\""}},
+ {"Rew", >i.Field{Name: "Rew", Type: "goki.dev/gosl/v2/slbool.Bool", LocalType: "slbool.Bool", Doc: "use the global Context.NeuroMod.HasRew flag -- if there is some kind of external reward being given, then ACh goes to 1, else 0 for this component", Directives: gti.Directives{}, Tag: "def:\"true\""}},
+ {"MaintInhib", >i.Field{Name: "MaintInhib", Type: "float32", LocalType: "float32", Doc: "extent to which active maintenance (via Context.NeuroMod.NotMaint PTNotMaintLayer activity) inhibits ACh signals -- when goal engaged, distractability is lower.", Directives: gti.Directives{}, Tag: "def:\"2\""}},
+ {"NotMaintMax", >i.Field{Name: "NotMaintMax", Type: "float32", LocalType: "float32", Doc: "maximum NeuroMod.NotMaint activity for computing Maint as 1-NotMaint -- when NotMaint is >= NotMaintMax, then Maint = 0.", Directives: gti.Directives{}, Tag: "def:\"0.4\""}},
+ {"SrcLay1Idx", >i.Field{Name: "SrcLay1Idx", Type: "int32", LocalType: "int32", Doc: "idx of Layer to get max activity from -- set during Build from BuildConfig SrcLay1Name if present -- -1 if not used", Directives: gti.Directives{}, Tag: "inactive:\"+\""}},
+ {"SrcLay2Idx", >i.Field{Name: "SrcLay2Idx", Type: "int32", LocalType: "int32", Doc: "idx of Layer to get max activity from -- set during Build from BuildConfig SrcLay2Name if present -- -1 if not used", Directives: gti.Directives{}, Tag: "inactive:\"+\""}},
+ {"SrcLay3Idx", >i.Field{Name: "SrcLay3Idx", Type: "int32", LocalType: "int32", Doc: "idx of Layer to get max activity from -- set during Build from BuildConfig SrcLay3Name if present -- -1 if not used", Directives: gti.Directives{}, Tag: "inactive:\"+\""}},
+ {"SrcLay4Idx", >i.Field{Name: "SrcLay4Idx", Type: "int32", LocalType: "int32", Doc: "idx of Layer to get max activity from -- set during Build from BuildConfig SrcLay4Name if present -- -1 if not used", Directives: gti.Directives{}, Tag: "inactive:\"+\""}},
+ }),
+ Embeds: ordmap.Make([]ordmap.KeyVal[string, *gti.Field]{}),
+ Methods: ordmap.Make([]ordmap.KeyVal[string, *gti.Method]{}),
+})
+
+var _ = gti.AddType(&gti.Type{
+ Name: "github.com/emer/axon/axon.VSPatchParams",
+ ShortName: "axon.VSPatchParams",
+ IDName: "vs-patch-params",
+ Doc: "VSPatchParams parameters for VSPatch learning",
+ Directives: gti.Directives{},
+ Fields: ordmap.Make([]ordmap.KeyVal[string, *gti.Field]{
+ {"Gain", >i.Field{Name: "Gain", Type: "float32", LocalType: "float32", Doc: "multiplier applied after Thr threshold", Directives: gti.Directives{}, Tag: "def:\"3\""}},
+ {"ThrInit", >i.Field{Name: "ThrInit", Type: "float32", LocalType: "float32", Doc: "initial value for overall threshold, which adapts over time -- stored in LayerVals.ActAvgVals.AdaptThr", Directives: gti.Directives{}, Tag: "def:\"0.15\""}},
+ {"ThrLRate", >i.Field{Name: "ThrLRate", Type: "float32", LocalType: "float32", Doc: "learning rate for the threshold -- moves in proportion to same predictive error signal that drives synaptic learning", Directives: gti.Directives{}, Tag: "def:\"0,0.002\""}},
+ {"ThrNonRew", >i.Field{Name: "ThrNonRew", Type: "float32", LocalType: "float32", Doc: "extra gain factor for non-reward trials, which is the most critical", Directives: gti.Directives{}, Tag: "def:\"10\""}},
+ }),
+ Embeds: ordmap.Make([]ordmap.KeyVal[string, *gti.Field]{}),
+ Methods: ordmap.Make([]ordmap.KeyVal[string, *gti.Method]{}),
+})
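+
+// Editor's sketch (hypothetical helper, not generator output): per the Docs
+// above, the VSPatch value is compared against an adaptive threshold
+// (initialized to ThrInit) and then multiplied by Gain; the rectification
+// below is an assumption about how the threshold is applied.
+func exampleVSPatchVal(v, thr, gain float32) float32 {
+	v -= thr
+	if v < 0 {
+		v = 0
+	}
+	return gain * v
+}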
+
+var _ = gti.AddType(&gti.Type{
+ Name: "github.com/emer/axon/axon.VTAParams",
+ ShortName: "axon.VTAParams",
+ IDName: "vta-params",
+ Doc: "VTAParams are for computing overall VTA DA based on LHb PVDA\n(primary value -- at US time, computed at start of each trial\nand stored in LHbPVDA global value)\nand Amygdala (CeM) CS / learned value (LV) activations, which update\nevery cycle.",
+ Directives: gti.Directives{},
+ Fields: ordmap.Make([]ordmap.KeyVal[string, *gti.Field]{
+ {"CeMGain", >i.Field{Name: "CeMGain", Type: "float32", LocalType: "float32", Doc: "gain on CeM activity difference (CeMPos - CeMNeg) for generating LV CS-driven dopamine values", Directives: gti.Directives{}, Tag: "def:\"0.75\""}},
+ {"LHbGain", >i.Field{Name: "LHbGain", Type: "float32", LocalType: "float32", Doc: "gain on computed LHb DA (Burst - Dip) -- for controlling DA levels", Directives: gti.Directives{}, Tag: "def:\"1.25\""}},
+ {"pad", >i.Field{Name: "pad", Type: "float32", LocalType: "float32", Doc: "", Directives: gti.Directives{}, Tag: ""}},
+ }),
+ Embeds: ordmap.Make([]ordmap.KeyVal[string, *gti.Field]{}),
+ Methods: ordmap.Make([]ordmap.KeyVal[string, *gti.Method]{}),
+})
+
+var _ = gti.AddType(&gti.Type{
+ Name: "github.com/emer/axon/axon.BLAPrjnParams",
+ ShortName: "axon.BLAPrjnParams",
+ IDName: "bla-prjn-params",
+ Doc: "BLAPrjnParams has parameters for basolateral amygdala learning.\nLearning is driven by the Tr trace as function of ACh * Send Act\nrecorded prior to US, and at US, recv unit delta: CaSpkP - SpkPrv\ntimes normalized GeIntNorm for recv unit credit assignment.\nThe Learn.Trace.Tau time constant determines trace updating over trials\nwhen ACh is above threshold -- this determines strength of second-order\nconditioning -- default of 1 means none, but can be increased as needed.",
+ Directives: gti.Directives{
+ >i.Directive{Tool: "gosl", Directive: "start", Args: []string{"pvlv_prjns"}},
+ },
+ Fields: ordmap.Make([]ordmap.KeyVal[string, *gti.Field]{
+ {"NegDeltaLRate", >i.Field{Name: "NegDeltaLRate", Type: "float32", LocalType: "float32", Doc: "use 0.01 for acquisition (don't unlearn) and 1 for extinction -- negative delta learning rate multiplier", Directives: gti.Directives{}, Tag: "def:\"0.01,1\""}},
+ {"AChThr", >i.Field{Name: "AChThr", Type: "float32", LocalType: "float32", Doc: "threshold on this layer's ACh level for trace learning updates", Directives: gti.Directives{}, Tag: "def:\"0.1\""}},
+ {"USTrace", >i.Field{Name: "USTrace", Type: "float32", LocalType: "float32", Doc: "proportion of US time stimulus activity to use for the trace component of", Directives: gti.Directives{}, Tag: "def:\"0,0.5\""}},
+ {"pad", >i.Field{Name: "pad", Type: "float32", LocalType: "float32", Doc: "", Directives: gti.Directives{}, Tag: ""}},
+ }),
+ Embeds: ordmap.Make([]ordmap.KeyVal[string, *gti.Field]{}),
+ Methods: ordmap.Make([]ordmap.KeyVal[string, *gti.Method]{}),
+})
+
+var _ = gti.AddType(&gti.Type{
+ Name: "github.com/emer/axon/axon.RandFunIdx",
+ ShortName: "axon.RandFunIdx",
+ IDName: "rand-fun-idx",
+ Doc: "",
+ Directives: gti.Directives{
+ >i.Directive{Tool: "gosl", Directive: "hlsl", Args: []string{"axonrand"}},
+ >i.Directive{Tool: "gosl", Directive: "end", Args: []string{"axonrand"}},
+ >i.Directive{Tool: "gosl", Directive: "start", Args: []string{"axonrand"}},
+ },
+
+ Methods: ordmap.Make([]ordmap.KeyVal[string, *gti.Method]{}),
+})
+
+var _ = gti.AddType(&gti.Type{
+ Name: "github.com/emer/axon/axon.RWPredParams",
+ ShortName: "axon.RWPredParams",
+ IDName: "rw-pred-params",
+ Doc: "RWPredParams parameterizes reward prediction for a simple Rescorla-Wagner\nlearning dynamic (i.e., PV learning in the PVLV framework).",
+ Directives: gti.Directives{
+ >i.Directive{Tool: "gosl", Directive: "start", Args: []string{"rl_layers"}},
+ },
+ Fields: ordmap.Make([]ordmap.KeyVal[string, *gti.Field]{
+ {"PredRange", >i.Field{Name: "PredRange", Type: "goki.dev/etable/v2/minmax.F32", LocalType: "minmax.F32", Doc: "default 0.1..0.99 range of predictions that can be represented -- having a truncated range preserves some sensitivity in dopamine at the extremes of good or poor performance", Directives: gti.Directives{}, Tag: ""}},
+ }),
+ Embeds: ordmap.Make([]ordmap.KeyVal[string, *gti.Field]{}),
+ Methods: ordmap.Make([]ordmap.KeyVal[string, *gti.Method]{}),
+})
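// A minimal sketch of what the truncated PredRange buys, per its Doc: clamping
// predictions to e.g. 0.1..0.99 preserves dopamine sensitivity at the extremes.
// The helper is hypothetical; minmax.F32 carries the actual Min/Max values.
func clampPredSketch(pred, rangeMin, rangeMax float32) float32 {
    if pred < rangeMin {
        return rangeMin // e.g., 0.1
    }
    if pred > rangeMax {
        return rangeMax // e.g., 0.99
    }
    return pred
}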
+
+var _ = gti.AddType(&gti.Type{
+ Name: "github.com/emer/axon/axon.RWDaParams",
+ ShortName: "axon.RWDaParams",
+ IDName: "rw-da-params",
+ Doc: "RWDaParams computes a dopamine (DA) signal using simple Rescorla-Wagner\nlearning dynamic (i.e., PV learning in the PVLV framework).",
+ Directives: gti.Directives{},
+ Fields: ordmap.Make([]ordmap.KeyVal[string, *gti.Field]{
+ {"TonicGe", >i.Field{Name: "TonicGe", Type: "float32", LocalType: "float32", Doc: "tonic baseline Ge level for DA = 0 -- +/- are between 0 and 2*TonicGe -- just for spiking display of computed DA value", Directives: gti.Directives{}, Tag: ""}},
+ {"RWPredLayIdx", >i.Field{Name: "RWPredLayIdx", Type: "int32", LocalType: "int32", Doc: "idx of RWPredLayer to get reward prediction from -- set during Build from BuildConfig RWPredLayName", Directives: gti.Directives{}, Tag: "inactive:\"+\""}},
+ {"pad", >i.Field{Name: "pad", Type: "uint32", LocalType: "uint32", Doc: "", Directives: gti.Directives{}, Tag: ""}},
+ }),
+ Embeds: ordmap.Make([]ordmap.KeyVal[string, *gti.Field]{}),
+ Methods: ordmap.Make([]ordmap.KeyVal[string, *gti.Method]{}),
+})
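// A minimal sketch of the Rescorla-Wagner DA signal implied by the Docs above:
// DA is reward minus the prediction read from RWPredLayIdx, and TonicGe maps
// DA into a display Ge in [0, 2*TonicGe]. Names are hypothetical.
func rwDaGeSketch(tonicGe, rew, pred float32) (da, ge float32) {
    da = rew - pred         // simple RW prediction error
    ge = tonicGe * (1 + da) // DA = 0 maps to the tonic baseline
    if ge < 0 {
        ge = 0
    } else if ge > 2*tonicGe {
        ge = 2 * tonicGe
    }
    return
}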
+
+var _ = gti.AddType(&gti.Type{
+ Name: "github.com/emer/axon/axon.TDIntegParams",
+ ShortName: "axon.TDIntegParams",
+ IDName: "td-integ-params",
+ Doc: "TDIntegParams are params for reward integrator layer",
+ Directives: gti.Directives{},
+ Fields: ordmap.Make([]ordmap.KeyVal[string, *gti.Field]{
+ {"Discount", >i.Field{Name: "Discount", Type: "float32", LocalType: "float32", Doc: "discount factor -- how much to discount the future prediction from TDPred", Directives: gti.Directives{}, Tag: ""}},
+ {"PredGain", >i.Field{Name: "PredGain", Type: "float32", LocalType: "float32", Doc: "gain factor on TD rew pred activations", Directives: gti.Directives{}, Tag: ""}},
+ {"TDPredLayIdx", >i.Field{Name: "TDPredLayIdx", Type: "int32", LocalType: "int32", Doc: "idx of TDPredLayer to get reward prediction from -- set during Build from BuildConfig TDPredLayName", Directives: gti.Directives{}, Tag: "inactive:\"+\""}},
+ {"pad", >i.Field{Name: "pad", Type: "uint32", LocalType: "uint32", Doc: "", Directives: gti.Directives{}, Tag: ""}},
+ }),
+ Embeds: ordmap.Make([]ordmap.KeyVal[string, *gti.Field]{}),
+ Methods: ordmap.Make([]ordmap.KeyVal[string, *gti.Method]{}),
+})
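// A minimal sketch of TD reward integration per the field Docs above: the
// minus phase carries the gained current prediction, the plus phase adds the
// reward to the discounted next prediction. Hypothetical names throughout.
func tdIntegSketch(discount, predGain, rew, predMinus, predPlus float32) (minus, plus float32) {
    minus = predGain * predMinus
    plus = rew + discount*predGain*predPlus
    return
}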
+
+var _ = gti.AddType(&gti.Type{
+ Name: "github.com/emer/axon/axon.TDDaParams",
+ ShortName: "axon.TDDaParams",
+ IDName: "td-da-params",
+ Doc: "TDDaParams are params for dopamine (DA) signal as the temporal difference (TD)\nbetween the TDIntegLayer activations in the minus and plus phase.",
+ Directives: gti.Directives{},
+ Fields: ordmap.Make([]ordmap.KeyVal[string, *gti.Field]{
+ {"TonicGe", >i.Field{Name: "TonicGe", Type: "float32", LocalType: "float32", Doc: "tonic baseline Ge level for DA = 0 -- +/- are between 0 and 2*TonicGe -- just for spiking display of computed DA value", Directives: gti.Directives{}, Tag: ""}},
+ {"TDIntegLayIdx", >i.Field{Name: "TDIntegLayIdx", Type: "int32", LocalType: "int32", Doc: "idx of TDIntegLayer to get reward prediction from -- set during Build from BuildConfig TDIntegLayName", Directives: gti.Directives{}, Tag: "inactive:\"+\""}},
+ {"pad", >i.Field{Name: "pad", Type: "uint32", LocalType: "uint32", Doc: "", Directives: gti.Directives{}, Tag: ""}},
+ }),
+ Embeds: ordmap.Make([]ordmap.KeyVal[string, *gti.Field]{}),
+ Methods: ordmap.Make([]ordmap.KeyVal[string, *gti.Method]{}),
+})
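// A minimal sketch of the TD DA computation described in the Doc above: the
// temporal difference between the integrator's plus and minus phase values.
func tdDaSketch(integPlus, integMinus float32) float32 {
    return integPlus - integMinus // DA > 0 when outcomes beat predictions
}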
+
+var _ = gti.AddType(&gti.Type{
+ Name: "github.com/emer/axon/axon.RLPredPrjnParams",
+ ShortName: "axon.RLPredPrjnParams",
+ IDName: "rl-pred-prjn-params",
+ Doc: "RLPredPrjnParams does dopamine-modulated learning for reward prediction: Da * Send.Act\nUsed by RWPrjn and TDPredPrjn within corresponding RWPredLayer or TDPredLayer\nto generate reward predictions based on its incoming weights, using linear activation\nfunction. Has no weight bounds or limits on sign etc.",
+ Directives: gti.Directives{
+ &gti.Directive{Tool: "gosl", Directive: "start", Args: []string{"rl_prjns"}},
+ },
+ Fields: ordmap.Make([]ordmap.KeyVal[string, *gti.Field]{
+ {"OppSignLRate", >i.Field{Name: "OppSignLRate", Type: "float32", LocalType: "float32", Doc: "how much to learn on opposite DA sign coding neuron (0..1)", Directives: gti.Directives{}, Tag: ""}},
+ {"DaTol", >i.Field{Name: "DaTol", Type: "float32", LocalType: "float32", Doc: "tolerance on DA -- if below this abs value, then DA goes to zero and there is no learning -- prevents prediction from exactly learning to cancel out reward value, retaining a residual valence of signal", Directives: gti.Directives{}, Tag: ""}},
+ {"pad", >i.Field{Name: "pad", Type: "float32", LocalType: "float32", Doc: "", Directives: gti.Directives{}, Tag: ""}},
+ }),
+ Embeds: ordmap.Make([]ordmap.KeyVal[string, *gti.Field]{}),
+ Methods: ordmap.Make([]ordmap.KeyVal[string, *gti.Method]{}),
+})
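// A minimal sketch of the Da * Send.Act rule named in the Doc above, with the
// DaTol dead zone and opposite-sign learning rate from the field Docs. The
// function and its oppSign flag are hypothetical.
func rlPredDWtSketch(oppSignLRate, daTol, da, sendAct float32, oppSign bool) float32 {
    if da > -daTol && da < daTol {
        return 0 // below tolerance: no learning, retaining residual valence
    }
    dwt := da * sendAct
    if oppSign {
        dwt *= oppSignLRate // reduced learning on the opposite DA-sign neuron
    }
    return dwt
}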
+
+var _ = gti.AddType(&gti.Type{
+ Name: "github.com/emer/axon/axon.SynapseVars",
+ ShortName: "axon.SynapseVars",
+ IDName: "synapse-vars",
+ Doc: "SynapseVars are the neuron variables representing current synaptic state,\nspecifically weights.",
+ Directives: gti.Directives{
+ &gti.Directive{Tool: "gosl", Directive: "start", Args: []string{"synapse"}},
+ &gti.Directive{Tool: "enums", Directive: "enum", Args: []string{}},
+ },
+
+ Methods: ordmap.Make([]ordmap.KeyVal[string, *gti.Method]{}),
+})
+
+var _ = gti.AddType(&gti.Type{
+ Name: "github.com/emer/axon/axon.SynapseCaVars",
+ ShortName: "axon.SynapseCaVars",
+ IDName: "synapse-ca-vars",
+ Doc: "SynapseCaVars are synapse variables for calcium involved in learning,\nwhich are data parallel input specific.",
+ Directives: gti.Directives{
+ &gti.Directive{Tool: "enums", Directive: "enum", Args: []string{}},
+ },
+
+ Methods: ordmap.Make([]ordmap.KeyVal[string, *gti.Method]{}),
+})
+
+var _ = gti.AddType(&gti.Type{
+ Name: "github.com/emer/axon/axon.SynapseIdxs",
+ ShortName: "axon.SynapseIdxs",
+ IDName: "synapse-idxs",
+ Doc: "SynapseIdxs are the neuron indexes and other uint32 values (flags, etc).\nThere is only one of these per neuron -- not data parallel.",
+ Directives: gti.Directives{
+ &gti.Directive{Tool: "enums", Directive: "enum", Args: []string{}},
+ },
+
+ Methods: ordmap.Make([]ordmap.KeyVal[string, *gti.Method]{}),
+})
+
+var _ = gti.AddType(&gti.Type{
+ Name: "github.com/emer/axon/axon.SynapseVarStrides",
+ ShortName: "axon.SynapseVarStrides",
+ IDName: "synapse-var-strides",
+ Doc: "SynapseVarStrides encodes the stride offsets for synapse variable access\ninto network float32 array.",
+ Directives: gti.Directives{
+ &gti.Directive{Tool: "gosl", Directive: "end", Args: []string{"synapse"}},
+ &gti.Directive{Tool: "gosl", Directive: "hlsl", Args: []string{"synapse"}},
+ &gti.Directive{Tool: "gosl", Directive: "end", Args: []string{"synapse"}},
+ &gti.Directive{Tool: "gosl", Directive: "start", Args: []string{"synapse"}},
+ },
+ Fields: ordmap.Make([]ordmap.KeyVal[string, *gti.Field]{
+ {"Synapse", >i.Field{Name: "Synapse", Type: "uint32", LocalType: "uint32", Doc: "synapse level", Directives: gti.Directives{}, Tag: ""}},
+ {"Var", >i.Field{Name: "Var", Type: "uint32", LocalType: "uint32", Doc: "variable level", Directives: gti.Directives{}, Tag: ""}},
+ {"pad", >i.Field{Name: "pad", Type: "uint32", LocalType: "uint32", Doc: "", Directives: gti.Directives{}, Tag: ""}},
+ }),
+ Embeds: ordmap.Make([]ordmap.KeyVal[string, *gti.Field]{}),
+ Methods: ordmap.Make([]ordmap.KeyVal[string, *gti.Method]{}),
+})
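// A minimal sketch of the stride-based addressing these *Strides structs
// support: a variable's offset into the flat network array is the synapse
// index times the Synapse stride plus the variable index times the Var
// stride. The helper name is hypothetical.
func synVarIdxSketch(synapseStride, varStride, syni, vi uint32) uint32 {
    return syni*synapseStride + vi*varStride
}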
+
+var _ = gti.AddType(&gti.Type{
+ Name: "github.com/emer/axon/axon.SynapseCaStrides",
+ ShortName: "axon.SynapseCaStrides",
+ IDName: "synapse-ca-strides",
+ Doc: "SynapseCaStrides encodes the stride offsets for synapse variable access\ninto network float32 array. Data is always the inner-most variable.",
+ Directives: gti.Directives{},
+ Fields: ordmap.Make([]ordmap.KeyVal[string, *gti.Field]{
+ {"Synapse", >i.Field{Name: "Synapse", Type: "uint64", LocalType: "uint64", Doc: "synapse level", Directives: gti.Directives{}, Tag: ""}},
+ {"Var", >i.Field{Name: "Var", Type: "uint64", LocalType: "uint64", Doc: "variable level", Directives: gti.Directives{}, Tag: ""}},
+ }),
+ Embeds: ordmap.Make([]ordmap.KeyVal[string, *gti.Field]{}),
+ Methods: ordmap.Make([]ordmap.KeyVal[string, *gti.Method]{}),
+})
+
+var _ = gti.AddType(&gti.Type{
+ Name: "github.com/emer/axon/axon.SynapseIdxStrides",
+ ShortName: "axon.SynapseIdxStrides",
+ IDName: "synapse-idx-strides",
+ Doc: "SynapseIdxStrides encodes the stride offsets for synapse index access\ninto network uint32 array.",
+ Directives: gti.Directives{},
+ Fields: ordmap.Make([]ordmap.KeyVal[string, *gti.Field]{
+ {"Synapse", >i.Field{Name: "Synapse", Type: "uint32", LocalType: "uint32", Doc: "synapse level", Directives: gti.Directives{}, Tag: ""}},
+ {"Index", >i.Field{Name: "Index", Type: "uint32", LocalType: "uint32", Doc: "index value level", Directives: gti.Directives{}, Tag: ""}},
+ {"pad", >i.Field{Name: "pad", Type: "uint32", LocalType: "uint32", Doc: "", Directives: gti.Directives{}, Tag: ""}},
+ }),
+ Embeds: ordmap.Make([]ordmap.KeyVal[string, *gti.Field]{}),
+ Methods: ordmap.Make([]ordmap.KeyVal[string, *gti.Method]{}),
+})
diff --git a/axon/hebbprjn.go b/axon/hebbprjn.go
index c966121d5..f3a588fc1 100644
--- a/axon/hebbprjn.go
+++ b/axon/hebbprjn.go
@@ -8,7 +8,7 @@ package axon
todo: for GPU, must have this in base case
-import "github.com/goki/gosl/slbool"
+import "goki.dev/gosl/v2/slbool"
// HebbPrjn is a simple hebbian learning projection, using the CPCA Hebbian rule.
// Note: when used with inhibitory projections, requires Learn.Trace.SubMean = 1
diff --git a/axon/helpers.go b/axon/helpers.go
index 66d67394e..81e9a9584 100644
--- a/axon/helpers.go
+++ b/axon/helpers.go
@@ -7,9 +7,9 @@ package axon
import (
"fmt"
- "github.com/emer/emergent/ecmd"
- "github.com/emer/empi/mpi"
- "github.com/goki/gi/gi"
+ "github.com/emer/emergent/v2/ecmd"
+ "github.com/emer/empi/v2/mpi"
+ "goki.dev/gi/v2/gi"
)
////////////////////////////////////////////////////
diff --git a/axon/hip_net.go b/axon/hip_net.go
index 5d1d0e358..dfb40b37b 100644
--- a/axon/hip_net.go
+++ b/axon/hip_net.go
@@ -5,79 +5,79 @@
package axon
import (
- "github.com/emer/emergent/emer"
- "github.com/emer/emergent/etime"
- "github.com/emer/emergent/evec"
- "github.com/emer/emergent/looper"
- "github.com/emer/emergent/prjn"
- "github.com/emer/etable/norm"
+ "github.com/emer/emergent/v2/emer"
+ "github.com/emer/emergent/v2/etime"
+ "github.com/emer/emergent/v2/evec"
+ "github.com/emer/emergent/v2/looper"
+ "github.com/emer/emergent/v2/prjn"
+ "goki.dev/etable/v2/norm"
)
// HipConfig have the hippocampus size and connectivity parameters
type HipConfig struct {
// size of EC2
- EC2Size evec.Vec2i `nest:"+" desc:"size of EC2"`
+ EC2Size evec.Vec2i `nest:"+"`
// number of EC3 pools (outer dimension)
- EC3NPool evec.Vec2i `nest:"+" desc:"number of EC3 pools (outer dimension)"`
+ EC3NPool evec.Vec2i `nest:"+"`
// number of neurons in one EC3 pool
- EC3NNrn evec.Vec2i `nest:"+" desc:"number of neurons in one EC3 pool"`
+ EC3NNrn evec.Vec2i `nest:"+"`
// number of neurons in one CA1 pool
- CA1NNrn evec.Vec2i `nest:"+" desc:"number of neurons in one CA1 pool"`
+ CA1NNrn evec.Vec2i `nest:"+"`
// size of CA3
- CA3Size evec.Vec2i `nest:"+" desc:"size of CA3"`
+ CA3Size evec.Vec2i `nest:"+"`
- // [def: 2.236] size of DG / CA3
- DGRatio float32 `def:"2.236" desc:"size of DG / CA3"`
+ // size of DG / CA3
+ DGRatio float32 `def:"2.236"`
- // [def: 0.1] percent connectivity from EC3 to EC2
- EC3ToEC2PCon float32 `def:"0.1" desc:"percent connectivity from EC3 to EC2"`
+ // percent connectivity from EC3 to EC2
+ EC3ToEC2PCon float32 `def:"0.1"`
- // [def: 0.25] percent connectivity from EC2 to DG
- EC2ToDGPCon float32 `def:"0.25" desc:"percent connectivity from EC2 to DG"`
+ // percent connectivity from EC2 to DG
+ EC2ToDGPCon float32 `def:"0.25"`
- // [def: 0.25] percent connectivity from EC2 to CA3
- EC2ToCA3PCon float32 `def:"0.25" desc:"percent connectivity from EC2 to CA3"`
+ // percent connectivity from EC2 to CA3
+ EC2ToCA3PCon float32 `def:"0.25"`
- // [def: 0.25] percent connectivity from CA3 to CA1
- CA3ToCA1PCon float32 `def:"0.25" desc:"percent connectivity from CA3 to CA1"`
+ // percent connectivity from CA3 to CA1
+ CA3ToCA1PCon float32 `def:"0.25"`
- // [def: 0.02] percent connectivity into CA3 from DG
- DGToCA3PCon float32 `def:"0.02" desc:"percent connectivity into CA3 from DG"`
+ // percent connectivity into CA3 from DG
+ DGToCA3PCon float32 `def:"0.02"`
// lateral radius of connectivity in EC2
- EC2LatRadius int `desc:"lateral radius of connectivity in EC2"`
+ EC2LatRadius int
// lateral gaussian sigma in EC2 for how quickly weights fall off with distance
- EC2LatSigma float32 `desc:"lateral gaussian sigma in EC2 for how quickly weights fall off with distance"`
+ EC2LatSigma float32
- // [def: 1] proportion of full mossy fiber strength (PrjnScale.Rel) for CA3 EDL in training, applied at the start of a trial to reduce DG -> CA3 strength. 1 = fully reduce strength, .5 = 50% reduction, etc
- MossyDelta float32 `def:"1" desc:"proportion of full mossy fiber strength (PrjnScale.Rel) for CA3 EDL in training, applied at the start of a trial to reduce DG -> CA3 strength. 1 = fully reduce strength, .5 = 50% reduction, etc"`
+ // proportion of full mossy fiber strength (PrjnScale.Rel) for CA3 EDL in training, applied at the start of a trial to reduce DG -> CA3 strength. 1 = fully reduce strength, .5 = 50% reduction, etc
+ MossyDelta float32 `def:"1"`
- // [def: 0.75] proportion of full mossy fiber strength (PrjnScale.Rel) for CA3 EDL in testing, applied during 2nd-3rd quarters to reduce DG -> CA3 strength. 1 = fully reduce strength, .5 = 50% reduction, etc
- MossyDeltaTest float32 `def:"0.75" desc:"proportion of full mossy fiber strength (PrjnScale.Rel) for CA3 EDL in testing, applied during 2nd-3rd quarters to reduce DG -> CA3 strength. 1 = fully reduce strength, .5 = 50% reduction, etc"`
+ // proportion of full mossy fiber strength (PrjnScale.Rel) for CA3 EDL in testing, applied during 2nd-3rd quarters to reduce DG -> CA3 strength. 1 = fully reduce strength, .5 = 50% reduction, etc
+ MossyDeltaTest float32 `def:"0.75"`
- // [def: 0.9] low theta modulation value for temporal difference EDL -- sets PrjnScale.Rel on CA1 <-> EC prjns consistent with Theta phase model
- ThetaLow float32 `def:"0.9" desc:"low theta modulation value for temporal difference EDL -- sets PrjnScale.Rel on CA1 <-> EC prjns consistent with Theta phase model"`
+ // low theta modulation value for temporal difference EDL -- sets PrjnScale.Rel on CA1 <-> EC prjns consistent with Theta phase model
+ ThetaLow float32 `def:"0.9"`
- // [def: 1] high theta modulation value for temporal difference EDL -- sets PrjnScale.Rel on CA1 <-> EC prjns consistent with Theta phase model
- ThetaHigh float32 `def:"1" desc:"high theta modulation value for temporal difference EDL -- sets PrjnScale.Rel on CA1 <-> EC prjns consistent with Theta phase model"`
+ // high theta modulation value for temporal difference EDL -- sets PrjnScale.Rel on CA1 <-> EC prjns consistent with Theta phase model
+ ThetaHigh float32 `def:"1"`
- // [def: true] flag for clamping the EC5 from EC5ClampSrc
- EC5Clamp bool `def:"true" desc:"flag for clamping the EC5 from EC5ClampSrc"`
+ // flag for clamping the EC5 from EC5ClampSrc
+ EC5Clamp bool `def:"true"`
- // [def: EC3] source layer for EC5 clamping activations in the plus phase -- biologically it is EC3 but can use an Input layer if available
- EC5ClampSrc string `def:"EC3" desc:"source layer for EC5 clamping activations in the plus phase -- biologically it is EC3 but can use an Input layer if available"`
+ // source layer for EC5 clamping activations in the plus phase -- biologically it is EC3 but can use an Input layer if available
+ EC5ClampSrc string `def:"EC3"`
- // [def: true] clamp the EC5 from EC5ClampSrc during testing as well as training -- this will overwrite any target values that might be used in stats (e.g., in the basic hip example), so it must be turned off there
- EC5ClampTest bool `def:"true" desc:"clamp the EC5 from EC5ClampSrc during testing as well as training -- this will overwrite any target values that might be used in stats (e.g., in the basic hip example), so it must be turned off there"`
+ // clamp the EC5 from EC5ClampSrc during testing as well as training -- this will overwrite any target values that might be used in stats (e.g., in the basic hip example), so it must be turned off there
+ EC5ClampTest bool `def:"true"`
- // [def: 0.1] threshold for binarizing EC5 clamp values -- any value above this is clamped to 1, else 0 -- helps produce a cleaner learning signal. Set to 0 to not perform any binarization.
- EC5ClampThr float32 `def:"0.1" desc:"threshold for binarizing EC5 clamp values -- any value above this is clamped to 1, else 0 -- helps produce a cleaner learning signal. Set to 0 to not perform any binarization."`
+ // threshold for binarizing EC5 clamp values -- any value above this is clamped to 1, else 0 -- helps produce a cleaner learning signal. Set to 0 to not perform any binarization.
+ EC5ClampThr float32 `def:"0.1"`
}
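// A minimal sketch of how the theta and mossy parameters above are applied,
// per their Docs: CA1 <-> EC PrjnScale.Rel toggles between ThetaLow and
// ThetaHigh across theta phases, and DG -> CA3 strength is reduced by
// MossyDelta at trial start. Names here are hypothetical.
func hipModSketch(thetaLow, thetaHigh, mossyDelta float32, highPhase bool) (ca1ECRel, dgCA3Rel float32) {
    if highPhase {
        ca1ECRel = thetaHigh // def 1
    } else {
        ca1ECRel = thetaLow // def 0.9
    }
    dgCA3Rel = 1 - mossyDelta // 1 = fully reduce, 0.5 = 50% reduction
    return
}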
func (hip *HipConfig) Defaults() {
diff --git a/axon/hip_prjns.go b/axon/hip_prjns.go
index 518be8a34..f5bf39bcf 100644
--- a/axon/hip_prjns.go
+++ b/axon/hip_prjns.go
@@ -9,20 +9,20 @@ package axon
// HipPrjnParams define behavior of hippocampus prjns, which have special learning rules
type HipPrjnParams struct {
- // [def: 0] Hebbian learning proportion
- Hebb float32 `def:"0" desc:"Hebbian learning proportion"`
+ // Hebbian learning proportion
+ Hebb float32 `def:"0"`
- // [def: 1] EDL proportion
- Err float32 `def:"1" desc:"EDL proportion"`
+ // EDL proportion
+ Err float32 `def:"1"`
- // [def: 0.4:0.8] [min: 0] [max: 1] proportion of correction to apply to sending average activation for hebbian learning component (0=none, 1=all, .5=half, etc)
- SAvgCor float32 `def:"0.4:0.8" min:"0" max:"1" desc:"proportion of correction to apply to sending average activation for hebbian learning component (0=none, 1=all, .5=half, etc)"`
+ // proportion of correction to apply to sending average activation for hebbian learning component (0=none, 1=all, .5=half, etc)
+ SAvgCor float32 `def:"0.4:0.8" min:"0" max:"1"`
- // [def: 0.01] [min: 0] threshold of sending average activation below which learning does not occur (prevents learning when there is no input)
- SAvgThr float32 `def:"0.01" min:"0" desc:"threshold of sending average activation below which learning does not occur (prevents learning when there is no input)"`
+ // threshold of sending average activation below which learning does not occur (prevents learning when there is no input)
+ SAvgThr float32 `def:"0.01" min:"0"`
- // [def: 0.1] [min: 0] sending layer Nominal (need to manually set it to be the same as the sending layer)
- SNominal float32 `def:"0.1" min:"0" desc:"sending layer Nominal (need to manually set it to be the same as the sending layer)"`
+ // sending layer Nominal (need to manually set it to be the same as the sending layer)
+ SNominal float32 `def:"0.1" min:"0"`
pad, pad1, pad2 float32
}
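// A minimal sketch of the Hebb / Err mix these params describe: the error
// (EDL) and hebbian components are blended by their proportions, and hebbian
// learning is suppressed when the sending average is below SAvgThr. The
// helper and its inputs are hypothetical.
func hipDWtSketch(hebb, err, sAvgThr, sAvg, hebbDwt, errDwt float32) float32 {
    if sAvg < sAvgThr {
        hebbDwt = 0 // no hebbian learning without sufficient sending activity
    }
    return err*errDwt + hebb*hebbDwt
}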
diff --git a/axon/inhib.go b/axon/inhib.go
index 9e5707f80..5697671aa 100644
--- a/axon/inhib.go
+++ b/axon/inhib.go
@@ -6,8 +6,8 @@ package axon
import (
"github.com/emer/axon/fsfffb"
- "github.com/goki/gosl/slbool"
- "github.com/goki/mat32"
+ "goki.dev/gosl/v2/slbool"
+ "goki.dev/mat32/v2"
)
//gosl: hlsl inhib
@@ -24,23 +24,23 @@ import (
// average activity within a target range.
type ActAvgParams struct {
- // [min: 0] [step: 0.01] [typically 0.01 - 0.2] nominal estimated average activity level in the layer, which is used in computing the scaling factor on sending projections from this layer. In general it should roughly match the layer ActAvg.ActMAvg value, which can be logged using the axon.LogAddDiagnosticItems function. If layers receiving from this layer are not getting enough Ge excitation, then this Nominal level can be lowered to increase projection strength (fewer active neurons means each one contributes more, so scaling factor goes as the inverse of activity level), or vice-versa if Ge is too high. It is also the basis for the target activity level used for the AdaptGi option -- see the Offset which is added to this value.
- Nominal float32 `min:"0" step:"0.01" desc:"[typically 0.01 - 0.2] nominal estimated average activity level in the layer, which is used in computing the scaling factor on sending projections from this layer. In general it should roughly match the layer ActAvg.ActMAvg value, which can be logged using the axon.LogAddDiagnosticItems function. If layers receiving from this layer are not getting enough Ge excitation, then this Nominal level can be lowered to increase projection strength (fewer active neurons means each one contributes more, so scaling factor goes as the inverse of activity level), or vice-versa if Ge is too high. It is also the basis for the target activity level used for the AdaptGi option -- see the Offset which is added to this value."`
+ // nominal estimated average activity level in the layer, which is used in computing the scaling factor on sending projections from this layer. In general it should roughly match the layer ActAvg.ActMAvg value, which can be logged using the axon.LogAddDiagnosticItems function. If layers receiving from this layer are not getting enough Ge excitation, then this Nominal level can be lowered to increase projection strength (fewer active neurons means each one contributes more, so scaling factor goes as the inverse of activity level), or vice-versa if Ge is too high. It is also the basis for the target activity level used for the AdaptGi option -- see the Offset which is added to this value.
+ Nominal float32 `min:"0" step:"0.01"`
// enable adapting of layer inhibition Gi multiplier factor (stored in layer GiMult value) to maintain a Target layer level of ActAvg.ActMAvg. This generally works well and improves the long-term stability of the models. It is not enabled by default because it depends on having established a reasonable Nominal + Offset target activity level.
- AdaptGi slbool.Bool `desc:"enable adapting of layer inhibition Gi multiplier factor (stored in layer GiMult value) to maintain a Target layer level of ActAvg.ActMAvg. This generally works well and improves the long-term stability of the models. It is not enabled by default because it depends on having established a reasonable Nominal + Offset target activity level."`
+ AdaptGi slbool.Bool
- // [def: 0] [viewif: AdaptGi] [min: 0] [step: 0.01] offset to add to Nominal for the target average activity that drives adaptation of Gi for this layer. Typically the Nominal level is good, but sometimes Nominal must be adjusted up or down to achieve desired Ge scaling, so this Offset can compensate accordingly.
- Offset float32 `def:"0" min:"0" step:"0.01" viewif:"AdaptGi" desc:"offset to add to Nominal for the target average activity that drives adaptation of Gi for this layer. Typically the Nominal level is good, but sometimes Nominal must be adjusted up or down to achieve desired Ge scaling, so this Offset can compensate accordingly."`
+ // offset to add to Nominal for the target average activity that drives adaptation of Gi for this layer. Typically the Nominal level is good, but sometimes Nominal must be adjusted up or down to achieve desired Ge scaling, so this Offset can compensate accordingly.
+ Offset float32 `def:"0" min:"0" step:"0.01" viewif:"AdaptGi"`
- // [def: 0] [viewif: AdaptGi] tolerance for higher than Target target average activation as a proportion of that target value (0 = exactly the target, 0.2 = 20% higher than target) -- only once activations move outside this tolerance are inhibitory values adapted.
- HiTol float32 `def:"0" viewif:"AdaptGi" desc:"tolerance for higher than Target target average activation as a proportion of that target value (0 = exactly the target, 0.2 = 20% higher than target) -- only once activations move outside this tolerance are inhibitory values adapted."`
+ // tolerance for higher than Target target average activation as a proportion of that target value (0 = exactly the target, 0.2 = 20% higher than target) -- only once activations move outside this tolerance are inhibitory values adapted.
+ HiTol float32 `def:"0" viewif:"AdaptGi"`
- // [def: 0.8] [viewif: AdaptGi] tolerance for lower than Target target average activation as a proportion of that target value (0 = exactly the target, 0.5 = 50% lower than target) -- only once activations move outside this tolerance are inhibitory values adapted.
- LoTol float32 `def:"0.8" viewif:"AdaptGi" desc:"tolerance for lower than Target target average activation as a proportion of that target value (0 = exactly the target, 0.5 = 50% lower than target) -- only once activations move outside this tolerance are inhibitory values adapted."`
+ // tolerance for lower than Target target average activation as a proportion of that target value (0 = exactly the target, 0.5 = 50% lower than target) -- only once activations move outside this tolerance are inhibitory values adapted.
+ LoTol float32 `def:"0.8" viewif:"AdaptGi"`
- // [def: 0.1] [viewif: AdaptGi] rate of Gi adaptation as function of AdaptRate * (Target - ActMAvg) / Target -- occurs at spaced intervals determined by Network.SlowInterval value -- slower values such as 0.01 may be needed for large networks and sparse layers.
- AdaptRate float32 `def:"0.1" viewif:"AdaptGi" desc:"rate of Gi adaptation as function of AdaptRate * (Target - ActMAvg) / Target -- occurs at spaced intervals determined by Network.SlowInterval value -- slower values such as 0.01 may be needed for large networks and sparse layers."`
+ // rate of Gi adaptation as function of AdaptRate * (Target - ActMAvg) / Target -- occurs at spaced intervals determined by Network.SlowInterval value -- slower values such as 0.01 may be needed for large networks and sparse layers.
+ AdaptRate float32 `def:"0.1" viewif:"AdaptGi"`
pad, pad1 float32
}
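// A minimal sketch of the Gi adaptation the field Docs above describe (the
// actual logic lives in Adapt below): once the running average leaves the
// LoTol/HiTol band around Nominal+Offset, GiMult moves by AdaptRate times the
// normalized deviation. A hypothetical stand-in, not the patched code.
func adaptGiSketch(nominal, offset, hiTol, loTol, adaptRate, actMAvg float32, giMult *float32) bool {
    trg := nominal + offset
    del := (actMAvg - trg) / trg
    if del < -loTol || del > hiTol { // outside the tolerance band
        *giMult += adaptRate * del // raise Gi when activity is high, lower when low
        return true
    }
    return false
}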
@@ -85,31 +85,31 @@ func (aa *ActAvgParams) Adapt(gimult *float32, act float32) bool {
type TopoInhibParams struct {
// use topographic inhibition
- On slbool.Bool `desc:"use topographic inhibition"`
+ On slbool.Bool
- // [viewif: On] half-width of topographic inhibition within layer
- Width int32 `viewif:"On" desc:"half-width of topographic inhibition within layer"`
+ // half-width of topographic inhibition within layer
+ Width int32 `viewif:"On"`
- // [viewif: On] normalized gaussian sigma as proportion of Width, for gaussian weighting
- Sigma float32 `viewif:"On" desc:"normalized gaussian sigma as proportion of Width, for gaussian weighting"`
+ // normalized gaussian sigma as proportion of Width, for gaussian weighting
+ Sigma float32 `viewif:"On"`
- // [viewif: On] half-width of topographic inhibition within layer
- Wrap slbool.Bool `viewif:"On" desc:"half-width of topographic inhibition within layer"`
+ // wrap-around for topographic inhibition within layer
+ Wrap slbool.Bool `viewif:"On"`
- // [viewif: On] overall inhibition multiplier for topographic inhibition (generally <= 1)
- Gi float32 `viewif:"On" desc:"overall inhibition multiplier for topographic inhibition (generally <= 1)"`
+ // overall inhibition multiplier for topographic inhibition (generally <= 1)
+ Gi float32 `viewif:"On"`
- // [viewif: On] overall inhibitory contribution from feedforward inhibition -- multiplies average Ge from pools or Ge from neurons
- FF float32 `viewif:"On" desc:"overall inhibitory contribution from feedforward inhibition -- multiplies average Ge from pools or Ge from neurons"`
+ // overall inhibitory contribution from feedforward inhibition -- multiplies average Ge from pools or Ge from neurons
+ FF float32 `viewif:"On"`
- // [viewif: On] overall inhibitory contribution from feedback inhibition -- multiplies average activation from pools or Act from neurons
- FB float32 `viewif:"On" desc:"overall inhibitory contribution from feedback inhibition -- multiplies average activation from pools or Act from neurons"`
+ // overall inhibitory contribution from feedback inhibition -- multiplies average activation from pools or Act from neurons
+ FB float32 `viewif:"On"`
- // [viewif: On] feedforward zero point for Ge per neuron (summed Ge is compared to N * FF0) -- below this level, no FF inhibition is computed, above this it is FF * (Sum Ge - N * FF0)
- FF0 float32 `viewif:"On" desc:"feedforward zero point for Ge per neuron (summed Ge is compared to N * FF0) -- below this level, no FF inhibition is computed, above this it is FF * (Sum Ge - N * FF0)"`
+ // feedforward zero point for Ge per neuron (summed Ge is compared to N * FF0) -- below this level, no FF inhibition is computed, above this it is FF * (Sum Ge - N * FF0)
+ FF0 float32 `viewif:"On"`
// weight value at width -- to assess the value of Sigma
- WidthWt float32 `inactive:"+" desc:"weight value at width -- to assess the value of Sigma"`
+ WidthWt float32 `inactive:"+"`
pad, pad1, pad2 float32
}
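// A minimal sketch of the FF0 zero point described above (compare GiFmGeAct
// below): feedforward inhibition only accrues on Ge above FF0, and combines
// with feedback inhibition from activation, scaled by Gi. Hypothetical helper.
func topoGiSketch(gi, ff, fb, ff0, ge, act float32) float32 {
    if ge < ff0 {
        ge = 0 // below the zero point: no feedforward inhibition
    } else {
        ge -= ff0
    }
    return gi * (ff*ge + fb*act)
}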
@@ -146,14 +146,14 @@ func (ti *TopoInhibParams) GiFmGeAct(ge, act, ff0 float32) float32 {
// which is used for Ge rescaling and potentially for adapting inhibition over time
type InhibParams struct {
- // [view: inline] layer-level and pool-level average activation initial values and updating / adaptation thereof -- initial values help determine initial scaling factors.
- ActAvg ActAvgParams `view:"inline" desc:"layer-level and pool-level average activation initial values and updating / adaptation thereof -- initial values help determine initial scaling factors."`
+ // layer-level and pool-level average activation initial values and updating / adaptation thereof -- initial values help determine initial scaling factors.
+ ActAvg ActAvgParams `view:"inline"`
- // [view: inline] inhibition across the entire layer -- inputs generally use Gi = 0.8 or 0.9, 1.3 or higher for sparse layers. If the layer has sub-pools (4D shape) then this is effectively between-pool inhibition.
- Layer fsfffb.GiParams `view:"inline" desc:"inhibition across the entire layer -- inputs generally use Gi = 0.8 or 0.9, 1.3 or higher for sparse layers. If the layer has sub-pools (4D shape) then this is effectively between-pool inhibition."`
+ // inhibition across the entire layer -- inputs generally use Gi = 0.8 or 0.9, 1.3 or higher for sparse layers. If the layer has sub-pools (4D shape) then this is effectively between-pool inhibition.
+ Layer fsfffb.GiParams `view:"inline"`
- // [view: inline] inhibition within sub-pools of units, for layers with 4D shape -- almost always need this if the layer has pools.
- Pool fsfffb.GiParams `view:"inline" desc:"inhibition within sub-pools of units, for layers with 4D shape -- almost always need this if the layer has pools."`
+ // inhibition within sub-pools of units, for layers with 4D shape -- almost always need this if the layer has pools.
+ Pool fsfffb.GiParams `view:"inline"`
}
func (ip *InhibParams) Update() {
diff --git a/axon/layer.go b/axon/layer.go
index 0357b2fb8..9f05a3dfa 100644
--- a/axon/layer.go
+++ b/axon/layer.go
@@ -10,11 +10,8 @@ import (
"math/rand"
"strings"
- "github.com/emer/emergent/erand"
- "github.com/emer/etable/etensor"
- "github.com/goki/ki/ints"
- "github.com/goki/ki/ki"
- "github.com/goki/ki/kit"
+ "github.com/emer/emergent/v2/erand"
+ "goki.dev/etable/v2/etensor"
)
// index naming:
@@ -27,11 +24,9 @@ type Layer struct {
LayerBase
// all layer-level parameters -- these must remain constant once configured
- Params *LayerParams `desc:"all layer-level parameters -- these must remain constant once configured"`
+ Params *LayerParams
}
-var KiT_Layer = kit.Types.AddType(&Layer{}, LayerProps)
-
// Object returns the object with parameters to be set by emer.Params
func (ly *Layer) Object() any {
return ly.Params
@@ -507,8 +502,8 @@ func (ly *Layer) ApplyExtFlags() (clearMask, setMask NeuronFlags, toTarg bool) {
// ApplyExt2D applies 2D tensor external input
func (ly *Layer) ApplyExt2D(ctx *Context, di uint32, ext etensor.Tensor) {
clearMask, setMask, toTarg := ly.ApplyExtFlags()
- ymx := ints.MinInt(ext.Dim(0), ly.Shp.Dim(0))
- xmx := ints.MinInt(ext.Dim(1), ly.Shp.Dim(1))
+ ymx := min(ext.Dim(0), ly.Shp.Dim(0))
+ xmx := min(ext.Dim(1), ly.Shp.Dim(1))
for y := 0; y < ymx; y++ {
for x := 0; x < xmx; x++ {
idx := []int{y, x}
@@ -524,8 +519,8 @@ func (ly *Layer) ApplyExt2Dto4D(ctx *Context, di uint32, ext etensor.Tensor) {
clearMask, setMask, toTarg := ly.ApplyExtFlags()
lNy, lNx, _, _ := etensor.Prjn2DShape(&ly.Shp, false)
- ymx := ints.MinInt(ext.Dim(0), lNy)
- xmx := ints.MinInt(ext.Dim(1), lNx)
+ ymx := min(ext.Dim(0), lNy)
+ xmx := min(ext.Dim(1), lNx)
for y := 0; y < ymx; y++ {
for x := 0; x < xmx; x++ {
idx := []int{y, x}
@@ -539,10 +534,10 @@ func (ly *Layer) ApplyExt2Dto4D(ctx *Context, di uint32, ext etensor.Tensor) {
// ApplyExt4D applies 4D tensor external input
func (ly *Layer) ApplyExt4D(ctx *Context, di uint32, ext etensor.Tensor) {
clearMask, setMask, toTarg := ly.ApplyExtFlags()
- ypmx := ints.MinInt(ext.Dim(0), ly.Shp.Dim(0))
- xpmx := ints.MinInt(ext.Dim(1), ly.Shp.Dim(1))
- ynmx := ints.MinInt(ext.Dim(2), ly.Shp.Dim(2))
- xnmx := ints.MinInt(ext.Dim(3), ly.Shp.Dim(3))
+ ypmx := min(ext.Dim(0), ly.Shp.Dim(0))
+ xpmx := min(ext.Dim(1), ly.Shp.Dim(1))
+ ynmx := min(ext.Dim(2), ly.Shp.Dim(2))
+ xnmx := min(ext.Dim(3), ly.Shp.Dim(3))
for yp := 0; yp < ypmx; yp++ {
for xp := 0; xp < xpmx; xp++ {
for yn := 0; yn < ynmx; yn++ {
@@ -562,7 +557,7 @@ func (ly *Layer) ApplyExt4D(ctx *Context, di uint32, ext etensor.Tensor) {
// otherwise it goes in Ext
func (ly *Layer) ApplyExt1DTsr(ctx *Context, di uint32, ext etensor.Tensor) {
clearMask, setMask, toTarg := ly.ApplyExtFlags()
- mx := uint32(ints.MinInt(ext.Len(), int(ly.NNeurons)))
+ mx := uint32(min(ext.Len(), int(ly.NNeurons)))
for lni := uint32(0); lni < mx; lni++ {
val := float32(ext.FloatVal1D(int(lni)))
ly.ApplyExtVal(ctx, lni, di, val, clearMask, setMask, toTarg)
@@ -574,7 +569,7 @@ func (ly *Layer) ApplyExt1DTsr(ctx *Context, di uint32, ext etensor.Tensor) {
// otherwise it goes in Ext
func (ly *Layer) ApplyExt1D(ctx *Context, di uint32, ext []float64) {
clearMask, setMask, toTarg := ly.ApplyExtFlags()
- mx := uint32(ints.MinInt(len(ext), int(ly.NNeurons)))
+ mx := uint32(min(len(ext), int(ly.NNeurons)))
for lni := uint32(0); lni < mx; lni++ {
val := float32(ext[lni])
ly.ApplyExtVal(ctx, lni, di, val, clearMask, setMask, toTarg)
@@ -586,7 +581,7 @@ func (ly *Layer) ApplyExt1D(ctx *Context, di uint32, ext []float64) {
// otherwise it goes in Ext
func (ly *Layer) ApplyExt1D32(ctx *Context, di uint32, ext []float32) {
clearMask, setMask, toTarg := ly.ApplyExtFlags()
- mx := uint32(ints.MinInt(len(ext), int(ly.NNeurons)))
+ mx := uint32(min(len(ext), int(ly.NNeurons)))
for lni := uint32(0); lni < mx; lni++ {
val := ext[lni]
ly.ApplyExtVal(ctx, lni, di, val, clearMask, setMask, toTarg)
@@ -895,6 +890,7 @@ func (ly *Layer) LesionNeurons(prop float32) int {
//////////////////////////////////////////////////////////////////////////////////////
// Layer props for gui
+/*
var LayerProps = ki.Props{
"EnumType:Typ": KiT_LayerTypes, // uses our LayerTypes for GUI
"ToolBar": ki.PropSlice{
@@ -926,3 +922,4 @@ var LayerProps = ki.Props{
}},
},
}
+*/
diff --git a/axon/layer_compute.go b/axon/layer_compute.go
index bd0be08ab..a9af52336 100644
--- a/axon/layer_compute.go
+++ b/axon/layer_compute.go
@@ -8,8 +8,8 @@ import (
"fmt"
"log"
- "github.com/emer/etable/minmax"
- "github.com/goki/mat32"
+ "goki.dev/etable/v2/minmax"
+ "goki.dev/mat32/v2"
)
// index naming:
diff --git a/axon/layer_test.go b/axon/layer_test.go
index 526df87d9..22608730e 100644
--- a/axon/layer_test.go
+++ b/axon/layer_test.go
@@ -5,10 +5,10 @@ import (
"os"
"testing"
- "github.com/emer/emergent/prjn"
- "github.com/emer/etable/etensor"
+ "github.com/emer/emergent/v2/prjn"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
+ "goki.dev/etable/v2/etensor"
)
func TestLayer(t *testing.T) {
diff --git a/axon/layerbase.go b/axon/layerbase.go
index e6e228771..965c4b859 100644
--- a/axon/layerbase.go
+++ b/axon/layerbase.go
@@ -12,14 +12,14 @@ import (
"math"
"strconv"
- "github.com/emer/emergent/emer"
- "github.com/emer/emergent/params"
- "github.com/emer/emergent/relpos"
- "github.com/emer/emergent/weights"
- "github.com/emer/etable/etensor"
- "github.com/goki/gi/giv"
- "github.com/goki/ki/indent"
- "github.com/goki/mat32"
+ "github.com/emer/emergent/v2/emer"
+ "github.com/emer/emergent/v2/params"
+ "github.com/emer/emergent/v2/relpos"
+ "github.com/emer/emergent/v2/weights"
+ "goki.dev/etable/v2/etensor"
+ "goki.dev/gi/v2/giv"
+ "goki.dev/glop/indent"
+ "goki.dev/mat32/v2"
)
// LayerBase manages the structural elements of the layer, which are common
@@ -30,77 +30,77 @@ import (
// accessed via the AxonLay field.
type LayerBase struct {
- // [view: -] we need a pointer to ourselves as an AxonLayer (which subsumes emer.Layer), which can always be used to extract the true underlying type of object when layer is embedded in other structs -- function receivers do not have this ability so this is necessary.
- AxonLay AxonLayer `copy:"-" json:"-" xml:"-" view:"-" desc:"we need a pointer to ourselves as an AxonLayer (which subsumes emer.Layer), which can always be used to extract the true underlying type of object when layer is embedded in other structs -- function receivers do not have this ability so this is necessary."`
+ // we need a pointer to ourselves as an AxonLayer (which subsumes emer.Layer), which can always be used to extract the true underlying type of object when layer is embedded in other structs -- function receivers do not have this ability so this is necessary.
+ AxonLay AxonLayer `copy:"-" json:"-" xml:"-" view:"-"`
- // [view: -] our parent network, in case we need to use it to find other layers etc -- set when added by network
- Network *Network `copy:"-" json:"-" xml:"-" view:"-" desc:"our parent network, in case we need to use it to find other layers etc -- set when added by network"`
+ // our parent network, in case we need to use it to find other layers etc -- set when added by network
+ Network *Network `copy:"-" json:"-" xml:"-" view:"-"`
// Name of the layer -- this must be unique within the network, which has a map for quick lookup and layers are typically accessed directly by name
- Nm string `desc:"Name of the layer -- this must be unique within the network, which has a map for quick lookup and layers are typically accessed directly by name"`
+ Nm string
// Class is for applying parameter styles, can be space separated multiple tags
- Cls string `desc:"Class is for applying parameter styles, can be space separated multple tags"`
+ Cls string
// inactivate this layer -- allows for easy experimentation
- Off bool `desc:"inactivate this layer -- allows for easy experimentation"`
+ Off bool
// shape of the layer -- can be 2D for basic layers and 4D for layers with sub-groups (hypercolumns) -- order is outer-to-inner (row major) so Y then X for 2D and for 4D: Y-X unit pools then Y-X neurons within pools
- Shp etensor.Shape `desc:"shape of the layer -- can be 2D for basic layers and 4D for layers with sub-groups (hypercolumns) -- order is outer-to-inner (row major) so Y then X for 2D and for 4D: Y-X unit pools then Y-X neurons within pools"`
+ Shp etensor.Shape
// type of layer -- Hidden, Input, Target, Compare, or extended type in specialized algorithms -- matches against .Class parameter styles (e.g., .Hidden etc)
- Typ LayerTypes `desc:"type of layer -- Hidden, Input, Target, Compare, or extended type in specialized algorithms -- matches against .Class parameter styles (e.g., .Hidden etc)"`
+ Typ LayerTypes
- // [view: inline] [tableview: -] Spatial relationship to other layer, determines positioning
- Rel relpos.Rel `tableview:"-" view:"inline" desc:"Spatial relationship to other layer, determines positioning"`
+ // Spatial relationship to other layer, determines positioning
+ Rel relpos.Rel `tableview:"-" view:"inline"`
- // [tableview: -] position of lower-left-hand corner of layer in 3D space, computed from Rel. Layers are in X-Y width - height planes, stacked vertically in Z axis.
- Ps mat32.Vec3 `tableview:"-" desc:"position of lower-left-hand corner of layer in 3D space, computed from Rel. Layers are in X-Y width - height planes, stacked vertically in Z axis."`
+ // position of lower-left-hand corner of layer in 3D space, computed from Rel. Layers are in X-Y width - height planes, stacked vertically in Z axis.
+ Ps mat32.Vec3 `tableview:"-"`
- // [view: -] a 0..n-1 index of the position of the layer within list of layers in the network. For Axon networks, it only has significance in determining who gets which weights for enforcing initial weight symmetry -- higher layers get weights from lower layers.
- Idx int `view:"-" inactive:"-" desc:"a 0..n-1 index of the position of the layer within list of layers in the network. For Axon networks, it only has significance in determining who gets which weights for enforcing initial weight symmetry -- higher layers get weights from lower layers."`
+ // a 0..n-1 index of the position of the layer within list of layers in the network. For Axon networks, it only has significance in determining who gets which weights for enforcing initial weight symmetry -- higher layers get weights from lower layers.
+ Idx int `view:"-" inactive:"-"`
- // [view: -] number of neurons in the layer
- NNeurons uint32 `view:"-" desc:"number of neurons in the layer"`
+ // number of neurons in the layer
+ NNeurons uint32 `view:"-"`
- // [view: -] starting index of neurons for this layer within the global Network list
- NeurStIdx uint32 `view:"-" inactive:"-" desc:"starting index of neurons for this layer within the global Network list"`
+ // starting index of neurons for this layer within the global Network list
+ NeurStIdx uint32 `view:"-" inactive:"-"`
- // [view: -] number of pools based on layer shape -- at least 1 for layer pool + 4D subpools
- NPools uint32 `view:"-" desc:"number of pools based on layer shape -- at least 1 for layer pool + 4D subpools"`
+ // number of pools based on layer shape -- at least 1 for layer pool + 4D subpools
+ NPools uint32 `view:"-"`
- // [view: -] maximum amount of input data that can be processed in parallel in one pass of the network. Neuron, Pool, Vals storage is allocated to hold this amount.
- MaxData uint32 `view:"-" desc:"maximum amount of input data that can be processed in parallel in one pass of the network. Neuron, Pool, Vals storage is allocated to hold this amount."`
+ // maximum amount of input data that can be processed in parallel in one pass of the network. Neuron, Pool, Vals storage is allocated to hold this amount.
+ MaxData uint32 `view:"-"`
- // [view: -] indexes of representative units in the layer, for computationally expensive stats or displays -- also set RepShp
- RepIxs []int `view:"-" desc:"indexes of representative units in the layer, for computationally expensive stats or displays -- also set RepShp"`
+ // indexes of representative units in the layer, for computationally expensive stats or displays -- also set RepShp
+ RepIxs []int `view:"-"`
- // [view: -] shape of representative units in the layer -- if RepIxs is empty or .Shp is nil, use overall layer shape
- RepShp etensor.Shape `view:"-" desc:"shape of representative units in the layer -- if RepIxs is empty or .Shp is nil, use overall layer shape"`
+ // shape of representative units in the layer -- if RepIxs is empty or .Shp is nil, use overall layer shape
+ RepShp etensor.Shape `view:"-"`
// list of receiving projections into this layer from other layers
- RcvPrjns AxonPrjns `desc:"list of receiving projections into this layer from other layers"`
+ RcvPrjns AxonPrjns
// list of sending projections from this layer to other layers
- SndPrjns AxonPrjns `desc:"list of sending projections from this layer to other layers"`
+ SndPrjns AxonPrjns
// layer-level state values that are updated during computation -- one for each data parallel -- is a sub-slice of network full set
- Vals []LayerVals `desc:"layer-level state values that are updated during computation -- one for each data parallel -- is a sub-slice of network full set"`
+ Vals []LayerVals
// computes FS-FFFB inhibition and other pooled, aggregate state variables -- has at least 1 for entire layer (lpl = layer pool), and one for each sub-pool if shape supports that (4D) * 1 per data parallel (inner loop). This is a sub-slice from overall Network Pools slice. You must iterate over index and use pointer to modify values.
- Pools []Pool `desc:"computes FS-FFFB inhibition and other pooled, aggregate state variables -- has at least 1 for entire layer (lpl = layer pool), and one for each sub-pool if shape supports that (4D) * 1 per data parallel (inner loop). This is a sub-slice from overall Network Pools slice. You must iterate over index and use pointer to modify values."`
+ Pools []Pool
- // [view: -] [Neurons][Data] external input values for this layer, allocated from network global Exts slice
- Exts []float32 `view:"-" desc:"[Neurons][Data] external input values for this layer, allocated from network global Exts slice"`
+ // external input values for this layer, allocated from network global Exts slice
+ Exts []float32 `view:"-"`
- // [tableview: -] configuration data set when the network is configured, that is used during the network Build() process via PostBuild method, after all the structure of the network has been fully constructed. In particular, the Params is nil until Build, so setting anything specific in there (e.g., an index to another layer) must be done as a second pass. Note that Params are all applied after Build and can set user-modifiable params, so this is for more special algorithm structural parameters set during ConfigNet() methods.,
- BuildConfig map[string]string `tableview:"-" desc:"configuration data set when the network is configured, that is used during the network Build() process via PostBuild method, after all the structure of the network has been fully constructed. In particular, the Params is nil until Build, so setting anything specific in there (e.g., an index to another layer) must be done as a second pass. Note that Params are all applied after Build and can set user-modifiable params, so this is for more special algorithm structural parameters set during ConfigNet() methods.,"`
+ // configuration data set when the network is configured, that is used during the network Build() process via PostBuild method, after all the structure of the network has been fully constructed. In particular, the Params is nil until Build, so setting anything specific in there (e.g., an index to another layer) must be done as a second pass. Note that Params are all applied after Build and can set user-modifiable params, so this is for more special algorithm structural parameters set during ConfigNet() methods.
+ BuildConfig map[string]string `tableview:"-"`
- // [tableview: -] default parameters that are applied prior to user-set parameters -- these are useful for specific layer functionality in specialized brain areas (e.g., PVLV, BG etc) not associated with a layer type, which otherwise is used to hard-code initial default parameters -- typically just set to a literal map.
- DefParams params.Params `tableview:"-" desc:"default parameters that are applied prior to user-set parameters -- these are useful for specific layer functionality in specialized brain areas (e.g., PVLV, BG etc) not associated with a layer type, which otherwise is used to hard-code initial default parameters -- typically just set to a literal map."`
+ // default parameters that are applied prior to user-set parameters -- these are useful for specific layer functionality in specialized brain areas (e.g., PVLV, BG etc) not associated with a layer type, which otherwise is used to hard-code initial default parameters -- typically just set to a literal map.
+ DefParams params.Params `tableview:"-"`
- // [tableview: -] provides a history of parameters applied to the layer
- ParamsHistory params.HistoryImpl `tableview:"-" desc:"provides a history of parameters applied to the layer"`
+ // provides a history of parameters applied to the layer
+ ParamsHistory params.HistoryImpl `tableview:"-"`
}
// emer.Layer interface methods
@@ -247,12 +247,12 @@ func (ly *LayerBase) Size() mat32.Vec2 {
var sz mat32.Vec2
switch {
case ly.Is2D():
- sz = mat32.Vec2{float32(ly.Shp.Dim(1)), float32(ly.Shp.Dim(0))} // Y, X
+ sz = mat32.V2(float32(ly.Shp.Dim(1)), float32(ly.Shp.Dim(0))) // Y, X
case ly.Is4D():
// note: pool spacing is handled internally in display and does not affect overall size
- sz = mat32.Vec2{float32(ly.Shp.Dim(1) * ly.Shp.Dim(3)), float32(ly.Shp.Dim(0) * ly.Shp.Dim(2))} // Y, X
+ sz = mat32.V2(float32(ly.Shp.Dim(1)*ly.Shp.Dim(3)), float32(ly.Shp.Dim(0)*ly.Shp.Dim(2))) // Y, X
default:
- sz = mat32.Vec2{float32(ly.Shp.Len()), 1}
+ sz = mat32.V2(float32(ly.Shp.Len()), 1)
}
return sz.MulScalar(ly.Rel.Scale)
}
diff --git a/axon/layerparams.go b/axon/layerparams.go
index e2b126b2b..6849e3458 100644
--- a/axon/layerparams.go
+++ b/axon/layerparams.go
@@ -7,7 +7,7 @@ package axon
import (
"encoding/json"
- "github.com/goki/mat32"
+ "goki.dev/mat32/v2"
)
//gosl: hlsl layerparams
@@ -29,46 +29,46 @@ import (
type LayerIdxs struct {
// layer index
- LayIdx uint32 `inactive:"+" desc:"layer index"`
+ LayIdx uint32 `inactive:"+"`
// maximum number of data parallel elements
- MaxData uint32 `inactive:"+" desc:"maximum number of data parallel elements"`
+ MaxData uint32 `inactive:"+"`
// start of pools for this layer -- first one is always the layer-wide pool
- PoolSt uint32 `inactive:"+" desc:"start of pools for this layer -- first one is always the layer-wide pool"`
+ PoolSt uint32 `inactive:"+"`
// start of neurons for this layer in global array (same as Layer.NeurStIdx)
- NeurSt uint32 `inactive:"+" desc:"start of neurons for this layer in global array (same as Layer.NeurStIdx)"`
+ NeurSt uint32 `inactive:"+"`
// number of neurons in layer
- NeurN uint32 `inactive:"+" desc:"number of neurons in layer"`
+ NeurN uint32 `inactive:"+"`
// start index into RecvPrjns global array
- RecvSt uint32 `inactive:"+" desc:"start index into RecvPrjns global array"`
+ RecvSt uint32 `inactive:"+"`
// number of recv projections
- RecvN uint32 `inactive:"+" desc:"number of recv projections"`
+ RecvN uint32 `inactive:"+"`
// start index into SendPrjns global array
- SendSt uint32 `inactive:"+" desc:"start index into RecvPrjns global array"`
+ SendSt uint32 `inactive:"+"`
// number of send projections
- SendN uint32 `inactive:"+" desc:"number of recv projections"`
+ SendN uint32 `inactive:"+"`
// starting index in network global Exts list of external input for this layer -- only for Input / Target / Compare layer types
- ExtsSt uint32 `inactive:"+" desc:"starting index in network global Exts list of external input for this layer -- only for Input / Target / Compare layer types"`
+ ExtsSt uint32 `inactive:"+"`
// layer shape Pools Y dimension -- 1 for 2D
- ShpPlY int32 `inactive:"+" desc:"layer shape Pools Y dimension -- 1 for 2D"`
+ ShpPlY int32 `inactive:"+"`
// layer shape Pools X dimension -- 1 for 2D
- ShpPlX int32 `inactive:"+" desc:"layer shape Pools X dimension -- 1 for 2D"`
+ ShpPlX int32 `inactive:"+"`
// layer shape Units Y dimension
- ShpUnY int32 `inactive:"+" desc:"layer shape Units Y dimension"`
+ ShpUnY int32 `inactive:"+"`
// layer shape Units X dimension
- ShpUnX int32 `inactive:"+" desc:"layer shape Units X dimension"`
+ ShpUnX int32 `inactive:"+"`
pad, pad1 uint32
}
@@ -95,16 +95,16 @@ func (lx *LayerIdxs) ExtIdx(ni, di uint32) uint32 {
type LayerInhibIdxs struct {
// idx of Layer to get layer-level inhibition from -- set during Build from BuildConfig LayInhib1Name if present -- -1 if not used
- Idx1 int32 `inactive:"+" desc:"idx of Layer to get layer-level inhibition from -- set during Build from BuildConfig LayInhib1Name if present -- -1 if not used"`
+ Idx1 int32 `inactive:"+"`
// idx of Layer to get layer-level inhibition from -- set during Build from BuildConfig LayInhib2Name if present -- -1 if not used
- Idx2 int32 `inactive:"+" desc:"idx of Layer to get layer-level inhibition from -- set during Build from BuildConfig LayInhib2Name if present -- -1 if not used"`
+ Idx2 int32 `inactive:"+"`
// idx of Layer to get layer-level inhibition from -- set during Build from BuildConfig LayInhib3Name if present -- -1 if not used
- Idx3 int32 `inactive:"+" desc:"idx of Layer to get layer-level inhibition from -- set during Build from BuildConfig LayInhib3Name if present -- -1 if not used"`
+ Idx3 int32 `inactive:"+"`
// idx of Layer to get layer-level inhibition from -- set during Build from BuildConfig LayInhib4Name if present -- -1 if not used
- Idx4 int32 `inactive:"+" desc:"idx of Layer to geta layer-level inhibition from -- set during Build from BuildConfig LayInhib4Name if present -- -1 if not used"`
+ Idx4 int32 `inactive:"+"`
}
// note: the following must appear above LayerParams for GPU usage which is order sensitive
@@ -134,60 +134,60 @@ func SetNeuronExtPosNeg(ctx *Context, ni, di uint32, val float32) {
type LayerParams struct {
// functional type of layer -- determines functional code path for specialized layer types, and is synchronized with the Layer.Typ value
- LayType LayerTypes `desc:"functional type of layer -- determines functional code path for specialized layer types, and is synchronized with the Layer.Typ value"`
+ LayType LayerTypes
pad, pad1, pad2 int32
- // [view: add-fields] Activation parameters and methods for computing activations
- Acts ActParams `view:"add-fields" desc:"Activation parameters and methods for computing activations"`
+ // Activation parameters and methods for computing activations
+ Acts ActParams `view:"add-fields"`
- // [view: add-fields] Inhibition parameters and methods for computing layer-level inhibition
- Inhib InhibParams `view:"add-fields" desc:"Inhibition parameters and methods for computing layer-level inhibition"`
+ // Inhibition parameters and methods for computing layer-level inhibition
+ Inhib InhibParams `view:"add-fields"`
- // [view: inline] indexes of layers that contribute between-layer inhibition to this layer -- set these indexes via BuildConfig LayInhibXName (X = 1, 2...)
- LayInhib LayerInhibIdxs `view:"inline" desc:"indexes of layers that contribute between-layer inhibition to this layer -- set these indexes via BuildConfig LayInhibXName (X = 1, 2...)"`
+ // indexes of layers that contribute between-layer inhibition to this layer -- set these indexes via BuildConfig LayInhibXName (X = 1, 2...)
+ LayInhib LayerInhibIdxs `view:"inline"`
- // [view: add-fields] Learning parameters and methods that operate at the neuron level
- Learn LearnNeurParams `view:"add-fields" desc:"Learning parameters and methods that operate at the neuron level"`
+ // Learning parameters and methods that operate at the neuron level
+ Learn LearnNeurParams `view:"add-fields"`
- // [view: inline] [viewif: LayType=SuperLayer] BurstParams determine how the 5IB Burst activation is computed from CaSpkP integrated spiking values in Super layers -- thresholded.
- Bursts BurstParams `viewif:"LayType=SuperLayer" view:"inline" desc:"BurstParams determine how the 5IB Burst activation is computed from CaSpkP integrated spiking values in Super layers -- thresholded."`
+ // BurstParams determine how the 5IB Burst activation is computed from CaSpkP integrated spiking values in Super layers -- thresholded.
+ Bursts BurstParams `viewif:"LayType=SuperLayer" view:"inline"`
- // [view: inline] [viewif: LayType=[CTLayer,PTPredLayer,PTNotMaintLayer,BLALayer]] params for the CT corticothalamic layer and PTPred layer that generates predictions over the Pulvinar using context -- uses the CtxtGe excitatory input plus stronger NMDA channels to maintain context trace
- CT CTParams `viewif:"LayType=[CTLayer,PTPredLayer,PTNotMaintLayer,BLALayer]" view:"inline" desc:"params for the CT corticothalamic layer and PTPred layer that generates predictions over the Pulvinar using context -- uses the CtxtGe excitatory input plus stronger NMDA channels to maintain context trace"`
+ // params for the CT corticothalamic layer and PTPred layer that generates predictions over the Pulvinar using context -- uses the CtxtGe excitatory input plus stronger NMDA channels to maintain context trace
+ CT CTParams `viewif:"LayType=[CTLayer,PTPredLayer,PTNotMaintLayer,BLALayer]" view:"inline"`
- // [view: inline] [viewif: LayType=PulvinarLayer] provides parameters for how the plus-phase (outcome) state of Pulvinar thalamic relay cell neurons is computed from the corresponding driver neuron Burst activation (or CaSpkP if not Super)
- Pulv PulvParams `viewif:"LayType=PulvinarLayer" view:"inline" desc:"provides parameters for how the plus-phase (outcome) state of Pulvinar thalamic relay cell neurons is computed from the corresponding driver neuron Burst activation (or CaSpkP if not Super)"`
+ // provides parameters for how the plus-phase (outcome) state of Pulvinar thalamic relay cell neurons is computed from the corresponding driver neuron Burst activation (or CaSpkP if not Super)
+ Pulv PulvParams `viewif:"LayType=PulvinarLayer" view:"inline"`
- // [view: inline] [viewif: LayType=MatrixLayer] parameters for BG Striatum Matrix MSN layers, which are the main Go / NoGo gating units in BG.
- Matrix MatrixParams `viewif:"LayType=MatrixLayer" view:"inline" desc:"parameters for BG Striatum Matrix MSN layers, which are the main Go / NoGo gating units in BG."`
+ // parameters for BG Striatum Matrix MSN layers, which are the main Go / NoGo gating units in BG.
+ Matrix MatrixParams `viewif:"LayType=MatrixLayer" view:"inline"`
- // [view: inline] [viewif: LayType=GPLayer] type of GP Layer.
- GP GPParams `viewif:"LayType=GPLayer" view:"inline" desc:"type of GP Layer."`
+ // type of GP Layer.
+ GP GPParams `viewif:"LayType=GPLayer" view:"inline"`
- // [view: inline] [viewif: LayType=VSPatchLayer] parameters for VSPatch learning
- VSPatch VSPatchParams `viewif:"LayType=VSPatchLayer" view:"inline" desc:"parameters for VSPatch learning"`
+ // parameters for VSPatch learning
+ VSPatch VSPatchParams `viewif:"LayType=VSPatchLayer" view:"inline"`
- // [view: inline] [viewif: LayType=LDTLayer] parameterizes laterodorsal tegmentum ACh salience neuromodulatory signal, driven by superior colliculus stimulus novelty, US input / absence, and OFC / ACC inhibition
- LDT LDTParams `viewif:"LayType=LDTLayer" view:"inline" desc:"parameterizes laterodorsal tegmentum ACh salience neuromodulatory signal, driven by superior colliculus stimulus novelty, US input / absence, and OFC / ACC inhibition"`
+ // parameterizes laterodorsal tegmentum ACh salience neuromodulatory signal, driven by superior colliculus stimulus novelty, US input / absence, and OFC / ACC inhibition
+ LDT LDTParams `viewif:"LayType=LDTLayer" view:"inline"`
- // [view: inline] [viewif: LayType=VTALayer] parameterizes computing overall VTA DA based on LHb PVDA (primary value -- at US time, computed at start of each trial and stored in LHbPVDA global value) and Amygdala (CeM) CS / learned value (LV) activations, which update every cycle.
- VTA VTAParams `viewif:"LayType=VTALayer" view:"inline" desc:"parameterizes computing overall VTA DA based on LHb PVDA (primary value -- at US time, computed at start of each trial and stored in LHbPVDA global value) and Amygdala (CeM) CS / learned value (LV) activations, which update every cycle."`
+ // parameterizes computing overall VTA DA based on LHb PVDA (primary value -- at US time, computed at start of each trial and stored in LHbPVDA global value) and Amygdala (CeM) CS / learned value (LV) activations, which update every cycle.
+ VTA VTAParams `viewif:"LayType=VTALayer" view:"inline"`
- // [view: inline] [viewif: LayType=RWPredLayer] parameterizes reward prediction for a simple Rescorla-Wagner learning dynamic (i.e., PV learning in the PVLV framework).
- RWPred RWPredParams `viewif:"LayType=RWPredLayer" view:"inline" desc:"parameterizes reward prediction for a simple Rescorla-Wagner learning dynamic (i.e., PV learning in the PVLV framework)."`
+ // parameterizes reward prediction for a simple Rescorla-Wagner learning dynamic (i.e., PV learning in the PVLV framework).
+ RWPred RWPredParams `viewif:"LayType=RWPredLayer" view:"inline"`
- // [view: inline] [viewif: LayType=RWDaLayer] parameterizes reward prediction dopamine for a simple Rescorla-Wagner learning dynamic (i.e., PV learning in the PVLV framework).
- RWDa RWDaParams `viewif:"LayType=RWDaLayer" view:"inline" desc:"parameterizes reward prediction dopamine for a simple Rescorla-Wagner learning dynamic (i.e., PV learning in the PVLV framework)."`
+ // parameterizes reward prediction dopamine for a simple Rescorla-Wagner learning dynamic (i.e., PV learning in the PVLV framework).
+ RWDa RWDaParams `viewif:"LayType=RWDaLayer" view:"inline"`
- // [view: inline] [viewif: LayType=TDIntegLayer] parameterizes TD reward integration layer
- TDInteg TDIntegParams `viewif:"LayType=TDIntegLayer" view:"inline" desc:"parameterizes TD reward integration layer"`
+ // parameterizes TD reward integration layer
+ TDInteg TDIntegParams `viewif:"LayType=TDIntegLayer" view:"inline"`
- // [view: inline] [viewif: LayType=TDDaLayer] parameterizes dopamine (DA) signal as the temporal difference (TD) between the TDIntegLayer activations in the minus and plus phase.
- TDDa TDDaParams `viewif:"LayType=TDDaLayer" view:"inline" desc:"parameterizes dopamine (DA) signal as the temporal difference (TD) between the TDIntegLayer activations in the minus and plus phase."`
+ // parameterizes dopamine (DA) signal as the temporal difference (TD) between the TDIntegLayer activations in the minus and plus phase.
+ TDDa TDDaParams `viewif:"LayType=TDDaLayer" view:"inline"`
// recv and send projection array access info
- Idxs LayerIdxs `desc:"recv and send projection array access info"`
+ Idxs LayerIdxs
}
func (ly *LayerParams) Update() {
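The pattern in all of these hunks is the same: the desc tag is dropped and its text becomes the Go doc comment, which the goki generators now read directly, while behavioral tags (def, view, viewif, min, max, inactive) stay in place. A schematic before/after of the convention (hypothetical field, not from this file):

```go
package params

// Old style: documentation duplicated into a desc struct tag.
type OldParams struct {
	Gain float32 `def:"6" desc:"gain of the sigmoid function"`
}

// New style: the Go doc comment is the single source of documentation,
// read directly by the generators; behavioral tags remain.
type NewParams struct {
	// gain of the sigmoid function
	Gain float32 `def:"6"`
}
```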
diff --git a/axon/layertypes.go b/axon/layertypes.go
index 22a0bc5c7..ad716a806 100644
--- a/axon/layertypes.go
+++ b/axon/layertypes.go
@@ -4,10 +4,6 @@
package axon
-import (
- "github.com/goki/ki/kit"
-)
-
//gosl: start layertypes
// LayerTypes is an axon-specific layer type enum,
@@ -15,7 +11,7 @@ import (
// Class parameter styles automatically key off of these types.
// The first entries must be kept synchronized with the emer.LayerType,
// although we replace Hidden -> Super.
-type LayerTypes int32
+type LayerTypes int32 //enums:enum
// note: we need to add the Layer extension to avoid naming
// conflicts between layer, projection and other things.
@@ -236,8 +232,6 @@ const (
// between the TDIntegLayer activations in the minus and plus phase.
// These are retrieved from Special LayerVals.
TDDaLayer
-
- LayerTypesN
)
// IsExtLayerType returns true if the layer type deals with external input:
@@ -259,10 +253,3 @@ func (lt LayerTypes) IsExt() bool {
}
return false
}
-
-//go:generate stringer -type=LayerTypes
-
-var KiT_LayerTypes = kit.Enums.AddEnum(LayerTypesN, kit.NotBitFlag, nil)
-
-func (ev LayerTypes) MarshalJSON() ([]byte, error) { return kit.EnumMarshalJSON(ev) }
-func (ev *LayerTypes) UnmarshalJSON(b []byte) error { return kit.EnumUnmarshalJSON(ev, b) }
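With the //enums:enum directive above, the hand-maintained stringer output and kit registration below are replaced by generated methods. A usage sketch, assuming the standard goki enumgen surface (String, SetString, and a generated LayerTypesN constant -- these names follow the enumgen convention and are not confirmed by this diff):

```go
package main

import (
	"fmt"

	"github.com/emer/axon/axon"
)

func main() {
	lt := axon.PulvinarLayer
	fmt.Println(lt.String()) // "PulvinarLayer"

	var parsed axon.LayerTypes
	if err := parsed.SetString("CTLayer"); err != nil {
		panic(err)
	}
	fmt.Println(parsed == axon.CTLayer) // true
	fmt.Println(int(axon.LayerTypesN))  // number of layer types
}
```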
diff --git a/axon/layertypes_string.go b/axon/layertypes_string.go
deleted file mode 100644
index 6d88fb397..000000000
--- a/axon/layertypes_string.go
+++ /dev/null
@@ -1,111 +0,0 @@
-// Code generated by "stringer -type=LayerTypes"; DO NOT EDIT.
-
-package axon
-
-import (
- "errors"
- "strconv"
-)
-
-var _ = errors.New("dummy error")
-
-func _() {
- // An "invalid array index" compiler error signifies that the constant values have changed.
- // Re-run the stringer command to generate them again.
- var x [1]struct{}
- _ = x[SuperLayer-0]
- _ = x[InputLayer-1]
- _ = x[TargetLayer-2]
- _ = x[CompareLayer-3]
- _ = x[CTLayer-4]
- _ = x[PulvinarLayer-5]
- _ = x[TRNLayer-6]
- _ = x[PTMaintLayer-7]
- _ = x[PTPredLayer-8]
- _ = x[PTNotMaintLayer-9]
- _ = x[MatrixLayer-10]
- _ = x[STNLayer-11]
- _ = x[GPLayer-12]
- _ = x[BGThalLayer-13]
- _ = x[VSGatedLayer-14]
- _ = x[BLALayer-15]
- _ = x[CeMLayer-16]
- _ = x[VSPatchLayer-17]
- _ = x[LHbLayer-18]
- _ = x[DrivesLayer-19]
- _ = x[UrgencyLayer-20]
- _ = x[USLayer-21]
- _ = x[PVLayer-22]
- _ = x[LDTLayer-23]
- _ = x[VTALayer-24]
- _ = x[RewLayer-25]
- _ = x[RWPredLayer-26]
- _ = x[RWDaLayer-27]
- _ = x[TDPredLayer-28]
- _ = x[TDIntegLayer-29]
- _ = x[TDDaLayer-30]
- _ = x[LayerTypesN-31]
-}
-
-const _LayerTypes_name = "SuperLayerInputLayerTargetLayerCompareLayerCTLayerPulvinarLayerTRNLayerPTMaintLayerPTPredLayerPTNotMaintLayerMatrixLayerSTNLayerGPLayerBGThalLayerVSGatedLayerBLALayerCeMLayerVSPatchLayerLHbLayerDrivesLayerUrgencyLayerUSLayerPVLayerLDTLayerVTALayerRewLayerRWPredLayerRWDaLayerTDPredLayerTDIntegLayerTDDaLayerLayerTypesN"
-
-var _LayerTypes_index = [...]uint16{0, 10, 20, 31, 43, 50, 63, 71, 83, 94, 109, 120, 128, 135, 146, 158, 166, 174, 186, 194, 205, 217, 224, 231, 239, 247, 255, 266, 275, 286, 298, 307, 318}
-
-func (i LayerTypes) String() string {
- if i < 0 || i >= LayerTypes(len(_LayerTypes_index)-1) {
- return "LayerTypes(" + strconv.FormatInt(int64(i), 10) + ")"
- }
- return _LayerTypes_name[_LayerTypes_index[i]:_LayerTypes_index[i+1]]
-}
-
-func (i *LayerTypes) FromString(s string) error {
- for j := 0; j < len(_LayerTypes_index)-1; j++ {
- if s == _LayerTypes_name[_LayerTypes_index[j]:_LayerTypes_index[j+1]] {
- *i = LayerTypes(j)
- return nil
- }
- }
- return errors.New("String: " + s + " is not a valid option for type: LayerTypes")
-}
-
-var _LayerTypes_descMap = map[LayerTypes]string{
- 0: `Super is a superficial cortical layer (lamina 2-3-4) which does not receive direct input or targets. In more generic models, it should be used as a Hidden layer, and maps onto the Hidden type in emer.LayerType.`,
- 1: `Input is a layer that receives direct external input in its Ext inputs. Biologically, it can be a primary sensory layer, or a thalamic layer.`,
- 2: `Target is a layer that receives direct external target inputs used for driving plus-phase learning. Simple target layers are generally not used in more biological models, which instead use predictive learning via Pulvinar or related mechanisms.`,
- 3: `Compare is a layer that receives external comparison inputs, which drive statistics but do NOT drive activation or learning directly. It is rarely used in axon.`,
- 4: `CT are layer 6 corticothalamic projecting neurons, which drive "top down" predictions in Pulvinar layers. They maintain information over time via stronger NMDA channels and use maintained prior state information to generate predictions about current states forming on Super layers that then drive PT (5IB) bursting activity, which are the plus-phase drivers of Pulvinar activity.`,
- 5: `Pulvinar are thalamic relay cell neurons in the higher-order Pulvinar nucleus of the thalamus, and functionally isomorphic neurons in the MD thalamus, and potentially other areas. These cells alternately reflect predictions driven by CT projections, and actual outcomes driven by 5IB Burst activity from corresponding PT or Super layer neurons that provide strong driving inputs.`,
- 6: `TRNLayer is thalamic reticular nucleus layer for inhibitory competition within the thalamus.`,
- 7: `PTMaintLayer implements the subset of pyramidal tract (PT) layer 5 intrinsic bursting (5IB) deep neurons that exhibit robust, stable maintenance of activity over the duration of a goal engaged window, modulated by basal ganglia (BG) disinhibitory gating, supported by strong MaintNMDA channels and recurrent excitation. The lateral PTSelfMaint projection uses MaintG to drive GMaintRaw input that feeds into the stronger, longer MaintNMDA channels, and the ThalToPT ModulatoryG projection from BGThalamus multiplicatively modulates the strength of other inputs, such that only at the time of BG gating are these strong enough to drive sustained active maintenance. Use Act.Dend.ModGain to parameterize.`,
- 8: `PTPredLayer implements the subset of pyramidal tract (PT) layer 5 intrinsic bursting (5IB) deep neurons that combine modulatory input from PTMaintLayer sustained maintenance and CTLayer dynamic predictive learning that helps to predict state changes during the period of active goal maintenance. This layer provides the primary input to VSPatch US-timing prediction layers, and other layers that require predictive dynamic`,
- 9: `PTNotMaintLayer implements a tonically active layer that is inhibited by the PTMaintLayer, thereby providing an active representation of the *absence* of maintained PT activity, which is useful for driving appropriate actions (e.g., exploration) when not in goal-engaged mode.`,
- 10: `MatrixLayer represents the matrisome medium spiny neurons (MSNs) that are the main Go / NoGo gating units in BG. These are strongly modulated by phasic dopamine: D1 = Go, D2 = NoGo.`,
- 11: `STNLayer represents subthalamic nucleus neurons, with two subtypes: STNp are more strongly driven and get over bursting threshold, driving strong, rapid activation of the KCa channels, causing a long pause in firing, which creates a window during which GPe dynamics resolve Go vs. No balance. STNs are more weakly driven and thus more slowly activate KCa, resulting in a longer period of activation, during which the GPi is inhibited to prevent premature gating based only MtxGo inhibition -- gating only occurs when GPeIn signal has had a chance to integrate its MtxNo inputs.`,
- 12: `GPLayer represents a globus pallidus layer in the BG, including: GPeOut, GPeIn, GPeTA (arkypallidal), and GPi. Typically just a single unit per Pool representing a given stripe.`,
- 13: `BGThalLayer represents a BG gated thalamic layer, which receives BG gating in the form of an inhibitory projection from GPi. Located mainly in the Ventral thalamus: VA / VM / VL, and also parts of MD mediodorsal thalamus.`,
- 14: `VSGated represents explicit coding of VS gating status: JustGated and HasGated (since last US or failed predicted US), For visualization and / or motor action signaling.`,
- 15: `BLALayer represents a basolateral amygdala layer which learns to associate arbitrary stimuli (CSs) with behaviorally salient outcomes (USs)`,
- 16: `CeMLayer represents a central nucleus of the amygdala layer.`,
- 17: `VSPatchLayer represents a ventral striatum patch layer, which learns to represent the expected amount of dopamine reward and projects both directly with shunting inhibition to the VTA and indirectly via the LHb / RMTg to cancel phasic dopamine firing to expected rewards (i.e., reward prediction error).`,
- 18: `LHbLayer represents the lateral habenula, which drives dipping in the VTA. It tracks the Global LHb values for visualization purposes -- updated by VTALayer.`,
- 19: `DrivesLayer represents the Drives in PVLV framework. It tracks the Global Drives values for visualization and predictive learning purposes.`,
- 20: `UrgencyLayer represents the Urgency factor in PVLV framework. It tracks the Global Urgency.Urge value for visualization and predictive learning purposes.`,
- 21: `USLayer represents a US unconditioned stimulus layer (USpos or USneg). It tracks the Global USpos or USneg, for visualization and predictive learning purposes. Actual US inputs are set in PVLV.`,
- 22: `PVLayer represents a PV primary value layer (PVpos or PVneg) representing the total primary value as a function of US inputs, drives, and effort. It tracks the Global VTA.PVpos, PVneg values for visualization and predictive learning purposes.`,
- 23: `LDTLayer represents the laterodorsal tegmentum layer, which is the primary limbic ACh (acetylcholine) driver to other ACh: BG cholinergic interneurons (CIN) and nucleus basalis ACh areas. The phasic ACh release signals reward salient inputs from CS, US and US omssion, and it drives widespread disinhibition of BG gating and VTA DA firing. It receives excitation from superior colliculus which computes a temporal derivative (stimulus specific adaptation, SSA) of sensory inputs, and inhibitory input from OFC, ACC driving suppression of distracting inputs during goal-engaged states.`,
- 24: `VTALayer represents the ventral tegmental area, which releases dopamine. It computes final DA value from PVLV-computed LHb PVDA (primary value DA), updated at start of each trial from updated US, Effort, etc state, and cycle-by-cycle LV learned value state reflecting CS inputs, in the Amygdala (CeM). Its activity reflects this DA level, which is effectively broadcast vial Global state values to all layers.`,
- 25: `RewLayer represents positive or negative reward values across 2 units, showing spiking rates for each, and Act always represents signed value.`,
- 26: `RWPredLayer computes reward prediction for a simple Rescorla-Wagner learning dynamic (i.e., PV learning in the PVLV framework). Activity is computed as linear function of excitatory conductance (which can be negative -- there are no constraints). Use with RWPrjn which does simple delta-rule learning on minus-plus.`,
- 27: `RWDaLayer computes a dopamine (DA) signal based on a simple Rescorla-Wagner learning dynamic (i.e., PV learning in the PVLV framework). It computes difference between r(t) and RWPred values. r(t) is accessed directly from a Rew layer -- if no external input then no DA is computed -- critical for effective use of RW only for PV cases. RWPred prediction is also accessed directly from Rew layer to avoid any issues.`,
- 28: `TDPredLayer is the temporal differences reward prediction layer. It represents estimated value V(t) in the minus phase, and computes estimated V(t+1) based on its learned weights in plus phase, using the TDPredPrjn projection type for DA modulated learning.`,
- 29: `TDIntegLayer is the temporal differences reward integration layer. It represents estimated value V(t) from prior time step in the minus phase, and estimated discount * V(t+1) + r(t) in the plus phase. It gets Rew, PrevPred from Context.NeuroMod, and Special LayerVals from TDPredLayer.`,
- 30: `TDDaLayer computes a dopamine (DA) signal as the temporal difference (TD) between the TDIntegLayer activations in the minus and plus phase. These are retrieved from Special LayerVals.`,
- 31: ``,
-}
-
-func (i LayerTypes) Desc() string {
- if str, ok := _LayerTypes_descMap[i]; ok {
- return str
- }
- return "LayerTypes(" + strconv.FormatInt(int64(i), 10) + ")"
-}
diff --git a/axon/layervals.go b/axon/layervals.go
index 1465615ee..a96731f84 100644
--- a/axon/layervals.go
+++ b/axon/layervals.go
@@ -12,22 +12,22 @@ package axon
type ActAvgVals struct {
// running-average minus-phase activity integrated at Dt.LongAvgTau -- used for adapting inhibition relative to target level
- ActMAvg float32 `inactive:"+" desc:"running-average minus-phase activity integrated at Dt.LongAvgTau -- used for adapting inhibition relative to target level"`
+ ActMAvg float32 `inactive:"+"`
// running-average plus-phase activity integrated at Dt.LongAvgTau
- ActPAvg float32 `inactive:"+" desc:"running-average plus-phase activity integrated at Dt.LongAvgTau"`
+ ActPAvg float32 `inactive:"+"`
// running-average max of minus-phase Ge value across the layer integrated at Dt.LongAvgTau
- AvgMaxGeM float32 `inactive:"+" desc:"running-average max of minus-phase Ge value across the layer integrated at Dt.LongAvgTau"`
+ AvgMaxGeM float32 `inactive:"+"`
// running-average max of minus-phase Gi value across the layer integrated at Dt.LongAvgTau
- AvgMaxGiM float32 `inactive:"+" desc:"running-average max of minus-phase Gi value across the layer integrated at Dt.LongAvgTau"`
+ AvgMaxGiM float32 `inactive:"+"`
// multiplier on inhibition -- adapted to maintain target activity level
- GiMult float32 `inactive:"+" desc:"multiplier on inhibition -- adapted to maintain target activity level"`
+ GiMult float32 `inactive:"+"`
// adaptive threshold -- only used for specialized layers, e.g., VSPatch
- AdaptThr float32 `inactive:"+" desc:"adaptive threshold -- only used for specialized layers, e.g., VSPatch"`
+ AdaptThr float32 `inactive:"+"`
pad, pad1 float32
}
@@ -46,13 +46,13 @@ func (lv *ActAvgVals) Init() {
type CorSimStats struct {
// correlation (centered cosine aka normalized dot product) activation difference between ActP and ActM on this alpha-cycle for this layer -- computed by CorSimFmActs called by PlusPhase
- Cor float32 `inactive:"+" desc:"correlation (centered cosine aka normalized dot product) activation difference between ActP and ActM on this alpha-cycle for this layer -- computed by CorSimFmActs called by PlusPhase"`
+ Cor float32 `inactive:"+"`
// running average of correlation similarity between ActP and ActM -- computed with CorSim.Tau time constant in PlusPhase
- Avg float32 `inactive:"+" desc:"running average of correlation similarity between ActP and ActM -- computed with CorSim.Tau time constant in PlusPhase"`
+ Avg float32 `inactive:"+"`
// running variance of correlation similarity between ActP and ActM -- computed with CorSim.Tau time constant in PlusPhase
- Var float32 `inactive:"+" desc:"running variance of correlation similarity between ActP and ActM -- computed with CorSim.Tau time constant in PlusPhase"`
+ Var float32 `inactive:"+"`
pad float32
}
@@ -69,16 +69,16 @@ func (cd *CorSimStats) Init() {
type LaySpecialVals struct {
// one value
- V1 float32 `inactive:"+" desc:"one value"`
+ V1 float32 `inactive:"+"`
// one value
- V2 float32 `inactive:"+" desc:"one value"`
+ V2 float32 `inactive:"+"`
// one value
- V3 float32 `inactive:"+" desc:"one value"`
+ V3 float32 `inactive:"+"`
// one value
- V4 float32 `inactive:"+" desc:"one value"`
+ V4 float32 `inactive:"+"`
}
func (lv *LaySpecialVals) Init() {
@@ -92,24 +92,24 @@ func (lv *LaySpecialVals) Init() {
// It is sync'd down from the GPU to the CPU after every Cycle.
type LayerVals struct {
- // [view: -] layer index for these vals
- LayIdx uint32 `view:"-" desc:"layer index for these vals"`
+ // layer index for these vals
+ LayIdx uint32 `view:"-"`
- // [view: -] data index for these vals
- DataIdx uint32 `view:"-" desc:"data index for these vals"`
+ // data index for these vals
+ DataIdx uint32 `view:"-"`
// reaction time for this layer in cycles, which is -1 until the Max CaSpkP level (after MaxCycStart) exceeds the Act.Attn.RTThr threshold
- RT float32 `inactive:"-" desc:"reaction time for this layer in cycles, which is -1 until the Max CaSpkP level (after MaxCycStart) exceeds the Act.Attn.RTThr threshold"`
+ RT float32 `inactive:"-"`
pad uint32
- // [view: inline] running-average activation levels used for adaptive inhibition, and other adapting values
- ActAvg ActAvgVals `view:"inline" desc:"running-average activation levels used for adaptive inhibition, and other adapting values"`
+ // running-average activation levels used for adaptive inhibition, and other adapting values
+ ActAvg ActAvgVals `view:"inline"`
// correlation (centered cosine aka normalized dot product) similarity between ActM, ActP states
- CorSim CorSimStats `desc:"correlation (centered cosine aka normalized dot product) similarity between ActM, ActP states"`
+ CorSim CorSimStats
- // [view: inline] special values used to communicate to other layers based on neural values computed on the GPU -- special cross-layer computations happen CPU-side and are sent back into the network via Context on the next cycle -- used for special algorithms such as RL / DA etc
- Special LaySpecialVals `view:"inline" desc:"special values used to communicate to other layers based on neural values computed on the GPU -- special cross-layer computations happen CPU-side and are sent back into the network via Context on the next cycle -- used for special algorithms such as RL / DA etc"`
+ // special values used to communicate to other layers based on neural values computed on the GPU -- special cross-layer computations happen CPU-side and are sent back into the network via Context on the next cycle -- used for special algorithms such as RL / DA etc
+ Special LaySpecialVals `view:"inline"`
}
func (lv *LayerVals) Init() {
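The ActMAvg / ActPAvg fields above are standard exponential running averages at rate 1/LongAvgTau. A standalone sketch of that integration (hypothetical helper, not the library's method):

```go
package main

import "fmt"

// runAvg integrates val into avg at rate dt = 1/tau -- the exponential
// form implied by the Dt.LongAvgTau comments above.
func runAvg(avg, val, dt float32) float32 {
	return avg + dt*(val-avg)
}

func main() {
	actMAvg, dt := float32(0.1), float32(1.0/20.0)
	for i := 0; i < 5; i++ {
		actMAvg = runAvg(actMAvg, 0.15, dt) // drifts toward observed 0.15
	}
	fmt.Println(actMAvg)
}
```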
diff --git a/axon/learn.go b/axon/learn.go
index f228f6a34..848ac922b 100644
--- a/axon/learn.go
+++ b/axon/learn.go
@@ -7,10 +7,10 @@ package axon
import (
"github.com/emer/axon/chans"
"github.com/emer/axon/kinase"
- "github.com/emer/emergent/erand"
- "github.com/emer/etable/minmax"
- "github.com/goki/gosl/slbool"
- "github.com/goki/mat32"
+ "github.com/emer/emergent/v2/erand"
+ "goki.dev/etable/v2/minmax"
+ "goki.dev/gosl/v2/slbool"
+ "goki.dev/mat32/v2"
)
///////////////////////////////////////////////////////////////////////
@@ -30,29 +30,29 @@ import (
// CaM (as in calmodulin), CaP (ltP, CaMKII, plus phase), CaD (ltD, DAPK1, minus phase).
type CaLrnParams struct {
- // [def: 80] denomenator used for normalizing CaLrn, so the max is roughly 1 - 1.5 or so, which works best in terms of previous standard learning rules, and overall learning performance
- Norm float32 `def:"80" desc:"denomenator used for normalizing CaLrn, so the max is roughly 1 - 1.5 or so, which works best in terms of previous standard learning rules, and overall learning performance"`
+	// denominator used for normalizing CaLrn, so the max is roughly 1 - 1.5 or so, which works best in terms of previous standard learning rules, and overall learning performance
+ Norm float32 `def:"80"`
- // [def: true] use spikes to generate VGCC instead of actual VGCC current -- see SpkVGCCa for calcium contribution from each spike
- SpkVGCC slbool.Bool `def:"true" desc:"use spikes to generate VGCC instead of actual VGCC current -- see SpkVGCCa for calcium contribution from each spike"`
+ // use spikes to generate VGCC instead of actual VGCC current -- see SpkVGCCa for calcium contribution from each spike
+ SpkVGCC slbool.Bool `def:"true"`
- // [def: 35] multiplier on spike for computing Ca contribution to CaLrn in SpkVGCC mode
- SpkVgccCa float32 `def:"35" desc:"multiplier on spike for computing Ca contribution to CaLrn in SpkVGCC mode"`
+ // multiplier on spike for computing Ca contribution to CaLrn in SpkVGCC mode
+ SpkVgccCa float32 `def:"35"`
- // [def: 10] time constant of decay for VgccCa calcium -- it is highly transient around spikes, so decay and diffusion factors are more important than for long-lasting NMDA factor. VgccCa is integrated separately int VgccCaInt prior to adding into NMDA Ca in CaLrn
- VgccTau float32 `def:"10" desc:"time constant of decay for VgccCa calcium -- it is highly transient around spikes, so decay and diffusion factors are more important than for long-lasting NMDA factor. VgccCa is integrated separately int VgccCaInt prior to adding into NMDA Ca in CaLrn"`
+	// time constant of decay for VgccCa calcium -- it is highly transient around spikes, so decay and diffusion factors are more important than for long-lasting NMDA factor. VgccCa is integrated separately into VgccCaInt prior to adding into NMDA Ca in CaLrn
+ VgccTau float32 `def:"10"`
- // [view: inline] time constants for integrating CaLrn across M, P and D cascading levels
- Dt kinase.CaDtParams `view:"inline" desc:"time constants for integrating CaLrn across M, P and D cascading levels"`
+ // time constants for integrating CaLrn across M, P and D cascading levels
+ Dt kinase.CaDtParams `view:"inline"`
- // [def: 0.01,0.02,0.5] Threshold on CaSpkP CaSpkD value for updating synapse-level Ca values (SynCa) -- this is purely a performance optimization that excludes random infrequent spikes -- 0.05 works well on larger networks but not smaller, which require the .01 default.
- UpdtThr float32 `def:"0.01,0.02,0.5" desc:"Threshold on CaSpkP CaSpkD value for updating synapse-level Ca values (SynCa) -- this is purely a performance optimization that excludes random infrequent spikes -- 0.05 works well on larger networks but not smaller, which require the .01 default."`
+	// Threshold on CaSpkP, CaSpkD values for updating synapse-level Ca values (SynCa) -- this is purely a performance optimization that excludes random infrequent spikes -- 0.05 works well on larger networks but not smaller, which require the .01 default.
+ UpdtThr float32 `def:"0.01,0.02,0.5"`
- // [view: -] rate = 1 / tau
- VgccDt float32 `view:"-" json:"-" xml:"-" inactive:"+" desc:"rate = 1 / tau"`
+ // rate = 1 / tau
+ VgccDt float32 `view:"-" json:"-" xml:"-" inactive:"+"`
- // [view: -] = 1 / Norm
- NormInv float32 `view:"-" json:"-" xml:"-" inactive:"+" desc:"= 1 / Norm"`
+ // = 1 / Norm
+ NormInv float32 `view:"-" json:"-" xml:"-" inactive:"+"`
pad int32
}
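VgccDt and NormInv are derived values (rate = 1/tau and 1/Norm, per their own comments). A standalone sketch of recomputing them in an Update method, using a mirror struct with only the relevant fields (hypothetical code, formulas from the comments):

```go
package main

import "fmt"

// caLrn mirrors just the fields behind the derived constants above.
type caLrn struct {
	Norm    float32 // normalization denominator (def 80)
	VgccTau float32 // VgccCa decay time constant (def 10)
	VgccDt  float32 // rate = 1 / tau
	NormInv float32 // = 1 / Norm
}

func (c *caLrn) update() {
	c.VgccDt = 1 / c.VgccTau
	c.NormInv = 1 / c.Norm
}

func main() {
	c := caLrn{Norm: 80, VgccTau: 10}
	c.update()
	fmt.Println(c.VgccDt, c.NormInv) // 0.1 0.0125
}
```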
@@ -104,19 +104,19 @@ func (np *CaLrnParams) CaLrns(ctx *Context, ni, di uint32) {
// and RLRate as a proxy for the activation (spiking) based learning signal.
type CaSpkParams struct {
- // [def: 8,12] gain multiplier on spike for computing CaSpk: increasing this directly affects the magnitude of the trace values, learning rate in Target layers, and other factors that depend on CaSpk values: RLRate, UpdtThr. Prjn.KinaseCa.SpikeG provides an additional gain factor specific to the synapse-level trace factors, without affecting neuron-level CaSpk values. Larger networks require higher gain factors at the neuron level -- 12, vs 8 for smaller.
- SpikeG float32 `def:"8,12" desc:"gain multiplier on spike for computing CaSpk: increasing this directly affects the magnitude of the trace values, learning rate in Target layers, and other factors that depend on CaSpk values: RLRate, UpdtThr. Prjn.KinaseCa.SpikeG provides an additional gain factor specific to the synapse-level trace factors, without affecting neuron-level CaSpk values. Larger networks require higher gain factors at the neuron level -- 12, vs 8 for smaller."`
+ // gain multiplier on spike for computing CaSpk: increasing this directly affects the magnitude of the trace values, learning rate in Target layers, and other factors that depend on CaSpk values: RLRate, UpdtThr. Prjn.KinaseCa.SpikeG provides an additional gain factor specific to the synapse-level trace factors, without affecting neuron-level CaSpk values. Larger networks require higher gain factors at the neuron level -- 12, vs 8 for smaller.
+ SpikeG float32 `def:"8,12"`
- // [def: 30] [min: 1] time constant for integrating spike-driven calcium trace at sender and recv neurons, CaSyn, which then drives synapse-level integration of the joint pre * post synapse-level activity, in cycles (msec). Note: if this param is changed, then there will be a change in effective learning rate that can be compensated for by multiplying PrjnParams.Learn.KinaseCa.SpikeG by sqrt(30 / sqrt(SynTau)
- SynTau float32 `def:"30" min:"1" desc:"time constant for integrating spike-driven calcium trace at sender and recv neurons, CaSyn, which then drives synapse-level integration of the joint pre * post synapse-level activity, in cycles (msec). Note: if this param is changed, then there will be a change in effective learning rate that can be compensated for by multiplying PrjnParams.Learn.KinaseCa.SpikeG by sqrt(30 / sqrt(SynTau)"`
+	// time constant for integrating spike-driven calcium trace at sender and recv neurons, CaSyn, which then drives synapse-level integration of the joint pre * post synapse-level activity, in cycles (msec). Note: if this param is changed, then there will be a change in effective learning rate that can be compensated for by multiplying PrjnParams.Learn.KinaseCa.SpikeG by sqrt(30 / sqrt(SynTau))
+ SynTau float32 `def:"30" min:"1"`
- // [view: -] rate = 1 / tau
- SynDt float32 `view:"-" json:"-" xml:"-" inactive:"+" desc:"rate = 1 / tau"`
+ // rate = 1 / tau
+ SynDt float32 `view:"-" json:"-" xml:"-" inactive:"+"`
pad int32
- // [view: inline] time constants for integrating CaSpk across M, P and D cascading levels -- these are typically the same as in CaLrn and Prjn level for synaptic integration, except for the M factor.
- Dt kinase.CaDtParams `view:"inline" desc:"time constants for integrating CaSpk across M, P and D cascading levels -- these are typically the same as in CaLrn and Prjn level for synaptic integration, except for the M factor."`
+ // time constants for integrating CaSpk across M, P and D cascading levels -- these are typically the same as in CaLrn and Prjn level for synaptic integration, except for the M factor.
+ Dt kinase.CaDtParams `view:"inline"`
}
func (np *CaSpkParams) Defaults() {
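CaSyn integrates spiking with gain SpikeG at rate SynDt = 1/SynTau. One plausible form of a single-cycle update, inferred from the comments above (not the package's exact method):

```go
package main

import "fmt"

// caSynStep advances the spike-driven calcium trace by one cycle (msec):
// a spike injects gain spikeG and the trace decays at rate synDt = 1/SynTau.
func caSynStep(caSyn, spike, spikeG, synDt float32) float32 {
	return caSyn + synDt*(spikeG*spike-caSyn)
}

func main() {
	ca, dt := float32(0), float32(1.0/30.0)
	ca = caSynStep(ca, 1, 8, dt) // one spike at SpikeG=8, SynTau=30
	fmt.Println(ca)              // ~0.267
}
```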
@@ -149,30 +149,30 @@ func (np *CaSpkParams) CaFmSpike(ctx *Context, ni, di uint32) {
type TrgAvgActParams struct {
// whether to use target average activity mechanism to scale synaptic weights
- On slbool.Bool `desc:"whether to use target average activity mechanism to scale synaptic weights"`
+ On slbool.Bool
// if this is > 0, then each neuron's GiBase is initialized as this proportion of TrgRange.Max - TrgAvg -- gives neurons differences in intrinsic inhibition / leak as a starting bias
- GiBaseInit float32 `desc:"if this is > 0, then each neuron's GiBase is initialized as this proportion of TrgRange.Max - TrgAvg -- gives neurons differences in intrinsic inhibition / leak as a starting bias"`
+ GiBaseInit float32
- // [def: 0.02] [viewif: On] learning rate for adjustments to Trg value based on unit-level error signal. Population TrgAvg values are renormalized to fixed overall average in TrgRange. Generally, deviating from the default doesn't make much difference.
- ErrLRate float32 `viewif:"On" def:"0.02" desc:"learning rate for adjustments to Trg value based on unit-level error signal. Population TrgAvg values are renormalized to fixed overall average in TrgRange. Generally, deviating from the default doesn't make much difference."`
+ // learning rate for adjustments to Trg value based on unit-level error signal. Population TrgAvg values are renormalized to fixed overall average in TrgRange. Generally, deviating from the default doesn't make much difference.
+ ErrLRate float32 `viewif:"On" def:"0.02"`
- // [def: 0.005,0.0002] [viewif: On] rate parameter for how much to scale synaptic weights in proportion to the AvgDif between target and actual proportion activity -- this determines the effective strength of the constraint, and larger models may need more than the weaker default value.
- SynScaleRate float32 `viewif:"On" def:"0.005,0.0002" desc:"rate parameter for how much to scale synaptic weights in proportion to the AvgDif between target and actual proportion activity -- this determines the effective strength of the constraint, and larger models may need more than the weaker default value."`
+ // rate parameter for how much to scale synaptic weights in proportion to the AvgDif between target and actual proportion activity -- this determines the effective strength of the constraint, and larger models may need more than the weaker default value.
+ SynScaleRate float32 `viewif:"On" def:"0.005,0.0002"`
- // [def: 0,1] [viewif: On] amount of mean trg change to subtract -- 1 = full zero sum. 1 works best in general -- but in some cases it may be better to start with 0 and then increase using network SetSubMean method at a later point.
- SubMean float32 `viewif:"On" def:"0,1" desc:"amount of mean trg change to subtract -- 1 = full zero sum. 1 works best in general -- but in some cases it may be better to start with 0 and then increase using network SetSubMean method at a later point."`
+ // amount of mean trg change to subtract -- 1 = full zero sum. 1 works best in general -- but in some cases it may be better to start with 0 and then increase using network SetSubMean method at a later point.
+ SubMean float32 `viewif:"On" def:"0,1"`
- // [def: true] [viewif: On] permute the order of TrgAvg values within layer -- otherwise they are just assigned in order from highest to lowest for easy visualization -- generally must be true if any topographic weights are being used
- Permute slbool.Bool `viewif:"On" def:"true" desc:"permute the order of TrgAvg values within layer -- otherwise they are just assigned in order from highest to lowest for easy visualization -- generally must be true if any topographic weights are being used"`
+ // permute the order of TrgAvg values within layer -- otherwise they are just assigned in order from highest to lowest for easy visualization -- generally must be true if any topographic weights are being used
+ Permute slbool.Bool `viewif:"On" def:"true"`
- // [viewif: On] use pool-level target values if pool-level inhibition and 4D pooled layers are present -- if pool sizes are relatively small, then may not be useful to distribute targets just within pool
- Pool slbool.Bool `viewif:"On" desc:"use pool-level target values if pool-level inhibition and 4D pooled layers are present -- if pool sizes are relatively small, then may not be useful to distribute targets just within pool"`
+ // use pool-level target values if pool-level inhibition and 4D pooled layers are present -- if pool sizes are relatively small, then may not be useful to distribute targets just within pool
+ Pool slbool.Bool `viewif:"On"`
pad int32
- // [def: {'Min':0.5,'Max':2}] [viewif: On] range of target normalized average activations -- individual neurons are assigned values within this range to TrgAvg, and clamped within this range.
- TrgRange minmax.F32 `viewif:"On" def:"{'Min':0.5,'Max':2}" desc:"range of target normalized average activations -- individual neurons are assigned values within this range to TrgAvg, and clamped within this range."`
+ // range of target normalized average activations -- individual neurons are assigned values within this range to TrgAvg, and clamped within this range.
+ TrgRange minmax.F32 `viewif:"On" def:"{'Min':0.5,'Max':2}"`
}
func (ta *TrgAvgActParams) Update() {
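ErrLRate adjusts each neuron's target average by a unit-level error signal; the CaSpkP - CaSpkD difference is the error form used elsewhere in this file (see RLRate below). A heavily simplified sketch of that nudge (hypothetical, not the package's actual update):

```go
package main

import "fmt"

// dTrgAvg sketches the ErrLRate adjustment to a neuron's target average
// activity, using the plus-minus phase difference as the error signal.
func dTrgAvg(trgAvg, caSpkP, caSpkD, errLRate float32) float32 {
	return trgAvg + errLRate*(caSpkP-caSpkD)
}

func main() {
	fmt.Println(dTrgAvg(1.0, 0.6, 0.5, 0.02)) // nudged up by 0.002
}
```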
@@ -197,23 +197,23 @@ func (ta *TrgAvgActParams) Defaults() {
// activity levels, and based on the phase-wise differences in activity (Diff).
type RLRateParams struct {
- // [def: true] use learning rate modulation
- On slbool.Bool `def:"true" desc:"use learning rate modulation"`
+ // use learning rate modulation
+ On slbool.Bool `def:"true"`
- // [def: 0.05,1] [viewif: On] minimum learning rate multiplier for sigmoidal act (1-act) factor -- prevents lrate from going too low for extreme values. Set to 1 to disable Sigmoid derivative factor, which is default for Target layers.
- SigmoidMin float32 `viewif:"On" def:"0.05,1" desc:"minimum learning rate multiplier for sigmoidal act (1-act) factor -- prevents lrate from going too low for extreme values. Set to 1 to disable Sigmoid derivative factor, which is default for Target layers."`
+ // minimum learning rate multiplier for sigmoidal act (1-act) factor -- prevents lrate from going too low for extreme values. Set to 1 to disable Sigmoid derivative factor, which is default for Target layers.
+ SigmoidMin float32 `viewif:"On" def:"0.05,1"`
- // [viewif: On] modulate learning rate as a function of plus - minus differences
- Diff slbool.Bool `viewif:"On" desc:"modulate learning rate as a function of plus - minus differences"`
+ // modulate learning rate as a function of plus - minus differences
+ Diff slbool.Bool `viewif:"On"`
- // [def: 0.1] [viewif: On&&Diff] threshold on Max(CaSpkP, CaSpkD) below which Min lrate applies -- must be > 0 to prevent div by zero
- SpkThr float32 `viewif:"On&&Diff" def:"0.1" desc:"threshold on Max(CaSpkP, CaSpkD) below which Min lrate applies -- must be > 0 to prevent div by zero"`
+ // threshold on Max(CaSpkP, CaSpkD) below which Min lrate applies -- must be > 0 to prevent div by zero
+ SpkThr float32 `viewif:"On&&Diff" def:"0.1"`
- // [def: 0.02] [viewif: On&&Diff] threshold on recv neuron error delta, i.e., |CaSpkP - CaSpkD| below which lrate is at Min value
- DiffThr float32 `viewif:"On&&Diff" def:"0.02" desc:"threshold on recv neuron error delta, i.e., |CaSpkP - CaSpkD| below which lrate is at Min value"`
+ // threshold on recv neuron error delta, i.e., |CaSpkP - CaSpkD| below which lrate is at Min value
+ DiffThr float32 `viewif:"On&&Diff" def:"0.02"`
- // [def: 0.001] [viewif: On&&Diff] for Diff component, minimum learning rate value when below ActDiffThr
- Min float32 `viewif:"On&&Diff" def:"0.001" desc:"for Diff component, minimum learning rate value when below ActDiffThr"`
+ // for Diff component, minimum learning rate value when below ActDiffThr
+ Min float32 `viewif:"On&&Diff" def:"0.001"`
pad, pad1 int32
}
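The sigmoidal factor yields a learning rate proportional to act * (1 - act), floored at SigmoidMin. A standalone sketch in a standard normalized form (assumed, with a 4x scaling that puts the peak at 1; not copied from the package):

```go
package main

import "fmt"

// rlRateSig sketches the sigmoid-derivative learning rate factor:
// peaked at act=0.5, scaled so the max is 1, floored at sigmoidMin.
func rlRateSig(act, sigmoidMin float32) float32 {
	lr := 4 * act * (1 - act)
	if lr < sigmoidMin {
		lr = sigmoidMin
	}
	return lr
}

func main() {
	fmt.Println(rlRateSig(0.5, 0.05))  // 1: maximal at mid activity
	fmt.Println(rlRateSig(0.99, 0.05)) // clamped to 0.05
}
```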
@@ -270,23 +270,23 @@ func (rl *RLRateParams) RLRateDiff(scap, scad float32) float32 {
// This is mainly the running average activations that drive learning
type LearnNeurParams struct {
- // [view: inline] parameterizes the neuron-level calcium signals driving learning: CaLrn = NMDA + VGCC Ca sources, where VGCC can be simulated from spiking or use the more complex and dynamic VGCC channel directly. CaLrn is then integrated in a cascading manner at multiple time scales: CaM (as in calmodulin), CaP (ltP, CaMKII, plus phase), CaD (ltD, DAPK1, minus phase).
- CaLearn CaLrnParams `view:"inline" desc:"parameterizes the neuron-level calcium signals driving learning: CaLrn = NMDA + VGCC Ca sources, where VGCC can be simulated from spiking or use the more complex and dynamic VGCC channel directly. CaLrn is then integrated in a cascading manner at multiple time scales: CaM (as in calmodulin), CaP (ltP, CaMKII, plus phase), CaD (ltD, DAPK1, minus phase)."`
+ // parameterizes the neuron-level calcium signals driving learning: CaLrn = NMDA + VGCC Ca sources, where VGCC can be simulated from spiking or use the more complex and dynamic VGCC channel directly. CaLrn is then integrated in a cascading manner at multiple time scales: CaM (as in calmodulin), CaP (ltP, CaMKII, plus phase), CaD (ltD, DAPK1, minus phase).
+ CaLearn CaLrnParams `view:"inline"`
- // [view: inline] parameterizes the neuron-level spike-driven calcium signals, starting with CaSyn that is integrated at the neuron level, and drives synapse-level, pre * post Ca integration, which provides the Tr trace that multiplies error signals, and drives learning directly for Target layers. CaSpk* values are integrated separately at the Neuron level and used for UpdtThr and RLRate as a proxy for the activation (spiking) based learning signal.
- CaSpk CaSpkParams `view:"inline" desc:"parameterizes the neuron-level spike-driven calcium signals, starting with CaSyn that is integrated at the neuron level, and drives synapse-level, pre * post Ca integration, which provides the Tr trace that multiplies error signals, and drives learning directly for Target layers. CaSpk* values are integrated separately at the Neuron level and used for UpdtThr and RLRate as a proxy for the activation (spiking) based learning signal."`
+ // parameterizes the neuron-level spike-driven calcium signals, starting with CaSyn that is integrated at the neuron level, and drives synapse-level, pre * post Ca integration, which provides the Tr trace that multiplies error signals, and drives learning directly for Target layers. CaSpk* values are integrated separately at the Neuron level and used for UpdtThr and RLRate as a proxy for the activation (spiking) based learning signal.
+ CaSpk CaSpkParams `view:"inline"`
- // [view: inline] NMDA channel parameters used for learning, vs. the ones driving activation -- allows exploration of learning parameters independent of their effects on active maintenance contributions of NMDA, and may be supported by different receptor subtypes
- LrnNMDA chans.NMDAParams `view:"inline" desc:"NMDA channel parameters used for learning, vs. the ones driving activation -- allows exploration of learning parameters independent of their effects on active maintenance contributions of NMDA, and may be supported by different receptor subtypes"`
+ // NMDA channel parameters used for learning, vs. the ones driving activation -- allows exploration of learning parameters independent of their effects on active maintenance contributions of NMDA, and may be supported by different receptor subtypes
+ LrnNMDA chans.NMDAParams `view:"inline"`
- // [view: inline] synaptic scaling parameters for regulating overall average activity compared to neuron's own target level
- TrgAvgAct TrgAvgActParams `view:"inline" desc:"synaptic scaling parameters for regulating overall average activity compared to neuron's own target level"`
+ // synaptic scaling parameters for regulating overall average activity compared to neuron's own target level
+ TrgAvgAct TrgAvgActParams `view:"inline"`
- // [view: inline] recv neuron learning rate modulation params -- an additional error-based modulation of learning for receiver side: RLRate = |CaSpkP - CaSpkD| / Max(CaSpkP, CaSpkD)
- RLRate RLRateParams `view:"inline" desc:"recv neuron learning rate modulation params -- an additional error-based modulation of learning for receiver side: RLRate = |CaSpkP - CaSpkD| / Max(CaSpkP, CaSpkD)"`
+ // recv neuron learning rate modulation params -- an additional error-based modulation of learning for receiver side: RLRate = |CaSpkP - CaSpkD| / Max(CaSpkP, CaSpkD)
+ RLRate RLRateParams `view:"inline"`
- // [view: inline] neuromodulation effects on learning rate and activity, as a function of layer-level DA and ACh values, which are updated from global Context values, and computed from reinforcement learning algorithms
- NeuroMod NeuroModParams `view:"inline" desc:"neuromodulation effects on learning rate and activity, as a function of layer-level DA and ACh values, which are updated from global Context values, and computed from reinforcement learning algorithms"`
+ // neuromodulation effects on learning rate and activity, as a function of layer-level DA and ACh values, which are updated from global Context values, and computed from reinforcement learning algorithms
+ NeuroMod NeuroModParams `view:"inline"`
}
func (ln *LearnNeurParams) Update() {
@@ -419,17 +419,17 @@ func SigInvFun61(w float32) float32 {
// SWtInitParams for initial SWt values
type SWtInitParams struct {
- // [def: 0,1,0.5] [min: 0] [max: 1] how much of the initial random weights are captured in the SWt values -- rest goes into the LWt values. 1 gives the strongest initial biasing effect, for larger models that need more structural support. 0.5 should work for most models where stronger constraints are not needed.
- SPct float32 `min:"0" max:"1" def:"0,1,0.5" desc:"how much of the initial random weights are captured in the SWt values -- rest goes into the LWt values. 1 gives the strongest initial biasing effect, for larger models that need more structural support. 0.5 should work for most models where stronger constraints are not needed."`
+ // how much of the initial random weights are captured in the SWt values -- rest goes into the LWt values. 1 gives the strongest initial biasing effect, for larger models that need more structural support. 0.5 should work for most models where stronger constraints are not needed.
+ SPct float32 `min:"0" max:"1" def:"0,1,0.5"`
- // [def: 0.5,0.4] target mean weight values across receiving neuron's projection -- the mean SWt values are constrained to remain at this value. some projections may benefit from lower mean of .4
- Mean float32 `def:"0.5,0.4" desc:"target mean weight values across receiving neuron's projection -- the mean SWt values are constrained to remain at this value. some projections may benefit from lower mean of .4"`
+ // target mean weight values across receiving neuron's projection -- the mean SWt values are constrained to remain at this value. some projections may benefit from lower mean of .4
+ Mean float32 `def:"0.5,0.4"`
- // [def: 0.25] initial variance in weight values, prior to constraints.
- Var float32 `def:"0.25" desc:"initial variance in weight values, prior to constraints."`
+ // initial variance in weight values, prior to constraints.
+ Var float32 `def:"0.25"`
- // [def: true] symmetrize the initial weight values with those in reciprocal projection -- typically true for bidirectional excitatory connections
- Sym slbool.Bool `def:"true" desc:"symmetrize the initial weight values with those in reciprocal projection -- typically true for bidirectional excitatory connections"`
+ // symmetrize the initial weight values with those in reciprocal projection -- typically true for bidirectional excitatory connections
+ Sym slbool.Bool `def:"true"`
}
func (sp *SWtInitParams) Defaults() {
@@ -446,16 +446,16 @@ func (sp *SWtInitParams) Update() {
type SWtAdaptParams struct {
// if true, adaptation is active -- if false, SWt values are not updated, in which case it is generally good to have Init.SPct=0 too.
- On slbool.Bool `desc:"if true, adaptation is active -- if false, SWt values are not updated, in which case it is generally good to have Init.SPct=0 too."`
+ On slbool.Bool
- // [def: 0.1,0.01,0.001,0.0002] [viewif: On] learning rate multiplier on the accumulated DWt values (which already have fast LRate applied) to incorporate into SWt during slow outer loop updating -- lower values impose stronger constraints, for larger networks that need more structural support, e.g., 0.001 is better after 1,000 epochs in large models. 0.1 is fine for smaller models.
- LRate float32 `viewif:"On" def:"0.1,0.01,0.001,0.0002" desc:"learning rate multiplier on the accumulated DWt values (which already have fast LRate applied) to incorporate into SWt during slow outer loop updating -- lower values impose stronger constraints, for larger networks that need more structural support, e.g., 0.001 is better after 1,000 epochs in large models. 0.1 is fine for smaller models."`
+ // learning rate multiplier on the accumulated DWt values (which already have fast LRate applied) to incorporate into SWt during slow outer loop updating -- lower values impose stronger constraints, for larger networks that need more structural support, e.g., 0.001 is better after 1,000 epochs in large models. 0.1 is fine for smaller models.
+ LRate float32 `viewif:"On" def:"0.1,0.01,0.001,0.0002"`
- // [def: 1] [viewif: On] amount of mean to subtract from SWt delta when updating -- generally best to set to 1
- SubMean float32 `viewif:"On" def:"1" desc:"amount of mean to subtract from SWt delta when updating -- generally best to set to 1"`
+ // amount of mean to subtract from SWt delta when updating -- generally best to set to 1
+ SubMean float32 `viewif:"On" def:"1"`
- // [def: 6] [viewif: On] gain of sigmoidal constrast enhancement function used to transform learned, linear LWt values into Wt values
- SigGain float32 `viewif:"On" def:"6" desc:"gain of sigmoidal constrast enhancement function used to transform learned, linear LWt values into Wt values"`
+	// gain of sigmoidal contrast enhancement function used to transform learned, linear LWt values into Wt values
+ SigGain float32 `viewif:"On" def:"6"`
}
func (sp *SWtAdaptParams) Defaults() {
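SigGain is the gain of the sigmoidal contrast function mapping linear LWt into effective Wt; its inverse SigInvFun61 (gain 6, offset 1) appears in the hunk context above. A standalone sketch of the forward function in the classic Leabra form (assumed, not copied from the package):

```go
package main

import (
	"fmt"
	"math"
)

// sigFun applies gain-based contrast enhancement to a linear weight in
// [0,1]; gain=6, off=1 is the "61" variant whose inverse is SigInvFun61.
func sigFun(w, gain, off float32) float32 {
	if w <= 0 {
		return 0
	}
	if w >= 1 {
		return 1
	}
	return float32(1 / (1 + math.Pow(float64(off*(1-w)/w), float64(gain))))
}

func main() {
	fmt.Println(sigFun(0.5, 6, 1)) // 0.5: midpoint preserved
	fmt.Println(sigFun(0.6, 6, 1)) // ~0.92: contrast enhanced
}
```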
@@ -492,14 +492,14 @@ func (sp *SWtInitParams) RndVar(rnd erand.Rand) float32 {
// more dynamic and supported by the regular learned weights.
type SWtParams struct {
- // [view: inline] initialization of SWt values
- Init SWtInitParams `view:"inline" desc:"initialization of SWt values"`
+ // initialization of SWt values
+ Init SWtInitParams `view:"inline"`
- // [view: inline] adaptation of SWt values in response to LWt learning
- Adapt SWtAdaptParams `view:"inline" desc:"adaptation of SWt values in response to LWt learning"`
+ // adaptation of SWt values in response to LWt learning
+ Adapt SWtAdaptParams `view:"inline"`
- // [def: {'Min':0.2,'Max':0.8}] [view: inline] range limits for SWt values
- Limit minmax.F32 `def:"{'Min':0.2,'Max':0.8}" view:"inline" desc:"range limits for SWt values"`
+ // range limits for SWt values
+ Limit minmax.F32 `def:"{'Min':0.2,'Max':0.8}" view:"inline"`
}
func (sp *SWtParams) Defaults() {
@@ -633,17 +633,17 @@ func (sp *SWtParams) InitWtsSyn(ctx *Context, syni uint32, rnd erand.Rand, mean,
// LRateParams manages learning rate parameters
type LRateParams struct {
- // [def: 0.04,0.1,0.2] base learning rate for this projection -- can be modulated by other factors below -- for larger networks, use slower rates such as 0.04, smaller networks can use faster 0.2.
- Base float32 `def:"0.04,0.1,0.2" desc:"base learning rate for this projection -- can be modulated by other factors below -- for larger networks, use slower rates such as 0.04, smaller networks can use faster 0.2."`
+	// base learning rate for this projection -- can be modulated by other factors below -- for larger networks, use slower rates such as 0.04; smaller networks can use faster 0.2.
+ Base float32 `def:"0.04,0.1,0.2"`
// scheduled learning rate multiplier, simulating reduction in plasticity over aging
- Sched float32 `desc:"scheduled learning rate multiplier, simulating reduction in plasticity over aging"`
+ Sched float32
// dynamic learning rate modulation due to neuromodulatory or other such factors
- Mod float32 `desc:"dynamic learning rate modulation due to neuromodulatory or other such factors"`
+ Mod float32
// effective actual learning rate multiplier used in computing DWt: Eff = eMod * Sched * Base
- Eff float32 `inactive:"+" desc:"effective actual learning rate multiplier used in computing DWt: Eff = eMod * Sched * Base"`
+ Eff float32 `inactive:"+"`
}
func (ls *LRateParams) Defaults() {
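Eff is just the product of the three factors, per its own comment (Eff = Mod * Sched * Base). A minimal mirror-struct sketch (hypothetical code; the formula comes from the comment above):

```go
package main

import "fmt"

type lrate struct{ Base, Sched, Mod, Eff float32 }

// updateEff recomputes the effective learning rate from its factors,
// per the Eff field comment: Eff = Mod * Sched * Base.
func (l *lrate) updateEff() { l.Eff = l.Mod * l.Sched * l.Base }

func main() {
	l := lrate{Base: 0.04, Sched: 1, Mod: 1}
	l.updateEff()
	fmt.Println(l.Eff) // 0.04
}
```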
@@ -671,17 +671,17 @@ func (ls *LRateParams) Init() {
// TraceParams manages learning rate parameters
type TraceParams struct {
- // [def: 1,2,4] time constant for integrating trace over theta cycle timescales -- governs the decay rate of syanptic trace
- Tau float32 `def:"1,2,4" desc:"time constant for integrating trace over theta cycle timescales -- governs the decay rate of syanptic trace"`
+	// time constant for integrating trace over theta cycle timescales -- governs the decay rate of synaptic trace
+ Tau float32 `def:"1,2,4"`
- // [def: 0,1] amount of the mean dWt to subtract, producing a zero-sum effect -- 1.0 = full zero-sum dWt -- only on non-zero DWts. typically set to 0 for standard trace learning projections, although some require it for stability over the long haul. can use SetSubMean to set to 1 after significant early learning has occurred with 0. Some special prjn types (e.g., Hebb) benefit from SubMean = 1 always
- SubMean float32 `def:"0,1" desc:"amount of the mean dWt to subtract, producing a zero-sum effect -- 1.0 = full zero-sum dWt -- only on non-zero DWts. typically set to 0 for standard trace learning projections, although some require it for stability over the long haul. can use SetSubMean to set to 1 after significant early learning has occurred with 0. Some special prjn types (e.g., Hebb) benefit from SubMean = 1 always"`
+ // amount of the mean dWt to subtract, producing a zero-sum effect -- 1.0 = full zero-sum dWt -- only on non-zero DWts. typically set to 0 for standard trace learning projections, although some require it for stability over the long haul. can use SetSubMean to set to 1 after significant early learning has occurred with 0. Some special prjn types (e.g., Hebb) benefit from SubMean = 1 always
+ SubMean float32 `def:"0,1"`
// threshold for learning, depending on different algorithms -- in Matrix and VSPatch it applies to normalized GeIntNorm value -- setting this relatively high encourages sparser representations
- LearnThr float32 `desc:"threshold for learning, depending on different algorithms -- in Matrix and VSPatch it applies to normalized GeIntNorm value -- setting this relatively high encourages sparser representations"`
+ LearnThr float32
- // [view: -] rate = 1 / tau
- Dt float32 `view:"-" json:"-" xml:"-" inactive:"+" desc:"rate = 1 / tau"`
+ // rate = 1 / tau
+ Dt float32 `view:"-" json:"-" xml:"-" inactive:"+"`
}
func (tp *TraceParams) Defaults() {
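The trace integrates synaptic calcium at rate Dt = 1/Tau across theta cycles. A sketch of the exponential update implied by the Tau / Dt comments (hypothetical helper, not the package's TrFmCa):

```go
package main

import "fmt"

// trStep integrates synaptic calcium ca into trace tr at rate dt = 1/Tau:
// Tau=1 makes the trace track ca directly; larger Tau decays more slowly.
func trStep(tr, ca, dt float32) float32 {
	return tr + dt*(ca-tr)
}

func main() {
	fmt.Println(trStep(0.2, 1.0, 0.5)) // Tau=2: 0.6
}
```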
@@ -712,15 +712,15 @@ func (tp *TraceParams) TrFmCa(tr float32, ca float32) float32 {
type LRateMod struct {
// toggle use of this modulation factor
- On slbool.Bool `desc:"toggle use of this modulation factor"`
+ On slbool.Bool
- // [viewif: On] [min: 0] [max: 1] baseline learning rate -- what you get for correct cases
- Base float32 `viewif:"On" min:"0" max:"1" desc:"baseline learning rate -- what you get for correct cases"`
+ // baseline learning rate -- what you get for correct cases
+ Base float32 `viewif:"On" min:"0" max:"1"`
pad, pad1 int32
- // [viewif: On] defines the range over which modulation occurs for the modulator factor -- Min and below get the Base level of learning rate modulation, Max and above get a modulation of 1
- Range minmax.F32 `viewif:"On" desc:"defines the range over which modulation occurs for the modulator factor -- Min and below get the Base level of learning rate modulation, Max and above get a modulation of 1"`
+ // defines the range over which modulation occurs for the modulator factor -- Min and below get the Base level of learning rate modulation, Max and above get a modulation of 1
+ Range minmax.F32 `viewif:"On"`
}
func (lr *LRateMod) Defaults() {
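Per the Range comment above, the modulator maps fact at or below Min to the Base rate, at or above Max to 1, and linearly in between. A standalone sketch of that mapping (assumed implementation):

```go
package main

import "fmt"

// lrateMod maps a modulator factor through [min,max] onto [base,1]:
// at or below min you get the base rate, at or above max the full rate.
func lrateMod(fact, base, min, max float32) float32 {
	if fact <= min {
		return base
	}
	if fact >= max {
		return 1
	}
	return base + (1-base)*(fact-min)/(max-min)
}

func main() {
	fmt.Println(lrateMod(0.5, 0.2, 0, 1)) // 0.6: halfway between Base and 1
}
```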
@@ -770,18 +770,18 @@ func (lr *LRateMod) LRateMod(net *Network, fact float32) float32 {
type LearnSynParams struct {
// enable learning for this projection
- Learn slbool.Bool `desc:"enable learning for this projection"`
+ Learn slbool.Bool
pad, pad1, pad2 int32
- // [viewif: Learn] learning rate parameters, supporting two levels of modulation on top of base learning rate.
- LRate LRateParams `viewif:"Learn" desc:"learning rate parameters, supporting two levels of modulation on top of base learning rate."`
+ // learning rate parameters, supporting two levels of modulation on top of base learning rate.
+ LRate LRateParams `viewif:"Learn"`
- // [viewif: Learn] trace-based learning parameters
- Trace TraceParams `viewif:"Learn" desc:"trace-based learning parameters"`
+ // trace-based learning parameters
+ Trace TraceParams `viewif:"Learn"`
- // [view: inline] [viewif: Learn] kinase calcium Ca integration parameters
- KinaseCa kinase.CaParams `viewif:"Learn" view:"inline" desc:"kinase calcium Ca integration parameters"`
+ // kinase calcium Ca integration parameters
+ KinaseCa kinase.CaParams `viewif:"Learn" view:"inline"`
}
func (ls *LearnSynParams) Update() {
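LearnSynParams advertises "two levels of modulation on top of base learning rate"; such factors conventionally compose multiplicatively into a single effective rate. A one-line hypothetical illustration, since the LRateParams fields themselves are not shown in this diff:

```go
// effLRate composes a base learning rate with two multiplicative
// modulators (e.g., a schedule factor and a performance-driven factor).
// Names are illustrative assumptions, not the v2 API.
func effLRate(base, mod1, mod2 float32) float32 {
	return base * mod1 * mod2
}
```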
diff --git a/axon/logging.go b/axon/logging.go
index 8dd13aaff..cff928567 100644
--- a/axon/logging.go
+++ b/axon/logging.go
@@ -7,19 +7,19 @@ package axon
import (
"strconv"
- "github.com/emer/emergent/egui"
- "github.com/emer/emergent/elog"
- "github.com/emer/emergent/estats"
- "github.com/emer/emergent/etime"
- "github.com/emer/etable/agg"
- "github.com/emer/etable/eplot"
- "github.com/emer/etable/etable"
- "github.com/emer/etable/etensor"
- "github.com/emer/etable/metric"
- "github.com/emer/etable/minmax"
- "github.com/emer/etable/norm"
- "github.com/emer/etable/split"
- "github.com/emer/etable/tsragg"
+ "github.com/emer/emergent/v2/egui"
+ "github.com/emer/emergent/v2/elog"
+ "github.com/emer/emergent/v2/estats"
+ "github.com/emer/emergent/v2/etime"
+ "goki.dev/etable/v2/agg"
+ "goki.dev/etable/v2/eplot"
+ "goki.dev/etable/v2/etable"
+ "goki.dev/etable/v2/etensor"
+ "goki.dev/etable/v2/metric"
+ "goki.dev/etable/v2/minmax"
+ "goki.dev/etable/v2/norm"
+ "goki.dev/etable/v2/split"
+ "goki.dev/etable/v2/tsragg"
)
// LogTestErrors records all errors made across TestTrials, at Test Epoch scope
@@ -627,11 +627,13 @@ func LayerActsLogRecReset(lg *elog.Logs) {
// LayerActsLogConfigGUI configures GUI for LayerActsLog Plot and LayerActs Avg Plot
func LayerActsLogConfigGUI(lg *elog.Logs, gui *egui.GUI) {
- plt := gui.TabView.AddNewTab(eplot.KiT_Plot2D, "LayerActs Plot").(*eplot.Plot2D)
+ pt := gui.Tabs.NewTab("LayerActs Plot")
+ plt := eplot.NewPlot2D(pt)
gui.Plots["LayerActs"] = plt
plt.SetTable(lg.MiscTables["LayerActs"])
- plt = gui.TabView.AddNewTab(eplot.KiT_Plot2D, "LayerActs Avg Plot").(*eplot.Plot2D)
+ pt = gui.Tabs.NewTab("LayerActs Avg Plot")
+ plt = eplot.NewPlot2D(pt)
gui.Plots["LayerActsAvg"] = plt
plt.SetTable(lg.MiscTables["LayerActsAvg"])
}
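The hunk above is the general shape of the v2 GUI migration: the typed AddNewTab call with a KiT_* type and interface downcast becomes a NewTab returning the tab's container, into which the widget is constructed directly. A hedged sketch of factoring that pattern into a helper, reusing only the calls visible in the diff; the helper itself is hypothetical.

```go
import (
	"github.com/emer/emergent/v2/egui"
	"github.com/emer/emergent/v2/elog"
	"goki.dev/etable/v2/eplot"
)

// addPlotTab wires up one plot tab following the v2 pattern above,
// plotting the named table from lg.MiscTables.
func addPlotTab(lg *elog.Logs, gui *egui.GUI, name string) *eplot.Plot2D {
	pt := gui.Tabs.NewTab(name + " Plot")
	plt := eplot.NewPlot2D(pt)
	gui.Plots[name] = plt
	plt.SetTable(lg.MiscTables[name])
	return plt
}
```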
diff --git a/axon/looper.go b/axon/looper.go
index e5f5b9557..91ddd9e03 100644
--- a/axon/looper.go
+++ b/axon/looper.go
@@ -5,11 +5,11 @@
package axon
import (
- "github.com/emer/emergent/egui"
- "github.com/emer/emergent/elog"
- "github.com/emer/emergent/etime"
- "github.com/emer/emergent/looper"
- "github.com/emer/emergent/netview"
+ "github.com/emer/emergent/v2/egui"
+ "github.com/emer/emergent/v2/elog"
+ "github.com/emer/emergent/v2/etime"
+ "github.com/emer/emergent/v2/looper"
+ "github.com/emer/emergent/v2/netview"
)
// LooperStdPhases adds the minus and plus phases of the theta cycle,
diff --git a/axon/network.go b/axon/network.go
index 776a2d32a..82d74ca8a 100644
--- a/axon/network.go
+++ b/axon/network.go
@@ -9,11 +9,9 @@ import (
"strings"
"github.com/c2h5oh/datasize"
- "github.com/emer/emergent/emer"
- "github.com/emer/emergent/prjn"
- "github.com/emer/etable/etensor"
- "github.com/goki/ki/ki"
- "github.com/goki/ki/kit"
+ "github.com/emer/emergent/v2/emer"
+ "github.com/emer/emergent/v2/prjn"
+ "goki.dev/etable/v2/etensor"
)
// axon.Network implements the Axon spiking model,
@@ -23,8 +21,6 @@ type Network struct {
NetworkBase
}
-var KiT_Network = kit.Types.AddType(&Network{}, NetworkProps)
-
// InitName MUST be called to initialize the network's pointer to itself as an emer.Network
// which enables the proper interface methods to be called. Also sets the name,
// and initializes NetIdx in global list of Network
@@ -695,6 +691,7 @@ func (nt *Network) SizeReport(detail bool) string {
//////////////////////////////////////////////////////////////////////////////////////
// Network props for gui
+/*
var NetworkProps = ki.Props{
"ToolBar": ki.PropSlice{
{"SaveWtsJSON", ki.Props{
@@ -774,3 +771,4 @@ var NetworkProps = ki.Props{
}},
},
}
+*/
diff --git a/axon/network_test.go b/axon/network_test.go
index dfa7d38ef..1ebb36635 100644
--- a/axon/network_test.go
+++ b/axon/network_test.go
@@ -5,7 +5,7 @@ package axon
import (
"testing"
- "github.com/emer/emergent/emer"
+ "github.com/emer/emergent/v2/emer"
"github.com/stretchr/testify/assert"
)
diff --git a/axon/networkbase.go b/axon/networkbase.go
index 792fd19dc..b30a3d77e 100644
--- a/axon/networkbase.go
+++ b/axon/networkbase.go
@@ -20,150 +20,150 @@ import (
"strings"
"time"
- "github.com/emer/emergent/econfig"
- "github.com/emer/emergent/emer"
- "github.com/emer/emergent/erand"
- "github.com/emer/emergent/netparams"
- "github.com/emer/emergent/params"
- "github.com/emer/emergent/prjn"
- "github.com/emer/emergent/relpos"
- "github.com/emer/emergent/timer"
- "github.com/emer/emergent/weights"
- "github.com/goki/gi/gi"
- "github.com/goki/ki/indent"
- "github.com/goki/kigen/dedupe"
- "github.com/goki/mat32"
+ "github.com/emer/emergent/v2/econfig"
+ "github.com/emer/emergent/v2/emer"
+ "github.com/emer/emergent/v2/erand"
+ "github.com/emer/emergent/v2/netparams"
+ "github.com/emer/emergent/v2/params"
+ "github.com/emer/emergent/v2/prjn"
+ "github.com/emer/emergent/v2/relpos"
+ "github.com/emer/emergent/v2/timer"
+ "github.com/emer/emergent/v2/weights"
+ "goki.dev/gi/v2/gi"
+ "goki.dev/glop/dedupe"
+ "goki.dev/glop/indent"
+ "goki.dev/mat32/v2"
)
// NetworkBase manages the basic structural components of a network (layers).
// The main Network then can just have the algorithm-specific code.
type NetworkBase struct {
- // [view: -] we need a pointer to ourselves as an emer.Network, which can always be used to extract the true underlying type of object when network is embedded in other structs -- function receivers do not have this ability so this is necessary.
- EmerNet emer.Network `copy:"-" json:"-" xml:"-" view:"-" desc:"we need a pointer to ourselves as an emer.Network, which can always be used to extract the true underlying type of object when network is embedded in other structs -- function receivers do not have this ability so this is necessary."`
+ // we need a pointer to ourselves as an emer.Network, which can always be used to extract the true underlying type of object when network is embedded in other structs -- function receivers do not have this ability so this is necessary.
+ EmerNet emer.Network `copy:"-" json:"-" xml:"-" view:"-"`
// overall name of network -- helps discriminate if there are multiple
- Nm string `desc:"overall name of network -- helps discriminate if there are multiple"`
+ Nm string
// filename of last weights file loaded or saved
- WtsFile string `desc:"filename of last weights file loaded or saved"`
+ WtsFile string
// PVLV system for phasic dopamine signaling, including internal drives, US outcomes. Core LHb (lateral habenula) and VTA (ventral tegmental area) dopamine are computed in equations using inputs from specialized network layers (LDTLayer driven by BLA, CeM layers, VSPatchLayer). Renders USLayer, PVLayer, DrivesLayer representations based on state updated here.
- PVLV PVLV `desc:"PVLV system for phasic dopamine signaling, including internal drives, US outcomes. Core LHb (lateral habenula) and VTA (ventral tegmental area) dopamine are computed in equations using inputs from specialized network layers (LDTLayer driven by BLA, CeM layers, VSPatchLayer). Renders USLayer, PVLayer, DrivesLayer representations based on state updated here."`
+ PVLV PVLV
- // [view: -] map of name to layers -- layer names must be unique
- LayMap map[string]*Layer `view:"-" desc:"map of name to layers -- layer names must be unique"`
+ // map of name to layers -- layer names must be unique
+ LayMap map[string]*Layer `view:"-"`
- // [view: -] map of layer classes -- made during Build
- LayClassMap map[string][]string `view:"-" desc:"map of layer classes -- made during Build"`
+ // map of layer classes -- made during Build
+ LayClassMap map[string][]string `view:"-"`
- // [view: -] minimum display position in network
- MinPos mat32.Vec3 `view:"-" desc:"minimum display position in network"`
+ // minimum display position in network
+ MinPos mat32.Vec3 `view:"-"`
- // [view: -] maximum display position in network
- MaxPos mat32.Vec3 `view:"-" desc:"maximum display position in network"`
+ // maximum display position in network
+ MaxPos mat32.Vec3 `view:"-"`
// optional metadata that is saved in network weights files -- e.g., can indicate number of epochs that were trained, or any other information about this network that would be useful to save
- MetaData map[string]string `desc:"optional metadata that is saved in network weights files -- e.g., can indicate number of epochs that were trained, or any other information about this network that would be useful to save"`
+ MetaData map[string]string
// if true, the neuron and synapse variables will be organized into a gpu-optimized memory order, otherwise cpu-optimized. This must be set before network Build() is called.
- UseGPUOrder bool `inactive:"+" desc:"if true, the neuron and synapse variables will be organized into a gpu-optimized memory order, otherwise cpu-optimized. This must be set before network Build() is called."`
+ UseGPUOrder bool `inactive:"+"`
- // [view: -] network index in global Networks list of networks -- needed for GPU shader kernel compatible network variable access functions (e.g., NrnV, SynV etc) in CPU mode
- NetIdx uint32 `view:"-" desc:"network index in global Networks list of networks -- needed for GPU shader kernel compatible network variable access functions (e.g., NrnV, SynV etc) in CPU mode"`
+ // network index in global Networks list of networks -- needed for GPU shader kernel compatible network variable access functions (e.g., NrnV, SynV etc) in CPU mode
+ NetIdx uint32 `view:"-"`
- // [view: -] maximum synaptic delay across any projection in the network -- used for sizing the GBuf accumulation buffer.
- MaxDelay uint32 `inactive:"+" view:"-" desc:"maximum synaptic delay across any projection in the network -- used for sizing the GBuf accumulation buffer."`
+ // maximum synaptic delay across any projection in the network -- used for sizing the GBuf accumulation buffer.
+ MaxDelay uint32 `inactive:"+" view:"-"`
// maximum number of data inputs that can be processed in parallel in one pass of the network. Neuron storage is allocated to hold this amount during Build process, and this value reflects that.
- MaxData uint32 `inactive:"+" desc:"maximum number of data inputs that can be processed in parallel in one pass of the network. Neuron storage is allocated to hold this amount during Build process, and this value reflects that."`
+ MaxData uint32 `inactive:"+"`
// total number of neurons
- NNeurons uint32 `inactive:"+" desc:"total number of neurons"`
+ NNeurons uint32 `inactive:"+"`
// total number of synapses
- NSyns uint32 `inactive:"+" desc:"total number of synapses"`
+ NSyns uint32 `inactive:"+"`
- // [view: -] storage for global vars
- Globals []float32 `view:"-" desc:"storage for global vars"`
+ // storage for global vars
+ Globals []float32 `view:"-"`
// array of layers
- Layers []*Layer `desc:"array of layers"`
+ Layers []*Layer
- // [view: -] [Layers] array of layer parameters, in 1-to-1 correspondence with Layers
- LayParams []LayerParams `view:"-" desc:"[Layers] array of layer parameters, in 1-to-1 correspondence with Layers"`
+ // array of layer parameters, in 1-to-1 correspondence with Layers
+ LayParams []LayerParams `view:"-"`
- // [view: -] [Layers][MaxData] array of layer values, with extra per data
- LayVals []LayerVals `view:"-" desc:"[Layers][MaxData] array of layer values, with extra per data"`
+ // array of layer values, with extra per data
+ LayVals []LayerVals `view:"-"`
- // [view: -] [Layers][Pools][MaxData] array of inhibitory pools for all layers.
- Pools []Pool `view:"-" desc:"[Layers][Pools][MaxData] array of inhibitory pools for all layers."`
+ // array of inhibitory pools for all layers.
+ Pools []Pool `view:"-"`
- // [view: -] [Layers][Neurons][MaxData] entire network's allocation of neuron variables, accessed via NrnV function with flexible striding
- Neurons []float32 `view:"-" desc:"[Layers][Neurons][MaxData] entire network's allocation of neuron variables, accessed via NrnV function with flexible striding"`
+ // entire network's allocation of neuron variables, accessed via NrnV function with flexible striding
+ Neurons []float32 `view:"-"`
- // [view: -] [Layers][Neurons][MaxData]] entire network's allocation of neuron average avariables, accessed via NrnAvgV function with flexible striding
- NeuronAvgs []float32 `view:"-" desc:"[Layers][Neurons][MaxData]] entire network's allocation of neuron average avariables, accessed via NrnAvgV function with flexible striding"`
+ // entire network's allocation of neuron average variables, accessed via NrnAvgV function with flexible striding
+ NeuronAvgs []float32 `view:"-"`
- // [view: -] [Layers][Neurons] entire network's allocation of neuron index variables, accessed via NrnI function with flexible striding
- NeuronIxs []uint32 `view:"-" desc:"[Layers][Neurons] entire network's allocation of neuron index variables, accessed via NrnI function with flexible striding"`
+ // entire network's allocation of neuron index variables, accessed via NrnI function with flexible striding
+ NeuronIxs []uint32 `view:"-"`
- // [view: -] [Layers][SendPrjns] pointers to all projections in the network, sender-based
- Prjns []*Prjn `view:"-" desc:"[Layers][SendPrjns] pointers to all projections in the network, sender-based"`
+ // pointers to all projections in the network, sender-based
+ Prjns []*Prjn `view:"-"`
- // [view: -] [Layers][SendPrjns] array of projection parameters, in 1-to-1 correspondence with Prjns, sender-based
- PrjnParams []PrjnParams `view:"-" desc:"[Layers][SendPrjns] array of projection parameters, in 1-to-1 correspondence with Prjns, sender-based"`
+ // array of projection parameters, in 1-to-1 correspondence with Prjns, sender-based
+ PrjnParams []PrjnParams `view:"-"`
- // [view: -] [Layers][SendPrjns][SendNeurons][RecvNeurons] entire network's allocation of synapse idx vars, organized sender-based, with flexible striding, accessed via SynI function
- SynapseIxs []uint32 `view:"-" desc:"[Layers][SendPrjns][SendNeurons][RecvNeurons] entire network's allocation of synapse idx vars, organized sender-based, with flexible striding, accessed via SynI function"`
+ // entire network's allocation of synapse idx vars, organized sender-based, with flexible striding, accessed via SynI function
+ SynapseIxs []uint32 `view:"-"`
- // [view: -] [Layers][SendPrjns][SendNeurons][RecvNeurons] entire network's allocation of synapses, organized sender-based, with flexible striding, accessed via SynV function
- Synapses []float32 `view:"-" desc:"[Layers][SendPrjns][SendNeurons][RecvNeurons] entire network's allocation of synapses, organized sender-based, with flexible striding, accessed via SynV function"`
+ // entire network's allocation of synapses, organized sender-based, with flexible striding, accessed via SynV function
+ Synapses []float32 `view:"-"`
- // [view: -] [Layers][SendPrjns][SendNeurons][RecvNeurons][MaxData] entire network's allocation of synapse Ca vars, organized sender-based, with flexible striding, accessed via SynCaV function
- SynapseCas []float32 `view:"-" desc:"[Layers][SendPrjns][SendNeurons][RecvNeurons][MaxData] entire network's allocation of synapse Ca vars, organized sender-based, with flexible striding, accessed via SynCaV function"`
+ // entire network's allocation of synapse Ca vars, organized sender-based, with flexible striding, accessed via SynCaV function
+ SynapseCas []float32 `view:"-"`
- // [view: -] [Layers][SendPrjns][SendNeurons] starting offset and N cons for each sending neuron, for indexing into the Syns synapses, which are organized sender-based.
- PrjnSendCon []StartN `view:"-" desc:"[Layers][SendPrjns][SendNeurons] starting offset and N cons for each sending neuron, for indexing into the Syns synapses, which are organized sender-based."`
+ // starting offset and N cons for each sending neuron, for indexing into the Syns synapses, which are organized sender-based.
+ PrjnSendCon []StartN `view:"-"`
- // [view: -] [Layers][RecvPrjns][RecvNeurons] starting offset and N cons for each recv neuron, for indexing into the RecvSynIdx array of indexes into the Syns synapses, which are organized sender-based.
- PrjnRecvCon []StartN `view:"-" desc:"[Layers][RecvPrjns][RecvNeurons] starting offset and N cons for each recv neuron, for indexing into the RecvSynIdx array of indexes into the Syns synapses, which are organized sender-based."`
+ // starting offset and N cons for each recv neuron, for indexing into the RecvSynIdx array of indexes into the Syns synapses, which are organized sender-based.
+ PrjnRecvCon []StartN `view:"-"`
- // [view: -] [Layers][RecvPrjns][RecvNeurons][MaxDelay][MaxData] conductance buffer for accumulating spikes -- subslices are allocated to each projection -- uses int-encoded float values for faster GPU atomic integration
- PrjnGBuf []int32 `view:"-" desc:"[Layers][RecvPrjns][RecvNeurons][MaxDelay][MaxData] conductance buffer for accumulating spikes -- subslices are allocated to each projection -- uses int-encoded float values for faster GPU atomic integration"`
+ // conductance buffer for accumulating spikes -- subslices are allocated to each projection -- uses int-encoded float values for faster GPU atomic integration
+ PrjnGBuf []int32 `view:"-"`
- // [view: -] [Layers][RecvPrjns][RecvNeurons][MaxData] synaptic conductance integrated over time per projection per recv neurons -- spikes come in via PrjnBuf -- subslices are allocated to each projection
- PrjnGSyns []float32 `view:"-" desc:"[Layers][RecvPrjns][RecvNeurons][MaxData] synaptic conductance integrated over time per projection per recv neurons -- spikes come in via PrjnBuf -- subslices are allocated to each projection"`
+ // synaptic conductance integrated over time per projection per recv neuron -- spikes come in via PrjnGBuf -- subslices are allocated to each projection
+ PrjnGSyns []float32 `view:"-"`
- // [view: -] [Layers][RecvPrjns] indexes into Prjns (organized by SendPrjn) organized by recv projections -- needed for iterating through recv prjns efficiently on GPU.
- RecvPrjnIdxs []uint32 `view:"-" desc:"[Layers][RecvPrjns] indexes into Prjns (organized by SendPrjn) organized by recv projections -- needed for iterating through recv prjns efficiently on GPU."`
+ // indexes into Prjns (organized by SendPrjn) organized by recv projections -- needed for iterating through recv prjns efficiently on GPU.
+ RecvPrjnIdxs []uint32 `view:"-"`
- // [view: -] [Layers][RecvPrjns][RecvNeurons][Syns] indexes into Synapses for each recv neuron, organized into blocks according to PrjnRecvCon, for receiver-based access.
- RecvSynIdxs []uint32 `view:"-" desc:"[Layers][RecvPrjns][RecvNeurons][Syns] indexes into Synapses for each recv neuron, organized into blocks according to PrjnRecvCon, for receiver-based access."`
+ // indexes into Synapses for each recv neuron, organized into blocks according to PrjnRecvCon, for receiver-based access.
+ RecvSynIdxs []uint32 `view:"-"`
- // [In / Targ Layers][Neurons][Data] external input values for all Input / Target / Compare layers in the network -- the ApplyExt methods write to this per layer, and it is then actually applied in one consistent method.
- Exts []float32 `desc:"[In / Targ Layers][Neurons][Data] external input values for all Input / Target / Compare layers in the network -- the ApplyExt methods write to this per layer, and it is then actually applied in one consistent method."`
+ // external input values for all Input / Target / Compare layers in the network -- the ApplyExt methods write to this per layer, and it is then actually applied in one consistent method.
+ Exts []float32
- // [view: -] context used only for accessing neurons for display -- NetIdxs.NData in here is copied from active context in NewState
- Ctx Context `view:"-" desc:"context used only for accessing neurons for display -- NetIdxs.NData in here is copied from active context in NewState"`
+ // context used only for accessing neurons for display -- NetIdxs.NData in here is copied from active context in NewState
+ Ctx Context `view:"-"`
- // [view: -] random number generator for the network -- all random calls must use this -- set seed here for weight initialization values
- Rand erand.SysRand `view:"-" desc:"random number generator for the network -- all random calls must use this -- set seed here for weight initialization values"`
+ // random number generator for the network -- all random calls must use this -- set seed here for weight initialization values
+ Rand erand.SysRand `view:"-"`
// random seed to be set at the start of configuring the network and initializing the weights -- set this to get a different set of weights
- RndSeed int64 `inactive:"+" desc:"random seed to be set at the start of configuring the network and initializing the weights -- set this to get a different set of weights"`
+ RndSeed int64 `inactive:"+"`
// number of threads to use for parallel processing
- NThreads int `desc:"number of threads to use for parallel processing"`
+ NThreads int
- // [view: inline] GPU implementation
- GPU GPU `view:"inline" desc:"GPU implementation"`
+ // GPU implementation
+ GPU GPU `view:"inline"`
- // [view: -] record function timer information
- RecFunTimes bool `view:"-" desc:"record function timer information"`
+ // record function timer information
+ RecFunTimes bool `view:"-"`
- // [view: -] timers for each major function (step of processing)
- FunTimes map[string]*timer.Time `view:"-" desc:"timers for each major function (step of processing)"`
+ // timers for each major function (step of processing)
+ FunTimes map[string]*timer.Time `view:"-"`
}
// emer.Network interface methods:
@@ -342,8 +342,8 @@ func (nt *NetworkBase) Layout() {
// BoundsUpdt updates the Min / Max display bounds for 3D display
func (nt *NetworkBase) BoundsUpdt() {
- mn := mat32.NewVec3Scalar(mat32.Infinity)
- mx := mat32.Vec3Zero
+ mn := mat32.V3Scalar(mat32.Infinity)
+ mx := mat32.Vec3{}
for _, ly := range nt.Layers {
ps := ly.Pos()
sz := ly.Size()
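This hunk shows the mat32 v2 surface changes in miniature: NewVec3Scalar becomes V3Scalar, and the Vec3Zero sentinel is dropped in favor of the plain zero value. Only the two calls visible here are confirmed by the diff; assuming other constructors follow the same shortened naming is a guess.

```go
import "goki.dev/mat32/v2"

// boundsInit shows the v2 forms used in the hunk above.
func boundsInit() (mn, mx mat32.Vec3) {
	mn = mat32.V3Scalar(mat32.Infinity) // was mat32.NewVec3Scalar(...)
	mx = mat32.Vec3{}                   // zero value replaces mat32.Vec3Zero
	return mn, mx
}
```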
diff --git a/axon/networkbase_test.go b/axon/networkbase_test.go
index d91cd0a4f..57eba9b0b 100644
--- a/axon/networkbase_test.go
+++ b/axon/networkbase_test.go
@@ -3,7 +3,7 @@ package axon
import (
"testing"
- "github.com/emer/emergent/prjn"
+ "github.com/emer/emergent/v2/prjn"
"github.com/stretchr/testify/assert"
)
diff --git a/axon/neuromod.go b/axon/neuromod.go
index 2979b08c7..66f1e7207 100644
--- a/axon/neuromod.go
+++ b/axon/neuromod.go
@@ -5,21 +5,14 @@
package axon
import (
- "github.com/goki/gosl/slbool"
- "github.com/goki/ki/kit"
- "github.com/goki/mat32"
+ "goki.dev/gosl/v2/slbool"
+ "goki.dev/mat32/v2"
)
-//go:generate stringer -type=DAModTypes
-//go:generate stringer -type=ValenceTypes
-
-var KiT_DAModTypes = kit.Enums.AddEnum(DAModTypesN, kit.NotBitFlag, nil)
-var KiT_ValenceTypes = kit.Enums.AddEnum(ValenceTypesN, kit.NotBitFlag, nil)
-
//gosl: start neuromod
// DAModTypes are types of dopamine modulation of neural activity.
-type DAModTypes int32
+type DAModTypes int32 //enums:enum
const (
// NoDAMod means there is no effect of dopamine on neural activity
@@ -40,12 +33,10 @@ const (
// There are a subset of DA neurons that send increased DA for
// both negative and positive outcomes, targeting frontal neurons.
D1AbsMod
-
- DAModTypesN
)
// ValenceTypes are types of valence coding: positive or negative.
-type ValenceTypes int32
+type ValenceTypes int32 //enums:enum
const (
// Positive valence codes for outcomes aligned with drives / goals.
@@ -53,8 +44,6 @@ const (
// Negative valence codes for harmful or aversive outcomes.
Negative
-
- ValenceTypesN
)
// NeuroModParams specifies the effects of neuromodulators on neural
@@ -63,31 +52,31 @@ const (
type NeuroModParams struct {
// dopamine receptor-based effects of dopamine modulation on excitatory and inhibitory conductances: D1 is excitatory, D2 is inhibitory as a function of increasing dopamine
- DAMod DAModTypes `desc:"dopamine receptor-based effects of dopamine modulation on excitatory and inhibitory conductances: D1 is excitatory, D2 is inhibitory as a function of increasing dopamine"`
+ DAMod DAModTypes
// valence coding of this layer -- may affect specific layer types but does not directly affect neuromodulators currently
- Valence ValenceTypes `desc:"valence coding of this layer -- may affect specific layer types but does not directly affect neuromodulators currently"`
+ Valence ValenceTypes
- // [viewif: DAMod!=NoDAMod] multiplicative factor on overall DA modulation specified by DAMod -- resulting overall gain factor is: 1 + DAModGain * DA, where DA is appropriate DA-driven factor
- DAModGain float32 `viewif:"DAMod!=NoDAMod" desc:"multiplicative factor on overall DA modulation specified by DAMod -- resulting overall gain factor is: 1 + DAModGain * DA, where DA is appropriate DA-driven factor"`
+ // multiplicative factor on overall DA modulation specified by DAMod -- resulting overall gain factor is: 1 + DAModGain * DA, where DA is appropriate DA-driven factor
+ DAModGain float32 `viewif:"DAMod!=NoDAMod"`
// modulate the sign of the learning rate factor according to the DA sign, taking into account the DAMod sign reversal for D2Mod, also using BurstGain and DipGain to modulate DA value -- otherwise, only the magnitude of the learning rate is modulated as a function of raw DA magnitude according to DALRateMod (without additional gain factors)
- DALRateSign slbool.Bool `desc:"modulate the sign of the learning rate factor according to the DA sign, taking into account the DAMod sign reversal for D2Mod, also using BurstGain and DipGain to modulate DA value -- otherwise, only the magnitude of the learning rate is modulated as a function of raw DA magnitude according to DALRateMod (without additional gain factors)"`
+ DALRateSign slbool.Bool
- // [viewif: !DALRateSign] [min: 0] [max: 1] if not using DALRateSign, this is the proportion of maximum learning rate that Abs(DA) magnitude can modulate -- e.g., if 0.2, then DA = 0 = 80% of std learning rate, 1 = 100%
- DALRateMod float32 `min:"0" max:"1" viewif:"!DALRateSign" desc:"if not using DALRateSign, this is the proportion of maximum learning rate that Abs(DA) magnitude can modulate -- e.g., if 0.2, then DA = 0 = 80% of std learning rate, 1 = 100%"`
+ // if not using DALRateSign, this is the proportion of maximum learning rate that Abs(DA) magnitude can modulate -- e.g., if 0.2, then DA = 0 = 80% of std learning rate, 1 = 100%
+ DALRateMod float32 `min:"0" max:"1" viewif:"!DALRateSign"`
- // [min: 0] [max: 1] proportion of maximum learning rate that ACh can modulate -- e.g., if 0.2, then ACh = 0 = 80% of std learning rate, 1 = 100%
- AChLRateMod float32 `min:"0" max:"1" desc:"proportion of maximum learning rate that ACh can modulate -- e.g., if 0.2, then ACh = 0 = 80% of std learning rate, 1 = 100%"`
+ // proportion of maximum learning rate that ACh can modulate -- e.g., if 0.2, then ACh = 0 = 80% of std learning rate, 1 = 100%
+ AChLRateMod float32 `min:"0" max:"1"`
- // [def: 0,5] [min: 0] amount of extra Gi inhibition added in proportion to 1 - ACh level -- makes ACh disinhibitory
- AChDisInhib float32 `min:"0" def:"0,5" desc:"amount of extra Gi inhibition added in proportion to 1 - ACh level -- makes ACh disinhibitory"`
+ // amount of extra Gi inhibition added in proportion to 1 - ACh level -- makes ACh disinhibitory
+ AChDisInhib float32 `min:"0" def:"0,5"`
- // [def: 1] [min: 0] multiplicative gain factor applied to positive dopamine signals -- this operates on the raw dopamine signal prior to any effect of D2 receptors in reversing its sign!
- BurstGain float32 `min:"0" def:"1" desc:"multiplicative gain factor applied to positive dopamine signals -- this operates on the raw dopamine signal prior to any effect of D2 receptors in reversing its sign!"`
+ // multiplicative gain factor applied to positive dopamine signals -- this operates on the raw dopamine signal prior to any effect of D2 receptors in reversing its sign!
+ BurstGain float32 `min:"0" def:"1"`
- // [def: 1] [min: 0] multiplicative gain factor applied to negative dopamine signals -- this operates on the raw dopamine signal prior to any effect of D2 receptors in reversing its sign! should be small for acq, but roughly equal to burst for ext
- DipGain float32 `min:"0" def:"1" desc:"multiplicative gain factor applied to negative dopamine signals -- this operates on the raw dopamine signal prior to any effect of D2 receptors in reversing its sign! should be small for acq, but roughly equal to burst for ext"`
+ // multiplicative gain factor applied to negative dopamine signals -- this operates on the raw dopamine signal prior to any effect of D2 receptors in reversing its sign! should be small for acq, but roughly equal to burst for ext
+ DipGain float32 `min:"0" def:"1"`
pad, pad1, pad2 float32
}
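With the //enums:enum directives above, the hand-maintained trailing N constants (DAModTypesN, ValenceTypesN) and the kit.Enums registrations are replaced by enumgen output, configured under [Generate.Enumgen] in .goki/config.toml. A hedged usage sketch within package axon, assuming the usual enumgen-generated API of an N constant plus String/SetString methods (not verified against the generated file):

```go
// daModFromString sketches the enumgen-generated API for DAModTypes;
// SetString and the regenerated DAModTypesN constant are assumed from
// enumgen conventions, not verified here.
func daModFromString(s string) (DAModTypes, error) {
	var m DAModTypes
	if err := m.SetString(s); err != nil { // parse from name, e.g. "D1Mod"
		return 0, err
	}
	_ = int(DAModTypesN) // generated count of enum values
	return m, nil
}
```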
diff --git a/axon/neuron.go b/axon/neuron.go
index 93bb153b8..a9f789014 100644
--- a/axon/neuron.go
+++ b/axon/neuron.go
@@ -7,34 +7,13 @@ package axon
import (
"fmt"
- "github.com/emer/emergent/netview"
- "github.com/goki/ki/kit"
+ "github.com/emer/emergent/v2/netview"
)
-//go:generate stringer -type=NeuronFlags
-//go:generate stringer -type=NeuronVars
-//go:generate stringer -type=NeuronAvgVars
-//go:generate stringer -type=NeuronIdxs
-
-var KiT_NeuronVars = kit.Enums.AddEnum(NeuronVarsN, kit.NotBitFlag, nil)
-
-func (ev NeuronVars) MarshalJSON() ([]byte, error) { return kit.EnumMarshalJSON(ev) }
-func (ev *NeuronVars) UnmarshalJSON(b []byte) error { return kit.EnumUnmarshalJSON(ev, b) }
-
-var KiT_NeuronAvgVars = kit.Enums.AddEnum(NeuronAvgVarsN, kit.NotBitFlag, nil)
-
-func (ev NeuronAvgVars) MarshalJSON() ([]byte, error) { return kit.EnumMarshalJSON(ev) }
-func (ev *NeuronAvgVars) UnmarshalJSON(b []byte) error { return kit.EnumUnmarshalJSON(ev, b) }
-
-var KiT_NeuronIdxs = kit.Enums.AddEnum(NeuronIdxsN, kit.NotBitFlag, nil)
-
-func (ev NeuronIdxs) MarshalJSON() ([]byte, error) { return kit.EnumMarshalJSON(ev) }
-func (ev *NeuronIdxs) UnmarshalJSON(b []byte) error { return kit.EnumUnmarshalJSON(ev, b) }
-
//gosl: start neuron
// NeuronFlags are bit-flags encoding relevant binary state for neurons
-type NeuronFlags int32
+type NeuronFlags int32 //enums:enum
// The neuron flags
const (
@@ -55,7 +34,7 @@ const (
// NeuronVars are the neuron variables representing current active state,
// specific to each input data state.
// See NeuronAvgVars for vars shared across data.
-type NeuronVars int32
+type NeuronVars int32 //enums:enum
const (
/////////////////////////////////////////
@@ -339,18 +318,85 @@ const (
// and are writable (indexes are read only).
NrnFlags
- NeuronVarsN
+ // IMPORTANT: if NrnFlags is not the last, need to update gosl defn below
)
+// NeuronAvgVars are mostly neuron variables involved in longer-term average activity
+// which is aggregated over time and not specific to each input data state,
+// along with any other state that is not input data specific.
+type NeuronAvgVars int32 //enums:enum
+
+const (
+ // ActAvg is average activation (of minus phase activation state) over long time intervals (time constant = Dt.LongAvgTau) -- useful for finding hog units and seeing overall distribution of activation
+ ActAvg NeuronAvgVars = iota
+
+ // AvgPct is ActAvg as a proportion of overall layer activation -- this is used for synaptic scaling to match TrgAvg activation -- updated at SlowInterval intervals
+ AvgPct
+
+ // TrgAvg is neuron's target average activation as a proportion of overall layer activation, assigned during weight initialization, driving synaptic scaling relative to AvgPct
+ TrgAvg
+
+ // DTrgAvg is change in neuron's target average activation as a result of unit-wise error gradient -- acts like a bias weight. MPI needs to share these across processors.
+ DTrgAvg
+
+ // AvgDif is AvgPct - TrgAvg -- i.e., the error in overall activity level relative to set point for this neuron, which drives synaptic scaling -- updated at SlowInterval intervals
+ AvgDif
+
+ // GeBase is baseline level of Ge, added to GeRaw, for intrinsic excitability
+ GeBase
+
+ // GiBase is baseline level of Gi, added to GiRaw, for intrinsic excitability
+ GiBase
+
+ // IMPORTANT: if GiBase is not the last, need to update gosl defn below
+)
+
+// NeuronIdxs are the neuron indexes and other uint32 values.
+// There is only one of these per neuron -- not data parallel.
+// note: Flags are encoded in Vars because they are data parallel and
+// writable, whereas indexes are read-only.
+type NeuronIdxs int32 //enums:enum
+
+const (
+ // NrnNeurIdx is the index of this neuron within its owning layer
+ NrnNeurIdx NeuronIdxs = iota
+
+ // NrnLayIdx is the index of the layer that this neuron belongs to,
+ // needed for neuron-level parallel code.
+ NrnLayIdx
+
+ // NrnSubPool is the index of the sub-level inhibitory pool for this neuron
+ // (only for 4D shapes, the pool (unit-group / hypercolumn) structure level).
+ // Indices start at 1 -- 0 is layer-level pool (is 0 if no sub-pools).
+ NrnSubPool
+
+ // IMPORTANT: if NrnSubPool is not the last, need to update gosl defn below
+)
+
+//gosl: end neuron
+
+//gosl: hlsl neuron
+/*
+static const NeuronVars NeuronVarsN = NrnFlags + 1;
+static const NeuronAvgVars NeuronAvgVarsN = GiBase + 1;
+static const NeuronIdxs NeuronIdxsN = NrnSubPool + 1;
+*/
+//gosl: end neuron
+
+//gosl: start neuron
+
+////////////////////////////////////////////////
+// Strides
+
// NeuronVarStrides encodes the stride offsets for neuron variable access
// into network float32 array. Data is always the inner-most variable.
type NeuronVarStrides struct {
// neuron level
- Neuron uint32 `desc:"neuron level"`
+ Neuron uint32
// variable level
- Var uint32 `desc:"variable level"`
+ Var uint32
pad, pad1 uint32
}
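NeuronVarStrides is the concrete mechanism behind the "flexible striding" referenced throughout the NetworkBase comments: the flat Neurons []float32 array is addressed by multiplying each coordinate by its stride, with Data always inner-most (implicit stride 1), and SetVarOuter (visible in the next hunk header) configuring the variable dimension as outer, presumably matching the UseGPUOrder choice noted earlier. A minimal sketch of the implied index arithmetic within package axon; the helper name is hypothetical and the real accessors are NrnV and friends.

```go
// nrnValIdx computes the flat index into Network.Neurons for a given
// neuron, variable, and data-parallel index di, given the strides.
// Data is inner-most, so its stride is 1. Hypothetical sketch.
func nrnValIdx(ns *NeuronVarStrides, neur, nvar, di uint32) uint32 {
	return neur*ns.Neuron + nvar*ns.Var + di
}
```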
@@ -377,45 +423,15 @@ func (ns *NeuronVarStrides) SetVarOuter(nneur, ndata int) {
////////////////////////////////////////////////
// NeuronAvgVars
-// NeuronAvgVars are mostly neuron variables involved in longer-term average activity
-// which is aggregated over time and not specific to each input data state,
-// along with any other state that is not input data specific.
-type NeuronAvgVars int32
-
-const (
- // ActAvg is average activation (of minus phase activation state) over long time intervals (time constant = Dt.LongAvgTau) -- useful for finding hog units and seeing overall distribution of activation
- ActAvg NeuronAvgVars = iota
-
- // AvgPct is ActAvg as a proportion of overall layer activation -- this is used for synaptic scaling to match TrgAvg activation -- updated at SlowInterval intervals
- AvgPct
-
- // TrgAvg is neuron's target average activation as a proportion of overall layer activation, assigned during weight initialization, driving synaptic scaling relative to AvgPct
- TrgAvg
-
- // DTrgAvg is change in neuron's target average activation as a result of unit-wise error gradient -- acts like a bias weight. MPI needs to share these across processors.
- DTrgAvg
-
- // AvgDif is AvgPct - TrgAvg -- i.e., the error in overall activity level relative to set point for this neuron, which drives synaptic scaling -- updated at SlowInterval intervals
- AvgDif
-
- // GeBase is baseline level of Ge, added to GeRaw, for intrinsic excitability
- GeBase
-
- // GiBase is baseline level of Gi, added to GiRaw, for intrinsic excitability
- GiBase
-
- NeuronAvgVarsN
-)
-
// NeuronAvgVarStrides encodes the stride offsets for neuron variable access
// into network float32 array. Data is always the inner-most variable.
type NeuronAvgVarStrides struct {
// neuron level
- Neuron uint32 `desc:"neuron level"`
+ Neuron uint32
// variable level
- Var uint32 `desc:"variable level"`
+ Var uint32
pad, pad1 uint32
}
@@ -442,37 +458,15 @@ func (ns *NeuronAvgVarStrides) SetVarOuter(nneur int) {
////////////////////////////////////////////////
// Idxs
-// NeuronIdxs are the neuron indexes and other uint32 values.
-// There is only one of these per neuron -- not data parallel.
-// note: Flags are encoded in Vars because they are data parallel and
-// writable, whereas indexes are read-only.
-type NeuronIdxs int32
-
-const (
- // NrnNeurIdx is the index of this neuron within its owning layer
- NrnNeurIdx NeuronIdxs = iota
-
- // NrnLayIdx is the index of the layer that this neuron belongs to,
- // needed for neuron-level parallel code.
- NrnLayIdx
-
- // NrnSubPool is the index of the sub-level inhibitory pool for this neuron
- // (only for 4D shapes, the pool (unit-group / hypercolumn) structure level).
- // Indicies start at 1 -- 0 is layer-level pool (is 0 if no sub-pools).
- NrnSubPool
-
- NeuronIdxsN
-)
-
// NeuronIdxStrides encodes the stride offsets for neuron index access
// into network uint32 array.
type NeuronIdxStrides struct {
// neuron level
- Neuron uint32 `desc:"neuron level"`
+ Neuron uint32
// index value level
- Index uint32 `desc:"index value level"`
+ Index uint32
pad, pad1 uint32
}
@@ -500,6 +494,9 @@ func (ns *NeuronIdxStrides) SetIdxOuter(nneur int) {
//gosl: end neuron
+////////////////////////////////////////////////
+// Props
+
// NeuronVarProps has all of the display properties for neuron variables, including desc tooltips
var NeuronVarProps = map[string]string{
/////////////////////////////////////////
diff --git a/axon/neuronavgvars_string.go b/axon/neuronavgvars_string.go
deleted file mode 100644
index d50bbb399..000000000
--- a/axon/neuronavgvars_string.go
+++ /dev/null
@@ -1,63 +0,0 @@
-// Code generated by "stringer -type=NeuronAvgVars"; DO NOT EDIT.
-
-package axon
-
-import (
- "errors"
- "strconv"
-)
-
-var _ = errors.New("dummy error")
-
-func _() {
- // An "invalid array index" compiler error signifies that the constant values have changed.
- // Re-run the stringer command to generate them again.
- var x [1]struct{}
- _ = x[ActAvg-0]
- _ = x[AvgPct-1]
- _ = x[TrgAvg-2]
- _ = x[DTrgAvg-3]
- _ = x[AvgDif-4]
- _ = x[GeBase-5]
- _ = x[GiBase-6]
- _ = x[NeuronAvgVarsN-7]
-}
-
-const _NeuronAvgVars_name = "ActAvgAvgPctTrgAvgDTrgAvgAvgDifGeBaseGiBaseNeuronAvgVarsN"
-
-var _NeuronAvgVars_index = [...]uint8{0, 6, 12, 18, 25, 31, 37, 43, 57}
-
-func (i NeuronAvgVars) String() string {
- if i < 0 || i >= NeuronAvgVars(len(_NeuronAvgVars_index)-1) {
- return "NeuronAvgVars(" + strconv.FormatInt(int64(i), 10) + ")"
- }
- return _NeuronAvgVars_name[_NeuronAvgVars_index[i]:_NeuronAvgVars_index[i+1]]
-}
-
-func (i *NeuronAvgVars) FromString(s string) error {
- for j := 0; j < len(_NeuronAvgVars_index)-1; j++ {
- if s == _NeuronAvgVars_name[_NeuronAvgVars_index[j]:_NeuronAvgVars_index[j+1]] {
- *i = NeuronAvgVars(j)
- return nil
- }
- }
- return errors.New("String: " + s + " is not a valid option for type: NeuronAvgVars")
-}
-
-var _NeuronAvgVars_descMap = map[NeuronAvgVars]string{
- 0: `ActAvg is average activation (of minus phase activation state) over long time intervals (time constant = Dt.LongAvgTau) -- useful for finding hog units and seeing overall distribution of activation`,
- 1: `AvgPct is ActAvg as a proportion of overall layer activation -- this is used for synaptic scaling to match TrgAvg activation -- updated at SlowInterval intervals`,
- 2: `TrgAvg is neuron's target average activation as a proportion of overall layer activation, assigned during weight initialization, driving synaptic scaling relative to AvgPct`,
- 3: `DTrgAvg is change in neuron's target average activation as a result of unit-wise error gradient -- acts like a bias weight. MPI needs to share these across processors.`,
- 4: `AvgDif is AvgPct - TrgAvg -- i.e., the error in overall activity level relative to set point for this neuron, which drives synaptic scaling -- updated at SlowInterval intervals`,
- 5: `GeBase is baseline level of Ge, added to GeRaw, for intrinsic excitability`,
- 6: `GiBase is baseline level of Gi, added to GiRaw, for intrinsic excitability`,
- 7: ``,
-}
-
-func (i NeuronAvgVars) Desc() string {
- if str, ok := _NeuronAvgVars_descMap[i]; ok {
- return str
- }
- return "NeuronAvgVars(" + strconv.FormatInt(int64(i), 10) + ")"
-}
diff --git a/axon/neuronflags_string.go b/axon/neuronflags_string.go
deleted file mode 100644
index 4437e3a04..000000000
--- a/axon/neuronflags_string.go
+++ /dev/null
@@ -1,58 +0,0 @@
-// Code generated by "stringer -type=NeuronFlags"; DO NOT EDIT.
-
-package axon
-
-import (
- "errors"
- "strconv"
-)
-
-var _ = errors.New("dummy error")
-
-func _() {
- // An "invalid array index" compiler error signifies that the constant values have changed.
- // Re-run the stringer command to generate them again.
- var x [1]struct{}
- _ = x[NeuronOff-1]
- _ = x[NeuronHasExt-2]
- _ = x[NeuronHasTarg-4]
- _ = x[NeuronHasCmpr-8]
-}
-
-const (
- _NeuronFlags_name_0 = "NeuronOffNeuronHasExt"
- _NeuronFlags_name_1 = "NeuronHasTarg"
- _NeuronFlags_name_2 = "NeuronHasCmpr"
-)
-
-var (
- _NeuronFlags_index_0 = [...]uint8{0, 9, 21}
-)
-
-func (i NeuronFlags) String() string {
- switch {
- case 1 <= i && i <= 2:
- i -= 1
- return _NeuronFlags_name_0[_NeuronFlags_index_0[i]:_NeuronFlags_index_0[i+1]]
- case i == 4:
- return _NeuronFlags_name_1
- case i == 8:
- return _NeuronFlags_name_2
- default:
- return "NeuronFlags(" + strconv.FormatInt(int64(i), 10) + ")"
- }
-}
-
-var _NeuronFlags_descMap = map[NeuronFlags]string{
- 1: `NeuronOff flag indicates that this neuron has been turned off (i.e., lesioned)`,
- 2: `NeuronHasExt means the neuron has external input in its Ext field`,
- 4: `NeuronHasTarg means the neuron has external target input in its Target field`,
- 8: `NeuronHasCmpr means the neuron has external comparison input in its Target field -- used for computing comparison statistics but does not drive neural activity ever`,
-}
-
-func (i NeuronFlags) Desc() string {
- if str, ok := _NeuronFlags_descMap[i]; ok {
- return str
- }
- return "NeuronFlags(" + strconv.FormatInt(int64(i), 10) + ")"
-}
diff --git a/axon/neuronidxs_string.go b/axon/neuronidxs_string.go
deleted file mode 100644
index d0bf3358b..000000000
--- a/axon/neuronidxs_string.go
+++ /dev/null
@@ -1,55 +0,0 @@
-// Code generated by "stringer -type=NeuronIdxs"; DO NOT EDIT.
-
-package axon
-
-import (
- "errors"
- "strconv"
-)
-
-var _ = errors.New("dummy error")
-
-func _() {
- // An "invalid array index" compiler error signifies that the constant values have changed.
- // Re-run the stringer command to generate them again.
- var x [1]struct{}
- _ = x[NrnNeurIdx-0]
- _ = x[NrnLayIdx-1]
- _ = x[NrnSubPool-2]
- _ = x[NeuronIdxsN-3]
-}
-
-const _NeuronIdxs_name = "NrnNeurIdxNrnLayIdxNrnSubPoolNeuronIdxsN"
-
-var _NeuronIdxs_index = [...]uint8{0, 10, 19, 29, 40}
-
-func (i NeuronIdxs) String() string {
- if i < 0 || i >= NeuronIdxs(len(_NeuronIdxs_index)-1) {
- return "NeuronIdxs(" + strconv.FormatInt(int64(i), 10) + ")"
- }
- return _NeuronIdxs_name[_NeuronIdxs_index[i]:_NeuronIdxs_index[i+1]]
-}
-
-func (i *NeuronIdxs) FromString(s string) error {
- for j := 0; j < len(_NeuronIdxs_index)-1; j++ {
- if s == _NeuronIdxs_name[_NeuronIdxs_index[j]:_NeuronIdxs_index[j+1]] {
- *i = NeuronIdxs(j)
- return nil
- }
- }
- return errors.New("String: " + s + " is not a valid option for type: NeuronIdxs")
-}
-
-var _NeuronIdxs_descMap = map[NeuronIdxs]string{
- 0: `NrnNeurIdx is the index of this neuron within its owning layer`,
- 1: `NrnLayIdx is the index of the layer that this neuron belongs to, needed for neuron-level parallel code.`,
- 2: `NrnSubPool is the index of the sub-level inhibitory pool for this neuron (only for 4D shapes, the pool (unit-group / hypercolumn) structure level). Indicies start at 1 -- 0 is layer-level pool (is 0 if no sub-pools).`,
- 3: ``,
-}
-
-func (i NeuronIdxs) Desc() string {
- if str, ok := _NeuronIdxs_descMap[i]; ok {
- return str
- }
- return "NeuronIdxs(" + strconv.FormatInt(int64(i), 10) + ")"
-}
diff --git a/axon/neuronvars_string.go b/axon/neuronvars_string.go
deleted file mode 100644
index 2fa0d8ee5..000000000
--- a/axon/neuronvars_string.go
+++ /dev/null
@@ -1,209 +0,0 @@
-// Code generated by "stringer -type=NeuronVars"; DO NOT EDIT.
-
-package axon
-
-import (
- "errors"
- "strconv"
-)
-
-var _ = errors.New("dummy error")
-
-func _() {
- // An "invalid array index" compiler error signifies that the constant values have changed.
- // Re-run the stringer command to generate them again.
- var x [1]struct{}
- _ = x[Spike-0]
- _ = x[Spiked-1]
- _ = x[Act-2]
- _ = x[ActInt-3]
- _ = x[ActM-4]
- _ = x[ActP-5]
- _ = x[Ext-6]
- _ = x[Target-7]
- _ = x[Ge-8]
- _ = x[Gi-9]
- _ = x[Gk-10]
- _ = x[Inet-11]
- _ = x[Vm-12]
- _ = x[VmDend-13]
- _ = x[ISI-14]
- _ = x[ISIAvg-15]
- _ = x[CaSpkP-16]
- _ = x[CaSpkD-17]
- _ = x[CaSyn-18]
- _ = x[CaSpkM-19]
- _ = x[CaSpkPM-20]
- _ = x[CaLrn-21]
- _ = x[NrnCaM-22]
- _ = x[NrnCaP-23]
- _ = x[NrnCaD-24]
- _ = x[CaDiff-25]
- _ = x[Attn-26]
- _ = x[RLRate-27]
- _ = x[SpkMaxCa-28]
- _ = x[SpkMax-29]
- _ = x[SpkPrv-30]
- _ = x[SpkSt1-31]
- _ = x[SpkSt2-32]
- _ = x[GeNoiseP-33]
- _ = x[GeNoise-34]
- _ = x[GiNoiseP-35]
- _ = x[GiNoise-36]
- _ = x[GeExt-37]
- _ = x[GeRaw-38]
- _ = x[GeSyn-39]
- _ = x[GiRaw-40]
- _ = x[GiSyn-41]
- _ = x[GeInt-42]
- _ = x[GeIntNorm-43]
- _ = x[GiInt-44]
- _ = x[GModRaw-45]
- _ = x[GModSyn-46]
- _ = x[GMaintRaw-47]
- _ = x[GMaintSyn-48]
- _ = x[SSGi-49]
- _ = x[SSGiDend-50]
- _ = x[Gak-51]
- _ = x[MahpN-52]
- _ = x[SahpCa-53]
- _ = x[SahpN-54]
- _ = x[GknaMed-55]
- _ = x[GknaSlow-56]
- _ = x[GnmdaSyn-57]
- _ = x[Gnmda-58]
- _ = x[GnmdaMaint-59]
- _ = x[GnmdaLrn-60]
- _ = x[NmdaCa-61]
- _ = x[GgabaB-62]
- _ = x[GABAB-63]
- _ = x[GABABx-64]
- _ = x[Gvgcc-65]
- _ = x[VgccM-66]
- _ = x[VgccH-67]
- _ = x[VgccCa-68]
- _ = x[VgccCaInt-69]
- _ = x[SKCaIn-70]
- _ = x[SKCaR-71]
- _ = x[SKCaM-72]
- _ = x[Gsk-73]
- _ = x[Burst-74]
- _ = x[BurstPrv-75]
- _ = x[CtxtGe-76]
- _ = x[CtxtGeRaw-77]
- _ = x[CtxtGeOrig-78]
- _ = x[NrnFlags-79]
- _ = x[NeuronVarsN-80]
-}
-
-const _NeuronVars_name = "SpikeSpikedActActIntActMActPExtTargetGeGiGkInetVmVmDendISIISIAvgCaSpkPCaSpkDCaSynCaSpkMCaSpkPMCaLrnNrnCaMNrnCaPNrnCaDCaDiffAttnRLRateSpkMaxCaSpkMaxSpkPrvSpkSt1SpkSt2GeNoisePGeNoiseGiNoisePGiNoiseGeExtGeRawGeSynGiRawGiSynGeIntGeIntNormGiIntGModRawGModSynGMaintRawGMaintSynSSGiSSGiDendGakMahpNSahpCaSahpNGknaMedGknaSlowGnmdaSynGnmdaGnmdaMaintGnmdaLrnNmdaCaGgabaBGABABGABABxGvgccVgccMVgccHVgccCaVgccCaIntSKCaInSKCaRSKCaMGskBurstBurstPrvCtxtGeCtxtGeRawCtxtGeOrigNrnFlagsNeuronVarsN"
-
-var _NeuronVars_index = [...]uint16{0, 5, 11, 14, 20, 24, 28, 31, 37, 39, 41, 43, 47, 49, 55, 58, 64, 70, 76, 81, 87, 94, 99, 105, 111, 117, 123, 127, 133, 141, 147, 153, 159, 165, 173, 180, 188, 195, 200, 205, 210, 215, 220, 225, 234, 239, 246, 253, 262, 271, 275, 283, 286, 291, 297, 302, 309, 317, 325, 330, 340, 348, 354, 360, 365, 371, 376, 381, 386, 392, 401, 407, 412, 417, 420, 425, 433, 439, 448, 458, 466, 477}
-
-func (i NeuronVars) String() string {
- if i < 0 || i >= NeuronVars(len(_NeuronVars_index)-1) {
- return "NeuronVars(" + strconv.FormatInt(int64(i), 10) + ")"
- }
- return _NeuronVars_name[_NeuronVars_index[i]:_NeuronVars_index[i+1]]
-}
-
-func (i *NeuronVars) FromString(s string) error {
- for j := 0; j < len(_NeuronVars_index)-1; j++ {
- if s == _NeuronVars_name[_NeuronVars_index[j]:_NeuronVars_index[j+1]] {
- *i = NeuronVars(j)
- return nil
- }
- }
- return errors.New("String: " + s + " is not a valid option for type: NeuronVars")
-}
-
-var _NeuronVars_descMap = map[NeuronVars]string{
- 0: `Spike is whether neuron has spiked or not on this cycle (0 or 1)`,
- 1: `Spiked is 1 if neuron has spiked within the last 10 cycles (msecs), corresponding to a nominal max spiking rate of 100 Hz, 0 otherwise -- useful for visualization and computing activity levels in terms of average spiked levels.`,
- 2: `Act is rate-coded activation value reflecting instantaneous estimated rate of spiking, based on 1 / ISIAvg. This drives feedback inhibition in the FFFB function (todo: this will change when better inhibition is implemented), and is integrated over time for ActInt which is then used for performance statistics and layer average activations, etc. Should not be used for learning or other computations.`,
- 3: `ActInt is integrated running-average activation value computed from Act with time constant Act.Dt.IntTau, to produce a longer-term integrated value reflecting the overall activation state across the ThetaCycle time scale, as the overall response of network to current input state -- this is copied to ActM and ActP at the ends of the minus and plus phases, respectively, and used in computing performance-level statistics (which are typically based on ActM). Should not be used for learning or other computations.`,
- 4: `ActM is ActInt activation state at end of third quarter, representing the posterior-cortical minus phase activation -- used for statistics and monitoring network performance. Should not be used for learning or other computations.`,
- 5: `ActP is ActInt activation state at end of fourth quarter, representing the posterior-cortical plus_phase activation -- used for statistics and monitoring network performance. Should not be used for learning or other computations.`,
- 6: `Ext is external input: drives activation of unit from outside influences (e.g., sensory input)`,
- 7: `Target is the target value: drives learning to produce this activation value`,
- 8: `Ge is total excitatory conductance, including all forms of excitation (e.g., NMDA) -- does *not* include Gbar.E`,
- 9: `Gi is total inhibitory synaptic conductance -- the net inhibitory input to the neuron -- does *not* include Gbar.I`,
- 10: `Gk is total potassium conductance, typically reflecting sodium-gated potassium currents involved in adaptation effects -- does *not* include Gbar.K`,
- 11: `Inet is net current produced by all channels -- drives update of Vm`,
- 12: `Vm is membrane potential -- integrates Inet current over time`,
- 13: `VmDend is dendritic membrane potential -- has a slower time constant, is not subject to the VmR reset after spiking`,
- 14: `ISI is current inter-spike-interval -- counts up since last spike. Starts at -1 when initialized.`,
- 15: `ISIAvg is average inter-spike-interval -- average time interval between spikes, integrated with ISITau rate constant (relatively fast) to capture something close to an instantaneous spiking rate. Starts at -1 when initialized, and goes to -2 after first spike, and is only valid after the second spike post-initialization.`,
- 16: `CaSpkP is continuous cascaded integration of CaSpkM at PTau time constant (typically 40), representing neuron-level purely spiking version of plus, LTP direction of weight change and capturing the function of CaMKII in the Kinase learning rule. Used for specialized learning and computational functions, statistics, instead of Act.`,
- 17: `CaSpkD is continuous cascaded integration CaSpkP at DTau time constant (typically 40), representing neuron-level purely spiking version of minus, LTD direction of weight change and capturing the function of DAPK1 in the Kinase learning rule. Used for specialized learning and computational functions, statistics, instead of Act.`,
- 18: `CaSyn is spike-driven calcium trace for synapse-level Ca-driven learning: exponential integration of SpikeG * Spike at SynTau time constant (typically 30). Synapses integrate send.CaSyn * recv.CaSyn across M, P, D time integrals for the synaptic trace driving credit assignment in learning. Time constant reflects binding time of Glu to NMDA and Ca buffering postsynaptically, and determines time window where pre * post spiking must overlap to drive learning.`,
- 19: `CaSpkM is spike-driven calcium trace used as a neuron-level proxy for synpatic credit assignment factor based on continuous time-integrated spiking: exponential integration of SpikeG * Spike at MTau time constant (typically 5). Simulates a calmodulin (CaM) like signal at the most abstract level.`,
- 20: `CaSpkPM is minus-phase snapshot of the CaSpkP value -- similar to ActM but using a more directly spike-integrated value.`,
- 21: `CaLrn is recv neuron calcium signal used to drive temporal error difference component of standard learning rule, combining NMDA (NmdaCa) and spiking-driven VGCC (VgccCaInt) calcium sources (vs. CaSpk* which only reflects spiking component). This is integrated into CaM, CaP, CaD, and temporal derivative is CaP - CaD (CaMKII - DAPK1). This approximates the backprop error derivative on net input, but VGCC component adds a proportion of recv activation delta as well -- a balance of both works best. The synaptic-level trace multiplier provides the credit assignment factor, reflecting coincident activity and potentially integrated over longer multi-trial timescales.`,
- 22: `NrnCaM is integrated CaLrn at MTau timescale (typically 5), simulating a calmodulin (CaM) like signal, which then drives CaP, CaD for delta signal driving error-driven learning.`,
- 23: `NrnCaP is cascaded integration of CaM at PTau time constant (typically 40), representing the plus, LTP direction of weight change and capturing the function of CaMKII in the Kinase learning rule.`,
- 24: `NrnCaD is cascaded integratoin of CaP at DTau time constant (typically 40), representing the minus, LTD direction of weight change and capturing the function of DAPK1 in the Kinase learning rule.`,
- 25: `CaDiff is difference between CaP - CaD -- this is the error signal that drives error-driven learning.`,
- 26: `Attn is Attentional modulation factor, which can be set by special layers such as the TRC -- multiplies Ge`,
- 27: `RLRate is recv-unit based learning rate multiplier, reflecting the sigmoid derivative computed from the CaSpkD of recv unit, and the normalized difference CaSpkP - CaSpkD / MAX(CaSpkP - CaSpkD).`,
- 28: `SpkMaxCa is Ca integrated like CaSpkP but only starting at MaxCycStart cycle, to prevent inclusion of carryover spiking from prior theta cycle trial -- the PTau time constant otherwise results in significant carryover. This is the input to SpkMax`,
- 29: `SpkMax is maximum CaSpkP across one theta cycle time window (max of SpkMaxCa) -- used for specialized algorithms that have more phasic behavior within a single trial, e.g., BG Matrix layer gating. Also useful for visualization of peak activity of neurons.`,
- 30: `SpkPrv is final CaSpkD activation state at end of previous theta cycle. used for specialized learning mechanisms that operate on delayed sending activations.`,
- 31: `SpkSt1 is the activation state at specific time point within current state processing window (e.g., 50 msec for beta cycle within standard theta cycle), as saved by SpkSt1() function. Used for example in hippocampus for CA3, CA1 learning`,
- 32: `SpkSt2 is the activation state at specific time point within current state processing window (e.g., 100 msec for beta cycle within standard theta cycle), as saved by SpkSt2() function. Used for example in hippocampus for CA3, CA1 learning`,
- 33: `GeNoiseP is accumulating poisson probability factor for driving excitatory noise spiking -- multiply times uniform random deviate at each time step, until it gets below the target threshold based on lambda.`,
- 34: `GeNoise is integrated noise excitatory conductance, added into Ge`,
- 35: `GiNoiseP is accumulating poisson probability factor for driving inhibitory noise spiking -- multiply times uniform random deviate at each time step, until it gets below the target threshold based on lambda.`,
- 36: `GiNoise is integrated noise inhibotyr conductance, added into Gi`,
- 37: `GeExt is extra excitatory conductance added to Ge -- from Ext input, GeCtxt etc`,
- 38: `GeRaw is raw excitatory conductance (net input) received from senders = current raw spiking drive`,
- 39: `GeSyn is time-integrated total excitatory synaptic conductance, with an instantaneous rise time from each spike (in GeRaw) and exponential decay with Dt.GeTau, aggregated over projections -- does *not* include Gbar.E`,
- 40: `GiRaw is raw inhibitory conductance (net input) received from senders = current raw spiking drive`,
- 41: `GiSyn is time-integrated total inhibitory synaptic conductance, with an instantaneous rise time from each spike (in GiRaw) and exponential decay with Dt.GiTau, aggregated over projections -- does *not* include Gbar.I. This is added with computed FFFB inhibition to get the full inhibition in Gi`,
- 42: `GeInt is integrated running-average activation value computed from Ge with time constant Act.Dt.IntTau, to produce a longer-term integrated value reflecting the overall Ge level across the ThetaCycle time scale (Ge itself fluctuates considerably) -- useful for stats to set strength of connections etc to get neurons into right range of overall excitatory drive`,
- 43: `GeIntNorm is normalized GeInt value (divided by the layer maximum) -- this is used for learning in layers that require learning on subthreshold activity`,
- 44: `GiInt is integrated running-average activation value computed from GiSyn with time constant Act.Dt.IntTau, to produce a longer-term integrated value reflecting the overall synaptic Gi level across the ThetaCycle time scale (Gi itself fluctuates considerably) -- useful for stats to set strength of connections etc to get neurons into right range of overall inhibitory drive`,
- 45: `GModRaw is raw modulatory conductance, received from GType = ModulatoryG projections`,
- 46: `GModSyn is syn integrated modulatory conductance, received from GType = ModulatoryG projections`,
- 47: `GMaintRaw is raw maintenance conductance, received from GType = MaintG projections`,
- 48: `GMaintSyn is syn integrated maintenance conductance, integrated using MaintNMDA params.`,
- 49: `SSGi is SST+ somatostatin positive slow spiking inhibition`,
- 50: `SSGiDend is amount of SST+ somatostatin positive slow spiking inhibition applied to dendritic Vm (VmDend)`,
- 51: `Gak is conductance of A-type K potassium channels`,
- 52: `MahpN is accumulating voltage-gated gating value for the medium time scale AHP`,
- 53: `SahpCa is slowly accumulating calcium value that drives the slow AHP`,
- 54: `SahpN is sAHP gating value`,
- 55: `GknaMed is conductance of sodium-gated potassium channel (KNa) medium dynamics (Slick) -- produces accommodation / adaptation of firing`,
- 56: `GknaSlow is conductance of sodium-gated potassium channel (KNa) slow dynamics (Slack) -- produces accommodation / adaptation of firing`,
- 57: `GnmdaSyn is integrated NMDA recv synaptic current -- adds GeRaw and decays with time constant`,
- 58: `Gnmda is net postsynaptic (recv) NMDA conductance, after Mg V-gating and Gbar -- added directly to Ge as it has the same reversal potential`,
- 59: `GnmdaMaint is net postsynaptic maintenance NMDA conductance, computed from GMaintSyn and GMaintRaw, after Mg V-gating and Gbar -- added directly to Ge as it has the same reversal potential`,
- 60: `GnmdaLrn is learning version of integrated NMDA recv synaptic current -- adds GeRaw and decays with time constant -- drives NmdaCa that then drives CaM for learning`,
- 61: `NmdaCa is NMDA calcium computed from GnmdaLrn, drives learning via CaM`,
- 62: `GgabaB is net GABA-B conductance, after Vm gating and Gbar + Gbase -- applies to Gk, not Gi, for GIRK, with .1 reversal potential.`,
- 63: `GABAB is GABA-B / GIRK activation -- time-integrated value with rise and decay time constants`,
- 64: `GABABx is GABA-B / GIRK internal drive variable -- gets the raw activation and decays`,
- 65: `Gvgcc is conductance (via Ca) for VGCC voltage gated calcium channels`,
- 66: `VgccM is activation gate of VGCC channels`,
- 67: `VgccH inactivation gate of VGCC channels`,
- 68: `VgccCa is instantaneous VGCC calcium flux -- can be driven by spiking or directly from Gvgcc`,
- 69: `VgccCaInt time-integrated VGCC calcium flux -- this is actually what drives learning`,
- 70: `SKCaIn is intracellular calcium store level, available to be released with spiking as SKCaR, which can bind to SKCa receptors and drive K current. replenishment is a function of spiking activity being below a threshold`,
- 71: `SKCaR released amount of intracellular calcium, from SKCaIn, as a function of spiking events. this can bind to SKCa channels and drive K currents.`,
- 72: `SKCaM is Calcium-gated potassium channel gating factor, driven by SKCaR via a Hill equation as in chans.SKPCaParams.`,
- 73: `Gsk is Calcium-gated potassium channel conductance as a function of Gbar * SKCaM.`,
- 74: `Burst is 5IB bursting activation value, computed by thresholding regular CaSpkP value in Super superficial layers`,
- 75: `BurstPrv is previous Burst bursting activation from prior time step -- used for context-based learning`,
- 76: `CtxtGe is context (temporally delayed) excitatory conductance, driven by deep bursting at end of the plus phase, for CT layers.`,
- 77: `CtxtGeRaw is raw update of context (temporally delayed) excitatory conductance, driven by deep bursting at end of the plus phase, for CT layers.`,
- 78: `CtxtGeOrig is original CtxtGe value prior to any decay factor -- updates at end of plus phase.`,
- 79: `NrnFlags are bit flags for binary state variables, which are converted to / from uint32. These need to be in Vars because they can be differential per data (for ext inputs) and are writable (indexes are read only).`,
- 80: ``,
-}
-
-func (i NeuronVars) Desc() string {
- if str, ok := _NeuronVars_descMap[i]; ok {
- return str
- }
- return "NeuronVars(" + strconv.FormatInt(int64(i), 10) + ")"
-}
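Throughout this changeset, hand-maintained stringer output like the block removed above is superseded by generated enum code from goki's enumgen, driven by the `//enums:enum` directives added below. A minimal sketch of the expected replacement API, assuming the generated methods match the SetString calls introduced later in this diff (the Desc behavior is an assumption here):

	// sketch, in package axon, after `goki generate` has produced enumgen.go:
	func exampleEnumUse() {
		var nv NeuronVars
		if err := nv.SetString("CaSpkP"); err != nil { // replaces the old FromString
			log.Println(err)
		}
		_ = nv.String() // name lookup, as before
		_ = nv.Desc()   // description, now drawn from the doc comment
	}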
diff --git a/axon/pcore_layers.go b/axon/pcore_layers.go
index 601610139..9ab3d3aea 100644
--- a/axon/pcore_layers.go
+++ b/axon/pcore_layers.go
@@ -8,9 +8,8 @@ import (
"log"
"strings"
- "github.com/goki/gosl/slbool"
- "github.com/goki/ki/bools"
- "github.com/goki/ki/kit"
+ "goki.dev/glop/num"
+ "goki.dev/gosl/v2/slbool"
)
//gosl: start pcore_layers
@@ -23,32 +22,32 @@ import (
// Must set Learn.NeuroMod.DAMod = D1Mod or D2Mod via SetBuildConfig("DAMod").
type MatrixParams struct {
- // [def: 0.05] threshold on layer Avg SpkMax for Matrix Go and VThal layers to count as having gated
- GateThr float32 `def:"0.05" desc:"threshold on layer Avg SpkMax for Matrix Go and VThal layers to count as having gated"`
+ // threshold on layer Avg SpkMax for Matrix Go and VThal layers to count as having gated
+ GateThr float32 `def:"0.05"`
// is this a ventral striatum (VS) matrix layer? if true, the gating status of this layer is recorded in the Global state, and used for updating effort and other factors.
- IsVS slbool.Bool `desc:"is this a ventral striatum (VS) matrix layer? if true, the gating status of this layer is recorded in the Global state, and used for updating effort and other factors."`
+ IsVS slbool.Bool
// index of other matrix (Go if we are NoGo and vice-versa). Set during Build from BuildConfig OtherMatrixName
- OtherMatrixIdx int32 `inactive:"+" desc:"index of other matrix (Go if we are NoGo and vice-versa). Set during Build from BuildConfig OtherMatrixName"`
+ OtherMatrixIdx int32 `inactive:"+"`
// index of thalamus layer that we gate. needed to get gating information. Set during Build from BuildConfig ThalLay1Name if present -- -1 if not used
- ThalLay1Idx int32 `inactive:"+" desc:"index of thalamus layer that we gate. needed to get gating information. Set during Build from BuildConfig ThalLay1Name if present -- -1 if not used"`
+ ThalLay1Idx int32 `inactive:"+"`
// index of thalamus layer that we gate. needed to get gating information. Set during Build from BuildConfig ThalLay2Name if present -- -1 if not used
- ThalLay2Idx int32 `inactive:"+" desc:"index of thalamus layer that we gate. needed to get gating information. Set during Build from BuildConfig ThalLay2Name if present -- -1 if not used"`
+ ThalLay2Idx int32 `inactive:"+"`
// index of thalamus layer that we gate. needed to get gating information. Set during Build from BuildConfig ThalLay3Name if present -- -1 if not used
- ThalLay3Idx int32 `inactive:"+" desc:"index of thalamus layer that we gate. needed to get gating information. Set during Build from BuildConfig ThalLay3Name if present -- -1 if not used"`
+ ThalLay3Idx int32 `inactive:"+"`
// index of thalamus layer that we gate. needed to get gating information. Set during Build from BuildConfig ThalLay4Name if present -- -1 if not used
- ThalLay4Idx int32 `inactive:"+" desc:"index of thalamus layer that we gate. needed to get gating information. Set during Build from BuildConfig ThalLay4Name if present -- -1 if not used"`
+ ThalLay4Idx int32 `inactive:"+"`
// index of thalamus layer that we gate. needed to get gating information. Set during Build from BuildConfig ThalLay5Name if present -- -1 if not used
- ThalLay5Idx int32 `inactive:"+" desc:"index of thalamus layer that we gate. needed to get gating information. Set during Build from BuildConfig ThalLay5Name if present -- -1 if not used"`
+ ThalLay5Idx int32 `inactive:"+"`
// index of thalamus layer that we gate. needed to get gating information. Set during Build from BuildConfig ThalLay6Name if present -- -1 if not used
- ThalLay6Idx int32 `inactive:"+" desc:"index of thalamus layer that we gate. needed to get gating information. Set during Build from BuildConfig ThalLay6Name if present -- -1 if not used"`
+ ThalLay6Idx int32 `inactive:"+"`
pad, pad1, pad2 int32
}
@@ -61,7 +60,7 @@ func (mp *MatrixParams) Update() {
}
// GPLayerTypes is a GPLayer axon-specific layer type enum.
-type GPLayerTypes int32
+type GPLayerTypes int32 //enums:enum
// The GPLayer types
const (
@@ -78,8 +77,6 @@ const (
// GPi is the inner globus pallidus, functionally equivalent to SNr,
// receiving from MtxGo and GPeIn, and sending inhibition to VThal
GPi
-
- GPLayerTypesN
)
// GPLayer represents a globus pallidus layer, including:
@@ -87,8 +84,8 @@ const (
// Typically just a single unit per Pool representing a given stripe.
type GPParams struct {
- // [view: inline] [viewif: LayType=GPLayer] type of GP Layer -- must set during config using SetBuildConfig of GPType.
- GPType GPLayerTypes `viewif:"LayType=GPLayer" view:"inline" desc:"type of GP Layer -- must set during config using SetBuildConfig of GPType."`
+ // type of GP Layer -- must set during config using SetBuildConfig of GPType.
+ GPType GPLayerTypes `viewif:"LayType=GPLayer" view:"inline"`
pad, pad1, pad2 uint32
}
@@ -164,7 +161,7 @@ func (ly *Layer) MatrixGated(ctx *Context) {
}
}
if ctx.PlusPhase.IsTrue() && ly.Params.Matrix.IsVS.IsTrue() {
- SetGlbV(ctx, di, GvVSMatrixJustGated, bools.ToFloat32(mtxGated))
+ SetGlbV(ctx, di, GvVSMatrixJustGated, num.FromBool[float32](mtxGated))
if mtxGated {
SetGlbUSposV(ctx, di, GvVSMatrixPoolGated, uint32(poolIdx), 1)
}
@@ -263,7 +260,7 @@ func (ly *Layer) MatrixPostBuild() {
dm, err := ly.BuildConfigByName("DAMod")
if err == nil {
- err = ly.Params.Learn.NeuroMod.DAMod.FromString(dm)
+ err = ly.Params.Learn.NeuroMod.DAMod.SetString(dm)
if err != nil {
log.Println(err)
}
@@ -341,17 +338,10 @@ func (ly *Layer) GPiDefaults() {
}
}
-//go:generate stringer -type=GPLayerTypes
-
-var KiT_GPLayerTypes = kit.Enums.AddEnum(GPLayerTypesN, kit.NotBitFlag, nil)
-
-func (ev GPLayerTypes) MarshalJSON() ([]byte, error) { return kit.EnumMarshalJSON(ev) }
-func (ev *GPLayerTypes) UnmarshalJSON(b []byte) error { return kit.EnumUnmarshalJSON(ev, b) }
-
func (ly *Layer) GPPostBuild() {
gpnm, err := ly.BuildConfigByName("GPType")
if err == nil {
- err = ly.Params.GP.GPType.FromString(gpnm)
+ err = ly.Params.GP.GPType.SetString(gpnm)
if err != nil {
log.Println(err)
}
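The old bools.ToFloat32 helper is replaced by the generic num.FromBool from goki.dev/glop/num, as used in MatrixGated above. A standalone sketch of the conversion:

	package main

	import (
		"fmt"

		"goki.dev/glop/num"
	)

	func main() {
		gated := true
		fmt.Println(num.FromBool[float32](gated)) // 1
		fmt.Println(num.FromBool[int32](false))   // 0
	}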
diff --git a/axon/pcore_net.go b/axon/pcore_net.go
index 6567d068c..545b02aae 100644
--- a/axon/pcore_net.go
+++ b/axon/pcore_net.go
@@ -5,7 +5,7 @@
package axon
import (
- "github.com/emer/emergent/prjn"
+ "github.com/emer/emergent/v2/prjn"
)
// AddBG adds MtxGo, MtxNo, GPeOut, GPeIn, GPeTA, STNp, STNs, GPi layers,
diff --git a/axon/pcore_prjns.go b/axon/pcore_prjns.go
index f9bcb1606..a041539bf 100644
--- a/axon/pcore_prjns.go
+++ b/axon/pcore_prjns.go
@@ -13,8 +13,8 @@ package axon
// Trace is applied to DWt and reset at the time of reward.
type MatrixPrjnParams struct {
- // [def: 1] learning rate for when ACh was elevated but no gating took place, in proportion to the level of ACh that indicates the salience of the event. A low level of this learning prevents the highly maladaptive situation where the BG is not gating and thus no learning can occur.
- NoGateLRate float32 `def:"1" desc:"learning rate for when ACh was elevated but no gating took place, in proportion to the level of ACh that indicates the salience of the event. A low level of this learning prevents the highly maladaptive situation where the BG is not gating and thus no learning can occur."`
+ // learning rate for when ACh was elevated but no gating took place, in proportion to the level of ACh that indicates the salience of the event. A low level of this learning prevents the highly maladaptive situation where the BG is not gating and thus no learning can occur.
+ NoGateLRate float32 `def:"1"`
pad, pad1, pad2 float32
}
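A sketch of how a no-gate learning rate of this kind could enter a trace-based weight change; this is a hypothetical helper for illustration, not the actual kernel code, which lives in the prjn learning path:

	// noGateDWt scales the synaptic trace by ACh salience when no gating
	// occurred (illustrative only; names are hypothetical).
	func noGateDWt(noGateLRate, ach, trace float32) float32 {
		return noGateLRate * ach * trace
	}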
diff --git a/axon/pool.go b/axon/pool.go
index 14abedc62..f0dea04ad 100644
--- a/axon/pool.go
+++ b/axon/pool.go
@@ -6,8 +6,8 @@ package axon
import (
"github.com/emer/axon/fsfffb"
- "github.com/goki/gosl/slbool"
- "github.com/goki/mat32"
+ "goki.dev/gosl/v2/slbool"
+ "goki.dev/mat32/v2"
)
//gosl: hlsl pool
@@ -24,17 +24,17 @@ import (
// based on values from the prior cycle -- thus are 1 cycle behind in general.
type AvgMaxPhases struct {
- // [view: inline] updated every cycle -- this is the source of all subsequent time scales
- Cycle AvgMaxI32 `view:"inline" desc:"updated every cycle -- this is the source of all subsequent time scales"`
+ // updated every cycle -- this is the source of all subsequent time scales
+ Cycle AvgMaxI32 `view:"inline"`
- // [view: inline] at the end of the minus phase
- Minus AvgMaxI32 `view:"inline" desc:"at the end of the minus phase"`
+ // at the end of the minus phase
+ Minus AvgMaxI32 `view:"inline"`
- // [view: inline] at the end of the plus phase
- Plus AvgMaxI32 `view:"inline" desc:"at the end of the plus phase"`
+ // at the end of the plus phase
+ Plus AvgMaxI32 `view:"inline"`
- // [view: inline] at the end of the previous plus phase
- Prev AvgMaxI32 `view:"inline" desc:"at the end of the previous plus phase"`
+ // at the end of the previous plus phase
+ Prev AvgMaxI32 `view:"inline"`
}
// CycleToMinus grabs current Cycle values into the Minus phase values
@@ -67,23 +67,23 @@ func (am *AvgMaxPhases) Zero() {
// based on values from the prior cycle -- thus are 1 cycle behind in general.
type PoolAvgMax struct {
- // [view: inline] avg and maximum CaSpkP (continuously updated at roughly 40 msec integration window timescale, ends up capturing potentiation, plus-phase signal) -- this is the primary variable to use for tracking overall pool activity
- CaSpkP AvgMaxPhases `inactive:"+" view:"inline" desc:"avg and maximum CaSpkP (continuously updated at roughly 40 msec integration window timescale, ends up capturing potentiation, plus-phase signal) -- this is the primary variable to use for tracking overall pool activity"`
+ // avg and maximum CaSpkP (continuously updated at roughly 40 msec integration window timescale, ends up capturing potentiation, plus-phase signal) -- this is the primary variable to use for tracking overall pool activity
+ CaSpkP AvgMaxPhases `inactive:"+" view:"inline"`
- // [view: inline] avg and maximum CaSpkD longer-term depression / DAPK1 signal in layer
- CaSpkD AvgMaxPhases `inactive:"+" view:"inline" desc:"avg and maximum CaSpkD longer-term depression / DAPK1 signal in layer"`
+ // avg and maximum CaSpkD longer-term depression / DAPK1 signal in layer
+ CaSpkD AvgMaxPhases `inactive:"+" view:"inline"`
- // [view: inline] avg and maximum SpkMax value (based on CaSpkP) -- reflects peak activity at any point across the cycle
- SpkMax AvgMaxPhases `inactive:"+" view:"inline" desc:"avg and maximum SpkMax value (based on CaSpkP) -- reflects peak activity at any point across the cycle"`
+ // avg and maximum SpkMax value (based on CaSpkP) -- reflects peak activity at any point across the cycle
+ SpkMax AvgMaxPhases `inactive:"+" view:"inline"`
- // [view: inline] avg and maximum Act firing rate value
- Act AvgMaxPhases `inactive:"+" view:"inline" desc:"avg and maximum Act firing rate value"`
+ // avg and maximum Act firing rate value
+ Act AvgMaxPhases `inactive:"+" view:"inline"`
- // [view: inline] avg and maximum GeInt integrated running-average excitatory conductance value
- GeInt AvgMaxPhases `inactive:"+" view:"inline" desc:"avg and maximum GeInt integrated running-average excitatory conductance value"`
+ // avg and maximum GeInt integrated running-average excitatory conductance value
+ GeInt AvgMaxPhases `inactive:"+" view:"inline"`
- // [view: inline] avg and maximum GiInt integrated running-average inhibitory conductance value
- GiInt AvgMaxPhases `inactive:"+" view:"inline" desc:"avg and maximum GiInt integrated running-average inhibitory conductance value"`
+ // avg and maximum GiInt integrated running-average inhibitory conductance value
+ GiInt AvgMaxPhases `inactive:"+" view:"inline"`
}
// SetN sets the N for aggregation
@@ -177,33 +177,33 @@ func (am *PoolAvgMax) Calc(refIdx int32) {
type Pool struct {
// starting and ending (exclusive) layer-wise indexes for the list of neurons in this pool
- StIdx, EdIdx uint32 `inactive:"+" desc:"starting and ending (exlusive) layer-wise indexes for the list of neurons in this pool"`
+ StIdx, EdIdx uint32 `inactive:"+"`
- // [view: -] layer index in global layer list
- LayIdx uint32 `view:"-" desc:"layer index in global layer list"`
+ // layer index in global layer list
+ LayIdx uint32 `view:"-"`
- // [view: -] data parallel index (innermost index per layer)
- DataIdx uint32 `view:"-" desc:"data parallel index (innermost index per layer)"`
+ // data parallel index (innermost index per layer)
+ DataIdx uint32 `view:"-"`
- // [view: -] pool index in global pool list: [Layer][Pool][Data]
- PoolIdx uint32 `view:"-" desc:"pool index in global pool list: [Layer][Pool][Data]"`
+ // pool index in global pool list: [Layer][Pool][Data]
+ PoolIdx uint32 `view:"-"`
// is this a layer-wide pool? if not, it represents a sub-pool of units within a 4D layer
- IsLayPool slbool.Bool `inactive:"+" desc:"is this a layer-wide pool? if not, it represents a sub-pool of units within a 4D layer"`
+ IsLayPool slbool.Bool `inactive:"+"`
// for special types where relevant (e.g., MatrixLayer, BGThalLayer), indicates if the pool was gated
- Gated slbool.Bool `inactive:"+" desc:"for special types where relevant (e.g., MatrixLayer, BGThalLayer), indicates if the pool was gated"`
+ Gated slbool.Bool `inactive:"+"`
pad uint32
// fast-slow FFFB inhibition values
- Inhib fsfffb.Inhib `inactive:"+" desc:"fast-slow FFFB inhibition values"`
+ Inhib fsfffb.Inhib `inactive:"+"`
// average and max values for relevant variables in this pool, at different time scales
- AvgMax PoolAvgMax `desc:"average and max values for relevant variables in this pool, at different time scales"`
+ AvgMax PoolAvgMax
- // [view: inline] absolute value of AvgDif differences from actual neuron ActPct relative to TrgAvg
- AvgDif AvgMaxI32 `inactive:"+" view:"inline" desc:"absolute value of AvgDif differences from actual neuron ActPct relative to TrgAvg"`
+ // absolute value of AvgDif differences from actual neuron ActPct relative to TrgAvg
+ AvgDif AvgMaxI32 `inactive:"+" view:"inline"`
}
// Init is called during InitActs
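For reference, the pool statistics above are read through the phase fields, e.g. (field paths follow the struct definitions; it is assumed here that AvgMaxI32 exposes float32 Avg and Max values):

	func examplePoolStats(pl *Pool) (minusAvg, cycleMax float32) {
		minusAvg = pl.AvgMax.CaSpkP.Minus.Avg // avg CaSpkP at end of minus phase
		cycleMax = pl.AvgMax.Act.Cycle.Max    // peak Act value this cycle
		return
	}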
diff --git a/axon/pool_test.go b/axon/pool_test.go
index 6beba9bc2..b77a1e32c 100644
--- a/axon/pool_test.go
+++ b/axon/pool_test.go
@@ -10,8 +10,8 @@ import (
"os"
"testing"
- "github.com/emer/emergent/params"
- "github.com/emer/emergent/prjn"
+ "github.com/emer/emergent/v2/params"
+ "github.com/emer/emergent/v2/prjn"
)
// Note: subsequent params applied after Base
diff --git a/axon/prjn.go b/axon/prjn.go
index 5a936cb6e..c95721ccb 100644
--- a/axon/prjn.go
+++ b/axon/prjn.go
@@ -9,12 +9,10 @@ import (
"io"
"strconv"
- "github.com/emer/emergent/erand"
- "github.com/emer/emergent/weights"
- "github.com/emer/etable/etensor"
- "github.com/goki/ki/indent"
- "github.com/goki/ki/ki"
- "github.com/goki/ki/kit"
+ "github.com/emer/emergent/v2/erand"
+ "github.com/emer/emergent/v2/weights"
+ "goki.dev/etable/v2/etensor"
+ "goki.dev/glop/indent"
)
// https://github.com/kisvegabor/abbreviations-in-code suggests Buf instead of Buff
@@ -28,11 +26,9 @@ type Prjn struct {
PrjnBase
// all prjn-level parameters -- these must remain constant once configured
- Params *PrjnParams `desc:"all prjn-level parameters -- these must remain constant once configured"`
+ Params *PrjnParams
}
-var KiT_Prjn = kit.Types.AddType(&Prjn{}, PrjnProps)
-
// Object returns the object with parameters to be set by emer.Params
func (pj *Prjn) Object() any {
return pj.Params
@@ -538,7 +534,3 @@ func (pj *Prjn) InitGBuffs() {
pj.GSyns[ri] = 0
}
}
-
-var PrjnProps = ki.Props{
- "EnumType:Typ": KiT_PrjnTypes, // uses our PrjnTypes for GUI
-}
diff --git a/axon/prjn_compute.go b/axon/prjn_compute.go
index 6111a686d..ae89030d5 100644
--- a/axon/prjn_compute.go
+++ b/axon/prjn_compute.go
@@ -188,8 +188,8 @@ func (pj *Prjn) SWtFmWt(ctx *Context) {
if rlay.Params.IsTarget() {
return
}
- max := pj.Params.SWts.Limit.Max
- min := pj.Params.SWts.Limit.Min
+ mx := pj.Params.SWts.Limit.Max
+ mn := pj.Params.SWts.Limit.Min
lr := pj.Params.SWts.Adapt.LRate
for lni := uint32(0); lni < rlay.NNeurons; lni++ {
syIdxs := pj.RecvSynIdxs(lni)
@@ -202,9 +202,9 @@ func (pj *Prjn) SWtFmWt(ctx *Context) {
syni := pj.SynStIdx + syi
swt := SynV(ctx, syni, SWt)
if SynV(ctx, syni, DSWt) >= 0 { // softbound for SWt
- MulSynV(ctx, syni, DSWt, (max - swt))
+ MulSynV(ctx, syni, DSWt, (mx - swt))
} else {
- MulSynV(ctx, syni, DSWt, (swt - min))
+ MulSynV(ctx, syni, DSWt, (swt - mn))
}
avgDWt += SynV(ctx, syni, DSWt)
}
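The renaming above (max, min to mx, mn), presumably to avoid clashing with the min and max builtins added in Go 1.21, leaves the soft-bounding rule unchanged: positive SWt changes are scaled by the remaining headroom to the upper limit, negative ones by the distance to the floor. As a standalone sketch:

	// softBound applies the same soft-bounding rule to a weight change.
	func softBound(dswt, swt, mn, mx float32) float32 {
		if dswt >= 0 {
			return dswt * (mx - swt) // headroom above
		}
		return dswt * (swt - mn) // room below
	}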
diff --git a/axon/prjnbase.go b/axon/prjnbase.go
index cec124f93..a6ef5dabd 100644
--- a/axon/prjnbase.go
+++ b/axon/prjnbase.go
@@ -8,13 +8,13 @@ import (
"errors"
"log"
- "github.com/emer/emergent/emer"
- "github.com/emer/emergent/params"
- "github.com/emer/emergent/prjn"
- "github.com/emer/etable/etensor"
- "github.com/emer/etable/minmax"
- "github.com/goki/gi/giv"
- "github.com/goki/mat32"
+ "github.com/emer/emergent/v2/emer"
+ "github.com/emer/emergent/v2/params"
+ "github.com/emer/emergent/v2/prjn"
+ "goki.dev/etable/v2/etensor"
+ "goki.dev/etable/v2/minmax"
+ "goki.dev/gi/v2/giv"
+ "goki.dev/mat32/v2"
)
// index naming:
@@ -31,68 +31,68 @@ import (
// accessed via the AxonPrj field.
type PrjnBase struct {
- // [view: -] we need a pointer to ourselves as an AxonPrjn, which can always be used to extract the true underlying type of object when prjn is embedded in other structs -- function receivers do not have this ability so this is necessary.
- AxonPrj AxonPrjn `copy:"-" json:"-" xml:"-" view:"-" desc:"we need a pointer to ourselves as an AxonPrjn, which can always be used to extract the true underlying type of object when prjn is embedded in other structs -- function receivers do not have this ability so this is necessary."`
+ // we need a pointer to ourselves as an AxonPrjn, which can always be used to extract the true underlying type of object when prjn is embedded in other structs -- function receivers do not have this ability so this is necessary.
+ AxonPrj AxonPrjn `copy:"-" json:"-" xml:"-" view:"-"`
// inactivate this projection -- allows for easy experimentation
- Off bool `desc:"inactivate this projection -- allows for easy experimentation"`
+ Off bool
// Class is for applying parameter styles, can be space separated multiple tags
- Cls string `desc:"Class is for applying parameter styles, can be space separated multple tags"`
+ Cls string
// can record notes about this projection here
- Notes string `desc:"can record notes about this projection here"`
+ Notes string
// sending layer for this projection
- Send *Layer `desc:"sending layer for this projection"`
+ Send *Layer
// receiving layer for this projection
- Recv *Layer `desc:"receiving layer for this projection"`
+ Recv *Layer
- // [tableview: -] pattern of connectivity
- Pat prjn.Pattern `tableview:"-" desc:"pattern of connectivity"`
+ // pattern of connectivity
+ Pat prjn.Pattern `tableview:"-"`
// type of projection -- Forward, Back, Lateral, or extended type in specialized algorithms -- matches against .Cls parameter styles (e.g., .Back etc)
- Typ PrjnTypes `desc:"type of projection -- Forward, Back, Lateral, or extended type in specialized algorithms -- matches against .Cls parameter styles (e.g., .Back etc)"`
+ Typ PrjnTypes
- // [tableview: -] default parameters that are applied prior to user-set parameters -- these are useful for specific functionality in specialized brain areas (e.g., PVLV, BG etc) not associated with a prjn type, which otherwise is used to hard-code initial default parameters -- typically just set to a literal map.
- DefParams params.Params `tableview:"-" desc:"default parameters that are applied prior to user-set parameters -- these are useful for specific functionality in specialized brain areas (e.g., PVLV, BG etc) not associated with a prjn type, which otherwise is used to hard-code initial default parameters -- typically just set to a literal map."`
+ // default parameters that are applied prior to user-set parameters -- these are useful for specific functionality in specialized brain areas (e.g., PVLV, BG etc) not associated with a prjn type, which otherwise is used to hard-code initial default parameters -- typically just set to a literal map.
+ DefParams params.Params `tableview:"-"`
- // [tableview: -] provides a history of parameters applied to the layer
- ParamsHistory params.HistoryImpl `tableview:"-" desc:"provides a history of parameters applied to the layer"`
+ // provides a history of parameters applied to the layer
+ ParamsHistory params.HistoryImpl `tableview:"-"`
- // [view: inline] [tableview: -] average and maximum number of recv connections in the receiving layer
- RecvConNAvgMax minmax.AvgMax32 `tableview:"-" inactive:"+" view:"inline" desc:"average and maximum number of recv connections in the receiving layer"`
+ // average and maximum number of recv connections in the receiving layer
+ RecvConNAvgMax minmax.AvgMax32 `tableview:"-" inactive:"+" view:"inline"`
- // [view: inline] [tableview: -] average and maximum number of sending connections in the sending layer
- SendConNAvgMax minmax.AvgMax32 `tableview:"-" inactive:"+" view:"inline" desc:"average and maximum number of sending connections in the sending layer"`
+ // average and maximum number of sending connections in the sending layer
+ SendConNAvgMax minmax.AvgMax32 `tableview:"-" inactive:"+" view:"inline"`
- // [view: -] start index into global Synapse array: [Layer][SendPrjns][Synapses]
- SynStIdx uint32 `view:"-" desc:"start index into global Synapse array: [Layer][SendPrjns][Synapses]"`
+ // start index into global Synapse array: [Layer][SendPrjns][Synapses]
+ SynStIdx uint32 `view:"-"`
- // [view: -] number of synapses in this projection
- NSyns uint32 `view:"-" desc:"number of synapses in this projection"`
+ // number of synapses in this projection
+ NSyns uint32 `view:"-"`
- // [view: -] [RecvNeurons] starting offset and N cons for each recv neuron, for indexing into the RecvSynIdx array of indexes into the Syns synapses, which are organized sender-based. This is locally-managed during build process, but also copied to network global PrjnRecvCons slice for GPU usage.
- RecvCon []StartN `view:"-" desc:"[RecvNeurons] starting offset and N cons for each recv neuron, for indexing into the RecvSynIdx array of indexes into the Syns synapses, which are organized sender-based. This is locally-managed during build process, but also copied to network global PrjnRecvCons slice for GPU usage."`
+ // starting offset and N cons for each recv neuron, for indexing into the RecvSynIdx array of indexes into the Syns synapses, which are organized sender-based. This is locally-managed during build process, but also copied to network global PrjnRecvCons slice for GPU usage.
+ RecvCon []StartN `view:"-"`
- // [view: -] [SendNeurons][SendCon.N RecvNeurons] index into Syns synaptic state for each sending unit and connection within that, for the sending projection which does not own the synapses, and instead indexes into recv-ordered list
- RecvSynIdx []uint32 `view:"-" desc:"[SendNeurons][SendCon.N RecvNeurons] index into Syns synaptic state for each sending unit and connection within that, for the sending projection which does not own the synapses, and instead indexes into recv-ordered list"`
+ // index into Syns synaptic state for each sending unit and connection within that, for the sending projection which does not own the synapses, and instead indexes into recv-ordered list
+ RecvSynIdx []uint32 `view:"-"`
- // [view: -] [RecvNeurons][RecvCon.N SendingNeurons] for each recv synapse, this is index of *sending* neuron It is generally preferable to use the Synapse SendIdx where needed, instead of this slice, because then the memory access will be close by other values on the synapse.
- RecvConIdx []uint32 `view:"-" desc:"[RecvNeurons][RecvCon.N SendingNeurons] for each recv synapse, this is index of *sending* neuron It is generally preferable to use the Synapse SendIdx where needed, instead of this slice, because then the memory access will be close by other values on the synapse."`
+ // for each recv synapse, this is the index of the *sending* neuron. It is generally preferable to use the Synapse SendIdx where needed, instead of this slice, because then the memory access will be close to other values on the synapse.
+ RecvConIdx []uint32 `view:"-"`
- // [view: -] [SendNeurons] starting offset and N cons for each sending neuron, for indexing into the Syns synapses, which are organized sender-based. This is locally-managed during build process, but also copied to network global PrjnSendCons slice for GPU usage.
- SendCon []StartN `view:"-" desc:"[SendNeurons] starting offset and N cons for each sending neuron, for indexing into the Syns synapses, which are organized sender-based. This is locally-managed during build process, but also copied to network global PrjnSendCons slice for GPU usage."`
+ // starting offset and N cons for each sending neuron, for indexing into the Syns synapses, which are organized sender-based. This is locally-managed during build process, but also copied to network global PrjnSendCons slice for GPU usage.
+ SendCon []StartN `view:"-"`
- // [view: -] [SendNeurons[[SendCon.N RecvNeurons] index of other neuron that receives the sender's synaptic input, ordered by the sending layer's order of units as the outer loop, and SendCon.N receiving units within that. It is generally preferable to use the Synapse RecvIdx where needed, instead of this slice, because then the memory access will be close by other values on the synapse.
- SendConIdx []uint32 `view:"-" desc:"[SendNeurons[[SendCon.N RecvNeurons] index of other neuron that receives the sender's synaptic input, ordered by the sending layer's order of units as the outer loop, and SendCon.N receiving units within that. It is generally preferable to use the Synapse RecvIdx where needed, instead of this slice, because then the memory access will be close by other values on the synapse."`
+ // index of other neuron that receives the sender's synaptic input, ordered by the sending layer's order of units as the outer loop, and SendCon.N receiving units within that. It is generally preferable to use the Synapse RecvIdx where needed, instead of this slice, because then the memory access will be close to other values on the synapse.
+ SendConIdx []uint32 `view:"-"`
- // [view: -] [RecvNeurons][Params.Com.MaxDelay][MaxData] Ge or Gi conductance ring buffer for each neuron, accessed through Params.Com.ReadIdx, WriteIdx -- scale * weight is added with Com delay offset -- a subslice from network PrjnGBuf. Uses int-encoded float values for faster GPU atomic integration
- GBuf []int32 `view:"-" desc:"[RecvNeurons][Params.Com.MaxDelay][MaxData] Ge or Gi conductance ring buffer for each neuron, accessed through Params.Com.ReadIdx, WriteIdx -- scale * weight is added with Com delay offset -- a subslice from network PrjnGBuf. Uses int-encoded float values for faster GPU atomic integration"`
+ // Ge or Gi conductance ring buffer for each neuron, accessed through Params.Com.ReadIdx, WriteIdx -- scale * weight is added with Com delay offset -- a subslice from network PrjnGBuf. Uses int-encoded float values for faster GPU atomic integration
+ GBuf []int32 `view:"-"`
- // [view: -] [RecvNeurons][MaxData] projection-level synaptic conductance values, integrated by prjn before being integrated at the neuron level, which enables the neuron to perform non-linear integration as needed -- a subslice from network PrjnGSyn.
- GSyns []float32 `view:"-" desc:"[RecvNeurons][MaxData] projection-level synaptic conductance values, integrated by prjn before being integrated at the neuron level, which enables the neuron to perform non-linear integration as needed -- a subslice from network PrjnGSyn."`
+ // projection-level synaptic conductance values, integrated by prjn before being integrated at the neuron level, which enables the neuron to perform non-linear integration as needed -- a subslice from network PrjnGSyn.
+ GSyns []float32 `view:"-"`
}
// emer.Prjn interface
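GBuf's int-encoded float values allow conductances to be accumulated with integer atomics on the GPU. A sketch of the general fixed-point idea (the scale constant is illustrative, not axon's actual value):

	const gFactor = float32(1 << 24) // illustrative fixed-point scale

	func encodeG(g float32) int32 { return int32(g * gFactor) }
	func decodeG(i int32) float32 { return float32(i) / gFactor }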
diff --git a/axon/prjngtypes_string.go b/axon/prjngtypes_string.go
deleted file mode 100644
index 48f4977ea..000000000
--- a/axon/prjngtypes_string.go
+++ /dev/null
@@ -1,59 +0,0 @@
-// Code generated by "stringer -type=PrjnGTypes"; DO NOT EDIT.
-
-package axon
-
-import (
- "errors"
- "strconv"
-)
-
-var _ = errors.New("dummy error")
-
-func _() {
- // An "invalid array index" compiler error signifies that the constant values have changed.
- // Re-run the stringer command to generate them again.
- var x [1]struct{}
- _ = x[ExcitatoryG-0]
- _ = x[InhibitoryG-1]
- _ = x[ModulatoryG-2]
- _ = x[MaintG-3]
- _ = x[ContextG-4]
- _ = x[PrjnGTypesN-5]
-}
-
-const _PrjnGTypes_name = "ExcitatoryGInhibitoryGModulatoryGMaintGContextGPrjnGTypesN"
-
-var _PrjnGTypes_index = [...]uint8{0, 11, 22, 33, 39, 47, 58}
-
-func (i PrjnGTypes) String() string {
- if i < 0 || i >= PrjnGTypes(len(_PrjnGTypes_index)-1) {
- return "PrjnGTypes(" + strconv.FormatInt(int64(i), 10) + ")"
- }
- return _PrjnGTypes_name[_PrjnGTypes_index[i]:_PrjnGTypes_index[i+1]]
-}
-
-func (i *PrjnGTypes) FromString(s string) error {
- for j := 0; j < len(_PrjnGTypes_index)-1; j++ {
- if s == _PrjnGTypes_name[_PrjnGTypes_index[j]:_PrjnGTypes_index[j+1]] {
- *i = PrjnGTypes(j)
- return nil
- }
- }
- return errors.New("String: " + s + " is not a valid option for type: PrjnGTypes")
-}
-
-var _PrjnGTypes_descMap = map[PrjnGTypes]string{
- 0: `Excitatory projections drive Ge conductance on receiving neurons, which send to GiRaw and GiSyn neuron variables.`,
- 1: `Inhibitory projections drive Gi inhibitory conductance, which send to GiRaw and GiSyn neuron variables.`,
- 2: `Modulatory projections have a multiplicative effect on other inputs, which send to GModRaw and GModSyn neuron variables.`,
- 3: `Maintenance projections drive unique set of NMDA channels that support strong active maintenance abilities. Send to GMaintRaw and GMaintSyn neuron variables.`,
- 4: `Context projections are for inputs to CT layers, which update only at the end of the plus phase, and send to CtxtGe.`,
- 5: ``,
-}
-
-func (i PrjnGTypes) Desc() string {
- if str, ok := _PrjnGTypes_descMap[i]; ok {
- return str
- }
- return "PrjnGTypes(" + strconv.FormatInt(int64(i), 10) + ")"
-}
diff --git a/axon/prjnparams.go b/axon/prjnparams.go
index 6994bbdc4..69096b7c6 100644
--- a/axon/prjnparams.go
+++ b/axon/prjnparams.go
@@ -8,7 +8,7 @@ import (
"encoding/json"
"strings"
- "github.com/goki/mat32"
+ "goki.dev/mat32/v2"
)
//gosl: hlsl prjnparams
@@ -31,10 +31,10 @@ import (
type StartN struct {
// starting offset
- Start uint32 `desc:"starting offset"`
+ Start uint32
- // number of items -- [Start:Start+N]
- N uint32 `desc:"number of items -- [Start:Start+N]"`
+ // number of items -- [Start:Start+N]
+ N uint32
pad, pad1 uint32 // todo: see if we can do without these?
}
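StartN addresses the sub-range [Start : Start+N] of a shared global array; a generic sketch of that indexing:

	// subRange returns the items addressed by sn within a global slice.
	func subRange[T any](sn StartN, all []T) []T {
		return all[sn.Start : sn.Start+sn.N]
	}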
@@ -77,10 +77,10 @@ func (pi *PrjnIdxs) SendNIdxToLayIdx(ni uint32) uint32 {
type GScaleVals struct {
// scaling factor for integrating synaptic input conductances (G's), originally computed as a function of sending layer activity and number of connections, and typically adapted from there -- see Prjn.PrjnScale adapt params
- Scale float32 `inactive:"+" desc:"scaling factor for integrating synaptic input conductances (G's), originally computed as a function of sending layer activity and number of connections, and typically adapted from there -- see Prjn.PrjnScale adapt params"`
+ Scale float32 `inactive:"+"`
// normalized relative proportion of total receiving conductance for this projection: PrjnScale.Rel / sum(PrjnScale.Rel across relevant prjns)
- Rel float32 `inactive:"+" desc:"normalized relative proportion of total receiving conductance for this projection: PrjnScale.Rel / sum(PrjnScale.Rel across relevant prjns)"`
+ Rel float32 `inactive:"+"`
pad, pad1 float32
}
@@ -91,39 +91,39 @@ type GScaleVals struct {
type PrjnParams struct {
// functional type of prjn -- determines functional code path for specialized layer types, and is synchronized with the Prjn.Typ value
- PrjnType PrjnTypes `desc:"functional type of prjn -- determines functional code path for specialized layer types, and is synchronized with the Prjn.Typ value"`
+ PrjnType PrjnTypes
pad, pad1, pad2 int32
- // [view: -] recv and send neuron-level projection index array access info
- Idxs PrjnIdxs `view:"-" desc:"recv and send neuron-level projection index array access info"`
+ // recv and send neuron-level projection index array access info
+ Idxs PrjnIdxs `view:"-"`
- // [view: inline] synaptic communication parameters: delay, probability of failure
- Com SynComParams `view:"inline" desc:"synaptic communication parameters: delay, probability of failure"`
+ // synaptic communication parameters: delay, probability of failure
+ Com SynComParams `view:"inline"`
- // [view: inline] projection scaling parameters for computing GScale: modulates overall strength of projection, using both absolute and relative factors, with adaptation option to maintain target max conductances
- PrjnScale PrjnScaleParams `view:"inline" desc:"projection scaling parameters for computing GScale: modulates overall strength of projection, using both absolute and relative factors, with adaptation option to maintain target max conductances"`
+ // projection scaling parameters for computing GScale: modulates overall strength of projection, using both absolute and relative factors, with adaptation option to maintain target max conductances
+ PrjnScale PrjnScaleParams `view:"inline"`
- // [view: add-fields] slowly adapting, structural weight value parameters, which control initial weight values and slower outer-loop adjustments
- SWts SWtParams `view:"add-fields" desc:"slowly adapting, structural weight value parameters, which control initial weight values and slower outer-loop adjustments"`
+ // slowly adapting, structural weight value parameters, which control initial weight values and slower outer-loop adjustments
+ SWts SWtParams `view:"add-fields"`
- // [view: add-fields] synaptic-level learning parameters for learning in the fast LWt values.
- Learn LearnSynParams `view:"add-fields" desc:"synaptic-level learning parameters for learning in the fast LWt values."`
+ // synaptic-level learning parameters for learning in the fast LWt values.
+ Learn LearnSynParams `view:"add-fields"`
- // [view: inline] conductance scaling values
- GScale GScaleVals `view:"inline" desc:"conductance scaling values"`
+ // conductance scaling values
+ GScale GScaleVals `view:"inline"`
- // [view: inline] [viewif: PrjnType=[RWPrjn,TDPredPrjn]] Params for RWPrjn and TDPredPrjn for doing dopamine-modulated learning for reward prediction: Da * Send activity. Use in RWPredLayer or TDPredLayer typically to generate reward predictions. If the Da sign is positive, the first recv unit learns fully; for negative, second one learns fully. Lower lrate applies for opposite cases. Weights are positive-only.
- RLPred RLPredPrjnParams `viewif:"PrjnType=[RWPrjn,TDPredPrjn]" view:"inline" desc:"Params for RWPrjn and TDPredPrjn for doing dopamine-modulated learning for reward prediction: Da * Send activity. Use in RWPredLayer or TDPredLayer typically to generate reward predictions. If the Da sign is positive, the first recv unit learns fully; for negative, second one learns fully. Lower lrate applies for opposite cases. Weights are positive-only."`
+ // Params for RWPrjn and TDPredPrjn for doing dopamine-modulated learning for reward prediction: Da * Send activity. Use in RWPredLayer or TDPredLayer typically to generate reward predictions. If the Da sign is positive, the first recv unit learns fully; for negative, second one learns fully. Lower lrate applies for opposite cases. Weights are positive-only.
+ RLPred RLPredPrjnParams `viewif:"PrjnType=[RWPrjn,TDPredPrjn]" view:"inline"`
- // [view: inline] [viewif: PrjnType=MatrixPrjn] for trace-based learning in the MatrixPrjn. A trace of synaptic co-activity is formed, and then modulated by dopamine whenever it occurs. This bridges the temporal gap between gating activity and subsequent activity, and is based biologically on synaptic tags. Trace is reset at time of reward based on ACh level from CINs.
- Matrix MatrixPrjnParams `viewif:"PrjnType=MatrixPrjn" view:"inline" desc:"for trace-based learning in the MatrixPrjn. A trace of synaptic co-activity is formed, and then modulated by dopamine whenever it occurs. This bridges the temporal gap between gating activity and subsequent activity, and is based biologically on synaptic tags. Trace is reset at time of reward based on ACh level from CINs."`
+ // for trace-based learning in the MatrixPrjn. A trace of synaptic co-activity is formed, and then modulated by dopamine whenever it occurs. This bridges the temporal gap between gating activity and subsequent activity, and is based biologically on synaptic tags. Trace is reset at time of reward based on ACh level from CINs.
+ Matrix MatrixPrjnParams `viewif:"PrjnType=MatrixPrjn" view:"inline"`
- // [view: inline] [viewif: PrjnType=BLAPrjn] Basolateral Amygdala projection parameters.
- BLA BLAPrjnParams `viewif:"PrjnType=BLAPrjn" view:"inline" desc:"Basolateral Amygdala projection parameters."`
+ // Basolateral Amygdala projection parameters.
+ BLA BLAPrjnParams `viewif:"PrjnType=BLAPrjn" view:"inline"`
- // [view: inline] [viewif: PrjnType=HipPrjn] Hip bench parameters.
- Hip HipPrjnParams `viewif:"PrjnType=HipPrjn" view:"inline" desc:"Hip bench parameters."`
+ // Hip bench parameters.
+ Hip HipPrjnParams `viewif:"PrjnType=HipPrjn" view:"inline"`
}
func (pj *PrjnParams) Defaults() {
diff --git a/axon/prjntypes.go b/axon/prjntypes.go
index 34fbbd03b..e718b17b8 100644
--- a/axon/prjntypes.go
+++ b/axon/prjntypes.go
@@ -4,15 +4,13 @@
package axon
-import "github.com/goki/ki/kit"
-
//gosl: start prjntypes
// PrjnTypes is an axon-specific prjn type enum,
// that encompasses all the different algorithm types supported.
// Class parameter styles automatically key off of these types.
// The first entries must be kept synchronized with the emer.PrjnType.
-type PrjnTypes int32
+type PrjnTypes int32 //enums:enum
// The projection types
const (
@@ -80,15 +78,6 @@ const (
// and subsequent outcomes, and is based biologically on synaptic tags.
// Trace is reset at time of reward based on ACh level (from CINs in biology).
MatrixPrjn
-
- PrjnTypesN
)
//gosl: end prjntypes
-
-//go:generate stringer -type=PrjnTypes
-
-var KiT_PrjnTypes = kit.Enums.AddEnum(PrjnTypesN, kit.NotBitFlag, nil)
-
-func (ev PrjnTypes) MarshalJSON() ([]byte, error) { return kit.EnumMarshalJSON(ev) }
-func (ev *PrjnTypes) UnmarshalJSON(b []byte) error { return kit.EnumUnmarshalJSON(ev, b) }
diff --git a/axon/prjntypes_string.go b/axon/prjntypes_string.go
deleted file mode 100644
index 46318703c..000000000
--- a/axon/prjntypes_string.go
+++ /dev/null
@@ -1,71 +0,0 @@
-// Code generated by "stringer -type=PrjnTypes"; DO NOT EDIT.
-
-package axon
-
-import (
- "errors"
- "strconv"
-)
-
-var _ = errors.New("dummy error")
-
-func _() {
- // An "invalid array index" compiler error signifies that the constant values have changed.
- // Re-run the stringer command to generate them again.
- var x [1]struct{}
- _ = x[ForwardPrjn-0]
- _ = x[BackPrjn-1]
- _ = x[LateralPrjn-2]
- _ = x[InhibPrjn-3]
- _ = x[CTCtxtPrjn-4]
- _ = x[RWPrjn-5]
- _ = x[TDPredPrjn-6]
- _ = x[BLAPrjn-7]
- _ = x[HipPrjn-8]
- _ = x[VSPatchPrjn-9]
- _ = x[MatrixPrjn-10]
- _ = x[PrjnTypesN-11]
-}
-
-const _PrjnTypes_name = "ForwardPrjnBackPrjnLateralPrjnInhibPrjnCTCtxtPrjnRWPrjnTDPredPrjnBLAPrjnHipPrjnVSPatchPrjnMatrixPrjnPrjnTypesN"
-
-var _PrjnTypes_index = [...]uint8{0, 11, 19, 30, 39, 49, 55, 65, 72, 79, 90, 100, 110}
-
-func (i PrjnTypes) String() string {
- if i < 0 || i >= PrjnTypes(len(_PrjnTypes_index)-1) {
- return "PrjnTypes(" + strconv.FormatInt(int64(i), 10) + ")"
- }
- return _PrjnTypes_name[_PrjnTypes_index[i]:_PrjnTypes_index[i+1]]
-}
-
-func (i *PrjnTypes) FromString(s string) error {
- for j := 0; j < len(_PrjnTypes_index)-1; j++ {
- if s == _PrjnTypes_name[_PrjnTypes_index[j]:_PrjnTypes_index[j+1]] {
- *i = PrjnTypes(j)
- return nil
- }
- }
- return errors.New("String: " + s + " is not a valid option for type: PrjnTypes")
-}
-
-var _PrjnTypes_descMap = map[PrjnTypes]string{
- 0: `Forward is a feedforward, bottom-up projection from sensory inputs to higher layers`,
- 1: `Back is a feedback, top-down projection from higher layers back to lower layers`,
- 2: `Lateral is a lateral projection within the same layer / area`,
- 3: `Inhib is an inhibitory projection that drives inhibitory synaptic conductances instead of the default excitatory ones.`,
- 4: `CTCtxt are projections from Superficial layers to CT layers that send Burst activations drive updating of CtxtGe excitatory conductance, at end of plus (51B Bursting) phase. Biologically, this projection comes from the PT layer 5IB neurons, but it is simpler to use the Super neurons directly, and PT are optional for most network types. These projections also use a special learning rule that takes into account the temporal delays in the activation states. Can also add self context from CT for deeper temporal context.`,
- 5: `RWPrjn does dopamine-modulated learning for reward prediction: Da * Send.CaSpkP (integrated current spiking activity). Uses RLPredPrjn parameters. Use in RWPredLayer typically to generate reward predictions. If the Da sign is positive, the first recv unit learns fully; for negative, second one learns fully. Lower lrate applies for opposite cases. Weights are positive-only.`,
- 6: `TDPredPrjn does dopamine-modulated learning for reward prediction: DWt = Da * Send.SpkPrv (activity on *previous* timestep) Uses RLPredPrjn parameters. Use in TDPredLayer typically to generate reward predictions. If the Da sign is positive, the first recv unit learns fully; for negative, second one learns fully. Lower lrate applies for opposite cases. Weights are positive-only.`,
- 7: `BLAPrjn implements the PVLV BLA learning rule: dW = ACh * X_t-1 * (Y_t - Y_t-1) The recv delta is across trials, where the US should activate on trial boundary, to enable sufficient time for gating through to OFC, so BLA initially learns based on US present - US absent. It can also learn based on CS onset if there is a prior CS that predicts that.`,
- 8: ``,
- 9: `VSPatchPrjn implements the VSPatch learning rule: dW = ACh * DA * X * Y where DA is D1 vs. D2 modulated DA level, X = sending activity factor, Y = receiving activity factor, and ACh provides overall modulation.`,
- 10: `MatrixPrjn supports trace-based learning, where an initial trace of synaptic co-activity is formed, and then modulated by subsequent phasic dopamine & ACh when an outcome occurs. This bridges the temporal gap between gating activity and subsequent outcomes, and is based biologically on synaptic tags. Trace is reset at time of reward based on ACh level (from CINs in biology).`,
- 11: ``,
-}
-
-func (i PrjnTypes) Desc() string {
- if str, ok := _PrjnTypes_descMap[i]; ok {
- return str
- }
- return "PrjnTypes(" + strconv.FormatInt(int64(i), 10) + ")"
-}
diff --git a/axon/pvlv.go b/axon/pvlv.go
index 9a8e6cf48..da0c9c8ad 100644
--- a/axon/pvlv.go
+++ b/axon/pvlv.go
@@ -5,9 +5,9 @@
package axon
import (
- "github.com/emer/emergent/erand"
- "github.com/goki/ki/bools"
- "github.com/goki/mat32"
+ "github.com/emer/emergent/v2/erand"
+ "goki.dev/glop/num"
+ "goki.dev/mat32/v2"
)
// DriveParams manages the drive parameters for computing and updating drive state.
@@ -17,19 +17,19 @@ import (
type DriveParams struct {
// minimum effective drive value -- this is an automatic baseline ensuring that a positive US results in at least some minimal level of reward. Unlike Base values, this is not reflected in the activity of the drive values -- applies at the time of reward calculation as a minimum baseline.
- DriveMin float32 `desc:"minimum effective drive value -- this is an automatic baseline ensuring that a positive US results in at least some minimal level of reward. Unlike Base values, this is not reflected in the activity of the drive values -- applies at the time of reward calculation as a minimum baseline."`
+ DriveMin float32
// baseline levels for each drive -- what they naturally trend toward in the absence of any input. Set inactive drives to 0 baseline, active ones typically elevated baseline (0-1 range).
- Base []float32 `desc:"baseline levels for each drive -- what they naturally trend toward in the absence of any input. Set inactive drives to 0 baseline, active ones typically elevated baseline (0-1 range)."`
+ Base []float32
// time constants in ThetaCycle (trial) units for natural update toward Base values -- 0 values means no natural update.
- Tau []float32 `desc:"time constants in ThetaCycle (trial) units for natural update toward Base values -- 0 values means no natural update."`
+ Tau []float32
// decrement in drive value when US is consumed, thus partially satisfying the drive -- positive values are subtracted from current Drive value.
- Satisfaction []float32 `desc:"decrement in drive value when US is consumed, thus partially satisfying the drive -- positive values are subtracted from current Drive value."`
+ Satisfaction []float32
- // [view: -] 1/Tau
- Dt []float32 `view:"-" desc:"1/Tau"`
+ // 1/Tau
+ Dt []float32 `view:"-"`
}
func (dp *DriveParams) Alloc(nDrives int) {
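A sketch of the baseline-tracking update described by Base, Tau, and Dt (assumed form: Dt = 1/Tau per ThetaCycle, and a zero Tau disables the update):

	// driveUpdt moves drive toward its baseline at rate dt per trial.
	func driveUpdt(drive, base, dt float32) float32 {
		return drive + dt*(base-drive) // no-op when dt == 0
	}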
@@ -151,13 +151,13 @@ func (dp *DriveParams) EffectiveDrive(ctx *Context, di uint32, i uint32) float32
type UrgencyParams struct {
// value of raw urgency where the urgency activation level is 50%
- U50 float32 `desc:"value of raw urgency where the urgency activation level is 50%"`
+ U50 float32
- // [def: 4] exponent on the urge factor -- valid numbers are 1,2,4,6
- Power int32 `def:"4" desc:"exponent on the urge factor -- valid numbers are 1,2,4,6"`
+ // exponent on the urge factor -- valid numbers are 1,2,4,6
+ Power int32 `def:"4"`
- // [def: 0.2] threshold for urge -- cuts off small baseline values
- Thr float32 `def:"0.2" desc:"threshold for urge -- cuts off small baseline values"`
+ // threshold for urge -- cuts off small baseline values
+ Thr float32 `def:"0.2"`
}
func (ur *UrgencyParams) Defaults() {
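From the U50, Power, and Thr descriptions, urgency activation is plausibly a Hill-style function of the raw urge value, cut off below Thr; this form is an assumption, not taken from the source:

	func urgeAct(raw, u50, thr float32, power int32) float32 {
		up := mat32.Pow(raw, float32(power))
		act := up / (up + mat32.Pow(u50, float32(power)))
		if act < thr {
			return 0
		}
		return act
	}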
@@ -220,26 +220,26 @@ func PVLVNormFun(raw float32) float32 {
// weighted and integrated to compute an overall PV primary value.
type USParams struct {
- // [def: 0.5] threshold for a negative US increment, _after_ multiplying by the USnegGains factor for that US (to allow for normalized input magnitudes that may translate into different magnitude of effects), to drive a phasic ACh response and associated VSMatrix gating and dopamine firing -- i.e., a full negative US outcome event (global NegUSOutcome flag is set)
- NegUSOutcomeThr float32 `def:"0.5" desc:"threshold for a negative US increment, _after_ multiplying by the USnegGains factor for that US (to allow for normalized input magnitudes that may translate into different magnitude of effects), to drive a phasic ACh response and associated VSMatrix gating and dopamine firing -- i.e., a full negative US outcome event (global NegUSOutcome flag is set)"`
+ // threshold for a negative US increment, _after_ multiplying by the USnegGains factor for that US (to allow for normalized input magnitudes that may translate into different magnitude of effects), to drive a phasic ACh response and associated VSMatrix gating and dopamine firing -- i.e., a full negative US outcome event (global NegUSOutcome flag is set)
+ NegUSOutcomeThr float32 `def:"0.5"`
- // [def: 2] gain factor applied to sum of weighted, drive-scaled positive USs to compute PVpos primary value summary -- multiplied prior to 1/(1+x) normalization. Use this to adjust the overall scaling of PVpos reward within 0-1 normalized range (see also PVnegGain). Each USpos is assumed to be in 0-1 range, default 1.
- PVposGain float32 `def:"2" desc:"gain factor applied to sum of weighted, drive-scaled positive USs to compute PVpos primary value summary -- multiplied prior to 1/(1+x) normalization. Use this to adjust the overall scaling of PVpos reward within 0-1 normalized range (see also PVnegGain). Each USpos is assumed to be in 0-1 range, default 1."`
+ // gain factor applied to sum of weighted, drive-scaled positive USs to compute PVpos primary value summary -- multiplied prior to 1/(1+x) normalization. Use this to adjust the overall scaling of PVpos reward within 0-1 normalized range (see also PVnegGain). Each USpos is assumed to be in 0-1 range, default 1.
+ PVposGain float32 `def:"2"`
- // [def: 1] gain factor applied to sum of weighted negative USs to compute PVneg primary value summary -- multiplied prior to 1/(1+x) normalization. Use this to adjust overall scaling of PVneg within 0-1 normalized range (see also PVposGain).
- PVnegGain float32 `def:"1" desc:"gain factor applied to sum of weighted negative USs to compute PVneg primary value summary -- multiplied prior to 1/(1+x) normalization. Use this to adjust overall scaling of PVneg within 0-1 normalized range (see also PVposGain)."`
+ // gain factor applied to sum of weighted negative USs to compute PVneg primary value summary -- multiplied prior to 1/(1+x) normalization. Use this to adjust overall scaling of PVneg within 0-1 normalized range (see also PVposGain).
+ PVnegGain float32 `def:"1"`
// gain factor for each individual negative US, multiplied prior to 1/(1+x) normalization of each term for activating the OFCnegUS pools. These gains are _not_ applied in computing summary PVneg value (see PVnegWts), and generally must be larger than the weights to leverage the dynamic range within each US pool.
- USnegGains []float32 `desc:"gain factor for each individual negative US, multiplied prior to 1/(1+x) normalization of each term for activating the OFCnegUS pools. These gains are _not_ applied in computing summary PVneg value (see PVnegWts), and generally must be larger than the weights to leverage the dynamic range within each US pool."`
+ USnegGains []float32
// weight factor applied to each separate positive US on the way to computing the overall PVpos summary value, to control the weighting of each US relative to the others. Each pos US is also multiplied by its dynamic Drive factor as well. Use PVposGain to control the overall scaling of the PVpos value.
- PVposWts []float32 `desc:"weight factor applied to each separate positive US on the way to computing the overall PVpos summary value, to control the weighting of each US relative to the others. Each pos US is also multiplied by its dynamic Drive factor as well. Use PVposGain to control the overall scaling of the PVpos value."`
+ PVposWts []float32
// weight factor applied to each separate negative US on the way to computing the overall PVneg summary value, to control the weighting of each US relative to the others. The first pool is Time, second is Effort, and these are typically weighted lower (.02) than salient simulation-specific USs (1).
- PVnegWts []float32 `desc:"weight factor applied to each separate negative US on the way to computing the overall PVneg summary value, to control the weighting of each US relative to the others. The first pool is Time, second is Effort, and these are typically weighted lower (.02) than salient simulation-specific USs (1)."`
+ PVnegWts []float32
// computed estimated US values, based on OFCposUSPT and VSMatrix gating, in PVposEst
- USposEst []float32 `inactive:"+" desc:"computed estimated US values, based on OFCposUSPT and VSMatrix gating, in PVposEst"`
+ USposEst []float32 `inactive:"+"`
}
func (us *USParams) Alloc(nPos, nNeg int) {
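The "1/(1+x) normalization" named in these comments squashes a gain-scaled, non-negative sum into the 0-1 range, i.e. v/(1+v), consistent with the PVLVNormFun referenced in an earlier hunk header of this file. A sketch:

	func pvNorm(gain, sum float32) float32 {
		v := gain * sum
		return v / (1 + v)
	}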
@@ -323,14 +323,14 @@ func (us *USParams) NegUSOutcome(ctx *Context, di uint32, usIdx int, mag float32
// or "relief" burst when actual neg < predicted.
type LHbParams struct {
- // [def: 1] threshold factor that multiplies integrated pvNeg value to establish a threshold for whether the integrated pvPos value is good enough to drive overall net positive reward
- NegThr float32 `def:"1" desc:"threshold factor that multiplies integrated pvNeg value to establish a threshold for whether the integrated pvPos value is good enough to drive overall net positive reward"`
+ // threshold factor that multiplies integrated pvNeg value to establish a threshold for whether the integrated pvPos value is good enough to drive overall net positive reward
+ NegThr float32 `def:"1"`
- // [def: 1] gain multiplier on PVpos for purposes of generating bursts (not for discounting negative dips) -- 4 renormalizes for typical ~.5 values (.5 * .5 = .25)
- BurstGain float32 `def:"1" desc:"gain multiplier on PVpos for purposes of generating bursts (not for discounting negative dips) -- 4 renormalizes for typical ~.5 values (.5 * .5 = .25)"`
+ // gain multiplier on PVpos for purposes of generating bursts (not for discounting negative dips) -- 4 renormalizes for typical ~.5 values (.5 * .5 = .25)
+ BurstGain float32 `def:"1"`
- // [def: 1] gain multiplier on PVneg for purposes of generating dips (not for discounting positive bursts) -- 4 renormalizes for typical ~.5 values (.5 * .5 = .25)
- DipGain float32 `def:"1" desc:"gain multiplier on PVneg for purposes of generating dips (not for discounting positive bursts) -- 4 renormalizes for typical ~.5 values (.5 * .5 = .25)"`
+ // gain multiplier on PVneg for purposes of generating dips (not for discounting positive bursts) -- 4 renormalizes for typical ~.5 values (.5 * .5 = .25)
+ DipGain float32 `def:"1"`
}
func (lh *LHbParams) Defaults() {
@@ -405,14 +405,14 @@ func (lh *LHbParams) DAforNoUS(ctx *Context, di uint32, vsPatchPos float32) floa
// GiveUpParams are parameters for computing when to give up
type GiveUpParams struct {
- // [def: 1] threshold factor that multiplies integrated pvNeg value to establish a threshold for whether the integrated pvPos value is good enough to drive overall net positive reward
- NegThr float32 `def:"1" desc:"threshold factor that multiplies integrated pvNeg value to establish a threshold for whether the integrated pvPos value is good enough to drive overall net positive reward"`
+ // threshold factor that multiplies integrated pvNeg value to establish a threshold for whether the integrated pvPos value is good enough to drive overall net positive reward
+ NegThr float32 `def:"1"`
- // [def: 10] multiplier on pos - neg for logistic probability function -- higher gain values produce more binary give up behavior and lower values produce more graded stochastic behavior around the threshold
- Gain float32 `def:"10" desc:"multiplier on pos - neg for logistic probability function -- higher gain values produce more binary give up behavior and lower values produce more graded stochastic behavior around the threshold"`
+ // multiplier on pos - neg for logistic probability function -- higher gain values produce more binary give up behavior and lower values produce more graded stochastic behavior around the threshold
+ Gain float32 `def:"10"`
// minimum estimated PVpos value -- deals with any errors in the estimation process to make sure that erroneous GiveUp doesn't happen.
- MinPVposEst float32 `desc:"minimum estimated PVpos value -- deals with any errors in the estimation process to make sure that erroneous GiveUp doesn't happen."`
+ MinPVposEst float32
}
func (gp *GiveUpParams) Defaults() {
@@ -450,25 +450,25 @@ func (gp *GiveUpParams) Prob(pvDiff float32, rnd erand.Rand) (float32, bool) {
type PVLV struct {
// number of possible positive US states and corresponding drives -- the first is always reserved for novelty / curiosity. Must be set programmatically via SetNUSs method, which allocates corresponding parameters.
- NPosUSs uint32 `inactive:"+" desc:"number of possible positive US states and corresponding drives -- the first is always reserved for novelty / curiosity. Must be set programmatically via SetNUSs method, which allocates corresponding parameters."`
+ NPosUSs uint32 `inactive:"+"`
- // number of possible negative US states -- [0] is reserved for accumulated time, [1] the accumulated effort cost. Must be set programmatically via SetNUSs method, which allocates corresponding parameters.
- NNegUSs uint32 `inactive:"+" desc:"number of possible negative US states -- [0] is reserved for accumulated time, [1] the accumulated effort cost. Must be set programmatically via SetNUSs method, which allocates corresponding parameters."`
+ // number of possible negative US states -- [0] is reserved for accumulated time, [1] the accumulated effort cost. Must be set programmatically via SetNUSs method, which allocates corresponding parameters.
+ NNegUSs uint32 `inactive:"+"`
// parameters and state for built-in drives that form the core motivations of agent, controlled by lateral hypothalamus and associated body state monitoring such as glucose levels and thirst.
- Drive DriveParams `desc:"parameters and state for built-in drives that form the core motivations of agent, controlled by lateral hypothalamus and associated body state monitoring such as glucose levels and thirst."`
+ Drive DriveParams
- // [view: inline] urgency (increasing pressure to do something) and parameters for updating it. Raw urgency is incremented by same units as effort, but is only reset with a positive US.
- Urgency UrgencyParams `view:"inline" desc:"urgency (increasing pressure to do something) and parameters for updating it. Raw urgency is incremented by same units as effort, but is only reset with a positive US."`
+ // urgency (increasing pressure to do something) and parameters for updating it. Raw urgency is incremented by same units as effort, but is only reset with a positive US.
+ Urgency UrgencyParams `view:"inline"`
// controls how positive and negative USs are weighted and integrated to compute an overall PV primary value.
- USs USParams `desc:"controls how positive and negative USs are weighted and integrated to compute an overall PV primary value."`
+ USs USParams
- // [view: inline] lateral habenula (LHb) parameters and state, which drives dipping / pausing in dopamine when the predicted positive outcome > actual, or actual negative outcome > predicted. Can also drive bursting for the converse, and via matrix phasic firing
- LHb LHbParams `view:"inline" desc:"lateral habenula (LHb) parameters and state, which drives dipping / pausing in dopamine when the predicted positive outcome > actual, or actual negative outcome > predicted. Can also drive bursting for the converse, and via matrix phasic firing"`
+ // lateral habenula (LHb) parameters and state, which drives dipping / pausing in dopamine when the predicted positive outcome > actual, or actual negative outcome > predicted. Can also drive bursting for the converse, and via matrix phasic firing
+ LHb LHbParams `view:"inline"`
// parameters for giving up based on PV pos - neg difference
- GiveUp GiveUpParams `desc:"parameters for giving up based on PV pos - neg difference"`
+ GiveUp GiveUpParams
}
func (pp *PVLV) Defaults() {
@@ -682,7 +682,7 @@ func (pp *PVLV) ResetGiveUp(ctx *Context, di uint32) {
// after reward.
func (pp *PVLV) NewState(ctx *Context, di uint32, rnd erand.Rand) {
hadRewF := GlbV(ctx, di, GvHasRew)
- hadRew := bools.FromFloat32(hadRewF)
+ hadRew := num.ToBool(hadRewF)
SetGlbV(ctx, di, GvHadRew, hadRewF)
SetGlbV(ctx, di, GvHadPosUS, GlbV(ctx, di, GvHasPosUS))
SetGlbV(ctx, di, GvHadNegUSOutcome, GlbV(ctx, di, GvNegUSOutcome))
@@ -760,7 +760,7 @@ func (pp *PVLV) PVsFmUSs(ctx *Context, di uint32) {
pvPosSum, pvPos := pp.PVpos(ctx, di)
SetGlbV(ctx, di, GvPVposSum, pvPosSum)
SetGlbV(ctx, di, GvPVpos, pvPos)
- SetGlbV(ctx, di, GvHasPosUS, bools.ToFloat32(pp.HasPosUS(ctx, di)))
+ SetGlbV(ctx, di, GvHasPosUS, num.FromBool[float32](pp.HasPosUS(ctx, di)))
pvNegSum, pvNeg := pp.PVneg(ctx, di)
SetGlbV(ctx, di, GvPVnegSum, pvNegSum)
@@ -876,7 +876,7 @@ func (pp *PVLV) GiveUpFmPV(ctx *Context, di uint32, pvNeg float32, rnd erand.Ran
SetGlbV(ctx, di, GvPVposEstDisc, posDisc)
SetGlbV(ctx, di, GvGiveUpDiff, diff)
SetGlbV(ctx, di, GvGiveUpProb, prob)
- SetGlbV(ctx, di, GvGiveUp, bools.ToFloat32(giveUp))
+ SetGlbV(ctx, di, GvGiveUp, num.FromBool[float32](giveUp))
return giveUp
}
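
For reference, a minimal sketch of the logistic give-up computation implied by GiveUpParams.Gain above, together with the goki.dev/glop/num bool helpers this diff adopts. The giveUpProb form and sign convention are assumptions for illustration, not the actual axon implementation:

    package main

    import (
        "fmt"

        "goki.dev/glop/num"
        "goki.dev/mat32/v2"
    )

    // giveUpProb maps the PVpos - PVneg difference through a logistic with the
    // given gain: higher gain makes giving up more binary around the threshold.
    func giveUpProb(gain, pvDiff float32) float32 {
        return 1 / (1 + mat32.FastExp(gain*pvDiff))
    }

    func main() {
        p := giveUpProb(10, 0.1) // pos slightly above neg -> below-chance probability
        giveUp := p > 0.5
        fmt.Println(p, num.FromBool[float32](giveUp)) // bool -> float32, as in the globals above
    }
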
diff --git a/axon/pvlv_layers.go b/axon/pvlv_layers.go
index 09b32df27..5220d14eb 100644
--- a/axon/pvlv_layers.go
+++ b/axon/pvlv_layers.go
@@ -8,8 +8,8 @@ import (
"log"
"strings"
- "github.com/goki/gosl/slbool"
- "github.com/goki/mat32"
+ "goki.dev/gosl/v2/slbool"
+ "goki.dev/mat32/v2"
)
//gosl: start pvlv_layers
@@ -18,29 +18,29 @@ import (
// as a function of the MAX activation of its inputs.
type LDTParams struct {
- // [def: 0.05] threshold per input source, on absolute value (magnitude), to count as a significant reward event, which then drives maximal ACh -- set to 0 to disable this nonlinear behavior
- SrcThr float32 `def:"0.05" desc:"threshold per input source, on absolute value (magnitude), to count as a significant reward event, which then drives maximal ACh -- set to 0 to disable this nonlinear behavior"`
+ // threshold per input source, on absolute value (magnitude), to count as a significant reward event, which then drives maximal ACh -- set to 0 to disable this nonlinear behavior
+ SrcThr float32 `def:"0.05"`
- // [def: true] use the global Context.NeuroMod.HasRew flag -- if there is some kind of external reward being given, then ACh goes to 1, else 0 for this component
- Rew slbool.Bool `def:"true" desc:"use the global Context.NeuroMod.HasRew flag -- if there is some kind of external reward being given, then ACh goes to 1, else 0 for this component"`
+ // use the global Context.NeuroMod.HasRew flag -- if there is some kind of external reward being given, then ACh goes to 1, else 0 for this component
+ Rew slbool.Bool `def:"true"`
- // [def: 2] extent to which active maintenance (via Context.NeuroMod.NotMaint PTNotMaintLayer activity) inhibits ACh signals -- when goal engaged, distractability is lower.
- MaintInhib float32 `def:"2" desc:"extent to which active maintenance (via Context.NeuroMod.NotMaint PTNotMaintLayer activity) inhibits ACh signals -- when goal engaged, distractability is lower."`
+ // extent to which active maintenance (via Context.NeuroMod.NotMaint PTNotMaintLayer activity) inhibits ACh signals -- when goal engaged, distractibility is lower.
+ MaintInhib float32 `def:"2"`
- // [def: 0.4] maximum NeuroMod.NotMaint activity for computing Maint as 1-NotMaint -- when NotMaint is >= NotMaintMax, then Maint = 0.
- NotMaintMax float32 `def:"0.4" desc:"maximum NeuroMod.NotMaint activity for computing Maint as 1-NotMaint -- when NotMaint is >= NotMaintMax, then Maint = 0."`
+ // maximum NeuroMod.NotMaint activity for computing Maint as 1-NotMaint -- when NotMaint is >= NotMaintMax, then Maint = 0.
+ NotMaintMax float32 `def:"0.4"`
// idx of Layer to get max activity from -- set during Build from BuildConfig SrcLay1Name if present -- -1 if not used
- SrcLay1Idx int32 `inactive:"+" desc:"idx of Layer to get max activity from -- set during Build from BuildConfig SrcLay1Name if present -- -1 if not used"`
+ SrcLay1Idx int32 `inactive:"+"`
// idx of Layer to get max activity from -- set during Build from BuildConfig SrcLay2Name if present -- -1 if not used
- SrcLay2Idx int32 `inactive:"+" desc:"idx of Layer to get max activity from -- set during Build from BuildConfig SrcLay2Name if present -- -1 if not used"`
+ SrcLay2Idx int32 `inactive:"+"`
// idx of Layer to get max activity from -- set during Build from BuildConfig SrcLay3Name if present -- -1 if not used
- SrcLay3Idx int32 `inactive:"+" desc:"idx of Layer to get max activity from -- set during Build from BuildConfig SrcLay3Name if present -- -1 if not used"`
+ SrcLay3Idx int32 `inactive:"+"`
// idx of Layer to get max activity from -- set during Build from BuildConfig SrcLay4Name if present -- -1 if not used
- SrcLay4Idx int32 `inactive:"+" desc:"idx of Layer to get max activity from -- set during Build from BuildConfig SrcLay4Name if present -- -1 if not used"`
+ SrcLay4Idx int32 `inactive:"+"`
}
func (lp *LDTParams) Defaults() {
@@ -109,17 +109,17 @@ func (lp *LDTParams) ACh(ctx *Context, di uint32, srcLay1Act, srcLay2Act, srcLay
// VSPatchParams parameters for VSPatch learning
type VSPatchParams struct {
- // [def: 3] multiplier applied after Thr threshold
- Gain float32 `def:"3" desc:"multiplier applied after Thr threshold"`
+ // multiplier applied after Thr threshold
+ Gain float32 `def:"3"`
- // [def: 0.15] initial value for overall threshold, which adapts over time -- stored in LayerVals.ActAvgVals.AdaptThr
- ThrInit float32 `def:"0.15" desc:"initial value for overall threshold, which adapts over time -- stored in LayerVals.ActAvgVals.AdaptThr"`
+ // initial value for overall threshold, which adapts over time -- stored in LayerVals.ActAvgVals.AdaptThr
+ ThrInit float32 `def:"0.15"`
- // [def: 0,0.002] learning rate for the threshold -- moves in proportion to same predictive error signal that drives synaptic learning
- ThrLRate float32 `def:"0,0.002" desc:"learning rate for the threshold -- moves in proportion to same predictive error signal that drives synaptic learning"`
+ // learning rate for the threshold -- moves in proportion to same predictive error signal that drives synaptic learning
+ ThrLRate float32 `def:"0,0.002"`
- // [def: 10] extra gain factor for non-reward trials, which is the most critical
- ThrNonRew float32 `def:"10" desc:"extra gain factor for non-reward trials, which is the most critical"`
+ // extra gain factor for non-reward trials, which are the most critical
+ ThrNonRew float32 `def:"10"`
}
func (vp *VSPatchParams) Defaults() {
@@ -149,11 +149,11 @@ func (vp *VSPatchParams) ThrVal(act, thr float32) float32 {
// every cycle.
type VTAParams struct {
- // [def: 0.75] gain on CeM activity difference (CeMPos - CeMNeg) for generating LV CS-driven dopamine values
- CeMGain float32 `def:"0.75" desc:"gain on CeM activity difference (CeMPos - CeMNeg) for generating LV CS-driven dopamine values"`
+ // gain on CeM activity difference (CeMPos - CeMNeg) for generating LV CS-driven dopamine values
+ CeMGain float32 `def:"0.75"`
- // [def: 1.25] gain on computed LHb DA (Burst - Dip) -- for controlling DA levels
- LHbGain float32 `def:"1.25" desc:"gain on computed LHb DA (Burst - Dip) -- for controlling DA levels"`
+ // gain on computed LHb DA (Burst - Dip) -- for controlling DA levels
+ LHbGain float32 `def:"1.25"`
pad, pad1 float32
}
@@ -261,14 +261,14 @@ func (ly *Layer) BLADefaults() {
func (ly *Layer) PVLVPostBuild() {
dm, err := ly.BuildConfigByName("DAMod")
if err == nil {
- err = ly.Params.Learn.NeuroMod.DAMod.FromString(dm)
+ err = ly.Params.Learn.NeuroMod.DAMod.SetString(dm)
if err != nil {
log.Println(err)
}
}
vl, err := ly.BuildConfigByName("Valence")
if err == nil {
- err = ly.Params.Learn.NeuroMod.Valence.FromString(vl)
+ err = ly.Params.Learn.NeuroMod.Valence.SetString(vl)
if err != nil {
log.Println(err)
}
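
A hedged sketch of the per-source nonlinearity that the LDTParams.SrcThr comment above describes; the function name and placement are assumptions (mat32 as imported elsewhere in this diff):

    // ldtSrcVal applies the SrcThr nonlinearity: any source whose absolute
    // activity crosses the threshold counts as a significant reward event and
    // drives maximal ACh; SrcThr = 0 disables this and passes the magnitude through.
    func ldtSrcVal(act, srcThr float32) float32 {
        a := mat32.Abs(act)
        if srcThr > 0 && a >= srcThr {
            return 1
        }
        return a
    }
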
diff --git a/axon/pvlv_net.go b/axon/pvlv_net.go
index 9dc8bbf5e..a58395dda 100644
--- a/axon/pvlv_net.go
+++ b/axon/pvlv_net.go
@@ -5,9 +5,9 @@
package axon
import (
- "github.com/emer/emergent/params"
- "github.com/emer/emergent/prjn"
- "github.com/emer/emergent/relpos"
+ "github.com/emer/emergent/v2/params"
+ "github.com/emer/emergent/v2/prjn"
+ "github.com/emer/emergent/v2/relpos"
)
// AddLDTLayer adds a LDTLayer
diff --git a/axon/pvlv_prjns.go b/axon/pvlv_prjns.go
index af9d6cc0a..e001c7bbd 100644
--- a/axon/pvlv_prjns.go
+++ b/axon/pvlv_prjns.go
@@ -15,14 +15,14 @@ package axon
// conditioning -- default of 1 means none, but can be increased as needed.
type BLAPrjnParams struct {
- // [def: 0.01,1] use 0.01 for acquisition (don't unlearn) and 1 for extinction -- negative delta learning rate multiplier
- NegDeltaLRate float32 `def:"0.01,1" desc:"use 0.01 for acquisition (don't unlearn) and 1 for extinction -- negative delta learning rate multiplier"`
+ // use 0.01 for acquisition (don't unlearn) and 1 for extinction -- negative delta learning rate multiplier
+ NegDeltaLRate float32 `def:"0.01,1"`
- // [def: 0.1] threshold on this layer's ACh level for trace learning updates
- AChThr float32 `def:"0.1" desc:"threshold on this layer's ACh level for trace learning updates"`
+ // threshold on this layer's ACh level for trace learning updates
+ AChThr float32 `def:"0.1"`
- // [def: 0,0.5] proportion of US time stimulus activity to use for the trace component of
- USTrace float32 `def:"0,0.5" desc:"proportion of US time stimulus activity to use for the trace component of "`
+ // proportion of US time stimulus activity to use for the trace component of
+ USTrace float32 `def:"0,0.5"`
pad float32
}
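
The NegDeltaLRate comment above compresses the acquisition vs. extinction logic; a small illustrative sketch (the function name is hypothetical):

    // applyNegDelta scales only negative weight changes by NegDeltaLRate:
    // 0.01 preserves acquired BLA associations (little unlearning), while
    // 1 permits full extinction, per the BLAPrjnParams comment above.
    func applyNegDelta(dwt, negDeltaLRate float32) float32 {
        if dwt < 0 {
            return dwt * negDeltaLRate
        }
        return dwt
    }
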
diff --git a/axon/rand.go b/axon/rand.go
index 21821ed7c..937b1c5b5 100644
--- a/axon/rand.go
+++ b/axon/rand.go
@@ -1,7 +1,7 @@
package axon
import (
- "github.com/goki/gosl/slrand"
+ "goki.dev/gosl/v2/slrand"
)
//gosl: hlsl axonrand
diff --git a/axon/rl_layers.go b/axon/rl_layers.go
index da5a61d96..8fe1076db 100644
--- a/axon/rl_layers.go
+++ b/axon/rl_layers.go
@@ -5,7 +5,7 @@
package axon
import (
- "github.com/emer/etable/minmax"
+ "goki.dev/etable/v2/minmax"
)
//gosl: start rl_layers
@@ -15,7 +15,7 @@ import (
type RWPredParams struct {
// default 0.1..0.99 range of predictions that can be represented -- having a truncated range preserves some sensitivity in dopamine at the extremes of good or poor performance
- PredRange minmax.F32 `desc:"default 0.1..0.99 range of predictions that can be represented -- having a truncated range preserves some sensitivity in dopamine at the extremes of good or poor performance"`
+ PredRange minmax.F32
}
func (rp *RWPredParams) Defaults() {
@@ -30,10 +30,10 @@ func (rp *RWPredParams) Update() {
type RWDaParams struct {
// tonic baseline Ge level for DA = 0 -- +/- are between 0 and 2*TonicGe -- just for spiking display of computed DA value
- TonicGe float32 `desc:"tonic baseline Ge level for DA = 0 -- +/- are between 0 and 2*TonicGe -- just for spiking display of computed DA value"`
+ TonicGe float32
// idx of RWPredLayer to get reward prediction from -- set during Build from BuildConfig RWPredLayName
- RWPredLayIdx int32 `inactive:"+" desc:"idx of RWPredLayer to get reward prediction from -- set during Build from BuildConfig RWPredLayName"`
+ RWPredLayIdx int32 `inactive:"+"`
pad, pad1 uint32
}
@@ -58,13 +58,13 @@ func (rp *RWDaParams) GeFmDA(da float32) float32 {
type TDIntegParams struct {
// discount factor -- how much to discount the future prediction from TDPred
- Discount float32 `desc:"discount factor -- how much to discount the future prediction from TDPred"`
+ Discount float32
// gain factor on TD rew pred activations
- PredGain float32 `desc:"gain factor on TD rew pred activations"`
+ PredGain float32
// idx of TDPredLayer to get reward prediction from -- set during Build from BuildConfig TDPredLayName
- TDPredLayIdx int32 `inactive:"+" desc:"idx of TDPredLayer to get reward prediction from -- set during Build from BuildConfig TDPredLayName"`
+ TDPredLayIdx int32 `inactive:"+"`
pad uint32
}
@@ -82,10 +82,10 @@ func (tp *TDIntegParams) Update() {
type TDDaParams struct {
// tonic baseline Ge level for DA = 0 -- +/- are between 0 and 2*TonicGe -- just for spiking display of computed DA value
- TonicGe float32 `desc:"tonic baseline Ge level for DA = 0 -- +/- are between 0 and 2*TonicGe -- just for spiking display of computed DA value"`
+ TonicGe float32
// idx of TDIntegLayer to get reward prediction from -- set during Build from BuildConfig TDIntegLayName
- TDIntegLayIdx int32 `inactive:"+" desc:"idx of TDIntegLayer to get reward prediction from -- set during Build from BuildConfig TDIntegLayName"`
+ TDIntegLayIdx int32 `inactive:"+"`
pad, pad1 uint32
}
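
The TDIntegParams fields map onto the textbook TD(0) error; a generic sketch under that assumption (the actual layer computes this across minus/plus phases rather than in one call):

    // tdDelta is the standard TD(0) error with the Discount and PredGain
    // factors from TDIntegParams applied to the reward predictions.
    func tdDelta(rew, predNow, predPrev, discount, predGain float32) float32 {
        return rew + discount*predGain*predNow - predGain*predPrev
    }
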
diff --git a/axon/rl_net.go b/axon/rl_net.go
index a5c5c2377..d5e1591bf 100644
--- a/axon/rl_net.go
+++ b/axon/rl_net.go
@@ -5,8 +5,8 @@
package axon
import (
- "github.com/emer/emergent/prjn"
- "github.com/emer/emergent/relpos"
+ "github.com/emer/emergent/v2/prjn"
+ "github.com/emer/emergent/v2/relpos"
)
// AddRewLayer adds a RewLayer of given name
diff --git a/axon/rl_prjns.go b/axon/rl_prjns.go
index bb3bfda5f..7145efa48 100644
--- a/axon/rl_prjns.go
+++ b/axon/rl_prjns.go
@@ -13,10 +13,10 @@ package axon
type RLPredPrjnParams struct {
// how much to learn on opposite DA sign coding neuron (0..1)
- OppSignLRate float32 `desc:"how much to learn on opposite DA sign coding neuron (0..1)"`
+ OppSignLRate float32
// tolerance on DA -- if below this abs value, then DA goes to zero and there is no learning -- prevents prediction from exactly learning to cancel out reward value, retaining a residual valence of signal
- DaTol float32 `desc:"tolerance on DA -- if below this abs value, then DA goes to zero and there is no learning -- prevents prediction from exactly learning to cancel out reward value, retaining a residual valence of signal"`
+ DaTol float32
pad, pad1 float32
}
diff --git a/axon/shaders/Makefile b/axon/shaders/Makefile
index 7fb7ba39d..408124029 100644
--- a/axon/shaders/Makefile
+++ b/axon/shaders/Makefile
@@ -2,7 +2,7 @@
# The go generate command does this automatically.
all:
- cd ../; gosl -exclude=Update,UpdateParams,Defaults,AllParams github.com/goki/mat32/fastexp.go github.com/emer/etable/minmax ../chans/chans.go ../chans ../kinase ../fsfffb/inhib.go ../fsfffb github.com/emer/emergent/etime github.com/emer/emergent/ringidx rand.go avgmax.go neuromod.go globals.go context.go neuron.go synapse.go pool.go layervals.go act.go act_prjn.go inhib.go learn.go layertypes.go layerparams.go deep_layers.go rl_layers.go pvlv_layers.go pcore_layers.go prjntypes.go prjnparams.go deep_prjns.go rl_prjns.go pvlv_prjns.go pcore_prjns.go hip_prjns.go gpu_hlsl
+ cd ../; gosl -exclude=Update,UpdateParams,Defaults,AllParams goki.dev/mat32/v2/fastexp.go goki.dev/etable/v2/minmax ../chans/chans.go ../chans ../kinase ../fsfffb/inhib.go ../fsfffb github.com/emer/emergent/v2/etime github.com/emer/emergent/v2/ringidx rand.go avgmax.go neuromod.go globals.go context.go neuron.go synapse.go pool.go layervals.go act.go act_prjn.go inhib.go learn.go layertypes.go layerparams.go deep_layers.go rl_layers.go pvlv_layers.go pcore_layers.go prjntypes.go prjnparams.go deep_prjns.go rl_prjns.go pvlv_prjns.go pcore_prjns.go hip_prjns.go gpu_hlsl
# note: gosl automatically compiles the hlsl files using this command:
%.spv : %.hlsl
diff --git a/axon/shaders/gpu_applyext.spv b/axon/shaders/gpu_applyext.spv
index dcc7577b2..4e90ee168 100644
Binary files a/axon/shaders/gpu_applyext.spv and b/axon/shaders/gpu_applyext.spv differ
diff --git a/axon/shaders/gpu_cycle.spv b/axon/shaders/gpu_cycle.spv
index 59d50de1a..cac0d7b14 100644
Binary files a/axon/shaders/gpu_cycle.spv and b/axon/shaders/gpu_cycle.spv differ
diff --git a/axon/shaders/gpu_cycleinc.spv b/axon/shaders/gpu_cycleinc.spv
index 366b28aa7..5390ebf8a 100644
Binary files a/axon/shaders/gpu_cycleinc.spv and b/axon/shaders/gpu_cycleinc.spv differ
diff --git a/axon/shaders/gpu_cyclepost.spv b/axon/shaders/gpu_cyclepost.spv
index 19ca7871a..e41dff144 100644
Binary files a/axon/shaders/gpu_cyclepost.spv and b/axon/shaders/gpu_cyclepost.spv differ
diff --git a/axon/shaders/gpu_dwt.spv b/axon/shaders/gpu_dwt.spv
index 37a2d866a..5f8acb5e8 100644
Binary files a/axon/shaders/gpu_dwt.spv and b/axon/shaders/gpu_dwt.spv differ
diff --git a/axon/shaders/gpu_laygi.spv b/axon/shaders/gpu_laygi.spv
index 94c1655df..4df40e81d 100644
Binary files a/axon/shaders/gpu_laygi.spv and b/axon/shaders/gpu_laygi.spv differ
diff --git a/axon/shaders/gpu_minuspool.spv b/axon/shaders/gpu_minuspool.spv
index 679fa62b3..3376fb0db 100644
Binary files a/axon/shaders/gpu_minuspool.spv and b/axon/shaders/gpu_minuspool.spv differ
diff --git a/axon/shaders/gpu_newstate_neuron.spv b/axon/shaders/gpu_newstate_neuron.spv
index d62cbfff4..8d46bd35d 100644
Binary files a/axon/shaders/gpu_newstate_neuron.spv and b/axon/shaders/gpu_newstate_neuron.spv differ
diff --git a/axon/shaders/gpu_newstate_pool.spv b/axon/shaders/gpu_newstate_pool.spv
index 4ca101ef8..c295611da 100644
Binary files a/axon/shaders/gpu_newstate_pool.spv and b/axon/shaders/gpu_newstate_pool.spv differ
diff --git a/axon/shaders/gpu_plusneuron.spv b/axon/shaders/gpu_plusneuron.spv
index 8d59a1c79..162676f38 100644
Binary files a/axon/shaders/gpu_plusneuron.spv and b/axon/shaders/gpu_plusneuron.spv differ
diff --git a/axon/shaders/gpu_poolgi.spv b/axon/shaders/gpu_poolgi.spv
index 1ce9ba962..65d3c779c 100644
Binary files a/axon/shaders/gpu_poolgi.spv and b/axon/shaders/gpu_poolgi.spv differ
diff --git a/axon/shaders/gpu_postspike.spv b/axon/shaders/gpu_postspike.spv
index c8e6edd62..f8ab888e7 100644
Binary files a/axon/shaders/gpu_postspike.spv and b/axon/shaders/gpu_postspike.spv differ
diff --git a/axon/shaders/gpu_sendspike.spv b/axon/shaders/gpu_sendspike.spv
index d7c30ee75..a3195cb0f 100644
Binary files a/axon/shaders/gpu_sendspike.spv and b/axon/shaders/gpu_sendspike.spv differ
diff --git a/axon/shaders/gpu_synca.spv b/axon/shaders/gpu_synca.spv
index 7e359038d..20e2ce6fb 100644
Binary files a/axon/shaders/gpu_synca.spv and b/axon/shaders/gpu_synca.spv differ
diff --git a/axon/synapse.go b/axon/synapse.go
index 31b96e185..c9e5b8998 100644
--- a/axon/synapse.go
+++ b/axon/synapse.go
@@ -6,34 +6,13 @@ package axon
import (
"fmt"
-
- "github.com/goki/ki/kit"
)
-//go:generate stringer -type=SynapseVars
-//go:generate stringer -type=SynapseCaVars
-//go:generate stringer -type=SynapseIdxs
-
-var KiT_SynapseVars = kit.Enums.AddEnum(SynapseVarsN, kit.NotBitFlag, nil)
-
-func (ev SynapseVars) MarshalJSON() ([]byte, error) { return kit.EnumMarshalJSON(ev) }
-func (ev *SynapseVars) UnmarshalJSON(b []byte) error { return kit.EnumUnmarshalJSON(ev, b) }
-
-var KiT_SynapseCaVars = kit.Enums.AddEnum(SynapseCaVarsN, kit.NotBitFlag, nil)
-
-func (ev SynapseCaVars) MarshalJSON() ([]byte, error) { return kit.EnumMarshalJSON(ev) }
-func (ev *SynapseCaVars) UnmarshalJSON(b []byte) error { return kit.EnumUnmarshalJSON(ev, b) }
-
-var KiT_SynapseIdxs = kit.Enums.AddEnum(SynapseIdxsN, kit.NotBitFlag, nil)
-
-func (ev SynapseIdxs) MarshalJSON() ([]byte, error) { return kit.EnumMarshalJSON(ev) }
-func (ev *SynapseIdxs) UnmarshalJSON(b []byte) error { return kit.EnumUnmarshalJSON(ev, b) }
-
//gosl: start synapse
// SynapseVars are the neuron variables representing current synaptic state,
// specifically weights.
-type SynapseVars int32
+type SynapseVars int32 //enums:enum
const (
// Wt is effective synaptic weight value, determining how much conductance one spike drives on the receiving neuron, representing the actual number of effective AMPA receptors in the synapse. Wt = SWt * WtSig(LWt), where WtSig produces values between 0-2 based on LWt, centered on 1.
@@ -51,18 +30,79 @@ const (
// DSWt is change in SWt slow synaptic weight -- accumulates DWt
DSWt
- SynapseVarsN
+ // IMPORTANT: if DSWt is not the last, need to update gosl defn below
+)
+
+// SynapseCaVars are synapse variables for calcium involved in learning,
+// which are data parallel input specific.
+type SynapseCaVars int32 //enums:enum
+
+const (
+ // CaM is first stage running average (mean) Ca calcium level (like CaM = calmodulin), feeds into CaP
+ CaM SynapseCaVars = iota
+
+ // CaP is shorter timescale integrated CaM value, representing the plus, LTP direction of weight change and capturing the function of CaMKII in the Kinase learning rule
+ CaP
+
+ // CaD is longer timescale integrated CaP value, representing the minus, LTD direction of weight change and capturing the function of DAPK1 in the Kinase learning rule
+ CaD
+
+ // CaUpT is time in CyclesTotal of last updating of Ca values at the synapse level, for optimized synaptic-level Ca integration -- converted to / from uint32
+ CaUpT
+
+ // Tr is trace of synaptic activity over time -- used for credit assignment in learning. In MatrixPrjn this is a tag that is then updated later when US occurs.
+ Tr
+
+ // DTr is delta (change in) Tr trace of synaptic activity over time
+ DTr
+
+ // DiDWt is delta weight for each data parallel index (Di) -- this is directly computed from the Ca values (in cortical version) and then aggregated into the overall DWt (which may be further integrated across MPI nodes), which then drives changes in Wt values
+ DiDWt
+
+ // IMPORTANT: if DiDWt is not the last, need to update gosl defn below
+)
+
+// SynapseIdxs are the neuron indexes and other uint32 values (flags, etc).
+// There is only one of these per neuron -- not data parallel.
+type SynapseIdxs int32 //enums:enum
+
+const (
+ // SynRecvIdx is receiving neuron index in network's global list of neurons
+ SynRecvIdx SynapseIdxs = iota
+
+ // SynSendIdx is sending neuron index in network's global list of neurons
+ SynSendIdx
+
+ // SynPrjnIdx is projection index in global list of projections organized as [Layers][RecvPrjns]
+ SynPrjnIdx
+
+ // IMPORTANT: if SynPrjnIdx is not the last, need to update gosl defn below
)
+//gosl: end synapse
+
+//gosl: hlsl synapse
+/*
+static const SynapseVars SynapseVarsN = DSWt + 1;
+static const SynapseCaVars SynapseCaVarsN = DiDWt + 1;
+static const SynapseIdxs SynapseIdxsN = SynPrjnIdx + 1;
+*/
+//gosl: end synapse
+
+//gosl: start synapse
+
+////////////////////////////////////////////////
+// Strides
+
// SynapseVarStrides encodes the stride offsets for synapse variable access
// into network float32 array.
type SynapseVarStrides struct {
// synapse level
- Synapse uint32 `desc:"synapse level"`
+ Synapse uint32
// variable level
- Var uint32 `desc:"variable level"`
+ Var uint32
pad, pad1 uint32
}
@@ -92,44 +132,15 @@ func (ns *SynapseVarStrides) SetVarOuter(nsyn int) {
////////////////////////////////////////////////
// SynapseCaVars
-// SynapseCaVars are synapse variables for calcium involved in learning,
-// which are data parallel input specific.
-type SynapseCaVars int32
-
-const (
- // CaM is first stage running average (mean) Ca calcium level (like CaM = calmodulin), feeds into CaP
- CaM SynapseCaVars = iota
-
- // CaP is shorter timescale integrated CaM value, representing the plus, LTP direction of weight change and capturing the function of CaMKII in the Kinase learning rule
- CaP
-
- // CaD is longer timescale integrated CaP value, representing the minus, LTD direction of weight change and capturing the function of DAPK1 in the Kinase learning rule
- CaD
-
- // CaUpT is time in CyclesTotal of last updating of Ca values at the synapse level, for optimized synaptic-level Ca integration -- converted to / from uint32
- CaUpT
-
- // Tr is trace of synaptic activity over time -- used for credit assignment in learning. In MatrixPrjn this is a tag that is then updated later when US occurs.
- Tr
-
- // DTr is delta (change in) Tr trace of synaptic activity over time
- DTr
-
- // DiDWt is delta weight for each data parallel index (Di) -- this is directly computed from the Ca values (in cortical version) and then aggregated into the overall DWt (which may be further integrated across MPI nodes), which then drives changes in Wt values
- DiDWt
-
- SynapseCaVarsN
-)
-
// SynapseCaStrides encodes the stride offsets for synapse variable access
// into network float32 array. Data is always the inner-most variable.
type SynapseCaStrides struct {
// synapse level
- Synapse uint64 `desc:"synapse level"`
+ Synapse uint64
// variable level
- Var uint64 `desc:"variable level"`
+ Var uint64
}
// Idx returns the index into network float32 array for given synapse, data, and variable
@@ -154,32 +165,15 @@ func (ns *SynapseCaStrides) SetVarOuter(nsyn, ndata int) {
////////////////////////////////////////////////
// Idxs
-// SynapseIdxs are the neuron indexes and other uint32 values (flags, etc).
-// There is only one of these per neuron -- not data parallel.
-type SynapseIdxs int32
-
-const (
- // SynRecvIdx is receiving neuron index in network's global list of neurons
- SynRecvIdx SynapseIdxs = iota
-
- // SynSendIdx is sending neuron index in network's global list of neurons
- SynSendIdx
-
- // SynPrjnIdx is projection index in global list of projections organized as [Layers][RecvPrjns]
- SynPrjnIdx
-
- SynapseIdxsN
-)
-
// SynapseIdxStrides encodes the stride offsets for synapse index access
// into network uint32 array.
type SynapseIdxStrides struct {
// synapse level
- Synapse uint32 `desc:"synapse level"`
+ Synapse uint32
// index value level
- Index uint32 `desc:"index value level"`
+ Index uint32
pad, pad1 uint32
}
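
The //enums:enum directives above replace the hand-run stringer files deleted below: goki generate emits String, SetString, and a <Type>N count constant (which is why the hlsl block can define SynapseVarsN as DSWt + 1). A usage sketch, assuming the generated methods:

    var v SynapseVars
    if err := v.SetString("Wt"); err != nil { // SetString replaces the old FromString
        log.Println(err)
    }
    fmt.Println(v.String(), SynapseVarsN) // prints: Wt 5
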
diff --git a/axon/synapsecavars_string.go b/axon/synapsecavars_string.go
deleted file mode 100644
index 3b7f03289..000000000
--- a/axon/synapsecavars_string.go
+++ /dev/null
@@ -1,63 +0,0 @@
-// Code generated by "stringer -type=SynapseCaVars"; DO NOT EDIT.
-
-package axon
-
-import (
- "errors"
- "strconv"
-)
-
-var _ = errors.New("dummy error")
-
-func _() {
- // An "invalid array index" compiler error signifies that the constant values have changed.
- // Re-run the stringer command to generate them again.
- var x [1]struct{}
- _ = x[CaM-0]
- _ = x[CaP-1]
- _ = x[CaD-2]
- _ = x[CaUpT-3]
- _ = x[Tr-4]
- _ = x[DTr-5]
- _ = x[DiDWt-6]
- _ = x[SynapseCaVarsN-7]
-}
-
-const _SynapseCaVars_name = "CaMCaPCaDCaUpTTrDTrDiDWtSynapseCaVarsN"
-
-var _SynapseCaVars_index = [...]uint8{0, 3, 6, 9, 14, 16, 19, 24, 38}
-
-func (i SynapseCaVars) String() string {
- if i < 0 || i >= SynapseCaVars(len(_SynapseCaVars_index)-1) {
- return "SynapseCaVars(" + strconv.FormatInt(int64(i), 10) + ")"
- }
- return _SynapseCaVars_name[_SynapseCaVars_index[i]:_SynapseCaVars_index[i+1]]
-}
-
-func (i *SynapseCaVars) FromString(s string) error {
- for j := 0; j < len(_SynapseCaVars_index)-1; j++ {
- if s == _SynapseCaVars_name[_SynapseCaVars_index[j]:_SynapseCaVars_index[j+1]] {
- *i = SynapseCaVars(j)
- return nil
- }
- }
- return errors.New("String: " + s + " is not a valid option for type: SynapseCaVars")
-}
-
-var _SynapseCaVars_descMap = map[SynapseCaVars]string{
- 0: `CaM is first stage running average (mean) Ca calcium level (like CaM = calmodulin), feeds into CaP`,
- 1: `CaP is shorter timescale integrated CaM value, representing the plus, LTP direction of weight change and capturing the function of CaMKII in the Kinase learning rule`,
- 2: `CaD is longer timescale integrated CaP value, representing the minus, LTD direction of weight change and capturing the function of DAPK1 in the Kinase learning rule`,
- 3: `CaUpT is time in CyclesTotal of last updating of Ca values at the synapse level, for optimized synaptic-level Ca integration -- converted to / from uint32`,
- 4: `Tr is trace of synaptic activity over time -- used for credit assignment in learning. In MatrixPrjn this is a tag that is then updated later when US occurs.`,
- 5: `DTr is delta (change in) Tr trace of synaptic activity over time`,
- 6: `DiDWt is delta weight for each data parallel index (Di) -- this is directly computed from the Ca values (in cortical version) and then aggregated into the overall DWt (which may be further integrated across MPI nodes), which then drives changes in Wt values`,
- 7: ``,
-}
-
-func (i SynapseCaVars) Desc() string {
- if str, ok := _SynapseCaVars_descMap[i]; ok {
- return str
- }
- return "SynapseCaVars(" + strconv.FormatInt(int64(i), 10) + ")"
-}
diff --git a/axon/synapseidxs_string.go b/axon/synapseidxs_string.go
deleted file mode 100644
index 06cb5c2ca..000000000
--- a/axon/synapseidxs_string.go
+++ /dev/null
@@ -1,55 +0,0 @@
-// Code generated by "stringer -type=SynapseIdxs"; DO NOT EDIT.
-
-package axon
-
-import (
- "errors"
- "strconv"
-)
-
-var _ = errors.New("dummy error")
-
-func _() {
- // An "invalid array index" compiler error signifies that the constant values have changed.
- // Re-run the stringer command to generate them again.
- var x [1]struct{}
- _ = x[SynRecvIdx-0]
- _ = x[SynSendIdx-1]
- _ = x[SynPrjnIdx-2]
- _ = x[SynapseIdxsN-3]
-}
-
-const _SynapseIdxs_name = "SynRecvIdxSynSendIdxSynPrjnIdxSynapseIdxsN"
-
-var _SynapseIdxs_index = [...]uint8{0, 10, 20, 30, 42}
-
-func (i SynapseIdxs) String() string {
- if i < 0 || i >= SynapseIdxs(len(_SynapseIdxs_index)-1) {
- return "SynapseIdxs(" + strconv.FormatInt(int64(i), 10) + ")"
- }
- return _SynapseIdxs_name[_SynapseIdxs_index[i]:_SynapseIdxs_index[i+1]]
-}
-
-func (i *SynapseIdxs) FromString(s string) error {
- for j := 0; j < len(_SynapseIdxs_index)-1; j++ {
- if s == _SynapseIdxs_name[_SynapseIdxs_index[j]:_SynapseIdxs_index[j+1]] {
- *i = SynapseIdxs(j)
- return nil
- }
- }
- return errors.New("String: " + s + " is not a valid option for type: SynapseIdxs")
-}
-
-var _SynapseIdxs_descMap = map[SynapseIdxs]string{
- 0: `SynRecvIdx is receiving neuron index in network's global list of neurons`,
- 1: `SynSendIdx is sending neuron index in network's global list of neurons`,
- 2: `SynPrjnIdx is projection index in global list of projections organized as [Layers][RecvPrjns]`,
- 3: ``,
-}
-
-func (i SynapseIdxs) Desc() string {
- if str, ok := _SynapseIdxs_descMap[i]; ok {
- return str
- }
- return "SynapseIdxs(" + strconv.FormatInt(int64(i), 10) + ")"
-}
diff --git a/axon/synapsevars_string.go b/axon/synapsevars_string.go
deleted file mode 100644
index e97671d9f..000000000
--- a/axon/synapsevars_string.go
+++ /dev/null
@@ -1,59 +0,0 @@
-// Code generated by "stringer -type=SynapseVars"; DO NOT EDIT.
-
-package axon
-
-import (
- "errors"
- "strconv"
-)
-
-var _ = errors.New("dummy error")
-
-func _() {
- // An "invalid array index" compiler error signifies that the constant values have changed.
- // Re-run the stringer command to generate them again.
- var x [1]struct{}
- _ = x[Wt-0]
- _ = x[LWt-1]
- _ = x[SWt-2]
- _ = x[DWt-3]
- _ = x[DSWt-4]
- _ = x[SynapseVarsN-5]
-}
-
-const _SynapseVars_name = "WtLWtSWtDWtDSWtSynapseVarsN"
-
-var _SynapseVars_index = [...]uint8{0, 2, 5, 8, 11, 15, 27}
-
-func (i SynapseVars) String() string {
- if i < 0 || i >= SynapseVars(len(_SynapseVars_index)-1) {
- return "SynapseVars(" + strconv.FormatInt(int64(i), 10) + ")"
- }
- return _SynapseVars_name[_SynapseVars_index[i]:_SynapseVars_index[i+1]]
-}
-
-func (i *SynapseVars) FromString(s string) error {
- for j := 0; j < len(_SynapseVars_index)-1; j++ {
- if s == _SynapseVars_name[_SynapseVars_index[j]:_SynapseVars_index[j+1]] {
- *i = SynapseVars(j)
- return nil
- }
- }
- return errors.New("String: " + s + " is not a valid option for type: SynapseVars")
-}
-
-var _SynapseVars_descMap = map[SynapseVars]string{
- 0: `Wt is effective synaptic weight value, determining how much conductance one spike drives on the receiving neuron, representing the actual number of effective AMPA receptors in the synapse. Wt = SWt * WtSig(LWt), where WtSig produces values between 0-2 based on LWt, centered on 1.`,
- 1: `LWt is rapidly learning, linear weight value -- learns according to the lrate specified in the connection spec. Biologically, this represents the internal biochemical processes that drive the trafficking of AMPA receptors in the synaptic density. Initially all LWt are .5, which gives 1 from WtSig function.`,
- 2: `SWt is slowly adapting structural weight value, which acts as a multiplicative scaling factor on synaptic efficacy: biologically represents the physical size and efficacy of the dendritic spine. SWt values adapt in an outer loop along with synaptic scaling, with constraints to prevent runaway positive feedback loops and maintain variance and further capacity to learn. Initial variance is all in SWt, with LWt set to .5, and scaling absorbs some of LWt into SWt.`,
- 3: `DWt is delta (change in) synaptic weight, from learning -- updates LWt which then updates Wt.`,
- 4: `DSWt is change in SWt slow synaptic weight -- accumulates DWt`,
- 5: ``,
-}
-
-func (i SynapseVars) Desc() string {
- if str, ok := _SynapseVars_descMap[i]; ok {
- return str
- }
- return "SynapseVars(" + strconv.FormatInt(int64(i), 10) + ")"
-}
diff --git a/axon/threads.go b/axon/threads.go
index 41535a85f..692ee1410 100644
--- a/axon/threads.go
+++ b/axon/threads.go
@@ -11,9 +11,8 @@ import (
"sort"
"sync"
- "github.com/emer/emergent/timer"
- "github.com/goki/ki/atomctr"
- "github.com/goki/ki/ints"
+ "github.com/emer/emergent/v2/timer"
+ "goki.dev/glop/atomctr"
)
// Maps the given function across the [0, total) range of items, using
@@ -81,7 +80,7 @@ func (nt *NetworkBase) SetNThreads(nthr int) {
nthr = 1
}
}
- nt.NThreads = ints.MinInt(maxProcs, nthr)
+ nt.NThreads = min(maxProcs, nthr)
}
// PrjnMapSeq applies function of given name to all projections sequentially.
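
The ints.MinInt / ints.MaxInt removals here rely on the generic min and max built-ins added in Go 1.21, the version this repo's CI now targets; no helper import is needed:

    nthr := min(runtime.GOMAXPROCS(0), 4) // built-in since Go 1.21
    numOn := max(n/4, 1)                  // likewise for max
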
diff --git a/axon/threads_test.go b/axon/threads_test.go
index 46ef3794f..831f6b5d4 100644
--- a/axon/threads_test.go
+++ b/axon/threads_test.go
@@ -11,14 +11,13 @@ import (
"math/rand"
"testing"
- "github.com/emer/emergent/etime"
- "github.com/emer/emergent/patgen"
- "github.com/emer/emergent/prjn"
- "github.com/emer/etable/etable"
- "github.com/emer/etable/etensor"
- "github.com/goki/ki/ints"
+ "github.com/emer/emergent/v2/etime"
+ "github.com/emer/emergent/v2/patgen"
+ "github.com/emer/emergent/v2/prjn"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
+ "goki.dev/etable/v2/etable"
+ "goki.dev/etable/v2/etensor"
)
const (
@@ -318,7 +317,7 @@ func generateRandomPatterns(nPats int, seed int64) *etable.Table {
{Name: "Input", Type: etensor.FLOAT32, CellShape: shape, DimNames: []string{"Y", "X"}},
{Name: "Output", Type: etensor.FLOAT32, CellShape: shape, DimNames: []string{"Y", "X"}},
}, nPats)
- numOn := ints.MaxInt((shape[0]*shape[1])/4, 1) // ensure min at least 1
+ numOn := max((shape[0]*shape[1])/4, 1) // ensure min at least 1
patgen.PermutedBinaryRows(pats.Cols[1], numOn, 1, 0)
patgen.PermutedBinaryRows(pats.Cols[2], numOn, 1, 0)
// fmt.Printf("%v\n", pats.Cols[1].(*etensor.Float32).Values)
diff --git a/axon/valencetypes_string.go b/axon/valencetypes_string.go
deleted file mode 100644
index 8a0dcf6ab..000000000
--- a/axon/valencetypes_string.go
+++ /dev/null
@@ -1,53 +0,0 @@
-// Code generated by "stringer -type=ValenceTypes"; DO NOT EDIT.
-
-package axon
-
-import (
- "errors"
- "strconv"
-)
-
-var _ = errors.New("dummy error")
-
-func _() {
- // An "invalid array index" compiler error signifies that the constant values have changed.
- // Re-run the stringer command to generate them again.
- var x [1]struct{}
- _ = x[Positive-0]
- _ = x[Negative-1]
- _ = x[ValenceTypesN-2]
-}
-
-const _ValenceTypes_name = "PositiveNegativeValenceTypesN"
-
-var _ValenceTypes_index = [...]uint8{0, 8, 16, 29}
-
-func (i ValenceTypes) String() string {
- if i < 0 || i >= ValenceTypes(len(_ValenceTypes_index)-1) {
- return "ValenceTypes(" + strconv.FormatInt(int64(i), 10) + ")"
- }
- return _ValenceTypes_name[_ValenceTypes_index[i]:_ValenceTypes_index[i+1]]
-}
-
-func (i *ValenceTypes) FromString(s string) error {
- for j := 0; j < len(_ValenceTypes_index)-1; j++ {
- if s == _ValenceTypes_name[_ValenceTypes_index[j]:_ValenceTypes_index[j+1]] {
- *i = ValenceTypes(j)
- return nil
- }
- }
- return errors.New("String: " + s + " is not a valid option for type: ValenceTypes")
-}
-
-var _ValenceTypes_descMap = map[ValenceTypes]string{
- 0: `Positive valence codes for outcomes aligned with drives / goals.`,
- 1: `Negative valence codes for harmful or aversive outcomes.`,
- 2: ``,
-}
-
-func (i ValenceTypes) Desc() string {
- if str, ok := _ValenceTypes_descMap[i]; ok {
- return str
- }
- return "ValenceTypes(" + strconv.FormatInt(int64(i), 10) + ")"
-}
diff --git a/axon/version.go b/axon/version.go
index 019bdd1cc..0cb2292df 100644
--- a/axon/version.go
+++ b/axon/version.go
@@ -1,9 +1,12 @@
-// WARNING: auto-generated by Makefile release target -- run 'make release' to update
+// Code generated by "goki version"; DO NOT EDIT.
package axon
const (
- Version = "v1.8.20"
- GitCommit = "03242218" // the commit JUST BEFORE the release
- VersionDate = "2023-10-25 20:13" // UTC
+ // Version is the version of this package being used
+ Version = "v2.0.0-dev0.0.2"
+ // GitCommit is the commit just before the latest version commit
+ GitCommit = "997f761d"
+ // VersionDate is the date-time of the latest version commit in UTC (in the format 'YYYY-MM-DD HH:MM', which is the Go format '2006-01-02 15:04')
+ VersionDate = "2023-12-21 10:17"
)
diff --git a/chans/ak.go b/chans/ak.go
index 824d100d8..6d3f157b2 100644
--- a/chans/ak.go
+++ b/chans/ak.go
@@ -4,7 +4,7 @@
package chans
-import "github.com/goki/mat32"
+import "goki.dev/mat32/v2"
// AKParams control an A-type K channel, which is voltage gated with maximal
// activation around -37 mV. It has two state variables, M (v-gated opening)
@@ -17,23 +17,23 @@ import "github.com/goki/mat32"
// not simulated, as in our standard axon models.
type AKParams struct {
- // [def: 1,0.1,0.01] strength of AK current
- Gbar float32 `def:"1,0.1,0.01" desc:"strength of AK current"`
+ // strength of AK current
+ Gbar float32 `def:"1,0.1,0.01"`
- // [def: 0.01446,02039] [viewif: Gbar>0] multiplier for the beta term; 0.01446 for distal, 0.02039 for proximal dendrites
- Beta float32 `viewif:"Gbar>0" def:"0.01446,02039" desc:"multiplier for the beta term; 0.01446 for distal, 0.02039 for proximal dendrites"`
+ // multiplier for the beta term; 0.01446 for distal, 0.02039 for proximal dendrites
+ Beta float32 `viewif:"Gbar>0" def:"0.01446,0.02039"`
- // [def: 0.5,0.25] [viewif: Gbar>0] Dm factor: 0.5 for distal, 0.25 for proximal
- Dm float32 `viewif:"Gbar>0" def:"0.5,0.25" desc:"Dm factor: 0.5 for distal, 0.25 for proximal"`
+ // Dm factor: 0.5 for distal, 0.25 for proximal
+ Dm float32 `viewif:"Gbar>0" def:"0.5,0.25"`
- // [def: 1.8,1.5] [viewif: Gbar>0] offset for K, 1.8 for distal, 1.5 for proximal
- Koff float32 `viewif:"Gbar>0" def:"1.8,1.5" desc:"offset for K, 1.8 for distal, 1.5 for proximal"`
+ // offset for K, 1.8 for distal, 1.5 for proximal
+ Koff float32 `viewif:"Gbar>0" def:"1.8,1.5"`
- // [def: 1,11] [viewif: Gbar>0] voltage offset for alpha and beta functions: 1 for distal, 11 for proximal
- Voff float32 `viewif:"Gbar>0" def:"1,11" desc:"voltage offset for alpha and beta functions: 1 for distal, 11 for proximal"`
+ // voltage offset for alpha and beta functions: 1 for distal, 11 for proximal
+ Voff float32 `viewif:"Gbar>0" def:"1,11"`
- // [def: 0.1133,0.1112] [viewif: Gbar>0] h multiplier factor, 0.1133 for distal, 0.1112 for proximal
- Hf float32 `viewif:"Gbar>0" def:"0.1133,0.1112" desc:"h multiplier factor, 0.1133 for distal, 0.1112 for proximal"`
+ // h multiplier factor, 0.1133 for distal, 0.1112 for proximal
+ Hf float32 `viewif:"Gbar>0" def:"0.1133,0.1112"`
pad, pad1 float32
}
@@ -139,19 +139,19 @@ func (ap *AKParams) Gak(m, h float32) float32 {
// voltage gated calcium channels which can otherwise drive runaway excitatory currents.
type AKsParams struct {
- // [def: 2,0.1,0.01] strength of AK current
- Gbar float32 `def:"2,0.1,0.01" desc:"strength of AK current"`
+ // strength of AK current
+ Gbar float32 `def:"2,0.1,0.01"`
- // [def: 0.076] [viewif: Gbar>0] H factor as a constant multiplier on overall M factor result -- rescales M to level consistent with H being present at full strength
- Hf float32 `viewif:"Gbar>0" def:"0.076" desc:"H factor as a constant multiplier on overall M factor result -- rescales M to level consistent with H being present at full strength"`
+ // H factor as a constant multiplier on overall M factor result -- rescales M to level consistent with H being present at full strength
+ Hf float32 `viewif:"Gbar>0" def:"0.076"`
- // [def: 0.075] [viewif: Gbar>0] multiplier for M -- determines slope of function
- Mf float32 `viewif:"Gbar>0" def:"0.075" desc:"multiplier for M -- determines slope of function"`
+ // multiplier for M -- determines slope of function
+ Mf float32 `viewif:"Gbar>0" def:"0.075"`
- // [def: 2] [viewif: Gbar>0] voltage offset in biological units for M function
- Voff float32 `viewif:"Gbar>0" def:"2" desc:"voltage offset in biological units for M function"`
+ // voltage offset in biological units for M function
+ Voff float32 `viewif:"Gbar>0" def:"2"`
- // [viewif: Gbar>0]
- Vmax float32 `viewif:"Gbar>0" def:-37" desc:"voltage level of maximum channel opening -- stays flat above that"`
+ // voltage level of maximum channel opening -- stays flat above that
+ Vmax float32 `viewif:"Gbar>0" def:"-37"`
pad, pad1, pad2 int32
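
Given the Gak(m, h) signature referenced above, the AK conductance is presumably the product of the two gates scaled by Gbar, in standard Hodgkin-Huxley style; an assumed sketch:

    // gak: total A-type K conductance from the M (v-gated opening) and
    // H (inactivation) variables; assumed form g = Gbar * m * h.
    func gak(gbar, m, h float32) float32 {
        return gbar * m * h
    }
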
diff --git a/chans/ak_plot/ak_plot.go b/chans/ak_plot/ak_plot.go
index d465bb518..63330a8df 100644
--- a/chans/ak_plot/ak_plot.go
+++ b/chans/ak_plot/ak_plot.go
@@ -5,32 +5,30 @@
// ak_plot plots an equation updating over time in a etable.Table and Plot2D.
package main
+//go:generate goki generate -add-types
+
import (
"strconv"
"github.com/emer/axon/chans"
- "github.com/emer/etable/eplot"
- "github.com/emer/etable/etable"
- "github.com/emer/etable/etensor"
- _ "github.com/emer/etable/etview" // include to get gui views
- "github.com/goki/gi/gi"
- "github.com/goki/gi/gimain"
- "github.com/goki/gi/giv"
- "github.com/goki/ki/ki"
- "github.com/goki/mat32"
+ "goki.dev/etable/v2/eplot"
+ "goki.dev/etable/v2/etable"
+ "goki.dev/etable/v2/etensor"
+ _ "goki.dev/etable/v2/etview" // include to get gui views
+ "goki.dev/gi/v2/gi"
+ "goki.dev/gi/v2/gimain"
+ "goki.dev/gi/v2/giv"
+ "goki.dev/icons"
)
-func main() {
- TheSim.Config()
- gimain.Main(func() { // this starts gui -- requires valid OpenGL display connection (e.g., X11)
- guirun()
- })
-}
+func main() { gimain.Run(app) }
-func guirun() {
- TheSim.VmRun()
- win := TheSim.ConfigGui()
- win.StartEventLoop()
+func app() {
+ sim := &Sim{}
+ sim.Config()
+ sim.VmRun()
+ b := sim.ConfigGUI()
+ b.NewWindow().Run().Wait()
}
// LogPrec is precision for saving float values in logs
@@ -40,57 +38,48 @@ const LogPrec = 4
type Sim struct {
// AK function
- AK chans.AKParams `desc:"AK function"`
+ AK chans.AKParams
// AKs simplified function
- AKs chans.AKsParams `desc:"AKs simplified function"`
+ AKs chans.AKsParams
- // [def: -100] starting voltage
- Vstart float32 `def:"-100" desc:"starting voltage"`
+ // starting voltage
+ Vstart float32 `def:"-100"`
- // [def: 100] ending voltage
- Vend float32 `def:"100" desc:"ending voltage"`
+ // ending voltage
+ Vend float32 `def:"100"`
- // [def: 1] voltage increment
- Vstep float32 `def:"1" desc:"voltage increment"`
+ // voltage increment
+ Vstep float32 `def:"1"`
// number of time steps
- TimeSteps int `desc:"number of time steps"`
+ TimeSteps int
// do spiking instead of voltage ramp
- TimeSpike bool `desc:"do spiking instead of voltage ramp"`
+ TimeSpike bool
// spiking frequency
- SpikeFreq float32 `desc:"spiking frequency"`
+ SpikeFreq float32
// time-run starting membrane potential
- TimeVstart float32 `desc:"time-run starting membrane potential"`
+ TimeVstart float32
// time-run ending membrane potential
- TimeVend float32 `desc:"time-run ending membrane potential"`
+ TimeVend float32
- // [view: no-inline] table for plot
- Table *etable.Table `view:"no-inline" desc:"table for plot"`
+ // table for plot
+ Table *etable.Table `view:"no-inline"`
- // [view: -] the plot
- Plot *eplot.Plot2D `view:"-" desc:"the plot"`
+ // the plot
+ Plot *eplot.Plot2D `view:"-"`
- // [view: no-inline] table for plot
- TimeTable *etable.Table `view:"no-inline" desc:"table for plot"`
+ // table for plot
+ TimeTable *etable.Table `view:"no-inline"`
- // [view: -] the plot
- TimePlot *eplot.Plot2D `view:"-" desc:"the plot"`
-
- // [view: -] main GUI window
- Win *gi.Window `view:"-" desc:"main GUI window"`
-
- // [view: -] the master toolbar
- ToolBar *gi.ToolBar `view:"-" desc:"the master toolbar"`
+ // the plot
+ TimePlot *eplot.Plot2D `view:"-"`
}
-// TheSim is the overall state for this simulation
-var TheSim Sim
-
// Config configures all the elements using the standard functions
func (ss *Sim) Config() {
ss.AK.Defaults()
@@ -117,7 +106,7 @@ func (ss *Sim) Update() {
}
// VmRun plots the equation as a function of V
-func (ss *Sim) VmRun() {
+func (ss *Sim) VmRun() { //gti:add
ss.Update()
dt := ss.Table
@@ -153,7 +142,9 @@ func (ss *Sim) VmRun() {
dt.SetCellFloat("Ms", vi, float64(ms))
dt.SetCellFloat("Gaks", vi, float64(gs))
}
- ss.Plot.Update()
+ if ss.Plot != nil {
+ ss.Plot.UpdatePlot()
+ }
}
func (ss *Sim) ConfigTable(dt *etable.Table) {
@@ -199,7 +190,7 @@ func (ss *Sim) ConfigPlot(plt *eplot.Plot2D, dt *etable.Table) *eplot.Plot2D {
/////////////////////////////////////////////////////////////////
// TimeRun runs the equation over time.
-func (ss *Sim) TimeRun() {
+func (ss *Sim) TimeRun() { //gti:add
ss.Update()
dt := ss.TimeTable
@@ -258,7 +249,9 @@ func (ss *Sim) TimeRun() {
}
}
}
- ss.TimePlot.Update()
+ if ss.TimePlot != nil {
+ ss.TimePlot.UpdatePlot()
+ }
}
func (ss *Sim) ConfigTimeTable(dt *etable.Table) {
@@ -302,73 +295,28 @@ func (ss *Sim) ConfigTimePlot(plt *eplot.Plot2D, dt *etable.Table) *eplot.Plot2D
return plt
}
-// ConfigGui configures the GoGi gui interface for this simulation,
-func (ss *Sim) ConfigGui() *gi.Window {
- width := 1600
- height := 1200
-
- // gi.WinEventTrace = true
-
- gi.SetAppName("ak_plot")
- gi.SetAppAbout(`This plots an equation. See emergent on GitHub.
- `)
-
- win := gi.NewMainWindow("ak_plot", "Plotting Equations", width, height)
- ss.Win = win
+// ConfigGUI configures the GoGi gui interface for this simulation,
+func (ss *Sim) ConfigGUI() *gi.Body {
+ b := gi.NewAppBody("ak_plot").SetTitle("Plotting Equations")
- vp := win.WinViewport2D()
- updt := vp.UpdateStart()
-
- mfr := win.SetMainFrame()
-
- tbar := gi.AddNewToolBar(mfr, "tbar")
- tbar.SetStretchMaxWidth()
- ss.ToolBar = tbar
-
- split := gi.AddNewSplitView(mfr, "split")
- split.Dim = mat32.X
- split.SetStretchMax()
-
- sv := giv.AddNewStructView(split, "sv")
+ split := gi.NewSplits(b, "split")
+ sv := giv.NewStructView(split, "sv")
sv.SetStruct(ss)
- tv := gi.AddNewTabView(split, "tv")
+ tv := gi.NewTabs(split, "tv")
- plt := tv.AddNewTab(eplot.KiT_Plot2D, "V-G Plot").(*eplot.Plot2D)
- ss.Plot = ss.ConfigPlot(plt, ss.Table)
+ ss.Plot = eplot.NewSubPlot(tv.NewTab("V-G Plot"))
+ ss.ConfigPlot(ss.Plot, ss.Table)
- plt = tv.AddNewTab(eplot.KiT_Plot2D, "TimePlot").(*eplot.Plot2D)
- ss.TimePlot = ss.ConfigTimePlot(plt, ss.TimeTable)
+ ss.TimePlot = eplot.NewSubPlot(tv.NewTab("TimePlot"))
+ ss.ConfigTimePlot(ss.TimePlot, ss.TimeTable)
split.SetSplits(.3, .7)
- tbar.AddAction(gi.ActOpts{Label: "V-G Run", Icon: "update", Tooltip: "Run the equations and plot results."}, win.This(), func(recv, send ki.Ki, sig int64, data interface{}) {
- ss.VmRun()
- vp.SetNeedsFullRender()
- })
-
- tbar.AddAction(gi.ActOpts{Label: "Time Run", Icon: "update", Tooltip: "Run the equations and plot results."}, win.This(), func(recv, send ki.Ki, sig int64, data interface{}) {
- ss.TimeRun()
- vp.SetNeedsFullRender()
+ b.AddAppBar(func(tb *gi.Toolbar) {
+ giv.NewFuncButton(tb, ss.VmRun).SetIcon(icons.PlayArrow)
+ giv.NewFuncButton(tb, ss.TimeRun).SetIcon(icons.PlayArrow)
})
- tbar.AddAction(gi.ActOpts{Label: "README", Icon: "file-markdown", Tooltip: "Opens your browser on the README file that contains instructions for how to run this model."}, win.This(),
- func(recv, send ki.Ki, sig int64, data interface{}) {
- gi.OpenURL("https://github.com/emer/axon/blob/master/chans/ak_plot/README.md")
- })
-
- vp.UpdateEndNoSig(updt)
-
- // main menu
- appnm := gi.AppName()
- mmen := win.MainMenu
- mmen.ConfigMenus([]string{appnm, "File", "Edit", "Window"})
-
- amen := win.MainMenu.ChildByName(appnm, 0).(*gi.Action)
- amen.Menu.AddAppMenu(win)
-
- emen := win.MainMenu.ChildByName("Edit", 1).(*gi.Action)
- emen.Menu.AddCopyCutPaste(win)
-
- win.MainMenuUpdated()
- return win
+ return b
}
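
The ConfigGUI rewrite above follows the gi v2 app pattern; a minimal standalone skeleton using the same calls seen in this diff (gi.NewAppBody, AddAppBar, NewWindow().Run().Wait()) -- exact APIs may differ across gi versions:

    package main

    import (
        "goki.dev/gi/v2/gi"
        "goki.dev/icons"
    )

    func main() {
        b := gi.NewAppBody("sketch").SetTitle("Sketch")
        b.AddAppBar(func(tb *gi.Toolbar) {
            gi.NewButton(tb).SetText("Run").SetIcon(icons.PlayArrow)
        })
        b.NewWindow().Run().Wait()
    }
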
diff --git a/chans/ak_plot/gtigen.go b/chans/ak_plot/gtigen.go
new file mode 100644
index 000000000..629f203b3
--- /dev/null
+++ b/chans/ak_plot/gtigen.go
@@ -0,0 +1,41 @@
+// Code generated by "goki generate -add-types"; DO NOT EDIT.
+
+package main
+
+import (
+ "goki.dev/gti"
+ "goki.dev/ordmap"
+)
+
+var _ = gti.AddType(>i.Type{
+ Name: "main.Sim",
+ ShortName: "main.Sim",
+ IDName: "sim",
+ Doc: "Sim holds the params, table, etc",
+ Directives: gti.Directives{},
+ Fields: ordmap.Make([]ordmap.KeyVal[string, *gti.Field]{
+ {"AK", >i.Field{Name: "AK", Type: "github.com/emer/axon/chans.AKParams", LocalType: "chans.AKParams", Doc: "AK function", Directives: gti.Directives{}, Tag: ""}},
+ {"AKs", >i.Field{Name: "AKs", Type: "github.com/emer/axon/chans.AKsParams", LocalType: "chans.AKsParams", Doc: "AKs simplified function", Directives: gti.Directives{}, Tag: ""}},
+ {"Vstart", >i.Field{Name: "Vstart", Type: "float32", LocalType: "float32", Doc: "starting voltage", Directives: gti.Directives{}, Tag: "def:\"-100\""}},
+ {"Vend", >i.Field{Name: "Vend", Type: "float32", LocalType: "float32", Doc: "ending voltage", Directives: gti.Directives{}, Tag: "def:\"100\""}},
+ {"Vstep", >i.Field{Name: "Vstep", Type: "float32", LocalType: "float32", Doc: "voltage increment", Directives: gti.Directives{}, Tag: "def:\"1\""}},
+ {"TimeSteps", >i.Field{Name: "TimeSteps", Type: "int", LocalType: "int", Doc: "number of time steps", Directives: gti.Directives{}, Tag: ""}},
+ {"TimeSpike", >i.Field{Name: "TimeSpike", Type: "bool", LocalType: "bool", Doc: "do spiking instead of voltage ramp", Directives: gti.Directives{}, Tag: ""}},
+ {"SpikeFreq", >i.Field{Name: "SpikeFreq", Type: "float32", LocalType: "float32", Doc: "spiking frequency", Directives: gti.Directives{}, Tag: ""}},
+ {"TimeVstart", >i.Field{Name: "TimeVstart", Type: "float32", LocalType: "float32", Doc: "time-run starting membrane potential", Directives: gti.Directives{}, Tag: ""}},
+ {"TimeVend", >i.Field{Name: "TimeVend", Type: "float32", LocalType: "float32", Doc: "time-run ending membrane potential", Directives: gti.Directives{}, Tag: ""}},
+ {"Table", >i.Field{Name: "Table", Type: "*goki.dev/etable/v2/etable.Table", LocalType: "*etable.Table", Doc: "table for plot", Directives: gti.Directives{}, Tag: "view:\"no-inline\""}},
+ {"Plot", >i.Field{Name: "Plot", Type: "*goki.dev/etable/v2/eplot.Plot2D", LocalType: "*eplot.Plot2D", Doc: "the plot", Directives: gti.Directives{}, Tag: "view:\"-\""}},
+ {"TimeTable", >i.Field{Name: "TimeTable", Type: "*goki.dev/etable/v2/etable.Table", LocalType: "*etable.Table", Doc: "table for plot", Directives: gti.Directives{}, Tag: "view:\"no-inline\""}},
+ {"TimePlot", >i.Field{Name: "TimePlot", Type: "*goki.dev/etable/v2/eplot.Plot2D", LocalType: "*eplot.Plot2D", Doc: "the plot", Directives: gti.Directives{}, Tag: "view:\"-\""}},
+ }),
+ Embeds: ordmap.Make([]ordmap.KeyVal[string, *gti.Field]{}),
+ Methods: ordmap.Make([]ordmap.KeyVal[string, *gti.Method]{
+ {"VmRun", >i.Method{Name: "VmRun", Doc: "VmRun plots the equation as a function of V", Directives: gti.Directives{
+ >i.Directive{Tool: "gti", Directive: "add", Args: []string{}},
+ }, Args: ordmap.Make([]ordmap.KeyVal[string, *gti.Field]{}), Returns: ordmap.Make([]ordmap.KeyVal[string, *gti.Field]{})}},
+ {"TimeRun", >i.Method{Name: "TimeRun", Doc: "TimeRun runs the equation over time.", Directives: gti.Directives{
+ >i.Directive{Tool: "gti", Directive: "add", Args: []string{}},
+ }, Args: ordmap.Make([]ordmap.KeyVal[string, *gti.Field]{}), Returns: ordmap.Make([]ordmap.KeyVal[string, *gti.Field]{})}},
+ }),
+})
diff --git a/chans/chans.go b/chans/chans.go
index 5aeb84005..f61fed669 100644
--- a/chans/chans.go
+++ b/chans/chans.go
@@ -10,6 +10,8 @@ Includes excitatory, leak, inhibition, and dynamic potassium channels.
*/
package chans
+//go:generate goki generate -add-types
+
//gosl: hlsl chans
// #include "fastexp.hlsl"
//gosl: end chans
@@ -20,16 +22,16 @@ package chans
type Chans struct {
// excitatory sodium (Na) AMPA channels activated by synaptic glutamate
- E float32 `desc:"excitatory sodium (Na) AMPA channels activated by synaptic glutamate"`
+ E float32
// constant leak (potassium, K+) channels -- determines resting potential (typically higher than resting potential of K)
- L float32 `desc:"constant leak (potassium, K+) channels -- determines resting potential (typically higher than resting potential of K)"`
+ L float32
// inhibitory chloride (Cl-) channels activated by synaptic GABA
- I float32 `desc:"inhibitory chloride (Cl-) channels activated by synaptic GABA"`
+ I float32
// gated / active potassium channels -- typically hyperpolarizing relative to leak / rest
- K float32 `desc:"gated / active potassium channels -- typically hyperpolarizing relative to leak / rest"`
+ K float32
}
// SetAll sets all the values
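
For orientation, Chans remains a plain 4-float container used for both conductances and their reversal potentials in the point-neuron update. A standalone sketch of the current sum it supports (the local Chans copy and the NetCurrent helper here are illustrative, not the package API):

    package main

    import "fmt"

    // Chans mirrors the four channel values of chans.Chans.
    type Chans struct {
        E, L, I, K float32
    }

    // NetCurrent is a hypothetical helper: given conductances g and
    // reversal potentials erev, it sums driving-potential currents at
    // membrane potential vm, as in a point-neuron update.
    func NetCurrent(g, erev Chans, vm float32) float32 {
        return g.E*(erev.E-vm) + g.L*(erev.L-vm) +
            g.I*(erev.I-vm) + g.K*(erev.K-vm)
    }

    func main() {
        g := Chans{E: 0.5, L: 0.2, I: 0.1, K: 0.05}
        erev := Chans{E: 1.0, L: 0.3, I: 0.1, K: 0.1} // normalized units
        fmt.Println(NetCurrent(g, erev, 0.4))
    }
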
diff --git a/chans/gabab.go b/chans/gabab.go
index 626a2a1eb..37cf70e53 100644
--- a/chans/gabab.go
+++ b/chans/gabab.go
@@ -5,7 +5,7 @@
package chans
import (
- "github.com/goki/mat32"
+ "goki.dev/mat32/v2"
)
//gosl: start chans
@@ -14,32 +14,32 @@ import (
// based on Brunel & Wang (2001) parameters.
type GABABParams struct {
- // [def: 0,0.012,0.015] overall strength multiplier of GABA-B current. The 0.015 default is a high value that works well in smaller networks -- larger networks may benefit from lower levels (e.g., 0.012).
- Gbar float32 `def:"0,0.012,0.015" desc:"overall strength multiplier of GABA-B current. The 0.015 default is a high value that works well in smaller networks -- larger networks may benefit from lower levels (e.g., 0.012)."`
+ // overall strength multiplier of GABA-B current. The 0.015 default is a high value that works well in smaller networks -- larger networks may benefit from lower levels (e.g., 0.012).
+ Gbar float32 `def:"0,0.012,0.015"`
- // [def: 45] [viewif: Gbar>0] rise time for bi-exponential time dynamics of GABA-B
- RiseTau float32 `viewif:"Gbar>0" def:"45" desc:"rise time for bi-exponential time dynamics of GABA-B"`
+ // rise time for bi-exponential time dynamics of GABA-B
+ RiseTau float32 `viewif:"Gbar>0" def:"45"`
- // [def: 50] [viewif: Gbar>0] decay time for bi-exponential time dynamics of GABA-B
- DecayTau float32 `viewif:"Gbar>0" def:"50" desc:"decay time for bi-exponential time dynamics of GABA-B"`
+ // decay time for bi-exponential time dynamics of GABA-B
+ DecayTau float32 `viewif:"Gbar>0" def:"50"`
- // [def: 0.2] [viewif: Gbar>0] baseline level of GABA-B channels open independent of inhibitory input (is added to spiking-produced conductance)
- Gbase float32 `viewif:"Gbar>0" def:"0.2" desc:"baseline level of GABA-B channels open independent of inhibitory input (is added to spiking-produced conductance)"`
+ // baseline level of GABA-B channels open independent of inhibitory input (is added to spiking-produced conductance)
+ Gbase float32 `viewif:"Gbar>0" def:"0.2"`
- // [def: 10] [viewif: Gbar>0] multiplier for converting Gi to equivalent GABA spikes
- GiSpike float32 `viewif:"Gbar>0" def:"10" desc:"multiplier for converting Gi to equivalent GABA spikes"`
+ // multiplier for converting Gi to equivalent GABA spikes
+ GiSpike float32 `viewif:"Gbar>0" def:"10"`
- // [viewif: Gbar>0] time offset when peak conductance occurs, in msec, computed from RiseTau and DecayTau
- MaxTime float32 `viewif:"Gbar>0" inactive:"+" desc:"time offset when peak conductance occurs, in msec, computed from RiseTau and DecayTau"`
+ // time offset when peak conductance occurs, in msec, computed from RiseTau and DecayTau
+ MaxTime float32 `viewif:"Gbar>0" inactive:"+"`
- // [view: -] time constant factor used in integration: (Decay / Rise) ^ (Rise / (Decay - Rise))
- TauFact float32 `view:"-" desc:"time constant factor used in integration: (Decay / Rise) ^ (Rise / (Decay - Rise))"`
+ // time constant factor used in integration: (Decay / Rise) ^ (Rise / (Decay - Rise))
+ TauFact float32 `view:"-"`
- // [view: -] 1/Tau
- RiseDt float32 `view:"-" inactive:"+" desc:"1/Tau"`
+ // 1/Tau
+ RiseDt float32 `view:"-" inactive:"+"`
- // [view: -] 1/Tau
- DecayDt float32 `view:"-" inactive:"+" desc:"1/Tau"`
+ // 1/Tau
+ DecayDt float32 `view:"-" inactive:"+"`
pad, pad1, pad2 float32
}
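
The GABABParams fields keep their bi-exponential semantics: RiseTau and DecayTau set the kinetics, and TauFact = (Decay / Rise) ^ (Rise / (Decay - Rise)) normalizes the peak conductance to 1, per the field comments above. A self-contained sketch of those dynamics under a unit synaptic impulse, using Euler integration at 1 msec; this illustrates the math behind the parameters, not the package's exact update functions:

    package main

    import (
        "fmt"
        "math"
    )

    // Bi-exponential conductance: x is the fast driving variable
    // (rise), g the conductance (decay); tauFact normalizes the peak.
    func main() {
        riseTau, decayTau := 45.0, 50.0
        tauFact := math.Pow(decayTau/riseTau, riseTau/(decayTau-riseTau))
        g, x := 0.0, 1.0 // unit impulse of synaptic input at t = 0
        maxG, maxT := 0.0, 0
        for t := 1; t <= 500; t++ { // Euler steps of 1 msec
            g += (tauFact*x - g) / riseTau
            x -= x / decayTau
            if g > maxG {
                maxG, maxT = g, t
            }
        }
        // peak arrives at MaxTime (~47 msec here) and is ~1 by tauFact
        fmt.Printf("peak G = %.3f at t = %d msec\n", maxG, maxT)
    }
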
diff --git a/chans/gabab_plot/gabab_plot.go b/chans/gabab_plot/gabab_plot.go
index 9e6ffadf7..0d6bcfebc 100644
--- a/chans/gabab_plot/gabab_plot.go
+++ b/chans/gabab_plot/gabab_plot.go
@@ -5,34 +5,33 @@
 // gabab_plot plots an equation updating over time in an etable.Table and Plot2D.
package main
+//go:generate goki generate -add-types
+
import (
"math"
"strconv"
"github.com/emer/axon/chans"
- "github.com/emer/etable/eplot"
- "github.com/emer/etable/etable"
- "github.com/emer/etable/etensor"
- _ "github.com/emer/etable/etview" // include to get gui views
- "github.com/goki/gi/gi"
- "github.com/goki/gi/gimain"
- "github.com/goki/gi/giv"
- "github.com/goki/ki/ki"
- "github.com/goki/mat32"
+ "goki.dev/etable/v2/eplot"
+ "goki.dev/etable/v2/etable"
+ "goki.dev/etable/v2/etensor"
+ _ "goki.dev/etable/v2/etview" // include to get gui views
+ "goki.dev/gi/v2/gi"
+ "goki.dev/gi/v2/gimain"
+ "goki.dev/gi/v2/giv"
+ "goki.dev/icons"
+ "goki.dev/mat32/v2"
)
-func main() {
- TheSim.Config()
- gimain.Main(func() { // this starts gui -- requires valid OpenGL display connection (e.g., X11)
- guirun()
- })
-}
+func main() { gimain.Run(app) }
-func guirun() {
- TheSim.VGRun()
- TheSim.SGRun()
- win := TheSim.ConfigGui()
- win.StartEventLoop()
+func app() {
+ sim := &Sim{}
+ sim.Config()
+ sim.VGRun()
+ sim.SGRun()
+ b := sim.ConfigGUI()
+ b.NewWindow().Run().Wait()
}
// LogPrec is precision for saving float values in logs
@@ -42,78 +41,69 @@ const LogPrec = 4
type Sim struct {
// standard chans version of GABAB
- GABAstd chans.GABABParams `desc:"standard chans version of GABAB"`
+ GABAstd chans.GABABParams
- // [def: 0.1] multiplier on GABAb as function of voltage
- GABAbv float64 `def:"0.1" desc:"multiplier on GABAb as function of voltage"`
+ // multiplier on GABAb as function of voltage
+ GABAbv float64 `def:"0.1"`
- // [def: 10] offset of GABAb function
- GABAbo float64 `def:"10" desc:"offset of GABAb function"`
+ // offset of GABAb function
+ GABAbo float64 `def:"10"`
- // [def: -90] GABAb reversal / driving potential
- GABAberev float64 `def:"-90" desc:"GABAb reversal / driving potential"`
+ // GABAb reversal / driving potential
+ GABAberev float64 `def:"-90"`
- // [def: -90] starting voltage
- Vstart float64 `def:"-90" desc:"starting voltage"`
+ // starting voltage
+ Vstart float64 `def:"-90"`
- // [def: 0] ending voltage
- Vend float64 `def:"0" desc:"ending voltage"`
+ // ending voltage
+ Vend float64 `def:"0"`
- // [def: 1] voltage increment
- Vstep float64 `def:"1" desc:"voltage increment"`
+ // voltage increment
+ Vstep float64 `def:"1"`
- // [def: 15] max number of spikes
- Smax int `def:"15" desc:"max number of spikes"`
+ // max number of spikes
+ Smax int `def:"15"`
// rise time constant
- RiseTau float64 `desc:"rise time constant"`
+ RiseTau float64
// decay time constant -- must NOT be same as RiseTau
- DecayTau float64 `desc:"decay time constant -- must NOT be same as RiseTau"`
+ DecayTau float64
 // initial value of GsX driving variable at point of synaptic input onset -- decays exponentially from this start
- GsXInit float64 `desc:"initial value of GsX driving variable at point of synaptic input onset -- decays expoentially from this start"`
+ GsXInit float64
// time when peak conductance occurs, in TimeInc units
- MaxTime float64 `inactive:"+" desc:"time when peak conductance occurs, in TimeInc units"`
+ MaxTime float64 `inactive:"+"`
// time constant factor used in integration: (Decay / Rise) ^ (Rise / (Decay - Rise))
- TauFact float64 `inactive:"+" desc:"time constant factor used in integration: (Decay / Rise) ^ (Rise / (Decay - Rise))"`
+ TauFact float64 `inactive:"+"`
// total number of time steps to take
- TimeSteps int `desc:"total number of time steps to take"`
+ TimeSteps int
// time increment per step
- TimeInc float64 `desc:"time increment per step"`
-
- // [view: no-inline] table for plot
- VGTable *etable.Table `view:"no-inline" desc:"table for plot"`
+ TimeInc float64
- // [view: no-inline] table for plot
- SGTable *etable.Table `view:"no-inline" desc:"table for plot"`
+ // table for plot
+ VGTable *etable.Table `view:"no-inline"`
- // [view: no-inline] table for plot
- TimeTable *etable.Table `view:"no-inline" desc:"table for plot"`
+ // table for plot
+ SGTable *etable.Table `view:"no-inline"`
- // [view: -] the plot
- VGPlot *eplot.Plot2D `view:"-" desc:"the plot"`
+ // table for plot
+ TimeTable *etable.Table `view:"no-inline"`
- // [view: -] the plot
- SGPlot *eplot.Plot2D `view:"-" desc:"the plot"`
+ // the plot
+ VGPlot *eplot.Plot2D `view:"-"`
- // [view: -] the plot
- TimePlot *eplot.Plot2D `view:"-" desc:"the plot"`
+ // the plot
+ SGPlot *eplot.Plot2D `view:"-"`
- // [view: -] main GUI window
- Win *gi.Window `view:"-" desc:"main GUI window"`
-
- // [view: -] the master toolbar
- ToolBar *gi.ToolBar `view:"-" desc:"the master toolbar"`
+ // the plot
+ TimePlot *eplot.Plot2D `view:"-"`
}
-// TheSim is the overall state for this simulation
-var TheSim Sim
-
// Config configures all the elements using the standard functions
func (ss *Sim) Config() {
ss.GABAstd.Defaults()
@@ -149,7 +139,7 @@ func (ss *Sim) Update() {
}
// VGRun runs the V-G equation.
-func (ss *Sim) VGRun() {
+func (ss *Sim) VGRun() { //gti:add
ss.Update()
dt := ss.VGTable
@@ -169,7 +159,9 @@ func (ss *Sim) VGRun() {
dt.SetCellFloat("GgabaB_std", vi, float64(gs))
dt.SetCellFloat("GgabaB_bug", vi, float64(gbug))
}
- ss.VGPlot.Update()
+ if ss.VGPlot != nil {
+ ss.VGPlot.UpdatePlot()
+ }
}
func (ss *Sim) ConfigVGTable(dt *etable.Table) {
@@ -200,7 +192,7 @@ func (ss *Sim) ConfigVGPlot(plt *eplot.Plot2D, dt *etable.Table) *eplot.Plot2D {
//////////////////////////////////////////////////
// SGRun runs the spike-g equation.
-func (ss *Sim) SGRun() {
+func (ss *Sim) SGRun() { //gti:add
ss.Update()
dt := ss.SGTable
@@ -217,7 +209,9 @@ func (ss *Sim) SGRun() {
dt.SetCellFloat("GgabaB_max", si, g)
dt.SetCellFloat("GgabaBstd_max", si, float64(gs))
}
- ss.SGPlot.Update()
+ if ss.SGPlot != nil {
+ ss.SGPlot.UpdatePlot()
+ }
}
func (ss *Sim) ConfigSGTable(dt *etable.Table) {
@@ -247,7 +241,7 @@ func (ss *Sim) ConfigSGPlot(plt *eplot.Plot2D, dt *etable.Table) *eplot.Plot2D {
//////////////////////////////////////////////////
// TimeRun runs the equation.
-func (ss *Sim) TimeRun() {
+func (ss *Sim) TimeRun() { //gti:add
ss.Update()
dt := ss.TimeTable
@@ -281,7 +275,9 @@ func (ss *Sim) TimeRun() {
time += ss.TimeInc
}
- ss.TimePlot.Update()
+ if ss.TimePlot != nil {
+ ss.TimePlot.UpdatePlot()
+ }
}
func (ss *Sim) ConfigTimeTable(dt *etable.Table) {
@@ -314,81 +310,32 @@ func (ss *Sim) ConfigTimePlot(plt *eplot.Plot2D, dt *etable.Table) *eplot.Plot2D
return plt
}
-// ConfigGui configures the GoGi gui interface for this simulation,
-func (ss *Sim) ConfigGui() *gi.Window {
- width := 1600
- height := 1200
-
- // gi.WinEventTrace = true
-
- gi.SetAppName("gabab_plot")
- gi.SetAppAbout(`This plots an equation. See emergent on GitHub.`)
-
- win := gi.NewMainWindow("gababplot", "Plotting Equations", width, height)
- ss.Win = win
-
- vp := win.WinViewport2D()
- updt := vp.UpdateStart()
+// ConfigGUI configures the GoGi GUI interface for this simulation.
+func (ss *Sim) ConfigGUI() *gi.Body {
+ b := gi.NewAppBody("gabab_plot").SetTitle("Plotting Equations")
- mfr := win.SetMainFrame()
-
- tbar := gi.AddNewToolBar(mfr, "tbar")
- tbar.SetStretchMaxWidth()
- ss.ToolBar = tbar
-
- split := gi.AddNewSplitView(mfr, "split")
- split.Dim = mat32.X
- split.SetStretchMax()
-
- sv := giv.AddNewStructView(split, "sv")
+ split := gi.NewSplits(b, "split")
+ sv := giv.NewStructView(split, "sv")
sv.SetStruct(ss)
- tv := gi.AddNewTabView(split, "tv")
+ tv := gi.NewTabs(split, "tv")
- plt := tv.AddNewTab(eplot.KiT_Plot2D, "VGPlot").(*eplot.Plot2D)
- ss.VGPlot = ss.ConfigVGPlot(plt, ss.VGTable)
+ ss.VGPlot = eplot.NewSubPlot(tv.NewTab("V-G Plot"))
+ ss.ConfigVGPlot(ss.VGPlot, ss.VGTable)
- plt = tv.AddNewTab(eplot.KiT_Plot2D, "SGPlot").(*eplot.Plot2D)
- ss.SGPlot = ss.ConfigSGPlot(plt, ss.SGTable)
+ ss.SGPlot = eplot.NewSubPlot(tv.NewTab("S-G Plot"))
+ ss.ConfigSGPlot(ss.SGPlot, ss.SGTable)
- plt = tv.AddNewTab(eplot.KiT_Plot2D, "TimePlot").(*eplot.Plot2D)
- ss.TimePlot = ss.ConfigTimePlot(plt, ss.TimeTable)
+ ss.TimePlot = eplot.NewSubPlot(tv.NewTab("TimePlot"))
+ ss.ConfigTimePlot(ss.TimePlot, ss.TimeTable)
split.SetSplits(.3, .7)
- tbar.AddAction(gi.ActOpts{Label: "Run VG", Icon: "update", Tooltip: "Run the equations and plot results."}, win.This(), func(recv, send ki.Ki, sig int64, data interface{}) {
- ss.VGRun()
- vp.SetNeedsFullRender()
- })
-
- tbar.AddAction(gi.ActOpts{Label: "Run SG", Icon: "update", Tooltip: "Run the equations and plot results."}, win.This(), func(recv, send ki.Ki, sig int64, data interface{}) {
- ss.SGRun()
- vp.SetNeedsFullRender()
- })
-
- tbar.AddAction(gi.ActOpts{Label: "Run Time", Icon: "update", Tooltip: "Run the equations and plot results."}, win.This(), func(recv, send ki.Ki, sig int64, data interface{}) {
- ss.TimeRun()
- vp.SetNeedsFullRender()
+ b.AddAppBar(func(tb *gi.Toolbar) {
+ giv.NewFuncButton(tb, ss.VGRun).SetIcon(icons.PlayArrow)
+ giv.NewFuncButton(tb, ss.SGRun).SetIcon(icons.PlayArrow)
+ giv.NewFuncButton(tb, ss.TimeRun).SetIcon(icons.PlayArrow)
})
- tbar.AddAction(gi.ActOpts{Label: "README", Icon: "file-markdown", Tooltip: "Opens your browser on the README file that contains instructions for how to run this model."}, win.This(),
- func(recv, send ki.Ki, sig int64, data interface{}) {
- gi.OpenURL("https://github.com/emer/axon/blob/master/chans/gabab_plot/README.md")
- })
-
- vp.UpdateEndNoSig(updt)
-
- // main menu
- appnm := gi.AppName()
- mmen := win.MainMenu
- mmen.ConfigMenus([]string{appnm, "File", "Edit", "Window"})
-
- amen := win.MainMenu.ChildByName(appnm, 0).(*gi.Action)
- amen.Menu.AddAppMenu(win)
-
- emen := win.MainMenu.ChildByName("Edit", 1).(*gi.Action)
- emen.Menu.AddCopyCutPaste(win)
-
- win.MainMenuUpdated()
- return win
+ return b
}
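
A recurring element of this migration is the nil-guard around plot updates (ss.VGPlot != nil, and so on), which lets the same Run methods execute headless, before ConfigGUI has assigned the plots. A reduced sketch with stand-in types, not the simulation's actual ones:

    package main

    import "fmt"

    // Plot and Sim are illustrative stand-ins only.
    type Plot struct{}

    func (p *Plot) UpdatePlot() { fmt.Println("plot updated") }

    type Sim struct {
        VGPlot *Plot // nil when running without a window
    }

    func (ss *Sim) VGRun() {
        // ... fill the data table here ...
        if ss.VGPlot != nil { // guard: safe in headless runs
            ss.VGPlot.UpdatePlot()
        }
    }

    func main() {
        (&Sim{}).VGRun() // no plot configured: computes, skips the update
    }
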
diff --git a/chans/gabab_plot/gtigen.go b/chans/gabab_plot/gtigen.go
new file mode 100644
index 000000000..2b63ff514
--- /dev/null
+++ b/chans/gabab_plot/gtigen.go
@@ -0,0 +1,51 @@
+// Code generated by "goki generate -add-types"; DO NOT EDIT.
+
+package main
+
+import (
+ "goki.dev/gti"
+ "goki.dev/ordmap"
+)
+
+var _ = gti.AddType(&gti.Type{
+ Name: "main.Sim",
+ ShortName: "main.Sim",
+ IDName: "sim",
+ Doc: "Sim holds the params, table, etc",
+ Directives: gti.Directives{},
+ Fields: ordmap.Make([]ordmap.KeyVal[string, *gti.Field]{
+ {"GABAstd", >i.Field{Name: "GABAstd", Type: "github.com/emer/axon/chans.GABABParams", LocalType: "chans.GABABParams", Doc: "standard chans version of GABAB", Directives: gti.Directives{}, Tag: ""}},
+ {"GABAbv", >i.Field{Name: "GABAbv", Type: "float64", LocalType: "float64", Doc: "multiplier on GABAb as function of voltage", Directives: gti.Directives{}, Tag: "def:\"0.1\""}},
+ {"GABAbo", >i.Field{Name: "GABAbo", Type: "float64", LocalType: "float64", Doc: "offset of GABAb function", Directives: gti.Directives{}, Tag: "def:\"10\""}},
+ {"GABAberev", >i.Field{Name: "GABAberev", Type: "float64", LocalType: "float64", Doc: "GABAb reversal / driving potential", Directives: gti.Directives{}, Tag: "def:\"-90\""}},
+ {"Vstart", >i.Field{Name: "Vstart", Type: "float64", LocalType: "float64", Doc: "starting voltage", Directives: gti.Directives{}, Tag: "def:\"-90\""}},
+ {"Vend", >i.Field{Name: "Vend", Type: "float64", LocalType: "float64", Doc: "ending voltage", Directives: gti.Directives{}, Tag: "def:\"0\""}},
+ {"Vstep", >i.Field{Name: "Vstep", Type: "float64", LocalType: "float64", Doc: "voltage increment", Directives: gti.Directives{}, Tag: "def:\"1\""}},
+ {"Smax", >i.Field{Name: "Smax", Type: "int", LocalType: "int", Doc: "max number of spikes", Directives: gti.Directives{}, Tag: "def:\"15\""}},
+ {"RiseTau", >i.Field{Name: "RiseTau", Type: "float64", LocalType: "float64", Doc: "rise time constant", Directives: gti.Directives{}, Tag: ""}},
+ {"DecayTau", >i.Field{Name: "DecayTau", Type: "float64", LocalType: "float64", Doc: "decay time constant -- must NOT be same as RiseTau", Directives: gti.Directives{}, Tag: ""}},
+ {"GsXInit", >i.Field{Name: "GsXInit", Type: "float64", LocalType: "float64", Doc: "initial value of GsX driving variable at point of synaptic input onset -- decays expoentially from this start", Directives: gti.Directives{}, Tag: ""}},
+ {"MaxTime", >i.Field{Name: "MaxTime", Type: "float64", LocalType: "float64", Doc: "time when peak conductance occurs, in TimeInc units", Directives: gti.Directives{}, Tag: "inactive:\"+\""}},
+ {"TauFact", >i.Field{Name: "TauFact", Type: "float64", LocalType: "float64", Doc: "time constant factor used in integration: (Decay / Rise) ^ (Rise / (Decay - Rise))", Directives: gti.Directives{}, Tag: "inactive:\"+\""}},
+ {"TimeSteps", >i.Field{Name: "TimeSteps", Type: "int", LocalType: "int", Doc: "total number of time steps to take", Directives: gti.Directives{}, Tag: ""}},
+ {"TimeInc", >i.Field{Name: "TimeInc", Type: "float64", LocalType: "float64", Doc: "time increment per step", Directives: gti.Directives{}, Tag: ""}},
+ {"VGTable", >i.Field{Name: "VGTable", Type: "*goki.dev/etable/v2/etable.Table", LocalType: "*etable.Table", Doc: "table for plot", Directives: gti.Directives{}, Tag: "view:\"no-inline\""}},
+ {"SGTable", >i.Field{Name: "SGTable", Type: "*goki.dev/etable/v2/etable.Table", LocalType: "*etable.Table", Doc: "table for plot", Directives: gti.Directives{}, Tag: "view:\"no-inline\""}},
+ {"TimeTable", >i.Field{Name: "TimeTable", Type: "*goki.dev/etable/v2/etable.Table", LocalType: "*etable.Table", Doc: "table for plot", Directives: gti.Directives{}, Tag: "view:\"no-inline\""}},
+ {"VGPlot", >i.Field{Name: "VGPlot", Type: "*goki.dev/etable/v2/eplot.Plot2D", LocalType: "*eplot.Plot2D", Doc: "the plot", Directives: gti.Directives{}, Tag: "view:\"-\""}},
+ {"SGPlot", >i.Field{Name: "SGPlot", Type: "*goki.dev/etable/v2/eplot.Plot2D", LocalType: "*eplot.Plot2D", Doc: "the plot", Directives: gti.Directives{}, Tag: "view:\"-\""}},
+ {"TimePlot", >i.Field{Name: "TimePlot", Type: "*goki.dev/etable/v2/eplot.Plot2D", LocalType: "*eplot.Plot2D", Doc: "the plot", Directives: gti.Directives{}, Tag: "view:\"-\""}},
+ }),
+ Embeds: ordmap.Make([]ordmap.KeyVal[string, *gti.Field]{}),
+ Methods: ordmap.Make([]ordmap.KeyVal[string, *gti.Method]{
+ {"VGRun", >i.Method{Name: "VGRun", Doc: "VGRun runs the V-G equation.", Directives: gti.Directives{
+ >i.Directive{Tool: "gti", Directive: "add", Args: []string{}},
+ }, Args: ordmap.Make([]ordmap.KeyVal[string, *gti.Field]{}), Returns: ordmap.Make([]ordmap.KeyVal[string, *gti.Field]{})}},
+ {"SGRun", >i.Method{Name: "SGRun", Doc: "SGRun runs the spike-g equation.", Directives: gti.Directives{
+ >i.Directive{Tool: "gti", Directive: "add", Args: []string{}},
+ }, Args: ordmap.Make([]ordmap.KeyVal[string, *gti.Field]{}), Returns: ordmap.Make([]ordmap.KeyVal[string, *gti.Field]{})}},
+ {"TimeRun", >i.Method{Name: "TimeRun", Doc: "TimeRun runs the equation.", Directives: gti.Directives{
+ >i.Directive{Tool: "gti", Directive: "add", Args: []string{}},
+ }, Args: ordmap.Make([]ordmap.KeyVal[string, *gti.Field]{}), Returns: ordmap.Make([]ordmap.KeyVal[string, *gti.Field]{})}},
+ }),
+})
diff --git a/chans/gtigen.go b/chans/gtigen.go
new file mode 100644
index 000000000..069589ed9
--- /dev/null
+++ b/chans/gtigen.go
@@ -0,0 +1,237 @@
+// Code generated by "goki generate -add-types"; DO NOT EDIT.
+
+package chans
+
+import (
+ "goki.dev/gti"
+ "goki.dev/ordmap"
+)
+
+var _ = gti.AddType(&gti.Type{
+ Name: "github.com/emer/axon/chans.AKParams",
+ ShortName: "chans.AKParams",
+ IDName: "ak-params",
+ Doc: "AKParams control an A-type K channel, which is voltage gated with maximal\nactivation around -37 mV. It has two state variables, M (v-gated opening)\nand H (v-gated closing), which integrate with fast and slow time constants,\nrespectively. H relatively quickly hits an asymptotic level of inactivation\nfor sustained activity patterns.\nIt is particularly important for counteracting the excitatory effects of\nvoltage gated calcium channels which can otherwise drive runaway excitatory currents.\nSee AKsParams for a much simpler version that works fine when full AP-like spikes are\nnot simulated, as in our standard axon models.",
+ Directives: gti.Directives{},
+ Fields: ordmap.Make([]ordmap.KeyVal[string, *gti.Field]{
+ {"Gbar", >i.Field{Name: "Gbar", Type: "float32", LocalType: "float32", Doc: "strength of AK current", Directives: gti.Directives{}, Tag: "def:\"1,0.1,0.01\""}},
+ {"Beta", >i.Field{Name: "Beta", Type: "float32", LocalType: "float32", Doc: "multiplier for the beta term; 0.01446 for distal, 0.02039 for proximal dendrites", Directives: gti.Directives{}, Tag: "viewif:\"Gbar>0\" def:\"0.01446,02039\""}},
+ {"Dm", >i.Field{Name: "Dm", Type: "float32", LocalType: "float32", Doc: "Dm factor: 0.5 for distal, 0.25 for proximal", Directives: gti.Directives{}, Tag: "viewif:\"Gbar>0\" def:\"0.5,0.25\""}},
+ {"Koff", >i.Field{Name: "Koff", Type: "float32", LocalType: "float32", Doc: "offset for K, 1.8 for distal, 1.5 for proximal", Directives: gti.Directives{}, Tag: "viewif:\"Gbar>0\" def:\"1.8,1.5\""}},
+ {"Voff", >i.Field{Name: "Voff", Type: "float32", LocalType: "float32", Doc: "voltage offset for alpha and beta functions: 1 for distal, 11 for proximal", Directives: gti.Directives{}, Tag: "viewif:\"Gbar>0\" def:\"1,11\""}},
+ {"Hf", >i.Field{Name: "Hf", Type: "float32", LocalType: "float32", Doc: "h multiplier factor, 0.1133 for distal, 0.1112 for proximal", Directives: gti.Directives{}, Tag: "viewif:\"Gbar>0\" def:\"0.1133,0.1112\""}},
+ {"pad", >i.Field{Name: "pad", Type: "float32", LocalType: "float32", Doc: "", Directives: gti.Directives{}, Tag: ""}},
+ }),
+ Embeds: ordmap.Make([]ordmap.KeyVal[string, *gti.Field]{}),
+ Methods: ordmap.Make([]ordmap.KeyVal[string, *gti.Method]{}),
+})
+
+var _ = gti.AddType(&gti.Type{
+ Name: "github.com/emer/axon/chans.AKsParams",
+ ShortName: "chans.AKsParams",
+ IDName: "a-ks-params",
+ Doc: "AKsParams provides a highly simplified stateless A-type K channel\nthat only has the voltage-gated activation (M) dynamic with a cutoff\nthat ends up capturing a close approximation to the much more complex AK function.\nThis is voltage gated with maximal activation around -37 mV.\nIt is particularly important for counteracting the excitatory effects of\nvoltage gated calcium channels which can otherwise drive runaway excitatory currents.",
+ Directives: gti.Directives{
+ &gti.Directive{Tool: "gosl", Directive: "start", Args: []string{"chans"}},
+ },
+ Fields: ordmap.Make([]ordmap.KeyVal[string, *gti.Field]{
+ {"Gbar", >i.Field{Name: "Gbar", Type: "float32", LocalType: "float32", Doc: "strength of AK current", Directives: gti.Directives{}, Tag: "def:\"2,0.1,0.01\""}},
+ {"Hf", >i.Field{Name: "Hf", Type: "float32", LocalType: "float32", Doc: "H factor as a constant multiplier on overall M factor result -- rescales M to level consistent with H being present at full strength", Directives: gti.Directives{}, Tag: "viewif:\"Gbar>0\" def:\"0.076\""}},
+ {"Mf", >i.Field{Name: "Mf", Type: "float32", LocalType: "float32", Doc: "multiplier for M -- determines slope of function", Directives: gti.Directives{}, Tag: "viewif:\"Gbar>0\" def:\"0.075\""}},
+ {"Voff", >i.Field{Name: "Voff", Type: "float32", LocalType: "float32", Doc: "voltage offset in biological units for M function", Directives: gti.Directives{}, Tag: "viewif:\"Gbar>0\" def:\"2\""}},
+ {"Vmax", >i.Field{Name: "Vmax", Type: "float32", LocalType: "float32", Doc: "", Directives: gti.Directives{}, Tag: "viewif:\"Gbar>0\" def:-37\" desc:\"voltage level of maximum channel opening -- stays flat above that\""}},
+ {"pad", >i.Field{Name: "pad", Type: "int32", LocalType: "int32", Doc: "", Directives: gti.Directives{}, Tag: ""}},
+ }),
+ Embeds: ordmap.Make([]ordmap.KeyVal[string, *gti.Field]{}),
+ Methods: ordmap.Make([]ordmap.KeyVal[string, *gti.Method]{}),
+})
+
+var _ = gti.AddType(&gti.Type{
+ Name: "github.com/emer/axon/chans.Chans",
+ ShortName: "chans.Chans",
+ IDName: "chans",
+ Doc: "Chans are ion channels used in computing point-neuron activation function",
+ Directives: gti.Directives{
+ &gti.Directive{Tool: "go", Directive: "generate", Args: []string{"goki", "generate", "-add-types"}},
+ &gti.Directive{Tool: "gosl", Directive: "hlsl", Args: []string{"chans"}},
+ &gti.Directive{Tool: "gosl", Directive: "end", Args: []string{"chans"}},
+ &gti.Directive{Tool: "gosl", Directive: "start", Args: []string{"chans"}},
+ },
+ Fields: ordmap.Make([]ordmap.KeyVal[string, *gti.Field]{
+ {"E", >i.Field{Name: "E", Type: "float32", LocalType: "float32", Doc: "excitatory sodium (Na) AMPA channels activated by synaptic glutamate", Directives: gti.Directives{}, Tag: ""}},
+ {"L", >i.Field{Name: "L", Type: "float32", LocalType: "float32", Doc: "constant leak (potassium, K+) channels -- determines resting potential (typically higher than resting potential of K)", Directives: gti.Directives{}, Tag: ""}},
+ {"I", >i.Field{Name: "I", Type: "float32", LocalType: "float32", Doc: "inhibitory chloride (Cl-) channels activated by synaptic GABA", Directives: gti.Directives{}, Tag: ""}},
+ {"K", >i.Field{Name: "K", Type: "float32", LocalType: "float32", Doc: "gated / active potassium channels -- typically hyperpolarizing relative to leak / rest", Directives: gti.Directives{}, Tag: ""}},
+ }),
+ Embeds: ordmap.Make([]ordmap.KeyVal[string, *gti.Field]{}),
+ Methods: ordmap.Make([]ordmap.KeyVal[string, *gti.Method]{}),
+})
+
+var _ = gti.AddType(&gti.Type{
+ Name: "github.com/emer/axon/chans.GABABParams",
+ ShortName: "chans.GABABParams",
+ IDName: "gabab-params",
+ Doc: "GABABParams control the GABAB dynamics in PFC Maint neurons,\nbased on Brunel & Wang (2001) parameters.",
+ Directives: gti.Directives{
+ &gti.Directive{Tool: "gosl", Directive: "start", Args: []string{"chans"}},
+ },
+ Fields: ordmap.Make([]ordmap.KeyVal[string, *gti.Field]{
+ {"Gbar", >i.Field{Name: "Gbar", Type: "float32", LocalType: "float32", Doc: "overall strength multiplier of GABA-B current. The 0.015 default is a high value that works well in smaller networks -- larger networks may benefit from lower levels (e.g., 0.012).", Directives: gti.Directives{}, Tag: "def:\"0,0.012,0.015\""}},
+ {"RiseTau", >i.Field{Name: "RiseTau", Type: "float32", LocalType: "float32", Doc: "rise time for bi-exponential time dynamics of GABA-B", Directives: gti.Directives{}, Tag: "viewif:\"Gbar>0\" def:\"45\""}},
+ {"DecayTau", >i.Field{Name: "DecayTau", Type: "float32", LocalType: "float32", Doc: "decay time for bi-exponential time dynamics of GABA-B", Directives: gti.Directives{}, Tag: "viewif:\"Gbar>0\" def:\"50\""}},
+ {"Gbase", >i.Field{Name: "Gbase", Type: "float32", LocalType: "float32", Doc: "baseline level of GABA-B channels open independent of inhibitory input (is added to spiking-produced conductance)", Directives: gti.Directives{}, Tag: "viewif:\"Gbar>0\" def:\"0.2\""}},
+ {"GiSpike", >i.Field{Name: "GiSpike", Type: "float32", LocalType: "float32", Doc: "multiplier for converting Gi to equivalent GABA spikes", Directives: gti.Directives{}, Tag: "viewif:\"Gbar>0\" def:\"10\""}},
+ {"MaxTime", >i.Field{Name: "MaxTime", Type: "float32", LocalType: "float32", Doc: "time offset when peak conductance occurs, in msec, computed from RiseTau and DecayTau", Directives: gti.Directives{}, Tag: "viewif:\"Gbar>0\" inactive:\"+\""}},
+ {"TauFact", >i.Field{Name: "TauFact", Type: "float32", LocalType: "float32", Doc: "time constant factor used in integration: (Decay / Rise) ^ (Rise / (Decay - Rise))", Directives: gti.Directives{}, Tag: "view:\"-\""}},
+ {"RiseDt", >i.Field{Name: "RiseDt", Type: "float32", LocalType: "float32", Doc: "1/Tau", Directives: gti.Directives{}, Tag: "view:\"-\" inactive:\"+\""}},
+ {"DecayDt", >i.Field{Name: "DecayDt", Type: "float32", LocalType: "float32", Doc: "1/Tau", Directives: gti.Directives{}, Tag: "view:\"-\" inactive:\"+\""}},
+ {"pad", >i.Field{Name: "pad", Type: "float32", LocalType: "float32", Doc: "", Directives: gti.Directives{}, Tag: ""}},
+ }),
+ Embeds: ordmap.Make([]ordmap.KeyVal[string, *gti.Field]{}),
+ Methods: ordmap.Make([]ordmap.KeyVal[string, *gti.Method]{}),
+})
+
+var _ = gti.AddType(&gti.Type{
+ Name: "github.com/emer/axon/chans.KNaParams",
+ ShortName: "chans.KNaParams",
+ IDName: "k-na-params",
+ Doc: "KNaParams implements sodium (Na) gated potassium (K) currents\nthat drive adaptation (accommodation) in neural firing.\nAs neurons spike, driving an influx of Na, this activates\nthe K channels, which, like leak channels, pull the membrane\npotential back down toward rest (or even below).",
+ Directives: gti.Directives{
+ &gti.Directive{Tool: "gosl", Directive: "start", Args: []string{"chans"}},
+ },
+ Fields: ordmap.Make([]ordmap.KeyVal[string, *gti.Field]{
+ {"On", >i.Field{Name: "On", Type: "goki.dev/gosl/v2/slbool.Bool", LocalType: "slbool.Bool", Doc: "if On, use this component of K-Na adaptation", Directives: gti.Directives{}, Tag: ""}},
+ {"Rise", >i.Field{Name: "Rise", Type: "float32", LocalType: "float32", Doc: "Rise rate of fast time-scale adaptation as function of Na concentration due to spiking -- directly multiplies -- 1/rise = tau for rise rate", Directives: gti.Directives{}, Tag: "viewif:\"On\""}},
+ {"Max", >i.Field{Name: "Max", Type: "float32", LocalType: "float32", Doc: "Maximum potential conductance of fast K channels -- divide nA biological value by 10 for the normalized units here", Directives: gti.Directives{}, Tag: "viewif:\"On\""}},
+ {"Tau", >i.Field{Name: "Tau", Type: "float32", LocalType: "float32", Doc: "time constant in cycles for decay of adaptation, which should be milliseconds typically (tau is roughly how long it takes for value to change significantly -- 1.4x the half-life)", Directives: gti.Directives{}, Tag: "viewif:\"On\""}},
+ {"Dt", >i.Field{Name: "Dt", Type: "float32", LocalType: "float32", Doc: "1/Tau rate constant", Directives: gti.Directives{}, Tag: "view:\"-\""}},
+ {"pad", >i.Field{Name: "pad", Type: "int32", LocalType: "int32", Doc: "", Directives: gti.Directives{}, Tag: ""}},
+ }),
+ Embeds: ordmap.Make([]ordmap.KeyVal[string, *gti.Field]{}),
+ Methods: ordmap.Make([]ordmap.KeyVal[string, *gti.Method]{}),
+})
+
+var _ = gti.AddType(&gti.Type{
+ Name: "github.com/emer/axon/chans.KNaMedSlow",
+ ShortName: "chans.KNaMedSlow",
+ IDName: "k-na-med-slow",
+ Doc: "KNaMedSlow describes sodium-gated potassium channel adaptation mechanism.\nEvidence supports 2 different time constants:\nSlick (medium) and Slack (slow)",
+ Directives: gti.Directives{},
+ Fields: ordmap.Make([]ordmap.KeyVal[string, *gti.Field]{
+ {"On", >i.Field{Name: "On", Type: "goki.dev/gosl/v2/slbool.Bool", LocalType: "slbool.Bool", Doc: "if On, apply K-Na adaptation", Directives: gti.Directives{}, Tag: ""}},
+ {"TrialSlow", >i.Field{Name: "TrialSlow", Type: "goki.dev/gosl/v2/slbool.Bool", LocalType: "slbool.Bool", Doc: "engages an optional version of Slow that discretely turns on at start of new trial (NewState): nrn.GknaSlow += Slow.Max * nrn.SpkPrv -- achieves a strong form of adaptation", Directives: gti.Directives{}, Tag: ""}},
+ {"pad", >i.Field{Name: "pad", Type: "int32", LocalType: "int32", Doc: "", Directives: gti.Directives{}, Tag: ""}},
+ {"Med", >i.Field{Name: "Med", Type: "github.com/emer/axon/chans.KNaParams", LocalType: "KNaParams", Doc: "medium time-scale adaptation", Directives: gti.Directives{}, Tag: "viewif:\"On\" view:\"inline\""}},
+ {"Slow", >i.Field{Name: "Slow", Type: "github.com/emer/axon/chans.KNaParams", LocalType: "KNaParams", Doc: "slow time-scale adaptation", Directives: gti.Directives{}, Tag: "viewif:\"On\" view:\"inline\""}},
+ }),
+ Embeds: ordmap.Make([]ordmap.KeyVal[string, *gti.Field]{}),
+ Methods: ordmap.Make([]ordmap.KeyVal[string, *gti.Method]{}),
+})
+
+var _ = gti.AddType(&gti.Type{
+ Name: "github.com/emer/axon/chans.MahpParams",
+ ShortName: "chans.MahpParams",
+ IDName: "mahp-params",
+ Doc: "MahpParams implements an M-type medium afterhyperpolarizing (mAHP) channel,\nwhere m also stands for muscarinic due to the ACh inactivation of this channel.\nIt has a slow activation and deactivation time constant, and opens at a lowish\nmembrane potential.\nThere is one gating variable n updated over time with a tau that is also voltage dependent.\nThe infinite-time value of n is voltage dependent according to a logistic function\nof the membrane potential, centered at Voff with slope Vslope.",
+ Directives: gti.Directives{
+ &gti.Directive{Tool: "gosl", Directive: "start", Args: []string{"chans"}},
+ },
+ Fields: ordmap.Make([]ordmap.KeyVal[string, *gti.Field]{
+ {"Gbar", >i.Field{Name: "Gbar", Type: "float32", LocalType: "float32", Doc: "strength of mAHP current", Directives: gti.Directives{}, Tag: ""}},
+ {"Voff", >i.Field{Name: "Voff", Type: "float32", LocalType: "float32", Doc: "voltage offset (threshold) in biological units for infinite time N gating function -- where the gate is at 50% strength", Directives: gti.Directives{}, Tag: "viewif:\"Gbar>0\" def:\"-30\""}},
+ {"Vslope", >i.Field{Name: "Vslope", Type: "float32", LocalType: "float32", Doc: "slope of the arget (infinite time) gating function", Directives: gti.Directives{}, Tag: "viewif:\"Gbar>0\" def:\"9\""}},
+ {"TauMax", >i.Field{Name: "TauMax", Type: "float32", LocalType: "float32", Doc: "maximum slow rate time constant in msec for activation / deactivation. The effective Tau is much slower -- 1/20th in original temp, and 1/60th in standard 37 C temp", Directives: gti.Directives{}, Tag: "viewif:\"Gbar>0\" def:\"1000\""}},
+ {"Tadj", >i.Field{Name: "Tadj", Type: "float32", LocalType: "float32", Doc: "temperature adjustment factor: assume temp = 37 C, whereas original units were at 23 C", Directives: gti.Directives{}, Tag: "viewif:\"Gbar>0\" view:\"-\" inactive:\"+\""}},
+ {"DtMax", >i.Field{Name: "DtMax", Type: "float32", LocalType: "float32", Doc: "1/Tau", Directives: gti.Directives{}, Tag: "view:\"-\" inactive:\"+\""}},
+ {"pad", >i.Field{Name: "pad", Type: "int32", LocalType: "int32", Doc: "", Directives: gti.Directives{}, Tag: ""}},
+ }),
+ Embeds: ordmap.Make([]ordmap.KeyVal[string, *gti.Field]{}),
+ Methods: ordmap.Make([]ordmap.KeyVal[string, *gti.Method]{}),
+})
+
+var _ = gti.AddType(&gti.Type{
+ Name: "github.com/emer/axon/chans.NMDAParams",
+ ShortName: "chans.NMDAParams",
+ IDName: "nmda-params",
+ Doc: "NMDAParams control the NMDA dynamics, based on Jahr & Stevens (1990) equations\nwhich are widely used in models, from Brunel & Wang (2001) to Sanders et al. (2013).\nThe overall conductance is a function of a voltage-dependent postsynaptic factor based\non Mg ion blockage, and presynaptic Glu-based opening, which in a simple model just\nincrements",
+ Directives: gti.Directives{
+ &gti.Directive{Tool: "gosl", Directive: "start", Args: []string{"chans"}},
+ },
+ Fields: ordmap.Make([]ordmap.KeyVal[string, *gti.Field]{
+ {"Gbar", >i.Field{Name: "Gbar", Type: "float32", LocalType: "float32", Doc: "overall multiplier for strength of NMDA current -- multiplies GnmdaSyn to get net conductance.", Directives: gti.Directives{}, Tag: "def:\"0,0.006,0.007\""}},
+ {"Tau", >i.Field{Name: "Tau", Type: "float32", LocalType: "float32", Doc: "decay time constant for NMDA channel activation -- rise time is 2 msec and not worth extra effort for biexponential. 30 fits the Urakubo et al (2008) model with ITau = 100, but 100 works better in practice is small networks so far.", Directives: gti.Directives{}, Tag: "viewif:\"Gbar>0\" def:\"30,50,100,200,300\""}},
+ {"ITau", >i.Field{Name: "ITau", Type: "float32", LocalType: "float32", Doc: "decay time constant for NMDA channel inhibition, which captures the Urakubo et al (2008) allosteric dynamics (100 fits their model well) -- set to 1 to eliminate that mechanism.", Directives: gti.Directives{}, Tag: "viewif:\"Gbar>0\" def:\"1,100\""}},
+ {"MgC", >i.Field{Name: "MgC", Type: "float32", LocalType: "float32", Doc: "magnesium ion concentration: Brunel & Wang (2001) and Sanders et al (2013) use 1 mM, based on Jahr & Stevens (1990). Urakubo et al (2008) use 1.5 mM. 1.4 with Voff = 5 works best so far in large models, 1.2, Voff = 0 best in smaller nets.", Directives: gti.Directives{}, Tag: "viewif:\"Gbar>0\" def:\"1:1.5\""}},
+ {"Voff", >i.Field{Name: "Voff", Type: "float32", LocalType: "float32", Doc: "offset in membrane potential in biological units for voltage-dependent functions. 5 corresponds to the -65 mV rest, -45 threshold of the Urakubo et al (2008) model. 5 was used before in a buggy version of NMDA equation -- 0 is new default.", Directives: gti.Directives{}, Tag: "viewif:\"Gbar>0\" def:\"0\""}},
+ {"Dt", >i.Field{Name: "Dt", Type: "float32", LocalType: "float32", Doc: "rate = 1 / tau", Directives: gti.Directives{}, Tag: "view:\"-\" json:\"-\" xml:\"-\""}},
+ {"IDt", >i.Field{Name: "IDt", Type: "float32", LocalType: "float32", Doc: "rate = 1 / tau", Directives: gti.Directives{}, Tag: "view:\"-\" json:\"-\" xml:\"-\""}},
+ {"MgFact", >i.Field{Name: "MgFact", Type: "float32", LocalType: "float32", Doc: "MgFact = MgC / 3.57", Directives: gti.Directives{}, Tag: "view:\"-\" json:\"-\" xml:\"-\""}},
+ }),
+ Embeds: ordmap.Make([]ordmap.KeyVal[string, *gti.Field]{}),
+ Methods: ordmap.Make([]ordmap.KeyVal[string, *gti.Method]{}),
+})
+
+var _ = gti.AddType(&gti.Type{
+ Name: "github.com/emer/axon/chans.SahpParams",
+ ShortName: "chans.SahpParams",
+ IDName: "sahp-params",
+ Doc: "SahpParams implements a slow afterhyperpolarizing (sAHP) channel,\nIt has a slowly accumulating calcium value, aggregated at the\ntheta cycle level, that then drives the logistic gating function,\nso that it only activates after a significant accumulation.\nAfter which point it decays.\nFor the theta-cycle updating, the normal m-type tau is all within\nthe scope of a single theta cycle, so we just omit the time integration\nof the n gating value, but tau is computed in any case.",
+ Directives: gti.Directives{
+ &gti.Directive{Tool: "gosl", Directive: "start", Args: []string{"chans"}},
+ },
+ Fields: ordmap.Make([]ordmap.KeyVal[string, *gti.Field]{
+ {"Gbar", >i.Field{Name: "Gbar", Type: "float32", LocalType: "float32", Doc: "strength of sAHP current", Directives: gti.Directives{}, Tag: "def:\"0.05,0.1\""}},
+ {"CaTau", >i.Field{Name: "CaTau", Type: "float32", LocalType: "float32", Doc: "time constant for integrating Ca across theta cycles", Directives: gti.Directives{}, Tag: "viewif:\"Gbar>0\" def:\"5,10\""}},
+ {"Off", >i.Field{Name: "Off", Type: "float32", LocalType: "float32", Doc: "integrated Ca offset (threshold) for infinite time N gating function -- where the gate is at 50% strength", Directives: gti.Directives{}, Tag: "viewif:\"Gbar>0\" def:\"0.8\""}},
+ {"Slope", >i.Field{Name: "Slope", Type: "float32", LocalType: "float32", Doc: "slope of the infinite time logistic gating function", Directives: gti.Directives{}, Tag: "viewif:\"Gbar>0\" def:\"0.02\""}},
+ {"TauMax", >i.Field{Name: "TauMax", Type: "float32", LocalType: "float32", Doc: "maximum slow rate time constant in msec for activation / deactivation. The effective Tau is much slower -- 1/20th in original temp, and 1/60th in standard 37 C temp", Directives: gti.Directives{}, Tag: "viewif:\"Gbar>0\" def:\"1\""}},
+ {"CaDt", >i.Field{Name: "CaDt", Type: "float32", LocalType: "float32", Doc: "1/Tau", Directives: gti.Directives{}, Tag: "view:\"-\" inactive:\"+\""}},
+ {"DtMax", >i.Field{Name: "DtMax", Type: "float32", LocalType: "float32", Doc: "1/Tau", Directives: gti.Directives{}, Tag: "view:\"-\" inactive:\"+\""}},
+ {"pad", >i.Field{Name: "pad", Type: "int32", LocalType: "int32", Doc: "", Directives: gti.Directives{}, Tag: ""}},
+ }),
+ Embeds: ordmap.Make([]ordmap.KeyVal[string, *gti.Field]{}),
+ Methods: ordmap.Make([]ordmap.KeyVal[string, *gti.Method]{}),
+})
+
+var _ = gti.AddType(&gti.Type{
+ Name: "github.com/emer/axon/chans.SKCaParams",
+ ShortName: "chans.SKCaParams",
+ IDName: "sk-ca-params",
+ Doc: "SKCaParams describes the small-conductance calcium-activated potassium channel,\nactivated by intracellular stores in a way that drives pauses in firing,\nand can require inactivity to recharge the Ca available for release.\nThese intracellular stores can release quickly, have a slow decay once released,\nand the stores can take a while to rebuild, leading to rapidly triggered,\nlong-lasting pauses that don't recur until stores have rebuilt, which is the\nobserved pattern of firing of STNp pausing neurons.\nCaIn = intracellular stores available for release; CaR = released amount from stores\nCaM = K channel conductance gating factor driven by CaR binding,\ncomputed using the Hill equations described in Fujita et al (2012), Gunay et al (2008)\n(also Muddapu & Chakravarthy, 2021): X^h / (X^h + C50^h) where h ~= 4 (hard coded)",
+ Directives: gti.Directives{
+ &gti.Directive{Tool: "gosl", Directive: "start", Args: []string{"chans"}},
+ },
+ Fields: ordmap.Make([]ordmap.KeyVal[string, *gti.Field]{
+ {"Gbar", >i.Field{Name: "Gbar", Type: "float32", LocalType: "float32", Doc: "overall strength of sKCa current -- inactive if 0", Directives: gti.Directives{}, Tag: "def:\"0,2,3\""}},
+ {"C50", >i.Field{Name: "C50", Type: "float32", LocalType: "float32", Doc: "50% Ca concentration baseline value in Hill equation -- set this to level that activates at reasonable levels of SKCaR", Directives: gti.Directives{}, Tag: "viewif:\"Gbar>0\" def:\"0.4,0.5\""}},
+ {"ActTau", >i.Field{Name: "ActTau", Type: "float32", LocalType: "float32", Doc: "K channel gating factor activation time constant -- roughly 5-15 msec in literature", Directives: gti.Directives{}, Tag: "viewif:\"Gbar>0\" def:\"15\""}},
+ {"DeTau", >i.Field{Name: "DeTau", Type: "float32", LocalType: "float32", Doc: "K channel gating factor deactivation time constant -- roughly 30-50 msec in literature", Directives: gti.Directives{}, Tag: "viewif:\"Gbar>0\" def:\"30\""}},
+ {"KCaR", >i.Field{Name: "KCaR", Type: "float32", LocalType: "float32", Doc: "proportion of CaIn intracellular stores that are released per spike, going into CaR", Directives: gti.Directives{}, Tag: "viewif:\"Gbar>0\" def:\"0.4,0.8\""}},
+ {"CaRDecayTau", >i.Field{Name: "CaRDecayTau", Type: "float32", LocalType: "float32", Doc: "SKCaR released calcium decay time constant", Directives: gti.Directives{}, Tag: "viewif:\"Gbar>0\" def:\"150,200\""}},
+ {"CaInThr", >i.Field{Name: "CaInThr", Type: "float32", LocalType: "float32", Doc: "level of time-integrated spiking activity (CaSpkD) below which CaIn intracelluar stores are replenished -- a low threshold can be used to require minimal activity to recharge -- set to a high value (e.g., 10) for constant recharge.", Directives: gti.Directives{}, Tag: "viewif:\"Gbar>0\" def:\"0.01\""}},
+ {"CaInTau", >i.Field{Name: "CaInTau", Type: "float32", LocalType: "float32", Doc: "time constant in msec for storing CaIn when activity is below CaInThr", Directives: gti.Directives{}, Tag: "viewif:\"Gbar>0\" def:\"50\""}},
+ {"ActDt", >i.Field{Name: "ActDt", Type: "float32", LocalType: "float32", Doc: "rate = 1 / tau", Directives: gti.Directives{}, Tag: "view:\"-\" json:\"-\" xml:\"-\""}},
+ {"DeDt", >i.Field{Name: "DeDt", Type: "float32", LocalType: "float32", Doc: "rate = 1 / tau", Directives: gti.Directives{}, Tag: "view:\"-\" json:\"-\" xml:\"-\""}},
+ {"CaRDecayDt", >i.Field{Name: "CaRDecayDt", Type: "float32", LocalType: "float32", Doc: "rate = 1 / tau", Directives: gti.Directives{}, Tag: "view:\"-\" json:\"-\" xml:\"-\""}},
+ {"CaInDt", >i.Field{Name: "CaInDt", Type: "float32", LocalType: "float32", Doc: "rate = 1 / tau", Directives: gti.Directives{}, Tag: "view:\"-\" json:\"-\" xml:\"-\""}},
+ }),
+ Embeds: ordmap.Make([]ordmap.KeyVal[string, *gti.Field]{}),
+ Methods: ordmap.Make([]ordmap.KeyVal[string, *gti.Method]{}),
+})
+
+var _ = gti.AddType(&gti.Type{
+ Name: "github.com/emer/axon/chans.VGCCParams",
+ ShortName: "chans.VGCCParams",
+ IDName: "vgcc-params",
+ Doc: "VGCCParams control the standard L-type Ca channel\nAll functions based on Urakubo et al (2008).\nSource code available at http://kurodalab.bs.s.u-tokyo.ac.jp/info/STDP/Urakubo2008.tar.gz.\nIn particular look at the file MODEL/Poirazi_cell/CaL.g.",
+ Directives: gti.Directives{
+ &gti.Directive{Tool: "gosl", Directive: "start", Args: []string{"chans"}},
+ },
+ Fields: ordmap.Make([]ordmap.KeyVal[string, *gti.Field]{
+ {"Gbar", >i.Field{Name: "Gbar", Type: "float32", LocalType: "float32", Doc: "strength of VGCC current -- 0.12 value is from Urakubo et al (2008) model -- best fits actual model behavior using axon equations (1.5 nominal in that model), 0.02 works better in practice for not getting stuck in high plateau firing", Directives: gti.Directives{}, Tag: "def:\"0.02,0.12\""}},
+ {"Ca", >i.Field{Name: "Ca", Type: "float32", LocalType: "float32", Doc: "calcium from conductance factor -- important for learning contribution of VGCC", Directives: gti.Directives{}, Tag: "viewif:\"Gbar>0\" def:\"25\""}},
+ {"pad", >i.Field{Name: "pad", Type: "int32", LocalType: "int32", Doc: "", Directives: gti.Directives{}, Tag: ""}},
+ }),
+ Embeds: ordmap.Make([]ordmap.KeyVal[string, *gti.Field]{}),
+ Methods: ordmap.Make([]ordmap.KeyVal[string, *gti.Method]{}),
+})
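
The generated metadata above mirrors the struct tags, where def:"..." now carries the defaults that the removed desc tags used to duplicate in prose. Independently of gti, those tags remain readable with the standard reflect package; a hypothetical sketch using a local stand-in for chans.GABABParams:

    package main

    import (
        "fmt"
        "reflect"
    )

    // GABABParams stands in for chans.GABABParams: the def tags carry
    // the defaults that used to be repeated inside desc strings.
    type GABABParams struct {
        Gbar    float32 `def:"0,0.012,0.015"`
        RiseTau float32 `viewif:"Gbar>0" def:"45"`
    }

    func main() {
        t := reflect.TypeOf(GABABParams{})
        for i := 0; i < t.NumField(); i++ {
            f := t.Field(i)
            if def, ok := f.Tag.Lookup("def"); ok {
                fmt.Printf("%s: default(s) %s\n", f.Name, def)
            }
        }
    }
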
diff --git a/chans/kna.go b/chans/kna.go
index 4539b84da..0a30615c8 100644
--- a/chans/kna.go
+++ b/chans/kna.go
@@ -4,7 +4,7 @@
package chans
-import "github.com/goki/gosl/slbool"
+import "goki.dev/gosl/v2/slbool"
//gosl: start chans
@@ -16,19 +16,19 @@ import "github.com/goki/gosl/slbool"
type KNaParams struct {
// if On, use this component of K-Na adaptation
- On slbool.Bool `desc:"if On, use this component of K-Na adaptation"`
+ On slbool.Bool
- // [viewif: On] Rise rate of fast time-scale adaptation as function of Na concentration due to spiking -- directly multiplies -- 1/rise = tau for rise rate
- Rise float32 `viewif:"On" desc:"Rise rate of fast time-scale adaptation as function of Na concentration due to spiking -- directly multiplies -- 1/rise = tau for rise rate"`
+ // Rise rate of fast time-scale adaptation as function of Na concentration due to spiking -- directly multiplies -- 1/rise = tau for rise rate
+ Rise float32 `viewif:"On"`
- // [viewif: On] Maximum potential conductance of fast K channels -- divide nA biological value by 10 for the normalized units here
- Max float32 `viewif:"On" desc:"Maximum potential conductance of fast K channels -- divide nA biological value by 10 for the normalized units here"`
+ // Maximum potential conductance of fast K channels -- divide nA biological value by 10 for the normalized units here
+ Max float32 `viewif:"On"`
- // [viewif: On] time constant in cycles for decay of adaptation, which should be milliseconds typically (tau is roughly how long it takes for value to change significantly -- 1.4x the half-life)
- Tau float32 `viewif:"On" desc:"time constant in cycles for decay of adaptation, which should be milliseconds typically (tau is roughly how long it takes for value to change significantly -- 1.4x the half-life)"`
+ // time constant in cycles for decay of adaptation, which should be milliseconds typically (tau is roughly how long it takes for value to change significantly -- 1.4x the half-life)
+ Tau float32 `viewif:"On"`
- // [view: -] 1/Tau rate constant
- Dt float32 `view:"-" desc:"1/Tau rate constant"`
+ // 1/Tau rate constant
+ Dt float32 `view:"-"`
pad, pad1, pad2 int32
}
@@ -64,18 +64,18 @@ func (ka *KNaParams) GcFmSpike(gKNa *float32, spike bool) {
type KNaMedSlow struct {
// if On, apply K-Na adaptation
- On slbool.Bool `desc:"if On, apply K-Na adaptation"`
+ On slbool.Bool
// engages an optional version of Slow that discretely turns on at start of new trial (NewState): nrn.GknaSlow += Slow.Max * nrn.SpkPrv -- achieves a strong form of adaptation
- TrialSlow slbool.Bool `desc:"engages an optional version of Slow that discretely turns on at start of new trial (NewState): nrn.GknaSlow += Slow.Max * nrn.SpkPrv -- achieves a strong form of adaptation"`
+ TrialSlow slbool.Bool
pad, pad1 int32
- // [view: inline] [viewif: On] medium time-scale adaptation
- Med KNaParams `viewif:"On" view:"inline" desc:"medium time-scale adaptation"`
+ // medium time-scale adaptation
+ Med KNaParams `viewif:"On" view:"inline"`
- // [view: inline] [viewif: On] slow time-scale adaptation
- Slow KNaParams `viewif:"On" view:"inline" desc:"slow time-scale adaptation"`
+ // slow time-scale adaptation
+ Slow KNaParams `viewif:"On" view:"inline"`
}
func (ka *KNaMedSlow) Defaults() {
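
For orientation, the KNaParams fields describe a simple rise/decay process: each spike drives the conductance toward Max at rate Rise, and it decays back with time constant Tau (Dt = 1/Tau). A sketch mirroring GcFmSpike's signature, with the body written from the doc comments rather than copied from the package source:

    package main

    import "fmt"

    // KNaParams sketch: on a spike, conductance moves toward Max at
    // rate Rise; otherwise it decays with rate Dt = 1/Tau.
    type KNaParams struct {
        Rise, Max, Tau, Dt float32
    }

    func (ka *KNaParams) GcFmSpike(gKNa *float32, spike bool) {
        if spike {
            *gKNa += ka.Rise * (ka.Max - *gKNa)
        } else {
            *gKNa -= ka.Dt * *gKNa
        }
    }

    func main() {
        ka := KNaParams{Rise: 0.02, Max: 0.1, Tau: 200}
        ka.Dt = 1 / ka.Tau
        g := float32(0)
        for t := 0; t < 1000; t++ {
            ka.GcFmSpike(&g, t%20 == 0) // regular 50 Hz spiking
        }
        fmt.Printf("adapted gKNa after 1 sec: %.4f\n", g)
    }
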
diff --git a/chans/mahp.go b/chans/mahp.go
index b7560211c..06d0da11d 100644
--- a/chans/mahp.go
+++ b/chans/mahp.go
@@ -4,7 +4,7 @@
package chans
-import "github.com/goki/mat32"
+import "goki.dev/mat32/v2"
//gosl: start chans
@@ -18,22 +18,22 @@ import "github.com/goki/mat32"
type MahpParams struct {
// strength of mAHP current
- Gbar float32 `desc:"strength of mAHP current"`
+ Gbar float32
- // [def: -30] [viewif: Gbar>0] voltage offset (threshold) in biological units for infinite time N gating function -- where the gate is at 50% strength
- Voff float32 `viewif:"Gbar>0" def:"-30" desc:"voltage offset (threshold) in biological units for infinite time N gating function -- where the gate is at 50% strength"`
+ // voltage offset (threshold) in biological units for infinite time N gating function -- where the gate is at 50% strength
+ Voff float32 `viewif:"Gbar>0" def:"-30"`
- // [def: 9] [viewif: Gbar>0] slope of the arget (infinite time) gating function
- Vslope float32 `viewif:"Gbar>0" def:"9" desc:"slope of the arget (infinite time) gating function"`
+ // slope of the target (infinite time) gating function
+ Vslope float32 `viewif:"Gbar>0" def:"9"`
- // [def: 1000] [viewif: Gbar>0] maximum slow rate time constant in msec for activation / deactivation. The effective Tau is much slower -- 1/20th in original temp, and 1/60th in standard 37 C temp
- TauMax float32 `viewif:"Gbar>0" def:"1000" desc:"maximum slow rate time constant in msec for activation / deactivation. The effective Tau is much slower -- 1/20th in original temp, and 1/60th in standard 37 C temp"`
+ // maximum slow rate time constant in msec for activation / deactivation. The effective Tau is much slower -- 1/20th in original temp, and 1/60th in standard 37 C temp
+ TauMax float32 `viewif:"Gbar>0" def:"1000"`
- // [view: -] [viewif: Gbar>0] temperature adjustment factor: assume temp = 37 C, whereas original units were at 23 C
- Tadj float32 `viewif:"Gbar>0" view:"-" inactive:"+" desc:"temperature adjustment factor: assume temp = 37 C, whereas original units were at 23 C"`
+ // temperature adjustment factor: assume temp = 37 C, whereas original units were at 23 C
+ Tadj float32 `viewif:"Gbar>0" view:"-" inactive:"+"`
- // [view: -] 1/Tau
- DtMax float32 `view:"-" inactive:"+" desc:"1/Tau"`
+ // 1/Tau
+ DtMax float32 `view:"-" inactive:"+"`
pad, pad2 int32
}
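
The MahpParams doc describes the asymptotic n gate as a logistic function of membrane potential centered at Voff with slope Vslope; the voltage-dependent tau (bounded by TauMax, temperature-scaled by Tadj) is more involved and omitted here. A sketch of just that stated logistic relationship, using the default Voff = -30 and Vslope = 9:

    package main

    import (
        "fmt"
        "math"
    )

    // ninf is the infinite-time value of the mAHP n gating variable: a
    // logistic function of membrane potential, centered at voff (mV)
    // with slope vslope, per the MahpParams doc.
    func ninf(vmBio, voff, vslope float64) float64 {
        return 1.0 / (1.0 + math.Exp(-(vmBio-voff)/vslope))
    }

    func main() {
        voff, vslope := -30.0, 9.0 // MahpParams defaults
        for _, v := range []float64{-70, -50, -30, -10, 10} {
            fmt.Printf("Vm=%4.0f mV  Ninf=%.3f\n", v, ninf(v, voff, vslope))
        }
    }
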
diff --git a/chans/mahp_plot/gtigen.go b/chans/mahp_plot/gtigen.go
new file mode 100644
index 000000000..28d114390
--- /dev/null
+++ b/chans/mahp_plot/gtigen.go
@@ -0,0 +1,40 @@
+// Code generated by "goki generate -add-types"; DO NOT EDIT.
+
+package main
+
+import (
+ "goki.dev/gti"
+ "goki.dev/ordmap"
+)
+
+var _ = gti.AddType(&gti.Type{
+ Name: "main.Sim",
+ ShortName: "main.Sim",
+ IDName: "sim",
+ Doc: "Sim holds the params, table, etc",
+ Directives: gti.Directives{},
+ Fields: ordmap.Make([]ordmap.KeyVal[string, *gti.Field]{
+ {"Mahp", >i.Field{Name: "Mahp", Type: "github.com/emer/axon/chans.MahpParams", LocalType: "chans.MahpParams", Doc: "mAHP function", Directives: gti.Directives{}, Tag: "view:\"inline\""}},
+ {"Vstart", >i.Field{Name: "Vstart", Type: "float32", LocalType: "float32", Doc: "starting voltage", Directives: gti.Directives{}, Tag: "def:\"-100\""}},
+ {"Vend", >i.Field{Name: "Vend", Type: "float32", LocalType: "float32", Doc: "ending voltage", Directives: gti.Directives{}, Tag: "def:\"100\""}},
+ {"Vstep", >i.Field{Name: "Vstep", Type: "float32", LocalType: "float32", Doc: "voltage increment", Directives: gti.Directives{}, Tag: "def:\"1\""}},
+ {"TimeSteps", >i.Field{Name: "TimeSteps", Type: "int", LocalType: "int", Doc: "number of time steps", Directives: gti.Directives{}, Tag: ""}},
+ {"TimeSpike", >i.Field{Name: "TimeSpike", Type: "bool", LocalType: "bool", Doc: "do spiking instead of voltage ramp", Directives: gti.Directives{}, Tag: ""}},
+ {"SpikeFreq", >i.Field{Name: "SpikeFreq", Type: "float32", LocalType: "float32", Doc: "spiking frequency", Directives: gti.Directives{}, Tag: ""}},
+ {"TimeVstart", >i.Field{Name: "TimeVstart", Type: "float32", LocalType: "float32", Doc: "time-run starting membrane potential", Directives: gti.Directives{}, Tag: ""}},
+ {"TimeVend", >i.Field{Name: "TimeVend", Type: "float32", LocalType: "float32", Doc: "time-run ending membrane potential", Directives: gti.Directives{}, Tag: ""}},
+ {"Table", >i.Field{Name: "Table", Type: "*goki.dev/etable/v2/etable.Table", LocalType: "*etable.Table", Doc: "table for plot", Directives: gti.Directives{}, Tag: "view:\"no-inline\""}},
+ {"Plot", >i.Field{Name: "Plot", Type: "*goki.dev/etable/v2/eplot.Plot2D", LocalType: "*eplot.Plot2D", Doc: "the plot", Directives: gti.Directives{}, Tag: "view:\"-\""}},
+ {"TimeTable", >i.Field{Name: "TimeTable", Type: "*goki.dev/etable/v2/etable.Table", LocalType: "*etable.Table", Doc: "table for plot", Directives: gti.Directives{}, Tag: "view:\"no-inline\""}},
+ {"TimePlot", >i.Field{Name: "TimePlot", Type: "*goki.dev/etable/v2/eplot.Plot2D", LocalType: "*eplot.Plot2D", Doc: "the plot", Directives: gti.Directives{}, Tag: "view:\"-\""}},
+ }),
+ Embeds: ordmap.Make([]ordmap.KeyVal[string, *gti.Field]{}),
+ Methods: ordmap.Make([]ordmap.KeyVal[string, *gti.Method]{
+ {"VmRun", >i.Method{Name: "VmRun", Doc: "VmRun plots the equation as a function of V", Directives: gti.Directives{
+ >i.Directive{Tool: "gti", Directive: "add", Args: []string{}},
+ }, Args: ordmap.Make([]ordmap.KeyVal[string, *gti.Field]{}), Returns: ordmap.Make([]ordmap.KeyVal[string, *gti.Field]{})}},
+ {"TimeRun", >i.Method{Name: "TimeRun", Doc: "TimeRun runs the equation over time.", Directives: gti.Directives{
+ >i.Directive{Tool: "gti", Directive: "add", Args: []string{}},
+ }, Args: ordmap.Make([]ordmap.KeyVal[string, *gti.Field]{}), Returns: ordmap.Make([]ordmap.KeyVal[string, *gti.Field]{})}},
+ }),
+})
diff --git a/chans/mahp_plot/mahp_plot.go b/chans/mahp_plot/mahp_plot.go
index dd8e91a24..85f732a56 100644
--- a/chans/mahp_plot/mahp_plot.go
+++ b/chans/mahp_plot/mahp_plot.go
@@ -5,32 +5,30 @@
 // mahp_plot plots an equation updating over time in an etable.Table and Plot2D.
package main
+//go:generate goki generate -add-types
+
import (
"strconv"
"github.com/emer/axon/chans"
- "github.com/emer/etable/eplot"
- "github.com/emer/etable/etable"
- "github.com/emer/etable/etensor"
- _ "github.com/emer/etable/etview" // include to get gui views
- "github.com/goki/gi/gi"
- "github.com/goki/gi/gimain"
- "github.com/goki/gi/giv"
- "github.com/goki/ki/ki"
- "github.com/goki/mat32"
+ "goki.dev/etable/v2/eplot"
+ "goki.dev/etable/v2/etable"
+ "goki.dev/etable/v2/etensor"
+ _ "goki.dev/etable/v2/etview" // include to get gui views
+ "goki.dev/gi/v2/gi"
+ "goki.dev/gi/v2/gimain"
+ "goki.dev/gi/v2/giv"
+ "goki.dev/icons"
)
-func main() {
- TheSim.Config()
- gimain.Main(func() { // this starts gui -- requires valid OpenGL display connection (e.g., X11)
- guirun()
- })
-}
+func main() { gimain.Run(app) }
-func guirun() {
- TheSim.VmRun()
- win := TheSim.ConfigGui()
- win.StartEventLoop()
+func app() {
+ sim := &Sim{}
+ sim.Config()
+ sim.VmRun()
+ b := sim.ConfigGUI()
+ b.NewWindow().Run().Wait()
}
// LogPrec is precision for saving float values in logs
@@ -39,55 +37,46 @@ const LogPrec = 4
// Sim holds the params, table, etc
type Sim struct {
- // [view: inline] mAHP function
- Mahp chans.MahpParams `view:"inline" desc:"mAHP function"`
+ // mAHP function
+ Mahp chans.MahpParams `view:"inline"`
- // [def: -100] starting voltage
- Vstart float32 `def:"-100" desc:"starting voltage"`
+ // starting voltage
+ Vstart float32 `def:"-100"`
- // [def: 100] ending voltage
- Vend float32 `def:"100" desc:"ending voltage"`
+ // ending voltage
+ Vend float32 `def:"100"`
- // [def: 1] voltage increment
- Vstep float32 `def:"1" desc:"voltage increment"`
+ // voltage increment
+ Vstep float32 `def:"1"`
// number of time steps
- TimeSteps int `desc:"number of time steps"`
+ TimeSteps int
// do spiking instead of voltage ramp
- TimeSpike bool `desc:"do spiking instead of voltage ramp"`
+ TimeSpike bool
// spiking frequency
- SpikeFreq float32 `desc:"spiking frequency"`
+ SpikeFreq float32
// time-run starting membrane potential
- TimeVstart float32 `desc:"time-run starting membrane potential"`
+ TimeVstart float32
// time-run ending membrane potential
- TimeVend float32 `desc:"time-run ending membrane potential"`
+ TimeVend float32
- // [view: no-inline] table for plot
- Table *etable.Table `view:"no-inline" desc:"table for plot"`
+ // table for plot
+ Table *etable.Table `view:"no-inline"`
- // [view: -] the plot
- Plot *eplot.Plot2D `view:"-" desc:"the plot"`
+ // the plot
+ Plot *eplot.Plot2D `view:"-"`
- // [view: no-inline] table for plot
- TimeTable *etable.Table `view:"no-inline" desc:"table for plot"`
+ // table for plot
+ TimeTable *etable.Table `view:"no-inline"`
- // [view: -] the plot
- TimePlot *eplot.Plot2D `view:"-" desc:"the plot"`
-
- // [view: -] main GUI window
- Win *gi.Window `view:"-" desc:"main GUI window"`
-
- // [view: -] the master toolbar
- ToolBar *gi.ToolBar `view:"-" desc:"the master toolbar"`
+ // the plot
+ TimePlot *eplot.Plot2D `view:"-"`
}
-// TheSim is the overall state for this simulation
-var TheSim Sim
-
// Config configures all the elements using the standard functions
func (ss *Sim) Config() {
ss.Mahp.Defaults()
@@ -112,7 +101,7 @@ func (ss *Sim) Update() {
}
// VmRun plots the equation as a function of V
-func (ss *Sim) VmRun() {
+func (ss *Sim) VmRun() { //gti:add
ss.Update()
dt := ss.Table
@@ -129,7 +118,9 @@ func (ss *Sim) VmRun() {
dt.SetCellFloat("Ninf", vi, float64(ninf))
dt.SetCellFloat("Tau", vi, float64(tau))
}
- ss.Plot.Update()
+ if ss.Plot != nil {
+ ss.Plot.UpdatePlot()
+ }
}
func (ss *Sim) ConfigTable(dt *etable.Table) {
@@ -159,7 +150,7 @@ func (ss *Sim) ConfigPlot(plt *eplot.Plot2D, dt *etable.Table) *eplot.Plot2D {
/////////////////////////////////////////////////////////////////
// TimeRun runs the equation over time.
-func (ss *Sim) TimeRun() {
+func (ss *Sim) TimeRun() { //gti:add
ss.Update()
dt := ss.TimeTable
@@ -210,7 +201,9 @@ func (ss *Sim) TimeRun() {
}
n += dn
}
- ss.TimePlot.Update()
+ if ss.TimePlot != nil {
+ ss.TimePlot.UpdatePlot()
+ }
}
func (ss *Sim) ConfigTimeTable(dt *etable.Table) {
@@ -247,73 +240,28 @@ func (ss *Sim) ConfigTimePlot(plt *eplot.Plot2D, dt *etable.Table) *eplot.Plot2D
return plt
}
-// ConfigGui configures the GoGi gui interface for this simulation,
-func (ss *Sim) ConfigGui() *gi.Window {
- width := 1600
- height := 1200
-
- // gi.WinEventTrace = true
-
- gi.SetAppName("mahp_plot")
- gi.SetAppAbout(`This plots an equation. See emergent on GitHub.`)
-
- win := gi.NewMainWindow("mahp_plot", "Plotting Equations", width, height)
- ss.Win = win
+// ConfigGUI configures the GoGi GUI interface for this simulation.
+func (ss *Sim) ConfigGUI() *gi.Body {
+ b := gi.NewAppBody("mahp_plot").SetTitle("Plotting Equations")
- vp := win.WinViewport2D()
- updt := vp.UpdateStart()
-
- mfr := win.SetMainFrame()
-
- tbar := gi.AddNewToolBar(mfr, "tbar")
- tbar.SetStretchMaxWidth()
- ss.ToolBar = tbar
-
- split := gi.AddNewSplitView(mfr, "split")
- split.Dim = mat32.X
- split.SetStretchMax()
-
- sv := giv.AddNewStructView(split, "sv")
+ split := gi.NewSplits(b, "split")
+ sv := giv.NewStructView(split, "sv")
sv.SetStruct(ss)
- tv := gi.AddNewTabView(split, "tv")
+ tv := gi.NewTabs(split, "tv")
- plt := tv.AddNewTab(eplot.KiT_Plot2D, "V-G Plot").(*eplot.Plot2D)
- ss.Plot = ss.ConfigPlot(plt, ss.Table)
+ ss.Plot = eplot.NewSubPlot(tv.NewTab("V-G Plot"))
+ ss.ConfigPlot(ss.Plot, ss.Table)
- plt = tv.AddNewTab(eplot.KiT_Plot2D, "TimePlot").(*eplot.Plot2D)
- ss.TimePlot = ss.ConfigTimePlot(plt, ss.TimeTable)
+ ss.TimePlot = eplot.NewSubPlot(tv.NewTab("TimePlot"))
+ ss.ConfigTimePlot(ss.TimePlot, ss.TimeTable)
split.SetSplits(.3, .7)
- tbar.AddAction(gi.ActOpts{Label: "V-G Run", Icon: "update", Tooltip: "Run the equations and plot results."}, win.This(), func(recv, send ki.Ki, sig int64, data interface{}) {
- ss.VmRun()
- vp.SetNeedsFullRender()
- })
-
- tbar.AddAction(gi.ActOpts{Label: "Time Run", Icon: "update", Tooltip: "Run the equations and plot results."}, win.This(), func(recv, send ki.Ki, sig int64, data interface{}) {
- ss.TimeRun()
- vp.SetNeedsFullRender()
+ b.AddAppBar(func(tb *gi.Toolbar) {
+ giv.NewFuncButton(tb, ss.VmRun).SetIcon(icons.PlayArrow)
+ giv.NewFuncButton(tb, ss.TimeRun).SetIcon(icons.PlayArrow)
})
- tbar.AddAction(gi.ActOpts{Label: "README", Icon: "file-markdown", Tooltip: "Opens your browser on the README file that contains instructions for how to run this model."}, win.This(),
- func(recv, send ki.Ki, sig int64, data interface{}) {
- gi.OpenURL("https://github.com/emer/axon/blob/master/chans/mahp_plot/README.md")
- })
-
- vp.UpdateEndNoSig(updt)
-
- // main menu
- appnm := gi.AppName()
- mmen := win.MainMenu
- mmen.ConfigMenus([]string{appnm, "File", "Edit", "Window"})
-
- amen := win.MainMenu.ChildByName(appnm, 0).(*gi.Action)
- amen.Menu.AddAppMenu(win)
-
- emen := win.MainMenu.ChildByName("Edit", 1).(*gi.Action)
- emen.Menu.AddCopyCutPaste(win)
-
- win.MainMenuUpdated()
- return win
+ return b
}
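The same migration pattern repeats in every *_plot program below, so it is worth distilling once: the package-level `TheSim` global and `gi.Window` plumbing are replaced by a local `Sim`, a `gi.Body`, and reflective toolbar buttons. A condensed sketch using only calls that appear in this diff; the `demo_plot` name and the `Run` method are placeholders:

```go
package main

import (
	"goki.dev/gi/v2/gi"
	"goki.dev/gi/v2/gimain"
	"goki.dev/gi/v2/giv"
	"goki.dev/icons"
)

// Sim stands in for the per-program state struct; fields elided.
type Sim struct{}

// Run is a placeholder for VmRun/TimeRun etc.
func (ss *Sim) Run() {} //gti:add

func main() { gimain.Run(app) }

func app() {
	sim := &Sim{}
	b := gi.NewAppBody("demo_plot").SetTitle("Plotting Equations")
	split := gi.NewSplits(b, "split")
	sv := giv.NewStructView(split, "sv")
	sv.SetStruct(sim) // editable params panel, as in the diffs above
	b.AddAppBar(func(tb *gi.Toolbar) {
		// FuncButton reflects on the method to build the button
		giv.NewFuncButton(tb, sim.Run).SetIcon(icons.PlayArrow)
	})
	b.NewWindow().Run().Wait() // replaces win.StartEventLoop()
}
```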
diff --git a/chans/nmda.go b/chans/nmda.go
index dcb93f520..160391721 100644
--- a/chans/nmda.go
+++ b/chans/nmda.go
@@ -4,7 +4,7 @@
package chans
-import "github.com/goki/mat32"
+import "goki.dev/mat32/v2"
//gosl: start chans
@@ -15,29 +15,29 @@ import "github.com/goki/mat32"
// increments
type NMDAParams struct {
- // [def: 0,0.006,0.007] overall multiplier for strength of NMDA current -- multiplies GnmdaSyn to get net conductance.
- Gbar float32 `def:"0,0.006,0.007" desc:"overall multiplier for strength of NMDA current -- multiplies GnmdaSyn to get net conductance."`
+ // overall multiplier for strength of NMDA current -- multiplies GnmdaSyn to get net conductance.
+ Gbar float32 `def:"0,0.006,0.007"`
- // [def: 30,50,100,200,300] [viewif: Gbar>0] decay time constant for NMDA channel activation -- rise time is 2 msec and not worth extra effort for biexponential. 30 fits the Urakubo et al (2008) model with ITau = 100, but 100 works better in practice is small networks so far.
- Tau float32 `viewif:"Gbar>0" def:"30,50,100,200,300" desc:"decay time constant for NMDA channel activation -- rise time is 2 msec and not worth extra effort for biexponential. 30 fits the Urakubo et al (2008) model with ITau = 100, but 100 works better in practice is small networks so far."`
+ // decay time constant for NMDA channel activation -- rise time is 2 msec and not worth extra effort for biexponential. 30 fits the Urakubo et al (2008) model with ITau = 100, but 100 works better in practice in small networks so far.
+ Tau float32 `viewif:"Gbar>0" def:"30,50,100,200,300"`
- // [def: 1,100] [viewif: Gbar>0] decay time constant for NMDA channel inhibition, which captures the Urakubo et al (2008) allosteric dynamics (100 fits their model well) -- set to 1 to eliminate that mechanism.
- ITau float32 `viewif:"Gbar>0" def:"1,100" desc:"decay time constant for NMDA channel inhibition, which captures the Urakubo et al (2008) allosteric dynamics (100 fits their model well) -- set to 1 to eliminate that mechanism."`
+ // decay time constant for NMDA channel inhibition, which captures the Urakubo et al (2008) allosteric dynamics (100 fits their model well) -- set to 1 to eliminate that mechanism.
+ ITau float32 `viewif:"Gbar>0" def:"1,100"`
- // [def: 1:1.5] [viewif: Gbar>0] magnesium ion concentration: Brunel & Wang (2001) and Sanders et al (2013) use 1 mM, based on Jahr & Stevens (1990). Urakubo et al (2008) use 1.5 mM. 1.4 with Voff = 5 works best so far in large models, 1.2, Voff = 0 best in smaller nets.
- MgC float32 `viewif:"Gbar>0" def:"1:1.5" desc:"magnesium ion concentration: Brunel & Wang (2001) and Sanders et al (2013) use 1 mM, based on Jahr & Stevens (1990). Urakubo et al (2008) use 1.5 mM. 1.4 with Voff = 5 works best so far in large models, 1.2, Voff = 0 best in smaller nets."`
+ // magnesium ion concentration: Brunel & Wang (2001) and Sanders et al (2013) use 1 mM, based on Jahr & Stevens (1990). Urakubo et al (2008) use 1.5 mM. 1.4 with Voff = 5 works best so far in large models, 1.2, Voff = 0 best in smaller nets.
+ MgC float32 `viewif:"Gbar>0" def:"1:1.5"`
- // [def: 0] [viewif: Gbar>0] offset in membrane potential in biological units for voltage-dependent functions. 5 corresponds to the -65 mV rest, -45 threshold of the Urakubo et al (2008) model. 5 was used before in a buggy version of NMDA equation -- 0 is new default.
- Voff float32 `viewif:"Gbar>0" def:"0" desc:"offset in membrane potential in biological units for voltage-dependent functions. 5 corresponds to the -65 mV rest, -45 threshold of the Urakubo et al (2008) model. 5 was used before in a buggy version of NMDA equation -- 0 is new default."`
+ // offset in membrane potential in biological units for voltage-dependent functions. 5 corresponds to the -65 mV rest, -45 threshold of the Urakubo et al (2008) model. 5 was used before in a buggy version of NMDA equation -- 0 is new default.
+ Voff float32 `viewif:"Gbar>0" def:"0"`
- // [view: -] rate = 1 / tau
- Dt float32 `view:"-" json:"-" xml:"-" desc:"rate = 1 / tau"`
+ // rate = 1 / tau
+ Dt float32 `view:"-" json:"-" xml:"-"`
- // [view: -] rate = 1 / tau
- IDt float32 `view:"-" json:"-" xml:"-" desc:"rate = 1 / tau"`
+ // rate = 1 / tau
+ IDt float32 `view:"-" json:"-" xml:"-"`
- // [view: -] MgFact = MgC / 3.57
- MgFact float32 `view:"-" json:"-" xml:"-" desc:"MgFact = MgC / 3.57"`
+ // MgFact = MgC / 3.57
+ MgFact float32 `view:"-" json:"-" xml:"-"`
}
func (np *NMDAParams) Defaults() {
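For context on what these NMDAParams encode: MgFact = MgC / 3.57 feeds the Jahr & Stevens (1990) magnesium-block factor (the Brunel & Wang, 2001 form), with the 0.062 voltage multiplier that also appears as NMDAv in the plot program below. A rough standalone sketch; the `mgBlock` name and the sign convention for voff are assumptions, not the axon API:

```go
package main

import (
	"fmt"

	"goki.dev/mat32/v2"
)

// mgBlock returns the fraction of NMDA conductance left unblocked
// by Mg++ at membrane potential v (mV), with mgFact = MgC / 3.57
// and voff shifting the voltage axis (sign assumed here).
func mgBlock(v, mgFact, voff float32) float32 {
	return 1.0 / (1.0 + mgFact*mat32.Exp(-0.062*(v+voff)))
}

func main() {
	mgFact := float32(1.4) / 3.57        // MgC = 1.4, per the docs above
	fmt.Println(mgBlock(-65, mgFact, 0)) // mostly blocked near rest
	fmt.Println(mgBlock(-20, mgFact, 0)) // substantially unblocked
}
```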
diff --git a/chans/nmda_plot/gtigen.go b/chans/nmda_plot/gtigen.go
new file mode 100644
index 000000000..b9a3c82fe
--- /dev/null
+++ b/chans/nmda_plot/gtigen.go
@@ -0,0 +1,44 @@
+// Code generated by "goki generate -add-types"; DO NOT EDIT.
+
+package main
+
+import (
+ "goki.dev/gti"
+ "goki.dev/ordmap"
+)
+
+var _ = gti.AddType(&gti.Type{
+ Name: "main.Sim",
+ ShortName: "main.Sim",
+ IDName: "sim",
+ Doc: "Sim holds the params, table, etc",
+ Directives: gti.Directives{},
+ Fields: ordmap.Make([]ordmap.KeyVal[string, *gti.Field]{
+ {"NMDAStd", >i.Field{Name: "NMDAStd", Type: "github.com/emer/axon/chans.NMDAParams", LocalType: "chans.NMDAParams", Doc: "standard NMDA implementation in chans", Directives: gti.Directives{}, Tag: ""}},
+ {"NMDAv", >i.Field{Name: "NMDAv", Type: "float64", LocalType: "float64", Doc: "multiplier on NMDA as function of voltage", Directives: gti.Directives{}, Tag: "def:\"0.062\""}},
+ {"MgC", >i.Field{Name: "MgC", Type: "float64", LocalType: "float64", Doc: "magnesium ion concentration -- somewhere between 1 and 1.5", Directives: gti.Directives{}, Tag: ""}},
+ {"NMDAd", >i.Field{Name: "NMDAd", Type: "float64", LocalType: "float64", Doc: "denominator of NMDA function", Directives: gti.Directives{}, Tag: "def:\"3.57\""}},
+ {"NMDAerev", >i.Field{Name: "NMDAerev", Type: "float64", LocalType: "float64", Doc: "NMDA reversal / driving potential", Directives: gti.Directives{}, Tag: "def:\"0\""}},
+ {"BugVoff", >i.Field{Name: "BugVoff", Type: "float64", LocalType: "float64", Doc: "for old buggy NMDA: voff value to use", Directives: gti.Directives{}, Tag: ""}},
+ {"Vstart", >i.Field{Name: "Vstart", Type: "float64", LocalType: "float64", Doc: "starting voltage", Directives: gti.Directives{}, Tag: "def:\"-90\""}},
+ {"Vend", >i.Field{Name: "Vend", Type: "float64", LocalType: "float64", Doc: "ending voltage", Directives: gti.Directives{}, Tag: "def:\"10\""}},
+ {"Vstep", >i.Field{Name: "Vstep", Type: "float64", LocalType: "float64", Doc: "voltage increment", Directives: gti.Directives{}, Tag: "def:\"1\""}},
+ {"Tau", >i.Field{Name: "Tau", Type: "float64", LocalType: "float64", Doc: "decay time constant for NMDA current -- rise time is 2 msec and not worth extra effort for biexponential", Directives: gti.Directives{}, Tag: "def:\"100\""}},
+ {"TimeSteps", >i.Field{Name: "TimeSteps", Type: "int", LocalType: "int", Doc: "number of time steps", Directives: gti.Directives{}, Tag: ""}},
+ {"TimeV", >i.Field{Name: "TimeV", Type: "float64", LocalType: "float64", Doc: "voltage for TimeRun", Directives: gti.Directives{}, Tag: ""}},
+ {"TimeGin", >i.Field{Name: "TimeGin", Type: "float64", LocalType: "float64", Doc: "NMDA Gsyn current input at every time step", Directives: gti.Directives{}, Tag: ""}},
+ {"Table", >i.Field{Name: "Table", Type: "*goki.dev/etable/v2/etable.Table", LocalType: "*etable.Table", Doc: "table for plot", Directives: gti.Directives{}, Tag: "view:\"no-inline\""}},
+ {"Plot", >i.Field{Name: "Plot", Type: "*goki.dev/etable/v2/eplot.Plot2D", LocalType: "*eplot.Plot2D", Doc: "the plot", Directives: gti.Directives{}, Tag: "view:\"-\""}},
+ {"TimeTable", >i.Field{Name: "TimeTable", Type: "*goki.dev/etable/v2/etable.Table", LocalType: "*etable.Table", Doc: "table for plot", Directives: gti.Directives{}, Tag: "view:\"no-inline\""}},
+ {"TimePlot", >i.Field{Name: "TimePlot", Type: "*goki.dev/etable/v2/eplot.Plot2D", LocalType: "*eplot.Plot2D", Doc: "the plot", Directives: gti.Directives{}, Tag: "view:\"-\""}},
+ }),
+ Embeds: ordmap.Make([]ordmap.KeyVal[string, *gti.Field]{}),
+ Methods: ordmap.Make([]ordmap.KeyVal[string, *gti.Method]{
+ {"Run", >i.Method{Name: "Run", Doc: "Run runs the equation.", Directives: gti.Directives{
+ >i.Directive{Tool: "gti", Directive: "add", Args: []string{}},
+ }, Args: ordmap.Make([]ordmap.KeyVal[string, *gti.Field]{}), Returns: ordmap.Make([]ordmap.KeyVal[string, *gti.Field]{})}},
+ {"TimeRun", >i.Method{Name: "TimeRun", Doc: "TimeRun runs the equation over time.", Directives: gti.Directives{
+ >i.Directive{Tool: "gti", Directive: "add", Args: []string{}},
+ }, Args: ordmap.Make([]ordmap.KeyVal[string, *gti.Field]{}), Returns: ordmap.Make([]ordmap.KeyVal[string, *gti.Field]{})}},
+ }),
+})
diff --git a/chans/nmda_plot/nmda_plot.go b/chans/nmda_plot/nmda_plot.go
index 82124a64f..0b2dbc3ce 100644
--- a/chans/nmda_plot/nmda_plot.go
+++ b/chans/nmda_plot/nmda_plot.go
@@ -5,33 +5,32 @@
// nmda_plot plots an equation updating over time in a etable.Table and Plot2D.
package main
+//go:generate goki generate -add-types
+
import (
"math"
"strconv"
"github.com/emer/axon/chans"
- "github.com/emer/etable/eplot"
- "github.com/emer/etable/etable"
- "github.com/emer/etable/etensor"
- _ "github.com/emer/etable/etview" // include to get gui views
- "github.com/goki/gi/gi"
- "github.com/goki/gi/gimain"
- "github.com/goki/gi/giv"
- "github.com/goki/ki/ki"
- "github.com/goki/mat32"
+ "goki.dev/etable/v2/eplot"
+ "goki.dev/etable/v2/etable"
+ "goki.dev/etable/v2/etensor"
+ _ "goki.dev/etable/v2/etview" // include to get gui views
+ "goki.dev/gi/v2/gi"
+ "goki.dev/gi/v2/gimain"
+ "goki.dev/gi/v2/giv"
+ "goki.dev/icons"
+ "goki.dev/mat32/v2"
)
-func main() {
- TheSim.Config()
- gimain.Main(func() { // this starts gui
- guirun()
- })
-}
+func main() { gimain.Run(app) }
-func guirun() {
- TheSim.Run()
- win := TheSim.ConfigGui()
- win.StartEventLoop()
+func app() {
+ sim := &Sim{}
+ sim.Config()
+ sim.Run()
+ b := sim.ConfigGUI()
+ b.NewWindow().Run().Wait()
}
// LogPrec is precision for saving float values in logs
@@ -41,66 +40,57 @@ const LogPrec = 4
type Sim struct {
// standard NMDA implementation in chans
- NMDAStd chans.NMDAParams `desc:"standard NMDA implementation in chans"`
+ NMDAStd chans.NMDAParams
- // [def: 0.062] multiplier on NMDA as function of voltage
- NMDAv float64 `def:"0.062" desc:"multiplier on NMDA as function of voltage"`
+ // multiplier on NMDA as function of voltage
+ NMDAv float64 `def:"0.062"`
// magnesium ion concentration -- somewhere between 1 and 1.5
- MgC float64 `desc:"magnesium ion concentration -- somewhere between 1 and 1.5"`
+ MgC float64
- // [def: 3.57] denominator of NMDA function
- NMDAd float64 `def:"3.57" desc:"denominator of NMDA function"`
+ // denominator of NMDA function
+ NMDAd float64 `def:"3.57"`
- // [def: 0] NMDA reversal / driving potential
- NMDAerev float64 `def:"0" desc:"NMDA reversal / driving potential"`
+ // NMDA reversal / driving potential
+ NMDAerev float64 `def:"0"`
// for old buggy NMDA: voff value to use
- BugVoff float64 `desc:"for old buggy NMDA: voff value to use"`
+ BugVoff float64
- // [def: -90] starting voltage
- Vstart float64 `def:"-90" desc:"starting voltage"`
+ // starting voltage
+ Vstart float64 `def:"-90"`
- // [def: 10] ending voltage
- Vend float64 `def:"10" desc:"ending voltage"`
+ // ending voltage
+ Vend float64 `def:"10"`
- // [def: 1] voltage increment
- Vstep float64 `def:"1" desc:"voltage increment"`
+ // voltage increment
+ Vstep float64 `def:"1"`
- // [def: 100] decay time constant for NMDA current -- rise time is 2 msec and not worth extra effort for biexponential
- Tau float64 `def:"100" desc:"decay time constant for NMDA current -- rise time is 2 msec and not worth extra effort for biexponential"`
+ // decay time constant for NMDA current -- rise time is 2 msec and not worth extra effort for biexponential
+ Tau float64 `def:"100"`
// number of time steps
- TimeSteps int `desc:"number of time steps"`
+ TimeSteps int
// voltage for TimeRun
- TimeV float64 `desc:"voltage for TimeRun"`
+ TimeV float64
// NMDA Gsyn current input at every time step
- TimeGin float64 `desc:"NMDA Gsyn current input at every time step"`
+ TimeGin float64
- // [view: no-inline] table for plot
- Table *etable.Table `view:"no-inline" desc:"table for plot"`
+ // table for plot
+ Table *etable.Table `view:"no-inline"`
- // [view: -] the plot
- Plot *eplot.Plot2D `view:"-" desc:"the plot"`
+ // the plot
+ Plot *eplot.Plot2D `view:"-"`
- // [view: no-inline] table for plot
- TimeTable *etable.Table `view:"no-inline" desc:"table for plot"`
+ // table for plot
+ TimeTable *etable.Table `view:"no-inline"`
- // [view: -] the plot
- TimePlot *eplot.Plot2D `view:"-" desc:"the plot"`
-
- // [view: -] main GUI window
- Win *gi.Window `view:"-" desc:"main GUI window"`
-
- // [view: -] the master toolbar
- ToolBar *gi.ToolBar `view:"-" desc:"the master toolbar"`
+ // the plot
+ TimePlot *eplot.Plot2D `view:"-"`
}
-// TheSim is the overall state for this simulation
-var TheSim Sim
-
// Config configures all the elements using the standard functions
func (ss *Sim) Config() {
ss.NMDAStd.Defaults()
@@ -132,7 +122,7 @@ func (ss *Sim) Update() {
// https://brian2.readthedocs.io/en/stable/examples/frompapers.Brunel_Wang_2001.html
// Run runs the equation.
-func (ss *Sim) Run() {
+func (ss *Sim) Run() { //gti:add
ss.Update()
dt := ss.Table
@@ -166,7 +156,9 @@ func (ss *Sim) Run() {
dt.SetCellFloat("Gnmda_bug", vi, float64(gbug))
dt.SetCellFloat("Ca", vi, float64(ca))
}
- ss.Plot.Update()
+ if ss.Plot != nil {
+ ss.Plot.UpdatePlot()
+ }
}
func (ss *Sim) ConfigTable(dt *etable.Table) {
@@ -200,7 +192,7 @@ func (ss *Sim) ConfigPlot(plt *eplot.Plot2D, dt *etable.Table) *eplot.Plot2D {
/////////////////////////////////////////////////////////////////
// TimeRun runs the equation over time.
-func (ss *Sim) TimeRun() {
+func (ss *Sim) TimeRun() { //gti:add
ss.Update()
dt := ss.TimeTable
@@ -222,7 +214,9 @@ func (ss *Sim) TimeRun() {
dt.SetCellFloat("Gnmda", ti, g)
dt.SetCellFloat("NMDA", ti, nmda)
}
- ss.TimePlot.Update()
+ if ss.TimePlot != nil {
+ ss.TimePlot.UpdatePlot()
+ }
}
func (ss *Sim) ConfigTimeTable(dt *etable.Table) {
@@ -249,73 +243,28 @@ func (ss *Sim) ConfigTimePlot(plt *eplot.Plot2D, dt *etable.Table) *eplot.Plot2D
return plt
}
-// ConfigGui configures the GoGi gui interface for this simulation,
-func (ss *Sim) ConfigGui() *gi.Window {
- width := 1600
- height := 1200
-
- // gi.WinEventTrace = true
-
- gi.SetAppName("nmda_plot")
- gi.SetAppAbout(`This plots an equation. See emergent on GitHub.`)
-
- win := gi.NewMainWindow("nmdaplot", "Plotting Equations", width, height)
- ss.Win = win
+// ConfigGUI configures the GoGi GUI interface for this simulation.
+func (ss *Sim) ConfigGUI() *gi.Body {
+ b := gi.NewAppBody("nmda_plot").SetTitle("Plotting Equations")
- vp := win.WinViewport2D()
- updt := vp.UpdateStart()
-
- mfr := win.SetMainFrame()
-
- tbar := gi.AddNewToolBar(mfr, "tbar")
- tbar.SetStretchMaxWidth()
- ss.ToolBar = tbar
-
- split := gi.AddNewSplitView(mfr, "split")
- split.Dim = mat32.X
- split.SetStretchMax()
-
- sv := giv.AddNewStructView(split, "sv")
+ split := gi.NewSplits(b, "split")
+ sv := giv.NewStructView(split, "sv")
sv.SetStruct(ss)
- tv := gi.AddNewTabView(split, "tv")
+ tv := gi.NewTabs(split, "tv")
- plt := tv.AddNewTab(eplot.KiT_Plot2D, "V-G Plot").(*eplot.Plot2D)
- ss.Plot = ss.ConfigPlot(plt, ss.Table)
+ ss.Plot = eplot.NewSubPlot(tv.NewTab("V-G Plot"))
+ ss.ConfigPlot(ss.Plot, ss.Table)
- plt = tv.AddNewTab(eplot.KiT_Plot2D, "TimePlot").(*eplot.Plot2D)
- ss.TimePlot = ss.ConfigTimePlot(plt, ss.TimeTable)
+ ss.TimePlot = eplot.NewSubPlot(tv.NewTab("TimePlot"))
+ ss.ConfigTimePlot(ss.TimePlot, ss.TimeTable)
split.SetSplits(.3, .7)
- tbar.AddAction(gi.ActOpts{Label: "V-G Run", Icon: "update", Tooltip: "Run the equations and plot results."}, win.This(), func(recv, send ki.Ki, sig int64, data interface{}) {
- ss.Run()
- vp.SetNeedsFullRender()
- })
-
- tbar.AddAction(gi.ActOpts{Label: "Time Run", Icon: "update", Tooltip: "Run the equations and plot results."}, win.This(), func(recv, send ki.Ki, sig int64, data interface{}) {
- ss.TimeRun()
- vp.SetNeedsFullRender()
+ b.AddAppBar(func(tb *gi.Toolbar) {
+ giv.NewFuncButton(tb, ss.Run).SetIcon(icons.PlayArrow)
+ giv.NewFuncButton(tb, ss.TimeRun).SetIcon(icons.PlayArrow)
})
- tbar.AddAction(gi.ActOpts{Label: "README", Icon: "file-markdown", Tooltip: "Opens your browser on the README file that contains instructions for how to run this model."}, win.This(),
- func(recv, send ki.Ki, sig int64, data interface{}) {
- gi.OpenURL("https://github.com/emer/axon/blob/master/chans/nmda_plot/README.md")
- })
-
- vp.UpdateEndNoSig(updt)
-
- // main menu
- appnm := gi.AppName()
- mmen := win.MainMenu
- mmen.ConfigMenus([]string{appnm, "File", "Edit", "Window"})
-
- amen := win.MainMenu.ChildByName(appnm, 0).(*gi.Action)
- amen.Menu.AddAppMenu(win)
-
- emen := win.MainMenu.ChildByName("Edit", 1).(*gi.Action)
- emen.Menu.AddCopyCutPaste(win)
-
- win.MainMenuUpdated()
- return win
+ return b
}
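One behavioral note on the repeated `ss.Plot.Update()` to guarded `UpdatePlot()` change: the new `app()` functions call `sim.Run()` before `ConfigGUI()` has created any plots, so the nil checks are what make that first headless run safe. If the guard ever needed centralizing, a helper along these lines would do; the `updatePlot` name is hypothetical:

```go
package main

import "goki.dev/etable/v2/eplot"

// updatePlot redraws a plot only if it has been configured; safe to
// call from Run/TimeRun before the GUI exists.
func updatePlot(plt *eplot.Plot2D) {
	if plt != nil {
		plt.UpdatePlot()
	}
}

func main() {
	updatePlot(nil) // no-op, as in the pre-GUI Run() call
}
```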
diff --git a/chans/sahp.go b/chans/sahp.go
index 763ef19de..b768314b9 100644
--- a/chans/sahp.go
+++ b/chans/sahp.go
@@ -4,7 +4,7 @@
package chans
-import "github.com/goki/mat32"
+import "goki.dev/mat32/v2"
//gosl: start chans
@@ -18,26 +18,26 @@ import "github.com/goki/mat32"
// of the n gating value, but tau is computed in any case.
type SahpParams struct {
- // [def: 0.05,0.1] strength of sAHP current
- Gbar float32 `def:"0.05,0.1" desc:"strength of sAHP current"`
+ // strength of sAHP current
+ Gbar float32 `def:"0.05,0.1"`
- // [def: 5,10] [viewif: Gbar>0] time constant for integrating Ca across theta cycles
- CaTau float32 `viewif:"Gbar>0" def:"5,10" desc:"time constant for integrating Ca across theta cycles"`
+ // time constant for integrating Ca across theta cycles
+ CaTau float32 `viewif:"Gbar>0" def:"5,10"`
- // [def: 0.8] [viewif: Gbar>0] integrated Ca offset (threshold) for infinite time N gating function -- where the gate is at 50% strength
- Off float32 `viewif:"Gbar>0" def:"0.8" desc:"integrated Ca offset (threshold) for infinite time N gating function -- where the gate is at 50% strength"`
+ // integrated Ca offset (threshold) for infinite time N gating function -- where the gate is at 50% strength
+ Off float32 `viewif:"Gbar>0" def:"0.8"`
- // [def: 0.02] [viewif: Gbar>0] slope of the infinite time logistic gating function
- Slope float32 `viewif:"Gbar>0" def:"0.02" desc:"slope of the infinite time logistic gating function"`
+ // slope of the infinite time logistic gating function
+ Slope float32 `viewif:"Gbar>0" def:"0.02"`
- // [def: 1] [viewif: Gbar>0] maximum slow rate time constant in msec for activation / deactivation. The effective Tau is much slower -- 1/20th in original temp, and 1/60th in standard 37 C temp
- TauMax float32 `viewif:"Gbar>0" def:"1" desc:"maximum slow rate time constant in msec for activation / deactivation. The effective Tau is much slower -- 1/20th in original temp, and 1/60th in standard 37 C temp"`
+ // maximum slow rate time constant in msec for activation / deactivation. The effective Tau is much slower -- 1/20th in original temp, and 1/60th in standard 37 C temp
+ TauMax float32 `viewif:"Gbar>0" def:"1"`
- // [view: -] 1/Tau
- CaDt float32 `view:"-" inactive:"+" desc:"1/Tau"`
+ // 1/Tau
+ CaDt float32 `view:"-" inactive:"+"`
- // [view: -] 1/Tau
- DtMax float32 `view:"-" inactive:"+" desc:"1/Tau"`
+ // 1/Tau
+ DtMax float32 `view:"-" inactive:"+"`
pad int32
}
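As a reading aid for the Off/Slope parameters above ("where the gate is at 50% strength", "slope of the infinite time logistic gating function"): the implied infinite-time gate is a logistic in integrated Ca. A sketch of that form only; the `nInf` name and the exact axon formula are assumptions:

```go
package main

import (
	"fmt"

	"goki.dev/mat32/v2"
)

// nInf is the logistic infinite-time N gating value implied by the
// Off/Slope docs: 0.5 when ca == off, with steepness set by slope.
func nInf(ca, off, slope float32) float32 {
	return 1.0 / (1.0 + mat32.Exp(-(ca-off)/slope))
}

func main() {
	fmt.Println(nInf(0.8, 0.8, 0.02)) // 0.5 at the Off threshold
}
```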
diff --git a/chans/sahp_plot/gtigen.go b/chans/sahp_plot/gtigen.go
new file mode 100644
index 000000000..1cb35ca70
--- /dev/null
+++ b/chans/sahp_plot/gtigen.go
@@ -0,0 +1,38 @@
+// Code generated by "goki generate -add-types"; DO NOT EDIT.
+
+package main
+
+import (
+ "goki.dev/gti"
+ "goki.dev/ordmap"
+)
+
+var _ = gti.AddType(&gti.Type{
+ Name: "main.Sim",
+ ShortName: "main.Sim",
+ IDName: "sim",
+ Doc: "Sim holds the params, table, etc",
+ Directives: gti.Directives{},
+ Fields: ordmap.Make([]ordmap.KeyVal[string, *gti.Field]{
+ {"Sahp", >i.Field{Name: "Sahp", Type: "github.com/emer/axon/chans.SahpParams", LocalType: "chans.SahpParams", Doc: "sAHP function", Directives: gti.Directives{}, Tag: "view:\"inline\""}},
+ {"CaStart", >i.Field{Name: "CaStart", Type: "float32", LocalType: "float32", Doc: "starting calcium", Directives: gti.Directives{}, Tag: "def:\"0\""}},
+ {"CaEnd", >i.Field{Name: "CaEnd", Type: "float32", LocalType: "float32", Doc: "ending calcium", Directives: gti.Directives{}, Tag: "def:\"1.5\""}},
+ {"CaStep", >i.Field{Name: "CaStep", Type: "float32", LocalType: "float32", Doc: "calcium increment", Directives: gti.Directives{}, Tag: "def:\"0.01\""}},
+ {"TimeSteps", >i.Field{Name: "TimeSteps", Type: "int", LocalType: "int", Doc: "number of time steps", Directives: gti.Directives{}, Tag: ""}},
+ {"TimeCaStart", >i.Field{Name: "TimeCaStart", Type: "float32", LocalType: "float32", Doc: "time-run starting calcium", Directives: gti.Directives{}, Tag: ""}},
+ {"TimeCaD", >i.Field{Name: "TimeCaD", Type: "float32", LocalType: "float32", Doc: "time-run CaD value at end of each theta cycle", Directives: gti.Directives{}, Tag: ""}},
+ {"Table", >i.Field{Name: "Table", Type: "*goki.dev/etable/v2/etable.Table", LocalType: "*etable.Table", Doc: "table for plot", Directives: gti.Directives{}, Tag: "view:\"no-inline\""}},
+ {"Plot", >i.Field{Name: "Plot", Type: "*goki.dev/etable/v2/eplot.Plot2D", LocalType: "*eplot.Plot2D", Doc: "the plot", Directives: gti.Directives{}, Tag: "view:\"-\""}},
+ {"TimeTable", >i.Field{Name: "TimeTable", Type: "*goki.dev/etable/v2/etable.Table", LocalType: "*etable.Table", Doc: "table for plot", Directives: gti.Directives{}, Tag: "view:\"no-inline\""}},
+ {"TimePlot", >i.Field{Name: "TimePlot", Type: "*goki.dev/etable/v2/eplot.Plot2D", LocalType: "*eplot.Plot2D", Doc: "the plot", Directives: gti.Directives{}, Tag: "view:\"-\""}},
+ }),
+ Embeds: ordmap.Make([]ordmap.KeyVal[string, *gti.Field]{}),
+ Methods: ordmap.Make([]ordmap.KeyVal[string, *gti.Method]{
+ {"CaRun", >i.Method{Name: "CaRun", Doc: "CaRun plots the equation as a function of V", Directives: gti.Directives{
+ >i.Directive{Tool: "gti", Directive: "add", Args: []string{}},
+ }, Args: ordmap.Make([]ordmap.KeyVal[string, *gti.Field]{}), Returns: ordmap.Make([]ordmap.KeyVal[string, *gti.Field]{})}},
+ {"TimeRun", >i.Method{Name: "TimeRun", Doc: "TimeRun runs the equation over time.", Directives: gti.Directives{
+ >i.Directive{Tool: "gti", Directive: "add", Args: []string{}},
+ }, Args: ordmap.Make([]ordmap.KeyVal[string, *gti.Field]{}), Returns: ordmap.Make([]ordmap.KeyVal[string, *gti.Field]{})}},
+ }),
+})
diff --git a/chans/sahp_plot/sahp_plot.go b/chans/sahp_plot/sahp_plot.go
index faba711dc..5bdd4851b 100644
--- a/chans/sahp_plot/sahp_plot.go
+++ b/chans/sahp_plot/sahp_plot.go
@@ -2,35 +2,33 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
-// mahp_plot plots an equation updating over time in a etable.Table and Plot2D.
+// sahp_plot plots an equation updating over time in a etable.Table and Plot2D.
package main
+//go:generate goki generate -add-types
+
import (
"strconv"
"github.com/emer/axon/chans"
- "github.com/emer/etable/eplot"
- "github.com/emer/etable/etable"
- "github.com/emer/etable/etensor"
- _ "github.com/emer/etable/etview" // include to get gui views
- "github.com/goki/gi/gi"
- "github.com/goki/gi/gimain"
- "github.com/goki/gi/giv"
- "github.com/goki/ki/ki"
- "github.com/goki/mat32"
+ "goki.dev/etable/v2/eplot"
+ "goki.dev/etable/v2/etable"
+ "goki.dev/etable/v2/etensor"
+ _ "goki.dev/etable/v2/etview" // include to get gui views
+ "goki.dev/gi/v2/gi"
+ "goki.dev/gi/v2/gimain"
+ "goki.dev/gi/v2/giv"
+ "goki.dev/icons"
)
-func main() {
- TheSim.Config()
- gimain.Main(func() { // this starts gui -- requires valid OpenGL display connection (e.g., X11)
- guirun()
- })
-}
+func main() { gimain.Run(app) }
-func guirun() {
- TheSim.CaRun()
- win := TheSim.ConfigGui()
- win.StartEventLoop()
+func app() {
+ sim := &Sim{}
+ sim.Config()
+ sim.CaRun()
+ b := sim.ConfigGUI()
+ b.NewWindow().Run().Wait()
}
// LogPrec is precision for saving float values in logs
@@ -39,49 +37,40 @@ const LogPrec = 4
// Sim holds the params, table, etc
type Sim struct {
- // [view: inline] sAHP function
- Sahp chans.SahpParams `view:"inline" desc:"sAHP function"`
+ // sAHP function
+ Sahp chans.SahpParams `view:"inline"`
- // [def: 0] starting calcium
- CaStart float32 `def:"0" desc:"starting calcium"`
+ // starting calcium
+ CaStart float32 `def:"0"`
- // [def: 1.5] ending calcium
- CaEnd float32 `def:"1.5" desc:"ending calcium"`
+ // ending calcium
+ CaEnd float32 `def:"1.5"`
- // [def: 0.01] calcium increment
- CaStep float32 `def:"0.01" desc:"calcium increment"`
+ // calcium increment
+ CaStep float32 `def:"0.01"`
// number of time steps
- TimeSteps int `desc:"number of time steps"`
+ TimeSteps int
// time-run starting calcium
- TimeCaStart float32 `desc:"time-run starting calcium"`
+ TimeCaStart float32
// time-run CaD value at end of each theta cycle
- TimeCaD float32 `desc:"time-run CaD value at end of each theta cycle"`
+ TimeCaD float32
- // [view: no-inline] table for plot
- Table *etable.Table `view:"no-inline" desc:"table for plot"`
+ // table for plot
+ Table *etable.Table `view:"no-inline"`
- // [view: -] the plot
- Plot *eplot.Plot2D `view:"-" desc:"the plot"`
+ // the plot
+ Plot *eplot.Plot2D `view:"-"`
- // [view: no-inline] table for plot
- TimeTable *etable.Table `view:"no-inline" desc:"table for plot"`
+ // table for plot
+ TimeTable *etable.Table `view:"no-inline"`
- // [view: -] the plot
- TimePlot *eplot.Plot2D `view:"-" desc:"the plot"`
-
- // [view: -] main GUI window
- Win *gi.Window `view:"-" desc:"main GUI window"`
-
- // [view: -] the master toolbar
- ToolBar *gi.ToolBar `view:"-" desc:"the master toolbar"`
+ // the plot
+ TimePlot *eplot.Plot2D `view:"-"`
}
-// TheSim is the overall state for this simulation
-var TheSim Sim
-
// Config configures all the elements using the standard functions
func (ss *Sim) Config() {
ss.Sahp.Defaults()
@@ -104,7 +93,7 @@ func (ss *Sim) Update() {
}
// CaRun plots the equation as a function of V
-func (ss *Sim) CaRun() {
+func (ss *Sim) CaRun() { //gti:add
ss.Update()
dt := ss.Table
@@ -121,7 +110,9 @@ func (ss *Sim) CaRun() {
dt.SetCellFloat("Ninf", vi, float64(ninf))
dt.SetCellFloat("Tau", vi, float64(tau))
}
- ss.Plot.Update()
+ if ss.Plot != nil {
+ ss.Plot.UpdatePlot()
+ }
}
func (ss *Sim) ConfigTable(dt *etable.Table) {
@@ -151,7 +142,7 @@ func (ss *Sim) ConfigPlot(plt *eplot.Plot2D, dt *etable.Table) *eplot.Plot2D {
/////////////////////////////////////////////////////////////////
// TimeRun runs the equation over time.
-func (ss *Sim) TimeRun() {
+func (ss *Sim) TimeRun() { //gti:add
ss.Update()
dt := ss.TimeTable
@@ -181,7 +172,9 @@ func (ss *Sim) TimeRun() {
ca = mp.CaInt(ca, ss.TimeCaD)
n += dn
}
- ss.TimePlot.Update()
+ if ss.TimePlot != nil {
+ ss.TimePlot.UpdatePlot()
+ }
}
func (ss *Sim) ConfigTimeTable(dt *etable.Table) {
@@ -218,73 +211,28 @@ func (ss *Sim) ConfigTimePlot(plt *eplot.Plot2D, dt *etable.Table) *eplot.Plot2D
return plt
}
-// ConfigGui configures the GoGi gui interface for this simulation,
-func (ss *Sim) ConfigGui() *gi.Window {
- width := 1600
- height := 1200
-
- // gi.WinEventTrace = true
-
- gi.SetAppName("sahp_plot")
- gi.SetAppAbout(`This plots an equation. See emergent on GitHub.`)
-
- win := gi.NewMainWindow("sahp_plot", "Plotting Equations", width, height)
- ss.Win = win
+// ConfigGUI configures the GoGi GUI interface for this simulation.
+func (ss *Sim) ConfigGUI() *gi.Body {
+ b := gi.NewAppBody("sahp_plot").SetTitle("Plotting Equations")
- vp := win.WinViewport2D()
- updt := vp.UpdateStart()
-
- mfr := win.SetMainFrame()
-
- tbar := gi.AddNewToolBar(mfr, "tbar")
- tbar.SetStretchMaxWidth()
- ss.ToolBar = tbar
-
- split := gi.AddNewSplitView(mfr, "split")
- split.Dim = mat32.X
- split.SetStretchMax()
-
- sv := giv.AddNewStructView(split, "sv")
+ split := gi.NewSplits(b, "split")
+ sv := giv.NewStructView(split, "sv")
sv.SetStruct(ss)
- tv := gi.AddNewTabView(split, "tv")
+ tv := gi.NewTabs(split, "tv")
- plt := tv.AddNewTab(eplot.KiT_Plot2D, "Ca-G Plot").(*eplot.Plot2D)
- ss.Plot = ss.ConfigPlot(plt, ss.Table)
+ ss.Plot = eplot.NewSubPlot(tv.NewTab("Ca-G Plot"))
+ ss.ConfigPlot(ss.Plot, ss.Table)
- plt = tv.AddNewTab(eplot.KiT_Plot2D, "TimePlot").(*eplot.Plot2D)
- ss.TimePlot = ss.ConfigTimePlot(plt, ss.TimeTable)
+ ss.TimePlot = eplot.NewSubPlot(tv.NewTab("TimePlot"))
+ ss.ConfigTimePlot(ss.TimePlot, ss.TimeTable)
split.SetSplits(.3, .7)
- tbar.AddAction(gi.ActOpts{Label: "Ca-G Run", Icon: "update", Tooltip: "Run the equations and plot results."}, win.This(), func(recv, send ki.Ki, sig int64, data interface{}) {
- ss.CaRun()
- vp.SetNeedsFullRender()
- })
-
- tbar.AddAction(gi.ActOpts{Label: "Time Run", Icon: "update", Tooltip: "Run the equations and plot results."}, win.This(), func(recv, send ki.Ki, sig int64, data interface{}) {
- ss.TimeRun()
- vp.SetNeedsFullRender()
+ b.AddAppBar(func(tb *gi.Toolbar) {
+ giv.NewFuncButton(tb, ss.CaRun).SetIcon(icons.PlayArrow)
+ giv.NewFuncButton(tb, ss.TimeRun).SetIcon(icons.PlayArrow)
})
- tbar.AddAction(gi.ActOpts{Label: "README", Icon: "file-markdown", Tooltip: "Opens your browser on the README file that contains instructions for how to run this model."}, win.This(),
- func(recv, send ki.Ki, sig int64, data interface{}) {
- gi.OpenURL("https://github.com/emer/axon/blob/master/chans/sahp_plot/README.md")
- })
-
- vp.UpdateEndNoSig(updt)
-
- // main menu
- appnm := gi.AppName()
- mmen := win.MainMenu
- mmen.ConfigMenus([]string{appnm, "File", "Edit", "Window"})
-
- amen := win.MainMenu.ChildByName(appnm, 0).(*gi.Action)
- amen.Menu.AddAppMenu(win)
-
- emen := win.MainMenu.ChildByName("Edit", 1).(*gi.Action)
- emen.Menu.AddCopyCutPaste(win)
-
- win.MainMenuUpdated()
- return win
+ return b
}
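Each migrated program now carries the `//go:generate goki generate -add-types` directive, so the gtigen.go files above are reproducible from the standard toolchain: `go generate ./...` from the repo root (or `goki generate` in the package directory) rewrites them. The directive placement, shown as a minimal compilable file for reference:

```go
// The generate directive sits in the package whose types should be
// registered, directly after the package clause.
package main

//go:generate goki generate -add-types

func main() {}
```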
diff --git a/chans/skca.go b/chans/skca.go
index e9da40727..98423cb7f 100644
--- a/chans/skca.go
+++ b/chans/skca.go
@@ -5,7 +5,7 @@
package chans
import (
- "github.com/goki/mat32"
+ "goki.dev/mat32/v2"
)
//gosl: start chans
@@ -23,41 +23,41 @@ import (
// (also Muddapu & Chakravarthy, 2021): X^h / (X^h + C50^h) where h ~= 4 (hard coded)
type SKCaParams struct {
- // [def: 0,2,3] overall strength of sKCa current -- inactive if 0
- Gbar float32 `def:"0,2,3" desc:"overall strength of sKCa current -- inactive if 0"`
+ // overall strength of sKCa current -- inactive if 0
+ Gbar float32 `def:"0,2,3"`
- // [def: 0.4,0.5] [viewif: Gbar>0] 50% Ca concentration baseline value in Hill equation -- set this to level that activates at reasonable levels of SKCaR
- C50 float32 `viewif:"Gbar>0" def:"0.4,0.5" desc:"50% Ca concentration baseline value in Hill equation -- set this to level that activates at reasonable levels of SKCaR"`
+ // 50% Ca concentration baseline value in Hill equation -- set this to a level that activates at reasonable levels of SKCaR
+ C50 float32 `viewif:"Gbar>0" def:"0.4,0.5"`
- // [def: 15] [viewif: Gbar>0] K channel gating factor activation time constant -- roughly 5-15 msec in literature
- ActTau float32 `viewif:"Gbar>0" def:"15" desc:"K channel gating factor activation time constant -- roughly 5-15 msec in literature"`
+ // K channel gating factor activation time constant -- roughly 5-15 msec in literature
+ ActTau float32 `viewif:"Gbar>0" def:"15"`
- // [def: 30] [viewif: Gbar>0] K channel gating factor deactivation time constant -- roughly 30-50 msec in literature
- DeTau float32 `viewif:"Gbar>0" def:"30" desc:"K channel gating factor deactivation time constant -- roughly 30-50 msec in literature"`
+ // K channel gating factor deactivation time constant -- roughly 30-50 msec in literature
+ DeTau float32 `viewif:"Gbar>0" def:"30"`
- // [def: 0.4,0.8] [viewif: Gbar>0] proportion of CaIn intracellular stores that are released per spike, going into CaR
- KCaR float32 `viewif:"Gbar>0" def:"0.4,0.8" desc:"proportion of CaIn intracellular stores that are released per spike, going into CaR"`
+ // proportion of CaIn intracellular stores that are released per spike, going into CaR
+ KCaR float32 `viewif:"Gbar>0" def:"0.4,0.8"`
- // [def: 150,200] [viewif: Gbar>0] SKCaR released calcium decay time constant
- CaRDecayTau float32 `viewif:"Gbar>0" def:"150,200" desc:"SKCaR released calcium decay time constant"`
+ // SKCaR released calcium decay time constant
+ CaRDecayTau float32 `viewif:"Gbar>0" def:"150,200"`
- // [def: 0.01] [viewif: Gbar>0] level of time-integrated spiking activity (CaSpkD) below which CaIn intracelluar stores are replenished -- a low threshold can be used to require minimal activity to recharge -- set to a high value (e.g., 10) for constant recharge.
- CaInThr float32 `viewif:"Gbar>0" def:"0.01" desc:"level of time-integrated spiking activity (CaSpkD) below which CaIn intracelluar stores are replenished -- a low threshold can be used to require minimal activity to recharge -- set to a high value (e.g., 10) for constant recharge."`
+ // level of time-integrated spiking activity (CaSpkD) below which CaIn intracellular stores are replenished -- a low threshold can be used to require minimal activity to recharge -- set to a high value (e.g., 10) for constant recharge.
+ CaInThr float32 `viewif:"Gbar>0" def:"0.01"`
- // [def: 50] [viewif: Gbar>0] time constant in msec for storing CaIn when activity is below CaInThr
- CaInTau float32 `viewif:"Gbar>0" def:"50" desc:"time constant in msec for storing CaIn when activity is below CaInThr"`
+ // time constant in msec for storing CaIn when activity is below CaInThr
+ CaInTau float32 `viewif:"Gbar>0" def:"50"`
- // [view: -] rate = 1 / tau
- ActDt float32 `view:"-" json:"-" xml:"-" desc:"rate = 1 / tau"`
+ // rate = 1 / tau
+ ActDt float32 `view:"-" json:"-" xml:"-"`
- // [view: -] rate = 1 / tau
- DeDt float32 `view:"-" json:"-" xml:"-" desc:"rate = 1 / tau"`
+ // rate = 1 / tau
+ DeDt float32 `view:"-" json:"-" xml:"-"`
- // [view: -] rate = 1 / tau
- CaRDecayDt float32 `view:"-" json:"-" xml:"-" desc:"rate = 1 / tau"`
+ // rate = 1 / tau
+ CaRDecayDt float32 `view:"-" json:"-" xml:"-"`
- // [view: -] rate = 1 / tau
- CaInDt float32 `view:"-" json:"-" xml:"-" desc:"rate = 1 / tau"`
+ // rate = 1 / tau
+ CaInDt float32 `view:"-" json:"-" xml:"-"`
}
func (sp *SKCaParams) Defaults() {
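The comment above SKCaParams quotes the gating function directly: X^h / (X^h + C50^h) with h ~= 4 hard-coded. A literal sketch of that Hill equation; the `skCaM` name is hypothetical, not the axon method:

```go
package main

import "fmt"

// skCaM computes the Hill-equation M gating factor quoted in the
// SKCaParams docs: ca^4 / (ca^4 + c50^4), with exponent h = 4.
func skCaM(ca, c50 float32) float32 {
	ca4 := ca * ca * ca * ca
	c504 := c50 * c50 * c50 * c50
	return ca4 / (ca4 + c504)
}

func main() {
	fmt.Println(skCaM(0.4, 0.4)) // 0.5 at the C50 midpoint
}
```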
diff --git a/chans/skca_plot/gtigen.go b/chans/skca_plot/gtigen.go
new file mode 100644
index 000000000..c043acdb1
--- /dev/null
+++ b/chans/skca_plot/gtigen.go
@@ -0,0 +1,38 @@
+// Code generated by "goki generate -add-types"; DO NOT EDIT.
+
+package main
+
+import (
+ "goki.dev/gti"
+ "goki.dev/ordmap"
+)
+
+var _ = gti.AddType(&gti.Type{
+ Name: "main.Sim",
+ ShortName: "main.Sim",
+ IDName: "sim",
+ Doc: "Sim holds the params, table, etc",
+ Directives: gti.Directives{},
+ Fields: ordmap.Make([]ordmap.KeyVal[string, *gti.Field]{
+ {"SKCa", >i.Field{Name: "SKCa", Type: "github.com/emer/axon/chans.SKCaParams", LocalType: "chans.SKCaParams", Doc: "SKCa params", Directives: gti.Directives{}, Tag: ""}},
+ {"CaParams", >i.Field{Name: "CaParams", Type: "github.com/emer/axon/kinase.CaParams", LocalType: "kinase.CaParams", Doc: "time constants for integrating Ca from spiking across M, P and D cascading levels", Directives: gti.Directives{}, Tag: ""}},
+ {"NoSpikeThr", >i.Field{Name: "NoSpikeThr", Type: "float32", LocalType: "float32", Doc: "threshold of SK M gating factor above which the neuron cannot spike", Directives: gti.Directives{}, Tag: "def:\"0.5\""}},
+ {"CaStep", >i.Field{Name: "CaStep", Type: "float32", LocalType: "float32", Doc: "Ca conc increment for M gating func plot", Directives: gti.Directives{}, Tag: "def:\"0.05\""}},
+ {"TimeSteps", >i.Field{Name: "TimeSteps", Type: "int", LocalType: "int", Doc: "number of time steps", Directives: gti.Directives{}, Tag: ""}},
+ {"TimeSpike", >i.Field{Name: "TimeSpike", Type: "bool", LocalType: "bool", Doc: "do spiking instead of Ca conc ramp", Directives: gti.Directives{}, Tag: ""}},
+ {"SpikeFreq", >i.Field{Name: "SpikeFreq", Type: "float32", LocalType: "float32", Doc: "spiking frequency", Directives: gti.Directives{}, Tag: ""}},
+ {"Table", >i.Field{Name: "Table", Type: "*goki.dev/etable/v2/etable.Table", LocalType: "*etable.Table", Doc: "table for plot", Directives: gti.Directives{}, Tag: "view:\"no-inline\""}},
+ {"Plot", >i.Field{Name: "Plot", Type: "*goki.dev/etable/v2/eplot.Plot2D", LocalType: "*eplot.Plot2D", Doc: "the plot", Directives: gti.Directives{}, Tag: "view:\"-\""}},
+ {"TimeTable", >i.Field{Name: "TimeTable", Type: "*goki.dev/etable/v2/etable.Table", LocalType: "*etable.Table", Doc: "table for plot", Directives: gti.Directives{}, Tag: "view:\"no-inline\""}},
+ {"TimePlot", >i.Field{Name: "TimePlot", Type: "*goki.dev/etable/v2/eplot.Plot2D", LocalType: "*eplot.Plot2D", Doc: "the plot", Directives: gti.Directives{}, Tag: "view:\"-\""}},
+ }),
+ Embeds: ordmap.Make([]ordmap.KeyVal[string, *gti.Field]{}),
+ Methods: ordmap.Make([]ordmap.KeyVal[string, *gti.Method]{
+ {"CamRun", >i.Method{Name: "CamRun", Doc: "CamRun plots the equation as a function of Ca", Directives: gti.Directives{
+ >i.Directive{Tool: "gti", Directive: "add", Args: []string{}},
+ }, Args: ordmap.Make([]ordmap.KeyVal[string, *gti.Field]{}), Returns: ordmap.Make([]ordmap.KeyVal[string, *gti.Field]{})}},
+ {"TimeRun", >i.Method{Name: "TimeRun", Doc: "TimeRun runs the equation over time.", Directives: gti.Directives{
+ >i.Directive{Tool: "gti", Directive: "add", Args: []string{}},
+ }, Args: ordmap.Make([]ordmap.KeyVal[string, *gti.Field]{}), Returns: ordmap.Make([]ordmap.KeyVal[string, *gti.Field]{})}},
+ }),
+})
diff --git a/chans/skca_plot/skca_plot.go b/chans/skca_plot/skca_plot.go
index 5432dbee5..633e8353a 100644
--- a/chans/skca_plot/skca_plot.go
+++ b/chans/skca_plot/skca_plot.go
@@ -5,33 +5,31 @@
// ska_plot plots an equation updating over time in a etable.Table and Plot2D.
package main
+//go:generate goki generate -add-types
+
import (
"strconv"
"github.com/emer/axon/chans"
"github.com/emer/axon/kinase"
- "github.com/emer/etable/eplot"
- "github.com/emer/etable/etable"
- "github.com/emer/etable/etensor"
- _ "github.com/emer/etable/etview" // include to get gui views
- "github.com/goki/gi/gi"
- "github.com/goki/gi/gimain"
- "github.com/goki/gi/giv"
- "github.com/goki/ki/ki"
- "github.com/goki/mat32"
+ "goki.dev/etable/v2/eplot"
+ "goki.dev/etable/v2/etable"
+ "goki.dev/etable/v2/etensor"
+ _ "goki.dev/etable/v2/etview" // include to get gui views
+ "goki.dev/gi/v2/gi"
+ "goki.dev/gi/v2/gimain"
+ "goki.dev/gi/v2/giv"
+ "goki.dev/icons"
)
-func main() {
- TheSim.Config()
- gimain.Main(func() { // this starts gui -- requires valid OpenGL display connection (e.g., X11)
- guirun()
- })
-}
+func main() { gimain.Run(app) }
-func guirun() {
- TheSim.CamRun()
- win := TheSim.ConfigGui()
- win.StartEventLoop()
+func app() {
+ sim := &Sim{}
+ sim.Config()
+ sim.CamRun()
+ b := sim.ConfigGUI()
+ b.NewWindow().Run().Wait()
}
// LogPrec is precision for saving float values in logs
@@ -41,48 +39,39 @@ const LogPrec = 4
type Sim struct {
// SKCa params
- SKCa chans.SKCaParams `desc:"SKCa params"`
+ SKCa chans.SKCaParams
// time constants for integrating Ca from spiking across M, P and D cascading levels
- CaParams kinase.CaParams `desc:"time constants for integrating Ca from spiking across M, P and D cascading levels"`
+ CaParams kinase.CaParams
- // [def: 0.5] threshold of SK M gating factor above which the neuron cannot spike
- NoSpikeThr float32 `def:"0.5" desc:"threshold of SK M gating factor above which the neuron cannot spike"`
+ // threshold of SK M gating factor above which the neuron cannot spike
+ NoSpikeThr float32 `def:"0.5"`
- // [def: 0.05] Ca conc increment for M gating func plot
- CaStep float32 `def:"0.05" desc:"Ca conc increment for M gating func plot"`
+ // Ca conc increment for M gating func plot
+ CaStep float32 `def:"0.05"`
// number of time steps
- TimeSteps int `desc:"number of time steps"`
+ TimeSteps int
// do spiking instead of Ca conc ramp
- TimeSpike bool `desc:"do spiking instead of Ca conc ramp"`
+ TimeSpike bool
// spiking frequency
- SpikeFreq float32 `desc:"spiking frequency"`
+ SpikeFreq float32
- // [view: no-inline] table for plot
- Table *etable.Table `view:"no-inline" desc:"table for plot"`
+ // table for plot
+ Table *etable.Table `view:"no-inline"`
- // [view: -] the plot
- Plot *eplot.Plot2D `view:"-" desc:"the plot"`
+ // the plot
+ Plot *eplot.Plot2D `view:"-"`
- // [view: no-inline] table for plot
- TimeTable *etable.Table `view:"no-inline" desc:"table for plot"`
+ // table for plot
+ TimeTable *etable.Table `view:"no-inline"`
- // [view: -] the plot
- TimePlot *eplot.Plot2D `view:"-" desc:"the plot"`
-
- // [view: -] main GUI window
- Win *gi.Window `view:"-" desc:"main GUI window"`
-
- // [view: -] the master toolbar
- ToolBar *gi.ToolBar `view:"-" desc:"the master toolbar"`
+ // the plot
+ TimePlot *eplot.Plot2D `view:"-"`
}
-// TheSim is the overall state for this simulation
-var TheSim Sim
-
// Config configures all the elements using the standard functions
func (ss *Sim) Config() {
ss.SKCa.Defaults()
@@ -105,7 +94,7 @@ func (ss *Sim) Update() {
}
// CamRun plots the equation as a function of Ca
-func (ss *Sim) CamRun() {
+func (ss *Sim) CamRun() { //gti:add
ss.Update()
dt := ss.Table
@@ -120,7 +109,9 @@ func (ss *Sim) CamRun() {
dt.SetCellFloat("Mhill", vi, float64(mh))
dt.SetCellFloat("Mgw06", vi, float64(mg))
}
- ss.Plot.Update()
+ if ss.Plot != nil {
+ ss.Plot.UpdatePlot()
+ }
}
func (ss *Sim) ConfigTable(dt *etable.Table) {
@@ -150,7 +141,7 @@ func (ss *Sim) ConfigPlot(plt *eplot.Plot2D, dt *etable.Table) *eplot.Plot2D {
/////////////////////////////////////////////////////////////////
// TimeRun runs the equation over time.
-func (ss *Sim) TimeRun() {
+func (ss *Sim) TimeRun() { //gti:add
ss.Update()
dt := ss.TimeTable
@@ -190,7 +181,9 @@ func (ss *Sim) TimeRun() {
}
ss.CaParams.FmSpike(spike, &caM, &caP, &caD)
}
- ss.TimePlot.Update()
+ if ss.TimePlot != nil {
+ ss.TimePlot.UpdatePlot()
+ }
}
func (ss *Sim) ConfigTimeTable(dt *etable.Table) {
@@ -227,73 +220,28 @@ func (ss *Sim) ConfigTimePlot(plt *eplot.Plot2D, dt *etable.Table) *eplot.Plot2D
return plt
}
-// ConfigGui configures the GoGi gui interface for this simulation,
-func (ss *Sim) ConfigGui() *gi.Window {
- width := 1600
- height := 1200
-
- // gi.WinEventTrace = true
-
- gi.SetAppName("skca_plot")
- gi.SetAppAbout(`This plots an equation. See emergent on GitHub.`)
-
- win := gi.NewMainWindow("skca_plot", "Plotting Equations", width, height)
- ss.Win = win
+// ConfigGUI configures the GoGi GUI interface for this simulation.
+func (ss *Sim) ConfigGUI() *gi.Body {
+ b := gi.NewAppBody("skca_plot").SetTitle("Plotting Equations")
- vp := win.WinViewport2D()
- updt := vp.UpdateStart()
-
- mfr := win.SetMainFrame()
-
- tbar := gi.AddNewToolBar(mfr, "tbar")
- tbar.SetStretchMaxWidth()
- ss.ToolBar = tbar
-
- split := gi.AddNewSplitView(mfr, "split")
- split.Dim = mat32.X
- split.SetStretchMax()
-
- sv := giv.AddNewStructView(split, "sv")
+ split := gi.NewSplits(b, "split")
+ sv := giv.NewStructView(split, "sv")
sv.SetStruct(ss)
- tv := gi.AddNewTabView(split, "tv")
+ tv := gi.NewTabs(split, "tv")
- plt := tv.AddNewTab(eplot.KiT_Plot2D, "Ca-G Plot").(*eplot.Plot2D)
- ss.Plot = ss.ConfigPlot(plt, ss.Table)
+ ss.Plot = eplot.NewSubPlot(tv.NewTab("Ca-G Plot"))
+ ss.ConfigPlot(ss.Plot, ss.Table)
- plt = tv.AddNewTab(eplot.KiT_Plot2D, "TimePlot").(*eplot.Plot2D)
- ss.TimePlot = ss.ConfigTimePlot(plt, ss.TimeTable)
+ ss.TimePlot = eplot.NewSubPlot(tv.NewTab("TimePlot"))
+ ss.ConfigTimePlot(ss.TimePlot, ss.TimeTable)
split.SetSplits(.3, .7)
- tbar.AddAction(gi.ActOpts{Label: "Ca-M Run", Icon: "update", Tooltip: "Run the equations and plot results."}, win.This(), func(recv, send ki.Ki, sig int64, data interface{}) {
- ss.CamRun()
- vp.SetNeedsFullRender()
- })
-
- tbar.AddAction(gi.ActOpts{Label: "Time Run", Icon: "update", Tooltip: "Run the equations and plot results."}, win.This(), func(recv, send ki.Ki, sig int64, data interface{}) {
- ss.TimeRun()
- vp.SetNeedsFullRender()
+ b.AddAppBar(func(tb *gi.Toolbar) {
+ giv.NewFuncButton(tb, ss.CamRun).SetIcon(icons.PlayArrow)
+ giv.NewFuncButton(tb, ss.TimeRun).SetIcon(icons.PlayArrow)
})
- tbar.AddAction(gi.ActOpts{Label: "README", Icon: "file-markdown", Tooltip: "Opens your browser on the README file that contains instructions for how to run this model."}, win.This(),
- func(recv, send ki.Ki, sig int64, data interface{}) {
- gi.OpenURL("https://github.com/emer/axon/blob/master/chans/skca_plot/README.md")
- })
-
- vp.UpdateEndNoSig(updt)
-
- // main menu
- appnm := gi.AppName()
- mmen := win.MainMenu
- mmen.ConfigMenus([]string{appnm, "File", "Edit", "Window"})
-
- amen := win.MainMenu.ChildByName(appnm, 0).(*gi.Action)
- amen.Menu.AddAppMenu(win)
-
- emen := win.MainMenu.ChildByName("Edit", 1).(*gi.Action)
- emen.Menu.AddCopyCutPaste(win)
-
- win.MainMenuUpdated()
- return win
+ return b
}
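skca_plot's TimeRun also shows the kinase cascade in use: `ss.CaParams.FmSpike(spike, &caM, &caP, &caD)` integrates each spike through the M, P, and D levels. A standalone sketch of driving that cascade with a regular spike train; FmSpike's exact signature is inferred from the call site above, and the presence of a `Defaults()` method on CaParams is an assumption (it is the standard pattern for emer params structs):

```go
package main

import (
	"fmt"

	"github.com/emer/axon/kinase"
)

func main() {
	var cp kinase.CaParams
	cp.Defaults()
	var caM, caP, caD float32
	for t := 0; t < 200; t++ {
		spike := float32(0)
		if t%20 == 0 { // ~50 Hz at 1 msec steps, illustrative only
			spike = 1
		}
		// integrate this step's spike through the cascading levels
		cp.FmSpike(spike, &caM, &caP, &caD)
	}
	fmt.Println(caM, caP, caD)
}
```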
diff --git a/chans/vgcc.go b/chans/vgcc.go
index dbcaa36ee..68e5e3d4d 100644
--- a/chans/vgcc.go
+++ b/chans/vgcc.go
@@ -5,7 +5,7 @@
package chans
import (
- "github.com/goki/mat32"
+ "goki.dev/mat32/v2"
)
//gosl: start chans
@@ -16,11 +16,11 @@ import (
// In particular look at the file MODEL/Poirazi_cell/CaL.g.
type VGCCParams struct {
- // [def: 0.02,0.12] strength of VGCC current -- 0.12 value is from Urakubo et al (2008) model -- best fits actual model behavior using axon equations (1.5 nominal in that model), 0.02 works better in practice for not getting stuck in high plateau firing
- Gbar float32 `def:"0.02,0.12" desc:"strength of VGCC current -- 0.12 value is from Urakubo et al (2008) model -- best fits actual model behavior using axon equations (1.5 nominal in that model), 0.02 works better in practice for not getting stuck in high plateau firing"`
+ // strength of VGCC current -- 0.12 value is from Urakubo et al (2008) model -- best fits actual model behavior using axon equations (1.5 nominal in that model), 0.02 works better in practice for not getting stuck in high plateau firing
+ Gbar float32 `def:"0.02,0.12"`
- // [def: 25] [viewif: Gbar>0] calcium from conductance factor -- important for learning contribution of VGCC
- Ca float32 `viewif:"Gbar>0" def:"25" desc:"calcium from conductance factor -- important for learning contribution of VGCC"`
+ // calcium from conductance factor -- important for learning contribution of VGCC
+ Ca float32 `viewif:"Gbar>0" def:"25"`
pad, pad1 int32
}
diff --git a/chans/vgcc_plot/gtigen.go b/chans/vgcc_plot/gtigen.go
new file mode 100644
index 000000000..50a5a92ca
--- /dev/null
+++ b/chans/vgcc_plot/gtigen.go
@@ -0,0 +1,40 @@
+// Code generated by "goki generate -add-types"; DO NOT EDIT.
+
+package main
+
+import (
+ "goki.dev/gti"
+ "goki.dev/ordmap"
+)
+
+var _ = gti.AddType(&gti.Type{
+ Name: "main.Sim",
+ ShortName: "main.Sim",
+ IDName: "sim",
+ Doc: "Sim holds the params, table, etc",
+ Directives: gti.Directives{},
+ Fields: ordmap.Make([]ordmap.KeyVal[string, *gti.Field]{
+ {"VGCC", >i.Field{Name: "VGCC", Type: "github.com/emer/axon/chans.VGCCParams", LocalType: "chans.VGCCParams", Doc: "VGCC function", Directives: gti.Directives{}, Tag: ""}},
+ {"Vstart", >i.Field{Name: "Vstart", Type: "float32", LocalType: "float32", Doc: "starting voltage", Directives: gti.Directives{}, Tag: "def:\"-90\""}},
+ {"Vend", >i.Field{Name: "Vend", Type: "float32", LocalType: "float32", Doc: "ending voltage", Directives: gti.Directives{}, Tag: "def:\"0\""}},
+ {"Vstep", >i.Field{Name: "Vstep", Type: "float32", LocalType: "float32", Doc: "voltage increment", Directives: gti.Directives{}, Tag: "def:\"1\""}},
+ {"TimeSteps", >i.Field{Name: "TimeSteps", Type: "int", LocalType: "int", Doc: "number of time steps", Directives: gti.Directives{}, Tag: ""}},
+ {"TimeSpike", >i.Field{Name: "TimeSpike", Type: "bool", LocalType: "bool", Doc: "do spiking instead of voltage ramp", Directives: gti.Directives{}, Tag: ""}},
+ {"SpikeFreq", >i.Field{Name: "SpikeFreq", Type: "float32", LocalType: "float32", Doc: "spiking frequency", Directives: gti.Directives{}, Tag: ""}},
+ {"TimeVstart", >i.Field{Name: "TimeVstart", Type: "float32", LocalType: "float32", Doc: "time-run starting membrane potential", Directives: gti.Directives{}, Tag: ""}},
+ {"TimeVend", >i.Field{Name: "TimeVend", Type: "float32", LocalType: "float32", Doc: "time-run ending membrane potential", Directives: gti.Directives{}, Tag: ""}},
+ {"Table", >i.Field{Name: "Table", Type: "*goki.dev/etable/v2/etable.Table", LocalType: "*etable.Table", Doc: "table for plot", Directives: gti.Directives{}, Tag: "view:\"no-inline\""}},
+ {"Plot", >i.Field{Name: "Plot", Type: "*goki.dev/etable/v2/eplot.Plot2D", LocalType: "*eplot.Plot2D", Doc: "the plot", Directives: gti.Directives{}, Tag: "view:\"-\""}},
+ {"TimeTable", >i.Field{Name: "TimeTable", Type: "*goki.dev/etable/v2/etable.Table", LocalType: "*etable.Table", Doc: "table for plot", Directives: gti.Directives{}, Tag: "view:\"no-inline\""}},
+ {"TimePlot", >i.Field{Name: "TimePlot", Type: "*goki.dev/etable/v2/eplot.Plot2D", LocalType: "*eplot.Plot2D", Doc: "the plot", Directives: gti.Directives{}, Tag: "view:\"-\""}},
+ }),
+ Embeds: ordmap.Make([]ordmap.KeyVal[string, *gti.Field]{}),
+ Methods: ordmap.Make([]ordmap.KeyVal[string, *gti.Method]{
+ {"VmRun", >i.Method{Name: "VmRun", Doc: "VmRun plots the equation as a function of V", Directives: gti.Directives{
+ >i.Directive{Tool: "gti", Directive: "add", Args: []string{}},
+ }, Args: ordmap.Make([]ordmap.KeyVal[string, *gti.Field]{}), Returns: ordmap.Make([]ordmap.KeyVal[string, *gti.Field]{})}},
+ {"TimeRun", >i.Method{Name: "TimeRun", Doc: "TimeRun runs the equation over time.", Directives: gti.Directives{
+ >i.Directive{Tool: "gti", Directive: "add", Args: []string{}},
+ }, Args: ordmap.Make([]ordmap.KeyVal[string, *gti.Field]{}), Returns: ordmap.Make([]ordmap.KeyVal[string, *gti.Field]{})}},
+ }),
+})
diff --git a/chans/vgcc_plot/vgcc_plot.go b/chans/vgcc_plot/vgcc_plot.go
index f147d4ab6..31e924385 100644
--- a/chans/vgcc_plot/vgcc_plot.go
+++ b/chans/vgcc_plot/vgcc_plot.go
@@ -5,32 +5,30 @@
// vgcc_plot plots an equation updating over time in an etable.Table and Plot2D.
package main
+//go:generate goki generate -add-types
+
import (
"strconv"
"github.com/emer/axon/chans"
- "github.com/emer/etable/eplot"
- "github.com/emer/etable/etable"
- "github.com/emer/etable/etensor"
- _ "github.com/emer/etable/etview" // include to get gui views
- "github.com/goki/gi/gi"
- "github.com/goki/gi/gimain"
- "github.com/goki/gi/giv"
- "github.com/goki/ki/ki"
- "github.com/goki/mat32"
+ "goki.dev/etable/v2/eplot"
+ "goki.dev/etable/v2/etable"
+ "goki.dev/etable/v2/etensor"
+ _ "goki.dev/etable/v2/etview" // include to get gui views
+ "goki.dev/gi/v2/gi"
+ "goki.dev/gi/v2/gimain"
+ "goki.dev/gi/v2/giv"
+ "goki.dev/icons"
)
-func main() {
- TheSim.Config()
- gimain.Main(func() { // this starts gui -- requires valid OpenGL display connection (e.g., X11)
- guirun()
- })
-}
+func main() { gimain.Run(app) }
-func guirun() {
- TheSim.VmRun()
- win := TheSim.ConfigGui()
- win.StartEventLoop()
+func app() {
+ sim := &Sim{}
+ sim.Config()
+ sim.VmRun()
+ b := sim.ConfigGUI()
+ b.NewWindow().Run().Wait()
}
// LogPrec is precision for saving float values in logs
@@ -40,54 +38,45 @@ const LogPrec = 4
type Sim struct {
// VGCC function
- VGCC chans.VGCCParams `desc:"VGCC function"`
+ VGCC chans.VGCCParams
- // [def: -90] starting voltage
- Vstart float32 `def:"-90" desc:"starting voltage"`
+ // starting voltage
+ Vstart float32 `def:"-90"`
- // [def: 0] ending voltage
- Vend float32 `def:"0" desc:"ending voltage"`
+ // ending voltage
+ Vend float32 `def:"0"`
- // [def: 1] voltage increment
- Vstep float32 `def:"1" desc:"voltage increment"`
+ // voltage increment
+ Vstep float32 `def:"1"`
// number of time steps
- TimeSteps int `desc:"number of time steps"`
+ TimeSteps int
// do spiking instead of voltage ramp
- TimeSpike bool `desc:"do spiking instead of voltage ramp"`
+ TimeSpike bool
// spiking frequency
- SpikeFreq float32 `desc:"spiking frequency"`
+ SpikeFreq float32
// time-run starting membrane potential
- TimeVstart float32 `desc:"time-run starting membrane potential"`
+ TimeVstart float32
// time-run ending membrane potential
- TimeVend float32 `desc:"time-run ending membrane potential"`
+ TimeVend float32
- // [view: no-inline] table for plot
- Table *etable.Table `view:"no-inline" desc:"table for plot"`
+ // table for plot
+ Table *etable.Table `view:"no-inline"`
- // [view: -] the plot
- Plot *eplot.Plot2D `view:"-" desc:"the plot"`
+ // the plot
+ Plot *eplot.Plot2D `view:"-"`
- // [view: no-inline] table for plot
- TimeTable *etable.Table `view:"no-inline" desc:"table for plot"`
+ // table for plot
+ TimeTable *etable.Table `view:"no-inline"`
- // [view: -] the plot
- TimePlot *eplot.Plot2D `view:"-" desc:"the plot"`
-
- // [view: -] main GUI window
- Win *gi.Window `view:"-" desc:"main GUI window"`
-
- // [view: -] the master toolbar
- ToolBar *gi.ToolBar `view:"-" desc:"the master toolbar"`
+ // the plot
+ TimePlot *eplot.Plot2D `view:"-"`
}
-// TheSim is the overall state for this simulation
-var TheSim Sim
-
// Config configures all the elements using the standard functions
func (ss *Sim) Config() {
ss.VGCC.Defaults()
@@ -112,7 +101,7 @@ func (ss *Sim) Update() {
}
// VmRun plots the equation as a function of V
-func (ss *Sim) VmRun() {
+func (ss *Sim) VmRun() { //gti:add
ss.Update()
dt := ss.Table
@@ -134,7 +123,9 @@ func (ss *Sim) VmRun() {
dt.SetCellFloat("dM", vi, float64(dm))
dt.SetCellFloat("dH", vi, float64(dh))
}
- ss.Plot.Update()
+ if ss.Plot != nil {
+ ss.Plot.UpdatePlot()
+ }
}
func (ss *Sim) ConfigTable(dt *etable.Table) {
@@ -168,7 +159,7 @@ func (ss *Sim) ConfigPlot(plt *eplot.Plot2D, dt *etable.Table) *eplot.Plot2D {
/////////////////////////////////////////////////////////////////
// TimeRun runs the equation over time.
-func (ss *Sim) TimeRun() {
+func (ss *Sim) TimeRun() { //gti:add
ss.Update()
dt := ss.TimeTable
@@ -212,7 +203,9 @@ func (ss *Sim) TimeRun() {
}
}
}
- ss.TimePlot.Update()
+ if ss.TimePlot != nil {
+ ss.TimePlot.UpdatePlot()
+ }
}
func (ss *Sim) ConfigTimeTable(dt *etable.Table) {
@@ -246,73 +239,28 @@ func (ss *Sim) ConfigTimePlot(plt *eplot.Plot2D, dt *etable.Table) *eplot.Plot2D
return plt
}
-// ConfigGui configures the GoGi gui interface for this simulation,
-func (ss *Sim) ConfigGui() *gi.Window {
- width := 1600
- height := 1200
-
- // gi.WinEventTrace = true
-
- gi.SetAppName("vgcc_plot")
- gi.SetAppAbout(`This plots an equation. See emergent on GitHub.`)
-
- win := gi.NewMainWindow("vgcc_plot", "Plotting Equations", width, height)
- ss.Win = win
+// ConfigGUI configures the GoGi gui interface for this simulation.
+func (ss *Sim) ConfigGUI() *gi.Body {
+ b := gi.NewAppBody("vgcc_plot").SetTitle("Plotting Equations")
- vp := win.WinViewport2D()
- updt := vp.UpdateStart()
-
- mfr := win.SetMainFrame()
-
- tbar := gi.AddNewToolBar(mfr, "tbar")
- tbar.SetStretchMaxWidth()
- ss.ToolBar = tbar
-
- split := gi.AddNewSplitView(mfr, "split")
- split.Dim = mat32.X
- split.SetStretchMax()
-
- sv := giv.AddNewStructView(split, "sv")
+ split := gi.NewSplits(b, "split")
+ sv := giv.NewStructView(split, "sv")
sv.SetStruct(ss)
- tv := gi.AddNewTabView(split, "tv")
+ tv := gi.NewTabs(split, "tv")
- plt := tv.AddNewTab(eplot.KiT_Plot2D, "V-G Plot").(*eplot.Plot2D)
- ss.Plot = ss.ConfigPlot(plt, ss.Table)
+ ss.Plot = eplot.NewSubPlot(tv.NewTab("V-G Plot"))
+ ss.ConfigPlot(ss.Plot, ss.Table)
- plt = tv.AddNewTab(eplot.KiT_Plot2D, "TimePlot").(*eplot.Plot2D)
- ss.TimePlot = ss.ConfigTimePlot(plt, ss.TimeTable)
+ ss.TimePlot = eplot.NewSubPlot(tv.NewTab("TimePlot"))
+ ss.ConfigTimePlot(ss.TimePlot, ss.TimeTable)
split.SetSplits(.3, .7)
- tbar.AddAction(gi.ActOpts{Label: "V-G Run", Icon: "update", Tooltip: "Run the equations and plot results."}, win.This(), func(recv, send ki.Ki, sig int64, data interface{}) {
- ss.VmRun()
- vp.SetNeedsFullRender()
- })
-
- tbar.AddAction(gi.ActOpts{Label: "Time Run", Icon: "update", Tooltip: "Run the equations and plot results."}, win.This(), func(recv, send ki.Ki, sig int64, data interface{}) {
- ss.TimeRun()
- vp.SetNeedsFullRender()
+ b.AddAppBar(func(tb *gi.Toolbar) {
+ giv.NewFuncButton(tb, ss.VmRun).SetIcon(icons.PlayArrow)
+ giv.NewFuncButton(tb, ss.TimeRun).SetIcon(icons.PlayArrow)
})
- tbar.AddAction(gi.ActOpts{Label: "README", Icon: "file-markdown", Tooltip: "Opens your browser on the README file that contains instructions for how to run this model."}, win.This(),
- func(recv, send ki.Ki, sig int64, data interface{}) {
- gi.OpenURL("https://github.com/emer/axon/blob/master/chans/vgcc_plot/README.md")
- })
-
- vp.UpdateEndNoSig(updt)
-
- // main menu
- appnm := gi.AppName()
- mmen := win.MainMenu
- mmen.ConfigMenus([]string{appnm, "File", "Edit", "Window"})
-
- amen := win.MainMenu.ChildByName(appnm, 0).(*gi.Action)
- amen.Menu.AddAppMenu(win)
-
- emen := win.MainMenu.ChildByName("Edit", 1).(*gi.Action)
- emen.Menu.AddCopyCutPaste(win)
-
- win.MainMenuUpdated()
- return win
+ return b
}
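The vgcc_plot changes above are the template for the v1 -> v2 GUI migration throughout this diff: `gimain.Main` + `guirun` + explicit window, toolbar, and menu wiring collapse into `gimain.Run`, a `gi.Body`, and an app bar whose buttons are `giv.NewFuncButton` entries driven by the generated `//gti:add` method metadata. A minimal sketch of that scaffold, using only calls that appear in the diff (the "demo" app name is illustrative):

```go
package main

import (
	"goki.dev/gi/v2/gi"
	"goki.dev/gi/v2/gimain"
)

// gimain.Run replaces the v1 gimain.Main + guirun + StartEventLoop pattern.
func main() { gimain.Run(app) }

func app() {
	// gi.Body replaces the v1 gi.Window / main-frame / main-menu boilerplate.
	b := gi.NewAppBody("demo").SetTitle("Minimal v2 App")
	b.AddAppBar(func(tb *gi.Toolbar) {
		// in the sims above, buttons are added here via giv.NewFuncButton,
		// which pulls name, doc, and icon info from the gti registry;
		// omitted to keep this sketch dependency-free.
	})
	// NewWindow().Run().Wait() replaces win.StartEventLoop().
	b.NewWindow().Run().Wait()
}
```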
diff --git a/examples/attn_trn/attn.go b/examples/attn_trn/attn.go
index 06cf54a7f..f7fd56be6 100644
--- a/examples/attn_trn/attn.go
+++ b/examples/attn_trn/attn.go
@@ -18,24 +18,24 @@ import (
"strconv"
"github.com/emer/axon/axon"
- "github.com/emer/emergent/emer"
- "github.com/emer/emergent/env"
- "github.com/emer/emergent/evec"
- "github.com/emer/emergent/netview"
- "github.com/emer/emergent/params"
- "github.com/emer/emergent/prjn"
- "github.com/emer/emergent/relpos"
- "github.com/emer/etable/agg"
- "github.com/emer/etable/eplot"
- "github.com/emer/etable/etable"
- "github.com/emer/etable/etensor"
- "github.com/emer/etable/split"
- "github.com/goki/gi/gi"
- "github.com/goki/gi/gimain"
- "github.com/goki/gi/giv"
+ "github.com/emer/emergent/v2/emer"
+ "github.com/emer/emergent/v2/env"
+ "github.com/emer/emergent/v2/evec"
+ "github.com/emer/emergent/v2/netview"
+ "github.com/emer/emergent/v2/params"
+ "github.com/emer/emergent/v2/prjn"
+ "github.com/emer/emergent/v2/relpos"
"github.com/goki/ki/ki"
"github.com/goki/ki/kit"
- "github.com/goki/mat32"
+ "goki.dev/etable/v2/agg"
+ "goki.dev/etable/v2/eplot"
+ "goki.dev/etable/v2/etable"
+ "goki.dev/etable/v2/etensor"
+ "goki.dev/etable/v2/split"
+ "goki.dev/gi/v2/gi"
+ "goki.dev/gi/v2/gimain"
+ "goki.dev/gi/v2/giv"
+ "goki.dev/mat32/v2"
)
// this is the stub main for gogi that calls our actual mainrun function, at end of file
@@ -216,89 +216,89 @@ var ParamSets = params.Sets{
// for the fields which provide hints to how things should be displayed).
type Sim struct {
- // [def: 200] number of cycles per trial
- Cycles int `def:"200" desc:"number of cycles per trial"`
+ // number of cycles per trial
+ Cycles int `def:"200"`
- // [def: 10] number of runs to run to collect stats
- Runs int `def:"10" desc:"number of runs to run to collect stats"`
+ // number of runs to run to collect stats
+ Runs int `def:"10"`
- // [def: true] sodium (Na) gated potassium (K) channels that cause neurons to fatigue over time
- KNaAdapt bool `def:"true" desc:"sodium (Na) gated potassium (K) channels that cause neurons to fatigue over time"`
+ // sodium (Na) gated potassium (K) channels that cause neurons to fatigue over time
+ KNaAdapt bool `def:"true"`
- // [view: no-inline] the network -- click to view / edit parameters for layers, prjns, etc
- Net *axon.Network `view:"no-inline" desc:"the network -- click to view / edit parameters for layers, prjns, etc"`
+ // the network -- click to view / edit parameters for layers, prjns, etc
+ Net *axon.Network `view:"no-inline"`
- // [view: Standard same-to-same size topographic projection]
+ //
Prjn3x3Skp1 *prjn.PoolTile `view:"Standard same-to-same size topographic projection"`
- // [view: Standard same-to-same size topographic projection]
+ //
Prjn5x5Skp1 *prjn.PoolTile `view:"Standard same-to-same size topographic projection"`
// select which type of test (input patterns) to use
- Test TestType `desc:"select which type of test (input patterns) to use"`
+ Test TestType
- // [view: no-inline] testing trial-level log data -- click to see record of network's response to each input
- TstTrlLog *etable.Table `view:"no-inline" desc:"testing trial-level log data -- click to see record of network's response to each input"`
+ // testing trial-level log data -- click to see record of network's response to each input
+ TstTrlLog *etable.Table `view:"no-inline"`
- // [view: no-inline] aggregated testing data
- TstRunLog *etable.Table `view:"no-inline" desc:"aggregated testing data"`
+ // aggregated testing data
+ TstRunLog *etable.Table `view:"no-inline"`
- // [view: no-inline] aggregate stats on testing data
- TstStats *etable.Table `view:"no-inline" desc:"aggregate stats on testing data"`
+ // aggregate stats on testing data
+ TstStats *etable.Table `view:"no-inline"`
- // [view: no-inline] full collection of param sets -- not really interesting for this model
- Params params.Sets `view:"no-inline" desc:"full collection of param sets -- not really interesting for this model"`
+ // full collection of param sets -- not really interesting for this model
+ Params params.Sets `view:"no-inline"`
// Testing environment -- manages iterating over testing
- TestEnv AttnEnv `desc:"Testing environment -- manages iterating over testing"`
+ TestEnv AttnEnv
// axon timing parameters and state
- Context axon.Context `desc:"axon timing parameters and state"`
+ Context axon.Context
// whether to update the network view while running
- ViewOn bool `desc:"whether to update the network view while running"`
+ ViewOn bool
// at what time scale to update the display during testing? Change to AlphaCyc to make display updating go faster
- ViewUpdt axon.TimeScales `desc:"at what time scale to update the display during testing? Change to AlphaCyc to make display updating go faster"`
+ ViewUpdt axon.TimeScales
// layer to measure attentional effects on
- AttnLay string `desc:"layer to measure attentional effects on"`
+ AttnLay string
// names of layers to record activations etc of during testing
- TstRecLays []string `desc:"names of layers to record activations etc of during testing"`
+ TstRecLays []string
// max activation in center of stimulus 1 (attended, stronger)
- S1Act float32 `desc:"max activation in center of stimulus 1 (attended, stronger)"`
+ S1Act float32
// max activation in center of stimulus 2 (ignored, weaker)
- S2Act float32 `desc:"max activation in center of stimulus 2 (ignored, weaker)"`
+ S2Act float32
// percent modulation = (S1Act - S2Act) / S1Act
- PctMod float32 `desc:"percent modulation = (S1Act - S2Act) / S1Act"`
+ PctMod float32
- // [view: -] main GUI window
- Win *gi.Window `view:"-" desc:"main GUI window"`
+ // main GUI window
+ Win *gi.Window `view:"-"`
- // [view: -] the network viewer
- NetView *netview.NetView `view:"-" desc:"the network viewer"`
+ // the network viewer
+ NetView *netview.NetView `view:"-"`
- // [view: -] the master toolbar
- ToolBar *gi.ToolBar `view:"-" desc:"the master toolbar"`
+ // the master toolbar
+ ToolBar *gi.ToolBar `view:"-"`
- // [view: -] the test-trial plot
- TstTrlPlot *eplot.Plot2D `view:"-" desc:"the test-trial plot"`
+ // the test-trial plot
+ TstTrlPlot *eplot.Plot2D `view:"-"`
- // [view: -] the test-trial plot
- TstRunPlot *eplot.Plot2D `view:"-" desc:"the test-trial plot"`
+ // the test-trial plot
+ TstRunPlot *eplot.Plot2D `view:"-"`
- // [view: -] for holding layer values
- ValsTsrs map[string]*etensor.Float32 `view:"-" desc:"for holding layer values"`
+ // for holding layer values
+ ValsTsrs map[string]*etensor.Float32 `view:"-"`
- // [view: -] true if sim is running
- IsRunning bool `view:"-" desc:"true if sim is running"`
+ // true if sim is running
+ IsRunning bool `view:"-"`
- // [view: -] flag to stop running
- StopNow bool `view:"-" desc:"flag to stop running"`
+ // flag to stop running
+ StopNow bool `view:"-"`
}
// this registers this Sim Type and gives it properties that e.g.,
@@ -933,7 +933,7 @@ func (ss *Sim) ConfigTstRunPlot(plt *eplot.Plot2D, dt *etable.Table) *eplot.Plot
func (ss *Sim) ConfigNetView(nv *netview.NetView) {
nv.ViewDefaults()
nv.Scene().Camera.Pose.Pos.Set(0, 1.14, 2.13)
- nv.Scene().Camera.LookAt(mat32.Vec3{0, -0.14, 0}, mat32.Vec3{0, 1, 0})
+ nv.Scene().Camera.LookAt(mat32.V3(0, -0.14, 0), mat32.V3(0, 1, 0))
// nv.SetMaxRecs(300)
}
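The camera call above shows the other mechanical pattern in this migration: mat32 v2 favors constructor functions (`V2`, `V3`) over positional struct literals. A small sketch of the equivalence, reusing the values from the `LookAt` call:

```go
package main

import (
	"fmt"

	"goki.dev/mat32/v2"
)

func main() {
	lit := mat32.Vec3{X: 0, Y: -0.14, Z: 0} // struct-literal form (still valid)
	ctor := mat32.V3(0, -0.14, 0)           // constructor form used in this diff
	fmt.Println(lit == ctor)                // true -- the two forms are equivalent
}
```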
diff --git a/examples/attn_trn/attn_env.go b/examples/attn_trn/attn_env.go
index aab48b93b..f0e6436bf 100644
--- a/examples/attn_trn/attn_env.go
+++ b/examples/attn_trn/attn_env.go
@@ -9,42 +9,42 @@ package main
import (
"fmt"
- "github.com/emer/emergent/efuns"
- "github.com/emer/emergent/env"
- "github.com/emer/emergent/evec"
- "github.com/emer/etable/etensor"
- "github.com/goki/mat32"
+ "github.com/emer/emergent/v2/efuns"
+ "github.com/emer/emergent/v2/env"
+ "github.com/emer/emergent/v2/evec"
+ "goki.dev/etable/v2/etensor"
+ "goki.dev/mat32/v2"
)
// Stim describes a single stimulus
type Stim struct {
// position in normalized coordinates
- Pos mat32.Vec2 `desc:"position in normalized coordinates"`
+ Pos mat32.Vec2
// feature number: 0-3 for V1 input, -1 for LIP attn
- Feat int `desc:"feature number: 0-3 for V1 input, -1 for LIP attn"`
+ Feat int
// normalized width
- Width float32 `desc:"normalized width"`
+ Width float32
// normalized contrast level
- Contrast float32 `desc:"normalized contrast level"`
+ Contrast float32
}
// PosXY returns XY position projected into size of grid
func (st *Stim) PosXY(size evec.Vec2i) mat32.Vec2 {
- return mat32.Vec2{st.Pos.X * float32(size.X-1), st.Pos.Y * float32(size.Y-1)}
+ return mat32.V2(st.Pos.X*float32(size.X-1), st.Pos.Y*float32(size.Y-1))
}
// StimSet is a set of stimuli to be presented together
type StimSet struct {
// description of set
- Name string `desc:"description of set"`
+ Name string
// stims to present
- Stims []Stim `desc:"stims to present"`
+ Stims []Stim
}
// Stims is a list of a set of stimuli to present
@@ -57,52 +57,52 @@ type Stims []StimSet
type AttnEnv struct {
// name of this environment
- Nm string `desc:"name of this environment"`
+ Nm string
// description of this environment
- Dsc string `desc:"description of this environment"`
+ Dsc string
// multiplier on contrast function
- ContrastMult float32 `desc:"multiplier on contrast function"`
+ ContrastMult float32
// gain on contrast function inside exponential
- ContrastGain float32 `desc:"gain on contrast function inside exponential"`
+ ContrastGain float32
// offset on contrast function
- ContrastOff float32 `desc:"offset on contrast function"`
+ ContrastOff float32
// use gaussian for LIP -- otherwise fixed circle
- LIPGauss bool `desc:"use gaussian for LIP -- otherwise fixed circle"`
+ LIPGauss bool
// a list of stimuli to present
- Stims Stims `desc:"a list of stimuli to present"`
+ Stims Stims
// current stimuli presented
- CurStim *StimSet `inactive:"+" desc:"current stimuli presented"`
+ CurStim *StimSet `inactive:"+"`
// activation level (midpoint) -- feature is incremented, rest decremented relative to this
- Act float32 `desc:"activation level (midpoint) -- feature is incremented, rest decremented relative to this"`
+ Act float32
// size of V1 Pools
- V1Pools evec.Vec2i `desc:"size of V1 Pools"`
+ V1Pools evec.Vec2i
// size of V1 features per pool
- V1Feats evec.Vec2i `desc:"size of V1 features per pool"`
+ V1Feats evec.Vec2i
// V1 rendered input state, 4D Size x Size
- V1 etensor.Float32 `desc:"V1 rendered input state, 4D Size x Size"`
+ V1 etensor.Float32
// LIP top-down attention
- LIP etensor.Float32 `desc:"LIP top-down attention"`
+ LIP etensor.Float32
- // [view: inline] current run of model as provided during Init
- Run env.Ctr `view:"inline" desc:"current run of model as provided during Init"`
+ // current run of model as provided during Init
+ Run env.Ctr `view:"inline"`
- // [view: inline] number of times through Seq.Max number of sequences
- Epoch env.Ctr `view:"inline" desc:"number of times through Seq.Max number of sequences"`
+ // number of times through Seq.Max number of sequences
+ Epoch env.Ctr `view:"inline"`
- // [view: inline] trial increments over input states -- could add Event as a lower level
- Trial env.Ctr `view:"inline" desc:"trial increments over input states -- could add Event as a lower level"`
+ // trial increments over input states -- could add Event as a lower level
+ Trial env.Ctr `view:"inline"`
}
func (ev *AttnEnv) Name() string { return ev.Nm }
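To make the migrated `Stim`/`PosXY` usage concrete, a hypothetical sketch that would live alongside the code above (the 16x16 grid size is arbitrary, not from the sim; assumes the enclosing file imports fmt along with the evec and mat32 packages shown above):

```go
// examplePosXY places a stimulus at normalized position (.25, .5) and
// projects it into a 16x16 pool grid; PosXY scales by (size - 1).
func examplePosXY() {
	st := Stim{Pos: mat32.V2(0.25, 0.5), Feat: 2, Width: 0.08, Contrast: 0.3}
	xy := st.PosXY(evec.Vec2i{X: 16, Y: 16})
	fmt.Println(xy) // (0.25*15, 0.5*15) = (3.75, 7.5)
}
```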
diff --git a/examples/attn_trn/stims.go b/examples/attn_trn/stims.go
index 6d815b6a7..8c1dee92b 100644
--- a/examples/attn_trn/stims.go
+++ b/examples/attn_trn/stims.go
@@ -6,7 +6,7 @@
package main
-import "github.com/goki/mat32"
+import "goki.dev/mat32/v2"
// StimAttnSize is a list of stimuli manipulating the size of stimuli vs. attention
// it is the primary test of Reynolds & Heeger 2009 attentional dynamics.
@@ -16,27 +16,27 @@ import "github.com/goki/mat32"
// small V1 = 0.08, large V1 = 0.012
var StimAttnSizeAll = Stims{
// small input, large attention
- StimSet{"InS_AtL_C0", []Stim{Stim{mat32.Vec2{.25, .5}, 2, 0.08, 0.0}, Stim{mat32.Vec2{.75, .5}, 2, 0.08, 0.0}, Stim{mat32.Vec2{.25, .5}, -1, 0.25, 0}}},
- StimSet{"InS_AtL_C1", []Stim{Stim{mat32.Vec2{.25, .5}, 2, 0.08, 0.1}, Stim{mat32.Vec2{.75, .5}, 2, 0.08, 0.1}, Stim{mat32.Vec2{.25, .5}, -1, 0.25, 0}}},
- StimSet{"InS_AtL_C2", []Stim{Stim{mat32.Vec2{.25, .5}, 2, 0.08, 0.2}, Stim{mat32.Vec2{.75, .5}, 2, 0.08, 0.2}, Stim{mat32.Vec2{.25, .5}, -1, 0.25, 0}}},
- StimSet{"InS_AtL_C3", []Stim{Stim{mat32.Vec2{.25, .5}, 2, 0.08, 0.3}, Stim{mat32.Vec2{.75, .5}, 2, 0.08, 0.3}, Stim{mat32.Vec2{.25, .5}, -1, 0.25, 0}}},
- StimSet{"InS_AtL_C4", []Stim{Stim{mat32.Vec2{.25, .5}, 2, 0.08, 0.4}, Stim{mat32.Vec2{.75, .5}, 2, 0.08, 0.4}, Stim{mat32.Vec2{.25, .5}, -1, 0.25, 0}}},
- StimSet{"InS_AtL_C5", []Stim{Stim{mat32.Vec2{.25, .5}, 2, 0.08, 0.5}, Stim{mat32.Vec2{.75, .5}, 2, 0.08, 0.5}, Stim{mat32.Vec2{.25, .5}, -1, 0.25, 0}}},
- StimSet{"InS_AtL_C6", []Stim{Stim{mat32.Vec2{.25, .5}, 2, 0.08, 0.6}, Stim{mat32.Vec2{.75, .5}, 2, 0.08, 0.6}, Stim{mat32.Vec2{.25, .5}, -1, 0.25, 0}}},
- StimSet{"InS_AtL_C7", []Stim{Stim{mat32.Vec2{.25, .5}, 2, 0.08, 0.7}, Stim{mat32.Vec2{.75, .5}, 2, 0.08, 0.7}, Stim{mat32.Vec2{.25, .5}, -1, 0.25, 0}}},
- StimSet{"InS_AtL_C8", []Stim{Stim{mat32.Vec2{.25, .5}, 2, 0.08, 0.8}, Stim{mat32.Vec2{.75, .5}, 2, 0.08, 0.8}, Stim{mat32.Vec2{.25, .5}, -1, 0.25, 0}}},
- StimSet{"InS_AtL_C9", []Stim{Stim{mat32.Vec2{.25, .5}, 2, 0.08, 0.9}, Stim{mat32.Vec2{.75, .5}, 2, 0.08, 0.9}, Stim{mat32.Vec2{.25, .5}, -1, 0.25, 0}}},
+ StimSet{"InS_AtL_C0", []Stim{Stim{mat32.V2(.25, .5), 2, 0.08, 0.0}, Stim{mat32.V2(.75, .5), 2, 0.08, 0.0}, Stim{mat32.V2(.25, .5), -1, 0.25, 0}}},
+ StimSet{"InS_AtL_C1", []Stim{Stim{mat32.V2(.25, .5), 2, 0.08, 0.1}, Stim{mat32.V2(.75, .5), 2, 0.08, 0.1}, Stim{mat32.V2(.25, .5), -1, 0.25, 0}}},
+ StimSet{"InS_AtL_C2", []Stim{Stim{mat32.V2(.25, .5), 2, 0.08, 0.2}, Stim{mat32.V2(.75, .5), 2, 0.08, 0.2}, Stim{mat32.V2(.25, .5), -1, 0.25, 0}}},
+ StimSet{"InS_AtL_C3", []Stim{Stim{mat32.V2(.25, .5), 2, 0.08, 0.3}, Stim{mat32.V2(.75, .5), 2, 0.08, 0.3}, Stim{mat32.V2(.25, .5), -1, 0.25, 0}}},
+ StimSet{"InS_AtL_C4", []Stim{Stim{mat32.V2(.25, .5), 2, 0.08, 0.4}, Stim{mat32.V2(.75, .5), 2, 0.08, 0.4}, Stim{mat32.V2(.25, .5), -1, 0.25, 0}}},
+ StimSet{"InS_AtL_C5", []Stim{Stim{mat32.V2(.25, .5), 2, 0.08, 0.5}, Stim{mat32.V2(.75, .5), 2, 0.08, 0.5}, Stim{mat32.V2(.25, .5), -1, 0.25, 0}}},
+ StimSet{"InS_AtL_C6", []Stim{Stim{mat32.V2(.25, .5), 2, 0.08, 0.6}, Stim{mat32.V2(.75, .5), 2, 0.08, 0.6}, Stim{mat32.V2(.25, .5), -1, 0.25, 0}}},
+ StimSet{"InS_AtL_C7", []Stim{Stim{mat32.V2(.25, .5), 2, 0.08, 0.7}, Stim{mat32.V2(.75, .5), 2, 0.08, 0.7}, Stim{mat32.V2(.25, .5), -1, 0.25, 0}}},
+ StimSet{"InS_AtL_C8", []Stim{Stim{mat32.V2(.25, .5), 2, 0.08, 0.8}, Stim{mat32.V2(.75, .5), 2, 0.08, 0.8}, Stim{mat32.V2(.25, .5), -1, 0.25, 0}}},
+ StimSet{"InS_AtL_C9", []Stim{Stim{mat32.V2(.25, .5), 2, 0.08, 0.9}, Stim{mat32.V2(.75, .5), 2, 0.08, 0.9}, Stim{mat32.V2(.25, .5), -1, 0.25, 0}}},
// large input, small attention
- StimSet{"InL_AtS_C0", []Stim{Stim{mat32.Vec2{.25, .5}, 2, 0.12, 0.0}, Stim{mat32.Vec2{.75, .5}, 2, 0.12, 0.0}, Stim{mat32.Vec2{.25, .5}, -1, 0.11, 0}}},
- StimSet{"InL_AtS_C1", []Stim{Stim{mat32.Vec2{.25, .5}, 2, 0.12, 0.1}, Stim{mat32.Vec2{.75, .5}, 2, 0.12, 0.1}, Stim{mat32.Vec2{.25, .5}, -1, 0.11, 0}}},
- StimSet{"InL_AtS_C2", []Stim{Stim{mat32.Vec2{.25, .5}, 2, 0.12, 0.2}, Stim{mat32.Vec2{.75, .5}, 2, 0.12, 0.2}, Stim{mat32.Vec2{.25, .5}, -1, 0.11, 0}}},
- StimSet{"InL_AtS_C3", []Stim{Stim{mat32.Vec2{.25, .5}, 2, 0.12, 0.3}, Stim{mat32.Vec2{.75, .5}, 2, 0.12, 0.3}, Stim{mat32.Vec2{.25, .5}, -1, 0.11, 0}}},
- StimSet{"InL_AtS_C4", []Stim{Stim{mat32.Vec2{.25, .5}, 2, 0.12, 0.4}, Stim{mat32.Vec2{.75, .5}, 2, 0.12, 0.4}, Stim{mat32.Vec2{.25, .5}, -1, 0.11, 0}}},
- StimSet{"InL_AtS_C5", []Stim{Stim{mat32.Vec2{.25, .5}, 2, 0.12, 0.5}, Stim{mat32.Vec2{.75, .5}, 2, 0.12, 0.5}, Stim{mat32.Vec2{.25, .5}, -1, 0.11, 0}}},
- StimSet{"InL_AtS_C6", []Stim{Stim{mat32.Vec2{.25, .5}, 2, 0.12, 0.6}, Stim{mat32.Vec2{.75, .5}, 2, 0.12, 0.6}, Stim{mat32.Vec2{.25, .5}, -1, 0.11, 0}}},
- StimSet{"InL_AtS_C7", []Stim{Stim{mat32.Vec2{.25, .5}, 2, 0.12, 0.7}, Stim{mat32.Vec2{.75, .5}, 2, 0.12, 0.7}, Stim{mat32.Vec2{.25, .5}, -1, 0.11, 0}}},
- StimSet{"InL_AtS_C8", []Stim{Stim{mat32.Vec2{.25, .5}, 2, 0.12, 0.8}, Stim{mat32.Vec2{.75, .5}, 2, 0.12, 0.8}, Stim{mat32.Vec2{.25, .5}, -1, 0.11, 0}}},
- StimSet{"InL_AtS_C9", []Stim{Stim{mat32.Vec2{.25, .5}, 2, 0.12, 0.9}, Stim{mat32.Vec2{.75, .5}, 2, 0.12, 0.9}, Stim{mat32.Vec2{.25, .5}, -1, 0.11, 0}}},
+ StimSet{"InL_AtS_C0", []Stim{Stim{mat32.V2(.25, .5), 2, 0.12, 0.0}, Stim{mat32.V2(.75, .5), 2, 0.12, 0.0}, Stim{mat32.V2(.25, .5), -1, 0.11, 0}}},
+ StimSet{"InL_AtS_C1", []Stim{Stim{mat32.V2(.25, .5), 2, 0.12, 0.1}, Stim{mat32.V2(.75, .5), 2, 0.12, 0.1}, Stim{mat32.V2(.25, .5), -1, 0.11, 0}}},
+ StimSet{"InL_AtS_C2", []Stim{Stim{mat32.V2(.25, .5), 2, 0.12, 0.2}, Stim{mat32.V2(.75, .5), 2, 0.12, 0.2}, Stim{mat32.V2(.25, .5), -1, 0.11, 0}}},
+ StimSet{"InL_AtS_C3", []Stim{Stim{mat32.V2(.25, .5), 2, 0.12, 0.3}, Stim{mat32.V2(.75, .5), 2, 0.12, 0.3}, Stim{mat32.V2(.25, .5), -1, 0.11, 0}}},
+ StimSet{"InL_AtS_C4", []Stim{Stim{mat32.V2(.25, .5), 2, 0.12, 0.4}, Stim{mat32.V2(.75, .5), 2, 0.12, 0.4}, Stim{mat32.V2(.25, .5), -1, 0.11, 0}}},
+ StimSet{"InL_AtS_C5", []Stim{Stim{mat32.V2(.25, .5), 2, 0.12, 0.5}, Stim{mat32.V2(.75, .5), 2, 0.12, 0.5}, Stim{mat32.V2(.25, .5), -1, 0.11, 0}}},
+ StimSet{"InL_AtS_C6", []Stim{Stim{mat32.V2(.25, .5), 2, 0.12, 0.6}, Stim{mat32.V2(.75, .5), 2, 0.12, 0.6}, Stim{mat32.V2(.25, .5), -1, 0.11, 0}}},
+ StimSet{"InL_AtS_C7", []Stim{Stim{mat32.V2(.25, .5), 2, 0.12, 0.7}, Stim{mat32.V2(.75, .5), 2, 0.12, 0.7}, Stim{mat32.V2(.25, .5), -1, 0.11, 0}}},
+ StimSet{"InL_AtS_C8", []Stim{Stim{mat32.V2(.25, .5), 2, 0.12, 0.8}, Stim{mat32.V2(.75, .5), 2, 0.12, 0.8}, Stim{mat32.V2(.25, .5), -1, 0.11, 0}}},
+ StimSet{"InL_AtS_C9", []Stim{Stim{mat32.V2(.25, .5), 2, 0.12, 0.9}, Stim{mat32.V2(.75, .5), 2, 0.12, 0.9}, Stim{mat32.V2(.25, .5), -1, 0.11, 0}}},
}
// StimAttnSizeDebug is a list of stimuli manipulating the size of stimuli vs. attention
@@ -47,33 +47,33 @@ var StimAttnSizeAll = Stims{
// small V1 = 0.08, large V1 = 0.12
var StimAttnSizeDebug = Stims{
// small input, large attention
- StimSet{"InS_AtL_C3", []Stim{Stim{mat32.Vec2{.25, .5}, 2, 0.08, 0.3}, Stim{mat32.Vec2{.75, .5}, 2, 0.08, 0.3}, Stim{mat32.Vec2{.25, .5}, -1, 0.30, 0}}},
- StimSet{"InS_AtL_C6", []Stim{Stim{mat32.Vec2{.25, .5}, 2, 0.08, 0.6}, Stim{mat32.Vec2{.75, .5}, 2, 0.08, 0.6}, Stim{mat32.Vec2{.25, .5}, -1, 0.30, 0}}},
- StimSet{"InS_AtL_C9", []Stim{Stim{mat32.Vec2{.25, .5}, 2, 0.08, 0.9}, Stim{mat32.Vec2{.75, .5}, 2, 0.08, 0.9}, Stim{mat32.Vec2{.25, .5}, -1, 0.30, 0}}},
+ StimSet{"InS_AtL_C3", []Stim{Stim{mat32.V2(.25, .5), 2, 0.08, 0.3}, Stim{mat32.V2(.75, .5), 2, 0.08, 0.3}, Stim{mat32.V2(.25, .5), -1, 0.30, 0}}},
+ StimSet{"InS_AtL_C6", []Stim{Stim{mat32.V2(.25, .5), 2, 0.08, 0.6}, Stim{mat32.V2(.75, .5), 2, 0.08, 0.6}, Stim{mat32.V2(.25, .5), -1, 0.30, 0}}},
+ StimSet{"InS_AtL_C9", []Stim{Stim{mat32.V2(.25, .5), 2, 0.08, 0.9}, Stim{mat32.V2(.75, .5), 2, 0.08, 0.9}, Stim{mat32.V2(.25, .5), -1, 0.30, 0}}},
// large input, small attention
- StimSet{"InL_AtS_C3", []Stim{Stim{mat32.Vec2{.25, .5}, 2, 0.12, 0.3}, Stim{mat32.Vec2{.75, .5}, 2, 0.12, 0.3}, Stim{mat32.Vec2{.25, .5}, -1, 0.09, 0}}},
- StimSet{"InL_AtS_C6", []Stim{Stim{mat32.Vec2{.25, .5}, 2, 0.12, 0.6}, Stim{mat32.Vec2{.75, .5}, 2, 0.12, 0.6}, Stim{mat32.Vec2{.25, .5}, -1, 0.09, 0}}},
- StimSet{"InL_AtS_C9", []Stim{Stim{mat32.Vec2{.25, .5}, 2, 0.12, 0.9}, Stim{mat32.Vec2{.75, .5}, 2, 0.12, 0.9}, Stim{mat32.Vec2{.25, .5}, -1, 0.09, 0}}},
+ StimSet{"InL_AtS_C3", []Stim{Stim{mat32.V2(.25, .5), 2, 0.12, 0.3}, Stim{mat32.V2(.75, .5), 2, 0.12, 0.3}, Stim{mat32.V2(.25, .5), -1, 0.09, 0}}},
+ StimSet{"InL_AtS_C6", []Stim{Stim{mat32.V2(.25, .5), 2, 0.12, 0.6}, Stim{mat32.V2(.75, .5), 2, 0.12, 0.6}, Stim{mat32.V2(.25, .5), -1, 0.09, 0}}},
+ StimSet{"InL_AtS_C9", []Stim{Stim{mat32.V2(.25, .5), 2, 0.12, 0.9}, Stim{mat32.V2(.75, .5), 2, 0.12, 0.9}, Stim{mat32.V2(.25, .5), -1, 0.09, 0}}},
}
// StimAttnSizeC2Up has contrasts C2 and up
var StimAttnSizeC2Up = Stims{
// small input, large attention
- StimSet{"InS_AtL_C2", []Stim{Stim{mat32.Vec2{.25, .5}, 2, 0.08, 0.2}, Stim{mat32.Vec2{.75, .5}, 2, 0.08, 0.2}, Stim{mat32.Vec2{.25, .5}, -1, 0.30, 0}}},
- StimSet{"InS_AtL_C3", []Stim{Stim{mat32.Vec2{.25, .5}, 2, 0.08, 0.3}, Stim{mat32.Vec2{.75, .5}, 2, 0.08, 0.3}, Stim{mat32.Vec2{.25, .5}, -1, 0.30, 0}}},
- StimSet{"InS_AtL_C4", []Stim{Stim{mat32.Vec2{.25, .5}, 2, 0.08, 0.4}, Stim{mat32.Vec2{.75, .5}, 2, 0.08, 0.4}, Stim{mat32.Vec2{.25, .5}, -1, 0.30, 0}}},
- StimSet{"InS_AtL_C5", []Stim{Stim{mat32.Vec2{.25, .5}, 2, 0.08, 0.5}, Stim{mat32.Vec2{.75, .5}, 2, 0.08, 0.5}, Stim{mat32.Vec2{.25, .5}, -1, 0.30, 0}}},
- StimSet{"InS_AtL_C6", []Stim{Stim{mat32.Vec2{.25, .5}, 2, 0.08, 0.6}, Stim{mat32.Vec2{.75, .5}, 2, 0.08, 0.6}, Stim{mat32.Vec2{.25, .5}, -1, 0.30, 0}}},
- StimSet{"InS_AtL_C7", []Stim{Stim{mat32.Vec2{.25, .5}, 2, 0.08, 0.7}, Stim{mat32.Vec2{.75, .5}, 2, 0.08, 0.7}, Stim{mat32.Vec2{.25, .5}, -1, 0.30, 0}}},
- StimSet{"InS_AtL_C8", []Stim{Stim{mat32.Vec2{.25, .5}, 2, 0.08, 0.8}, Stim{mat32.Vec2{.75, .5}, 2, 0.08, 0.8}, Stim{mat32.Vec2{.25, .5}, -1, 0.30, 0}}},
- StimSet{"InS_AtL_C9", []Stim{Stim{mat32.Vec2{.25, .5}, 2, 0.08, 0.9}, Stim{mat32.Vec2{.75, .5}, 2, 0.08, 0.9}, Stim{mat32.Vec2{.25, .5}, -1, 0.30, 0}}},
+ StimSet{"InS_AtL_C2", []Stim{Stim{mat32.V2(.25, .5), 2, 0.08, 0.2}, Stim{mat32.V2(.75, .5), 2, 0.08, 0.2}, Stim{mat32.V2(.25, .5), -1, 0.30, 0}}},
+ StimSet{"InS_AtL_C3", []Stim{Stim{mat32.V2(.25, .5), 2, 0.08, 0.3}, Stim{mat32.V2(.75, .5), 2, 0.08, 0.3}, Stim{mat32.V2(.25, .5), -1, 0.30, 0}}},
+ StimSet{"InS_AtL_C4", []Stim{Stim{mat32.V2(.25, .5), 2, 0.08, 0.4}, Stim{mat32.V2(.75, .5), 2, 0.08, 0.4}, Stim{mat32.V2(.25, .5), -1, 0.30, 0}}},
+ StimSet{"InS_AtL_C5", []Stim{Stim{mat32.V2(.25, .5), 2, 0.08, 0.5}, Stim{mat32.V2(.75, .5), 2, 0.08, 0.5}, Stim{mat32.V2(.25, .5), -1, 0.30, 0}}},
+ StimSet{"InS_AtL_C6", []Stim{Stim{mat32.V2(.25, .5), 2, 0.08, 0.6}, Stim{mat32.V2(.75, .5), 2, 0.08, 0.6}, Stim{mat32.V2(.25, .5), -1, 0.30, 0}}},
+ StimSet{"InS_AtL_C7", []Stim{Stim{mat32.V2(.25, .5), 2, 0.08, 0.7}, Stim{mat32.V2(.75, .5), 2, 0.08, 0.7}, Stim{mat32.V2(.25, .5), -1, 0.30, 0}}},
+ StimSet{"InS_AtL_C8", []Stim{Stim{mat32.V2(.25, .5), 2, 0.08, 0.8}, Stim{mat32.V2(.75, .5), 2, 0.08, 0.8}, Stim{mat32.V2(.25, .5), -1, 0.30, 0}}},
+ StimSet{"InS_AtL_C9", []Stim{Stim{mat32.V2(.25, .5), 2, 0.08, 0.9}, Stim{mat32.V2(.75, .5), 2, 0.08, 0.9}, Stim{mat32.V2(.25, .5), -1, 0.30, 0}}},
// large input, small attention
- StimSet{"InL_AtS_C2", []Stim{Stim{mat32.Vec2{.25, .5}, 2, 0.12, 0.2}, Stim{mat32.Vec2{.75, .5}, 2, 0.12, 0.2}, Stim{mat32.Vec2{.25, .5}, -1, 0.09, 0}}},
- StimSet{"InL_AtS_C3", []Stim{Stim{mat32.Vec2{.25, .5}, 2, 0.12, 0.3}, Stim{mat32.Vec2{.75, .5}, 2, 0.12, 0.3}, Stim{mat32.Vec2{.25, .5}, -1, 0.09, 0}}},
- StimSet{"InL_AtS_C4", []Stim{Stim{mat32.Vec2{.25, .5}, 2, 0.12, 0.4}, Stim{mat32.Vec2{.75, .5}, 2, 0.12, 0.4}, Stim{mat32.Vec2{.25, .5}, -1, 0.09, 0}}},
- StimSet{"InL_AtS_C5", []Stim{Stim{mat32.Vec2{.25, .5}, 2, 0.12, 0.5}, Stim{mat32.Vec2{.75, .5}, 2, 0.12, 0.5}, Stim{mat32.Vec2{.25, .5}, -1, 0.09, 0}}},
- StimSet{"InL_AtS_C6", []Stim{Stim{mat32.Vec2{.25, .5}, 2, 0.12, 0.6}, Stim{mat32.Vec2{.75, .5}, 2, 0.12, 0.6}, Stim{mat32.Vec2{.25, .5}, -1, 0.09, 0}}},
- StimSet{"InL_AtS_C7", []Stim{Stim{mat32.Vec2{.25, .5}, 2, 0.12, 0.7}, Stim{mat32.Vec2{.75, .5}, 2, 0.12, 0.7}, Stim{mat32.Vec2{.25, .5}, -1, 0.09, 0}}},
- StimSet{"InL_AtS_C8", []Stim{Stim{mat32.Vec2{.25, .5}, 2, 0.12, 0.8}, Stim{mat32.Vec2{.75, .5}, 2, 0.12, 0.8}, Stim{mat32.Vec2{.25, .5}, -1, 0.09, 0}}},
- StimSet{"InL_AtS_C9", []Stim{Stim{mat32.Vec2{.25, .5}, 2, 0.12, 0.9}, Stim{mat32.Vec2{.75, .5}, 2, 0.12, 0.9}, Stim{mat32.Vec2{.25, .5}, -1, 0.09, 0}}},
+ StimSet{"InL_AtS_C2", []Stim{Stim{mat32.V2(.25, .5), 2, 0.12, 0.2}, Stim{mat32.V2(.75, .5), 2, 0.12, 0.2}, Stim{mat32.V2(.25, .5), -1, 0.09, 0}}},
+ StimSet{"InL_AtS_C3", []Stim{Stim{mat32.V2(.25, .5), 2, 0.12, 0.3}, Stim{mat32.V2(.75, .5), 2, 0.12, 0.3}, Stim{mat32.V2(.25, .5), -1, 0.09, 0}}},
+ StimSet{"InL_AtS_C4", []Stim{Stim{mat32.V2(.25, .5), 2, 0.12, 0.4}, Stim{mat32.V2(.75, .5), 2, 0.12, 0.4}, Stim{mat32.V2(.25, .5), -1, 0.09, 0}}},
+ StimSet{"InL_AtS_C5", []Stim{Stim{mat32.V2(.25, .5), 2, 0.12, 0.5}, Stim{mat32.V2(.75, .5), 2, 0.12, 0.5}, Stim{mat32.V2(.25, .5), -1, 0.09, 0}}},
+ StimSet{"InL_AtS_C6", []Stim{Stim{mat32.V2(.25, .5), 2, 0.12, 0.6}, Stim{mat32.V2(.75, .5), 2, 0.12, 0.6}, Stim{mat32.V2(.25, .5), -1, 0.09, 0}}},
+ StimSet{"InL_AtS_C7", []Stim{Stim{mat32.V2(.25, .5), 2, 0.12, 0.7}, Stim{mat32.V2(.75, .5), 2, 0.12, 0.7}, Stim{mat32.V2(.25, .5), -1, 0.09, 0}}},
+ StimSet{"InL_AtS_C8", []Stim{Stim{mat32.V2(.25, .5), 2, 0.12, 0.8}, Stim{mat32.V2(.75, .5), 2, 0.12, 0.8}, Stim{mat32.V2(.25, .5), -1, 0.09, 0}}},
+ StimSet{"InL_AtS_C9", []Stim{Stim{mat32.V2(.25, .5), 2, 0.12, 0.9}, Stim{mat32.V2(.75, .5), 2, 0.12, 0.9}, Stim{mat32.V2(.25, .5), -1, 0.09, 0}}},
}
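The positional `Stim` literals above carry over verbatim from v1 apart from the `mat32.V2` conversion. For reference, the first set could equivalently use keyed fields, which survives any reordering of the `Stim` struct; a sketch, not part of the diff:

```go
// same content as the first StimSet above, with keyed instead of
// positional fields: two feature-2 inputs plus one LIP (-1) attention blob.
var StimAttnSizeKeyed = Stims{
	StimSet{
		Name: "InS_AtL_C0",
		Stims: []Stim{
			{Pos: mat32.V2(.25, .5), Feat: 2, Width: 0.08, Contrast: 0.0},
			{Pos: mat32.V2(.75, .5), Feat: 2, Width: 0.08, Contrast: 0.0},
			{Pos: mat32.V2(.25, .5), Feat: -1, Width: 0.25, Contrast: 0},
		},
	},
}
```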
diff --git a/examples/bench/bench.go b/examples/bench/bench.go
index 9d03236be..8a3a2a865 100644
--- a/examples/bench/bench.go
+++ b/examples/bench/bench.go
@@ -15,14 +15,14 @@ import (
"math/rand"
"github.com/emer/axon/axon"
- "github.com/emer/emergent/erand"
- "github.com/emer/emergent/etime"
- "github.com/emer/emergent/params"
- "github.com/emer/emergent/patgen"
- "github.com/emer/emergent/prjn"
- "github.com/emer/emergent/timer"
- "github.com/emer/etable/etable"
- "github.com/emer/etable/etensor"
+ "github.com/emer/emergent/v2/erand"
+ "github.com/emer/emergent/v2/etime"
+ "github.com/emer/emergent/v2/params"
+ "github.com/emer/emergent/v2/patgen"
+ "github.com/emer/emergent/v2/prjn"
+ "github.com/emer/emergent/v2/timer"
+ "goki.dev/etable/v2/etable"
+ "goki.dev/etable/v2/etensor"
)
// note: with 2 hidden layers, this simple test case converges to perfect performance:
diff --git a/examples/bench/bench_test.go b/examples/bench/bench_test.go
index 3a7421aeb..298d146a0 100644
--- a/examples/bench/bench_test.go
+++ b/examples/bench/bench_test.go
@@ -10,9 +10,9 @@ import (
"testing"
"github.com/emer/axon/axon"
- "github.com/emer/emergent/etime"
- "github.com/emer/etable/etable"
- "github.com/goki/gi/gi"
+ "github.com/emer/emergent/v2/etime"
+ "goki.dev/etable/v2/etable"
+ "goki.dev/gi/v2/gi"
)
func init() {
diff --git a/examples/bench_lvis/bench_lvis.go b/examples/bench_lvis/bench_lvis.go
index a56694129..345f3c08c 100644
--- a/examples/bench_lvis/bench_lvis.go
+++ b/examples/bench_lvis/bench_lvis.go
@@ -14,14 +14,14 @@ import (
"math/rand"
"github.com/emer/axon/axon"
- "github.com/emer/emergent/erand"
- "github.com/emer/emergent/etime"
- "github.com/emer/emergent/params"
- "github.com/emer/emergent/patgen"
- "github.com/emer/emergent/prjn"
- "github.com/emer/emergent/timer"
- "github.com/emer/etable/etable"
- "github.com/emer/etable/etensor"
+ "github.com/emer/emergent/v2/erand"
+ "github.com/emer/emergent/v2/etime"
+ "github.com/emer/emergent/v2/params"
+ "github.com/emer/emergent/v2/patgen"
+ "github.com/emer/emergent/v2/prjn"
+ "github.com/emer/emergent/v2/timer"
+ "goki.dev/etable/v2/etable"
+ "goki.dev/etable/v2/etensor"
)
var ParamSets = params.Sets{
diff --git a/examples/bench_lvis/bench_lvis_test.go b/examples/bench_lvis/bench_lvis_test.go
index e6daa6095..55b3c4830 100644
--- a/examples/bench_lvis/bench_lvis_test.go
+++ b/examples/bench_lvis/bench_lvis_test.go
@@ -10,9 +10,9 @@ import (
"testing"
"github.com/emer/axon/axon"
- "github.com/emer/etable/etable"
- "github.com/emer/etable/etensor"
"github.com/stretchr/testify/require"
+ "goki.dev/etable/v2/etable"
+ "goki.dev/etable/v2/etensor"
)
var gpu = flag.Bool("gpu", false, "whether to run gpu or not")
diff --git a/examples/bench_objrec/bench_results.md b/examples/bench_objrec/bench_results.md
index c175336b4..f928b9216 100644
--- a/examples/bench_objrec/bench_results.md
+++ b/examples/bench_objrec/bench_results.md
@@ -2,7 +2,30 @@
results are PerTrlMSec
-## MacBook Pro M1
+## MacBook Pro M3 Max
+
+```
+GPU:
+* NData 1:
+* NData 2:
+* NData 4:
+* NData 8: 130
+* NData 16: 80
+* NData 32: 50 <- much faster than CPU
+```
+
+```
+CPU:
+* NData 1:
+* NData 2:
+* NData 4:
+* NData 8: 86 <- much faster than GPU
+* NData 16: 86
+* NData 32: 90
+```
+
+
+## MacBook Pro M1 Max
using default number of threads
diff --git a/examples/bench_objrec/config.go b/examples/bench_objrec/config.go
index 7274052da..f457a66ca 100644
--- a/examples/bench_objrec/config.go
+++ b/examples/bench_objrec/config.go
@@ -4,45 +4,45 @@
package main
-import "github.com/emer/emergent/prjn"
+import "github.com/emer/emergent/v2/prjn"
// EnvConfig has config params for environment
// note: only adding fields for key Env params that matter for both Network and Env
// other params are set via the Env map data mechanism.
-type EnvConfig struct {
+type EnvConfig struct { //gti:add
// env parameters -- can set any field/subfield on Env struct, using standard TOML formatting
- Env map[string]any `desc:"env parameters -- can set any field/subfield on Env struct, using standard TOML formatting"`
+ Env map[string]any
- // [def: 5] number of units per localist output unit
- NOutPer int `def:"5" desc:"number of units per localist output unit"`
+ // number of units per localist output unit
+ NOutPer int `def:"5"`
}
// ParamConfig has config parameters related to sim params
-type ParamConfig struct {
+type ParamConfig struct { //gti:add
// network parameters
- Network map[string]any `desc:"network parameters"`
+ Network map[string]any
// Extra Param Sheet name(s) to use (space separated if multiple) -- must be valid name as listed in compiled-in params or loaded params
- Sheet string `desc:"Extra Param Sheet name(s) to use (space separated if multiple) -- must be valid name as listed in compiled-in params or loaded params"`
+ Sheet string
// extra tag to add to file names and logs saved from this run
- Tag string `desc:"extra tag to add to file names and logs saved from this run"`
+ Tag string
// user note -- describe the run params etc -- like a git commit message for the run
- Note string `desc:"user note -- describe the run params etc -- like a git commit message for the run"`
+ Note string
// Name of the JSON file to input saved parameters from.
- File string `nest:"+" desc:"Name of the JSON file to input saved parameters from."`
+ File string `nest:"+"`
// Save a snapshot of all current param and config settings in a directory named params_ (or _good if Good is true), then quit -- useful for comparing to later changes and seeing multiple views of current params
- SaveAll bool `nest:"+" desc:"Save a snapshot of all current param and config settings in a directory named params_ (or _good if Good is true), then quit -- useful for comparing to later changes and seeing multiple views of current params"`
+ SaveAll bool `nest:"+"`
// for SaveAll, save to params_good for a known good params state. This can be done prior to making a new release after all tests are passing -- add results to git to provide a full diff record of all params over time.
- Good bool `nest:"+" desc:"for SaveAll, save to params_good for a known good params state. This can be done prior to making a new release after all tests are passing -- add results to git to provide a full diff record of all params over time."`
+ Good bool `nest:"+"`
- // [view: projection from V1 to V4 which is tiled 4x4 skip 2 with topo scale values]
+ //
V1V4Prjn *prjn.PoolTile `nest:"+" view:"projection from V1 to V4 which is tiled 4x4 skip 2 with topo scale values"`
}
@@ -59,84 +59,84 @@ func (cfg *ParamConfig) Defaults() {
}
// RunConfig has config parameters related to running the sim
-type RunConfig struct {
+type RunConfig struct { //gti:add
- // [def: true] use the GPU for computation -- generally faster even for small models if NData ~16
- GPU bool `def:"true" desc:"use the GPU for computation -- generally faster even for small models if NData ~16"`
+ // use the GPU for computation -- generally faster even for small models if NData ~16
+ GPU bool `def:"true"`
- // [def: 16] [min: 1] number of data-parallel items to process in parallel per trial -- works (and is significantly faster) for both CPU and GPU. Results in an effective mini-batch of learning.
- NData int `def:"16" min:"1" desc:"number of data-parallel items to process in parallel per trial -- works (and is significantly faster) for both CPU and GPU. Results in an effective mini-batch of learning."`
+ // number of data-parallel items to process in parallel per trial -- works (and is significantly faster) for both CPU and GPU. Results in an effective mini-batch of learning.
+ NData int `def:"16" min:"1"`
- // [def: 0] number of parallel threads for CPU computation -- 0 = use default
- NThreads int `def:"0" desc:"number of parallel threads for CPU computation -- 0 = use default"`
+ // number of parallel threads for CPU computation -- 0 = use default
+ NThreads int `def:"0"`
- // [def: 0] starting run number -- determines the random seed -- runs counts from there -- can do all runs in parallel by launching separate jobs with each run, runs = 1
- Run int `def:"0" desc:"starting run number -- determines the random seed -- runs counts from there -- can do all runs in parallel by launching separate jobs with each run, runs = 1"`
+ // starting run number -- determines the random seed -- runs counts from there -- can do all runs in parallel by launching separate jobs with each run, runs = 1
+ Run int `def:"0"`
- // [def: 1] [min: 1] total number of runs to do when running Train
- NRuns int `def:"1" min:"1" desc:"total number of runs to do when running Train"`
+ // total number of runs to do when running Train
+ NRuns int `def:"1" min:"1"`
- // [def: 200] total number of epochs per run
- NEpochs int `def:"200" desc:"total number of epochs per run"`
+ // total number of epochs per run
+ NEpochs int `def:"200"`
- // [def: 128] total number of trials per epoch. Should be an even multiple of NData.
- NTrials int `def:"128" desc:"total number of trials per epoch. Should be an even multiple of NData."`
+ // total number of trials per epoch. Should be an even multiple of NData.
+ NTrials int `def:"128"`
- // [def: 5] how frequently (in epochs) to compute PCA on hidden representations to measure variance?
- PCAInterval int `def:"5" desc:"how frequently (in epochs) to compute PCA on hidden representations to measure variance?"`
+ // how frequently (in epochs) to compute PCA on hidden representations to measure variance?
+ PCAInterval int `def:"5"`
- // [def: -1] how often to run through all the test patterns, in terms of training epochs -- can use 0 or -1 for no testing
- TestInterval int `def:"-1" desc:"how often to run through all the test patterns, in terms of training epochs -- can use 0 or -1 for no testing"`
+ // how often to run through all the test patterns, in terms of training epochs -- can use 0 or -1 for no testing
+ TestInterval int `def:"-1"`
}
// LogConfig has config parameters related to logging data
-type LogConfig struct {
+type LogConfig struct { //gti:add
// if true, save final weights after each run
- SaveWts bool `desc:"if true, save final weights after each run"`
+ SaveWts bool
- // [def: true] if true, save train epoch log to file, as .epc.tsv typically
- Epoch bool `def:"true" nest:"+" desc:"if true, save train epoch log to file, as .epc.tsv typically"`
+ // if true, save train epoch log to file, as .epc.tsv typically
+ Epoch bool `def:"true" nest:"+"`
- // [def: true] if true, save run log to file, as .run.tsv typically
- Run bool `def:"true" nest:"+" desc:"if true, save run log to file, as .run.tsv typically"`
+ // if true, save run log to file, as .run.tsv typically
+ Run bool `def:"true" nest:"+"`
- // [def: false] if true, save train trial log to file, as .trl.tsv typically. May be large.
- Trial bool `def:"false" nest:"+" desc:"if true, save train trial log to file, as .trl.tsv typically. May be large."`
+ // if true, save train trial log to file, as .trl.tsv typically. May be large.
+ Trial bool `def:"false" nest:"+"`
- // [def: false] if true, save testing epoch log to file, as .tst_epc.tsv typically. In general it is better to copy testing items over to the training epoch log and record there.
- TestEpoch bool `def:"false" nest:"+" desc:"if true, save testing epoch log to file, as .tst_epc.tsv typically. In general it is better to copy testing items over to the training epoch log and record there."`
+ // if true, save testing epoch log to file, as .tst_epc.tsv typically. In general it is better to copy testing items over to the training epoch log and record there.
+ TestEpoch bool `def:"false" nest:"+"`
- // [def: false] if true, save testing trial log to file, as .tst_trl.tsv typically. May be large.
- TestTrial bool `def:"false" nest:"+" desc:"if true, save testing trial log to file, as .tst_trl.tsv typically. May be large."`
+ // if true, save testing trial log to file, as .tst_trl.tsv typically. May be large.
+ TestTrial bool `def:"false" nest:"+"`
// if true, save network activation etc data from testing trials, for later viewing in netview
- NetData bool `desc:"if true, save network activation etc data from testing trials, for later viewing in netview"`
+ NetData bool
}
// Config is a standard Sim config -- use as a starting point.
-type Config struct {
+type Config struct { //gti:add
// specify include files here, and after configuration, it contains list of include files added
- Includes []string `desc:"specify include files here, and after configuration, it contains list of include files added"`
+ Includes []string
- // [def: true] open the GUI -- does not automatically run -- if false, then runs automatically and quits
- GUI bool `def:"true" desc:"open the GUI -- does not automatically run -- if false, then runs automatically and quits"`
+ // open the GUI -- does not automatically run -- if false, then runs automatically and quits
+ GUI bool `def:"true"`
// log debugging information
- Debug bool `desc:"log debugging information"`
+ Debug bool
- // [view: add-fields] environment configuration options
- Env EnvConfig `view:"add-fields" desc:"environment configuration options"`
+ // environment configuration options
+ Env EnvConfig `view:"add-fields"`
- // [view: add-fields] parameter related configuration options
- Params ParamConfig `view:"add-fields" desc:"parameter related configuration options"`
+ // parameter related configuration options
+ Params ParamConfig `view:"add-fields"`
- // [view: add-fields] sim running related configuration options
- Run RunConfig `view:"add-fields" desc:"sim running related configuration options"`
+ // sim running related configuration options
+ Run RunConfig `view:"add-fields"`
- // [view: add-fields] data logging related configuration options
- Log LogConfig `view:"add-fields" desc:"data logging related configuration options"`
+ // data logging related configuration options
+ Log LogConfig `view:"add-fields"`
}
func (cfg *Config) IncludesPtr() *[]string { return &cfg.Includes }
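For orientation, the nested config groups above compose as plain Go values; a sketch that builds one by hand using the documented `def` values (the real sims populate this from TOML files and command-line args, which this sketch deliberately does not assume):

```go
// exampleConfig assembles a Config with the same values as the def tags
// above; illustrative only, alongside the config.go shown in this diff.
func exampleConfig() *Config {
	cfg := &Config{}
	cfg.GUI = true        // def:"true" -- open the GUI
	cfg.Run.GPU = true    // def:"true" -- GPU generally wins for NData ~16
	cfg.Run.NData = 16    // def:"16" -- effective mini-batch size
	cfg.Run.NTrials = 128 // def:"128" -- must be an even multiple of NData
	cfg.Log.Epoch = true  // def:"true" -- save the .epc.tsv epoch log
	return cfg
}
```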
diff --git a/examples/bench_objrec/gtigen.go b/examples/bench_objrec/gtigen.go
new file mode 100644
index 000000000..c4afc003d
--- /dev/null
+++ b/examples/bench_objrec/gtigen.go
@@ -0,0 +1,226 @@
+// Code generated by "goki generate -add-types"; DO NOT EDIT.
+
+package main
+
+import (
+ "goki.dev/gti"
+ "goki.dev/ordmap"
+)
+
+var _ = gti.AddType(&gti.Type{
+ Name: "main.EnvConfig",
+ ShortName: "main.EnvConfig",
+ IDName: "env-config",
+ Doc: "EnvConfig has config params for environment\nnote: only adding fields for key Env params that matter for both Network and Env\nother params are set via the Env map data mechanism.",
+ Directives: gti.Directives{
+ &gti.Directive{Tool: "gti", Directive: "add", Args: []string{}},
+ },
+ Fields: ordmap.Make([]ordmap.KeyVal[string, *gti.Field]{
+ {"Env", >i.Field{Name: "Env", Type: "map[string]any", LocalType: "map[string]any", Doc: "env parameters -- can set any field/subfield on Env struct, using standard TOML formatting", Directives: gti.Directives{}, Tag: ""}},
+ {"NOutPer", >i.Field{Name: "NOutPer", Type: "int", LocalType: "int", Doc: "number of units per localist output unit", Directives: gti.Directives{}, Tag: "def:\"5\""}},
+ }),
+ Embeds: ordmap.Make([]ordmap.KeyVal[string, *gti.Field]{}),
+ Methods: ordmap.Make([]ordmap.KeyVal[string, *gti.Method]{}),
+})
+
+var _ = gti.AddType(&gti.Type{
+ Name: "main.ParamConfig",
+ ShortName: "main.ParamConfig",
+ IDName: "param-config",
+ Doc: "ParamConfig has config parameters related to sim params",
+ Directives: gti.Directives{
+ &gti.Directive{Tool: "gti", Directive: "add", Args: []string{}},
+ },
+ Fields: ordmap.Make([]ordmap.KeyVal[string, *gti.Field]{
+ {"Network", >i.Field{Name: "Network", Type: "map[string]any", LocalType: "map[string]any", Doc: "network parameters", Directives: gti.Directives{}, Tag: ""}},
+ {"Sheet", >i.Field{Name: "Sheet", Type: "string", LocalType: "string", Doc: "Extra Param Sheet name(s) to use (space separated if multiple) -- must be valid name as listed in compiled-in params or loaded params", Directives: gti.Directives{}, Tag: ""}},
+ {"Tag", >i.Field{Name: "Tag", Type: "string", LocalType: "string", Doc: "extra tag to add to file names and logs saved from this run", Directives: gti.Directives{}, Tag: ""}},
+ {"Note", >i.Field{Name: "Note", Type: "string", LocalType: "string", Doc: "user note -- describe the run params etc -- like a git commit message for the run", Directives: gti.Directives{}, Tag: ""}},
+ {"File", >i.Field{Name: "File", Type: "string", LocalType: "string", Doc: "Name of the JSON file to input saved parameters from.", Directives: gti.Directives{}, Tag: "nest:\"+\""}},
+ {"SaveAll", >i.Field{Name: "SaveAll", Type: "bool", LocalType: "bool", Doc: "Save a snapshot of all current param and config settings in a directory named params_ (or _good if Good is true), then quit -- useful for comparing to later changes and seeing multiple views of current params", Directives: gti.Directives{}, Tag: "nest:\"+\""}},
+ {"Good", >i.Field{Name: "Good", Type: "bool", LocalType: "bool", Doc: "for SaveAll, save to params_good for a known good params state. This can be done prior to making a new release after all tests are passing -- add results to git to provide a full diff record of all params over time.", Directives: gti.Directives{}, Tag: "nest:\"+\""}},
+ {"V1V4Prjn", >i.Field{Name: "V1V4Prjn", Type: "*github.com/emer/emergent/v2/prjn.PoolTile", LocalType: "*prjn.PoolTile", Doc: "", Directives: gti.Directives{}, Tag: "nest:\"+\" view:\"projection from V1 to V4 which is tiled 4x4 skip 2 with topo scale values\""}},
+ }),
+ Embeds: ordmap.Make([]ordmap.KeyVal[string, *gti.Field]{}),
+ Methods: ordmap.Make([]ordmap.KeyVal[string, *gti.Method]{}),
+})
+
+var _ = gti.AddType(&gti.Type{
+ Name: "main.RunConfig",
+ ShortName: "main.RunConfig",
+ IDName: "run-config",
+ Doc: "RunConfig has config parameters related to running the sim",
+ Directives: gti.Directives{
+ &gti.Directive{Tool: "gti", Directive: "add", Args: []string{}},
+ },
+ Fields: ordmap.Make([]ordmap.KeyVal[string, *gti.Field]{
+ {"GPU", >i.Field{Name: "GPU", Type: "bool", LocalType: "bool", Doc: "use the GPU for computation -- generally faster even for small models if NData ~16", Directives: gti.Directives{}, Tag: "def:\"true\""}},
+ {"NData", >i.Field{Name: "NData", Type: "int", LocalType: "int", Doc: "number of data-parallel items to process in parallel per trial -- works (and is significantly faster) for both CPU and GPU. Results in an effective mini-batch of learning.", Directives: gti.Directives{}, Tag: "def:\"16\" min:\"1\""}},
+ {"NThreads", >i.Field{Name: "NThreads", Type: "int", LocalType: "int", Doc: "number of parallel threads for CPU computation -- 0 = use default", Directives: gti.Directives{}, Tag: "def:\"0\""}},
+ {"Run", >i.Field{Name: "Run", Type: "int", LocalType: "int", Doc: "starting run number -- determines the random seed -- runs counts from there -- can do all runs in parallel by launching separate jobs with each run, runs = 1", Directives: gti.Directives{}, Tag: "def:\"0\""}},
+ {"NRuns", >i.Field{Name: "NRuns", Type: "int", LocalType: "int", Doc: "total number of runs to do when running Train", Directives: gti.Directives{}, Tag: "def:\"1\" min:\"1\""}},
+ {"NEpochs", >i.Field{Name: "NEpochs", Type: "int", LocalType: "int", Doc: "total number of epochs per run", Directives: gti.Directives{}, Tag: "def:\"200\""}},
+ {"NTrials", >i.Field{Name: "NTrials", Type: "int", LocalType: "int", Doc: "total number of trials per epoch. Should be an even multiple of NData.", Directives: gti.Directives{}, Tag: "def:\"128\""}},
+ {"PCAInterval", >i.Field{Name: "PCAInterval", Type: "int", LocalType: "int", Doc: "how frequently (in epochs) to compute PCA on hidden representations to measure variance?", Directives: gti.Directives{}, Tag: "def:\"5\""}},
+ {"TestInterval", >i.Field{Name: "TestInterval", Type: "int", LocalType: "int", Doc: "how often to run through all the test patterns, in terms of training epochs -- can use 0 or -1 for no testing", Directives: gti.Directives{}, Tag: "def:\"-1\""}},
+ }),
+ Embeds: ordmap.Make([]ordmap.KeyVal[string, *gti.Field]{}),
+ Methods: ordmap.Make([]ordmap.KeyVal[string, *gti.Method]{}),
+})
+
+var _ = gti.AddType(&gti.Type{
+ Name: "main.LogConfig",
+ ShortName: "main.LogConfig",
+ IDName: "log-config",
+ Doc: "LogConfig has config parameters related to logging data",
+ Directives: gti.Directives{
+ &gti.Directive{Tool: "gti", Directive: "add", Args: []string{}},
+ },
+ Fields: ordmap.Make([]ordmap.KeyVal[string, *gti.Field]{
+ {"SaveWts", >i.Field{Name: "SaveWts", Type: "bool", LocalType: "bool", Doc: "if true, save final weights after each run", Directives: gti.Directives{}, Tag: ""}},
+ {"Epoch", >i.Field{Name: "Epoch", Type: "bool", LocalType: "bool", Doc: "if true, save train epoch log to file, as .epc.tsv typically", Directives: gti.Directives{}, Tag: "def:\"true\" nest:\"+\""}},
+ {"Run", >i.Field{Name: "Run", Type: "bool", LocalType: "bool", Doc: "if true, save run log to file, as .run.tsv typically", Directives: gti.Directives{}, Tag: "def:\"true\" nest:\"+\""}},
+ {"Trial", >i.Field{Name: "Trial", Type: "bool", LocalType: "bool", Doc: "if true, save train trial log to file, as .trl.tsv typically. May be large.", Directives: gti.Directives{}, Tag: "def:\"false\" nest:\"+\""}},
+ {"TestEpoch", >i.Field{Name: "TestEpoch", Type: "bool", LocalType: "bool", Doc: "if true, save testing epoch log to file, as .tst_epc.tsv typically. In general it is better to copy testing items over to the training epoch log and record there.", Directives: gti.Directives{}, Tag: "def:\"false\" nest:\"+\""}},
+ {"TestTrial", >i.Field{Name: "TestTrial", Type: "bool", LocalType: "bool", Doc: "if true, save testing trial log to file, as .tst_trl.tsv typically. May be large.", Directives: gti.Directives{}, Tag: "def:\"false\" nest:\"+\""}},
+ {"NetData", >i.Field{Name: "NetData", Type: "bool", LocalType: "bool", Doc: "if true, save network activation etc data from testing trials, for later viewing in netview", Directives: gti.Directives{}, Tag: ""}},
+ }),
+ Embeds: ordmap.Make([]ordmap.KeyVal[string, *gti.Field]{}),
+ Methods: ordmap.Make([]ordmap.KeyVal[string, *gti.Method]{}),
+})
+
+var _ = gti.AddType(&gti.Type{
+ Name: "main.Config",
+ ShortName: "main.Config",
+ IDName: "config",
+ Doc: "Config is a standard Sim config -- use as a starting point.",
+ Directives: gti.Directives{
+ &gti.Directive{Tool: "gti", Directive: "add", Args: []string{}},
+ },
+ Fields: ordmap.Make([]ordmap.KeyVal[string, *gti.Field]{
+ {"Includes", >i.Field{Name: "Includes", Type: "[]string", LocalType: "[]string", Doc: "specify include files here, and after configuration, it contains list of include files added", Directives: gti.Directives{}, Tag: ""}},
+ {"GUI", >i.Field{Name: "GUI", Type: "bool", LocalType: "bool", Doc: "open the GUI -- does not automatically run -- if false, then runs automatically and quits", Directives: gti.Directives{}, Tag: "def:\"true\""}},
+ {"Debug", >i.Field{Name: "Debug", Type: "bool", LocalType: "bool", Doc: "log debugging information", Directives: gti.Directives{}, Tag: ""}},
+ {"Env", >i.Field{Name: "Env", Type: "github.com/emer/axon/examples/bench_objrec.EnvConfig", LocalType: "EnvConfig", Doc: "environment configuration options", Directives: gti.Directives{}, Tag: "view:\"add-fields\""}},
+ {"Params", >i.Field{Name: "Params", Type: "github.com/emer/axon/examples/bench_objrec.ParamConfig", LocalType: "ParamConfig", Doc: "parameter related configuration options", Directives: gti.Directives{}, Tag: "view:\"add-fields\""}},
+ {"Run", >i.Field{Name: "Run", Type: "github.com/emer/axon/examples/bench_objrec.RunConfig", LocalType: "RunConfig", Doc: "sim running related configuration options", Directives: gti.Directives{}, Tag: "view:\"add-fields\""}},
+ {"Log", >i.Field{Name: "Log", Type: "github.com/emer/axon/examples/bench_objrec.LogConfig", LocalType: "LogConfig", Doc: "data logging related configuration options", Directives: gti.Directives{}, Tag: "view:\"add-fields\""}},
+ }),
+ Embeds: ordmap.Make([]ordmap.KeyVal[string, *gti.Field]{}),
+ Methods: ordmap.Make([]ordmap.KeyVal[string, *gti.Method]{}),
+})
+
+var _ = gti.AddType(&gti.Type{
+ Name: "main.LEDEnv",
+ ShortName: "main.LEDEnv",
+ IDName: "led-env",
+ Doc: "LEDEnv generates images of old-school \"LED\" style \"letters\" composed of a set of horizontal\nand vertical elements. All possible such combinations of 3 out of 6 line segments are created.\nRenders using SVG.",
+ Directives: gti.Directives{},
+ Fields: ordmap.Make([]ordmap.KeyVal[string, *gti.Field]{
+ {"Nm", >i.Field{Name: "Nm", Type: "string", LocalType: "string", Doc: "name of this environment", Directives: gti.Directives{}, Tag: ""}},
+ {"Dsc", >i.Field{Name: "Dsc", Type: "string", LocalType: "string", Doc: "description of this environment", Directives: gti.Directives{}, Tag: ""}},
+ {"Draw", >i.Field{Name: "Draw", Type: "github.com/emer/axon/examples/bench_objrec.LEDraw", LocalType: "LEDraw", Doc: "draws LEDs onto image", Directives: gti.Directives{}, Tag: ""}},
+ {"Vis", >i.Field{Name: "Vis", Type: "github.com/emer/axon/examples/bench_objrec.Vis", LocalType: "Vis", Doc: "visual processing params", Directives: gti.Directives{}, Tag: ""}},
+ {"NOutPer", >i.Field{Name: "NOutPer", Type: "int", LocalType: "int", Doc: "number of output units per LED item -- spiking benefits from replication", Directives: gti.Directives{}, Tag: ""}},
+ {"MinLED", >i.Field{Name: "MinLED", Type: "int", LocalType: "int", Doc: "minimum LED number to draw (0-19)", Directives: gti.Directives{}, Tag: "min:\"0\" max:\"19\""}},
+ {"MaxLED", >i.Field{Name: "MaxLED", Type: "int", LocalType: "int", Doc: "maximum LED number to draw (0-19)", Directives: gti.Directives{}, Tag: "min:\"0\" max:\"19\""}},
+ {"CurLED", >i.Field{Name: "CurLED", Type: "int", LocalType: "int", Doc: "current LED number that was drawn", Directives: gti.Directives{}, Tag: "inactive:\"+\""}},
+ {"PrvLED", >i.Field{Name: "PrvLED", Type: "int", LocalType: "int", Doc: "previous LED number that was drawn", Directives: gti.Directives{}, Tag: "inactive:\"+\""}},
+ {"XFormRand", >i.Field{Name: "XFormRand", Type: "github.com/emer/vision/v2/vxform.Rand", LocalType: "vxform.Rand", Doc: "random transform parameters", Directives: gti.Directives{}, Tag: ""}},
+ {"XForm", >i.Field{Name: "XForm", Type: "github.com/emer/vision/v2/vxform.XForm", LocalType: "vxform.XForm", Doc: "current -- prev transforms", Directives: gti.Directives{}, Tag: ""}},
+ {"Run", >i.Field{Name: "Run", Type: "github.com/emer/emergent/v2/env.Ctr", LocalType: "env.Ctr", Doc: "current run of model as provided during Init", Directives: gti.Directives{}, Tag: "view:\"inline\""}},
+ {"Epoch", >i.Field{Name: "Epoch", Type: "github.com/emer/emergent/v2/env.Ctr", LocalType: "env.Ctr", Doc: "number of times through Seq.Max number of sequences", Directives: gti.Directives{}, Tag: "view:\"inline\""}},
+ {"Trial", >i.Field{Name: "Trial", Type: "github.com/emer/emergent/v2/env.Ctr", LocalType: "env.Ctr", Doc: "trial is the step counter within epoch", Directives: gti.Directives{}, Tag: "view:\"inline\""}},
+ {"OrigImg", >i.Field{Name: "OrigImg", Type: "goki.dev/etable/v2/etensor.Float32", LocalType: "etensor.Float32", Doc: "original image prior to random transforms", Directives: gti.Directives{}, Tag: ""}},
+ {"Output", >i.Field{Name: "Output", Type: "goki.dev/etable/v2/etensor.Float32", LocalType: "etensor.Float32", Doc: "CurLED one-hot output tensor", Directives: gti.Directives{}, Tag: ""}},
+ }),
+ Embeds: ordmap.Make([]ordmap.KeyVal[string, *gti.Field]{}),
+ Methods: ordmap.Make([]ordmap.KeyVal[string, *gti.Method]{}),
+})
+
+var _ = gti.AddType(&gti.Type{
+ Name: "main.LEDraw",
+ ShortName: "main.LEDraw",
+ IDName: "le-draw",
+ Doc: "LEDraw renders old-school \"LED\" style \"letters\" composed of a set of horizontal\nand vertical elements. All possible such combinations of 3 out of 6 line segments are created.\nRenders using SVG.",
+ Directives: gti.Directives{
+ &gti.Directive{Tool: "gti", Directive: "add", Args: []string{}},
+ },
+ Fields: ordmap.Make([]ordmap.KeyVal[string, *gti.Field]{
+ {"Width", >i.Field{Name: "Width", Type: "float32", LocalType: "float32", Doc: "line width of LEDraw as percent of display size", Directives: gti.Directives{}, Tag: "def:\"4\""}},
+ {"Size", >i.Field{Name: "Size", Type: "float32", LocalType: "float32", Doc: "size of overall LED as proportion of overall image size", Directives: gti.Directives{}, Tag: "def:\"0.6\""}},
+ {"LineColor", >i.Field{Name: "LineColor", Type: "image/color.RGBA", LocalType: "color.RGBA", Doc: "color name for drawing lines", Directives: gti.Directives{}, Tag: ""}},
+ {"BgColor", >i.Field{Name: "BgColor", Type: "image/color.RGBA", LocalType: "color.RGBA", Doc: "color name for background", Directives: gti.Directives{}, Tag: ""}},
+ {"ImgSize", >i.Field{Name: "ImgSize", Type: "image.Point", LocalType: "image.Point", Doc: "size of image to render", Directives: gti.Directives{}, Tag: ""}},
+ {"Image", >i.Field{Name: "Image", Type: "*image.RGBA", LocalType: "*image.RGBA", Doc: "rendered image", Directives: gti.Directives{}, Tag: "view:\"-\""}},
+ {"Paint", >i.Field{Name: "Paint", Type: "*goki.dev/girl/paint.Context", LocalType: "*paint.Context", Doc: "painting context object", Directives: gti.Directives{}, Tag: "view:\"-\""}},
+ }),
+ Embeds: ordmap.Make([]ordmap.KeyVal[string, *gti.Field]{}),
+ Methods: ordmap.Make([]ordmap.KeyVal[string, *gti.Method]{}),
+})
+
+var _ = gti.AddType(&gti.Type{
+ Name: "main.LEDSegs",
+ ShortName: "main.LEDSegs",
+ IDName: "led-segs",
+ Doc: "LEDSegs are the led segments",
+ Directives: gti.Directives{},
+
+ Methods: ordmap.Make([]ordmap.KeyVal[string, *gti.Method]{}),
+})
+
+var _ = gti.AddType(&gti.Type{
+ Name: "main.Sim",
+ ShortName: "main.Sim",
+ IDName: "sim",
+ Doc: "Sim encapsulates the entire simulation model, and we define all the\nfunctionality as methods on this struct. This structure keeps all relevant\nstate information organized and available without having to pass everything around\nas arguments to methods, and provides the core GUI interface (note the view tags\nfor the fields which provide hints to how things should be displayed).",
+ Directives: gti.Directives{},
+ Fields: ordmap.Make([]ordmap.KeyVal[string, *gti.Field]{
+ {"Config", >i.Field{Name: "Config", Type: "github.com/emer/axon/examples/bench_objrec.Config", LocalType: "Config", Doc: "simulation configuration parameters -- set by .toml config file and / or args", Directives: gti.Directives{}, Tag: ""}},
+ {"Net", >i.Field{Name: "Net", Type: "*github.com/emer/axon/axon.Network", LocalType: "*axon.Network", Doc: "the network -- click to view / edit parameters for layers, prjns, etc", Directives: gti.Directives{}, Tag: "view:\"no-inline\""}},
+ {"Params", >i.Field{Name: "Params", Type: "github.com/emer/emergent/v2/emer.NetParams", LocalType: "emer.NetParams", Doc: "all parameter management", Directives: gti.Directives{}, Tag: "view:\"inline\""}},
+ {"Loops", >i.Field{Name: "Loops", Type: "*github.com/emer/emergent/v2/looper.Manager", LocalType: "*looper.Manager", Doc: "contains looper control loops for running sim", Directives: gti.Directives{}, Tag: "view:\"no-inline\""}},
+ {"Stats", >i.Field{Name: "Stats", Type: "github.com/emer/emergent/v2/estats.Stats", LocalType: "estats.Stats", Doc: "contains computed statistic values", Directives: gti.Directives{}, Tag: ""}},
+ {"Logs", >i.Field{Name: "Logs", Type: "github.com/emer/emergent/v2/elog.Logs", LocalType: "elog.Logs", Doc: "Contains all the logs and information about the logs.'", Directives: gti.Directives{}, Tag: ""}},
+ {"Envs", >i.Field{Name: "Envs", Type: "github.com/emer/emergent/v2/env.Envs", LocalType: "env.Envs", Doc: "Environments", Directives: gti.Directives{}, Tag: "view:\"no-inline\""}},
+ {"Context", >i.Field{Name: "Context", Type: "github.com/emer/axon/axon.Context", LocalType: "axon.Context", Doc: "axon timing parameters and state", Directives: gti.Directives{}, Tag: ""}},
+ {"ViewUpdt", >i.Field{Name: "ViewUpdt", Type: "github.com/emer/emergent/v2/netview.ViewUpdt", LocalType: "netview.ViewUpdt", Doc: "netview update parameters", Directives: gti.Directives{}, Tag: "view:\"inline\""}},
+ {"GUI", >i.Field{Name: "GUI", Type: "github.com/emer/emergent/v2/egui.GUI", LocalType: "egui.GUI", Doc: "manages all the gui elements", Directives: gti.Directives{}, Tag: "view:\"-\""}},
+ {"RndSeeds", >i.Field{Name: "RndSeeds", Type: "github.com/emer/emergent/v2/erand.Seeds", LocalType: "erand.Seeds", Doc: "a list of random seeds to use for each run", Directives: gti.Directives{}, Tag: "view:\"-\""}},
+ }),
+ Embeds: ordmap.Make([]ordmap.KeyVal[string, *gti.Field]{}),
+ Methods: ordmap.Make([]ordmap.KeyVal[string, *gti.Method]{}),
+})
+
+var _ = gti.AddType(&gti.Type{
+ Name: "main.Vis",
+ ShortName: "main.Vis",
+ IDName: "vis",
+ Doc: "Vis encapsulates specific visual processing pipeline for V1 filtering",
+ Directives: gti.Directives{
+ &gti.Directive{Tool: "gti", Directive: "add", Args: []string{}},
+ },
+ Fields: ordmap.Make([]ordmap.KeyVal[string, *gti.Field]{
+ {"V1sGabor", >i.Field{Name: "V1sGabor", Type: "github.com/emer/vision/v2/gabor.Filter", LocalType: "gabor.Filter", Doc: "V1 simple gabor filter parameters", Directives: gti.Directives{}, Tag: ""}},
+ {"V1sGeom", >i.Field{Name: "V1sGeom", Type: "github.com/emer/vision/v2/vfilter.Geom", LocalType: "vfilter.Geom", Doc: "geometry of input, output for V1 simple-cell processing", Directives: gti.Directives{}, Tag: "inactive:\"+\" view:\"inline\""}},
+ {"V1sNeighInhib", >i.Field{Name: "V1sNeighInhib", Type: "github.com/emer/vision/v2/kwta.NeighInhib", LocalType: "kwta.NeighInhib", Doc: "neighborhood inhibition for V1s -- each unit gets inhibition from same feature in nearest orthogonal neighbors -- reduces redundancy of feature code", Directives: gti.Directives{}, Tag: ""}},
+ {"V1sKWTA", >i.Field{Name: "V1sKWTA", Type: "github.com/emer/vision/v2/kwta.KWTA", LocalType: "kwta.KWTA", Doc: "kwta parameters for V1s", Directives: gti.Directives{}, Tag: ""}},
+ {"ImgSize", >i.Field{Name: "ImgSize", Type: "image.Point", LocalType: "image.Point", Doc: "target image size to use -- images will be rescaled to this size", Directives: gti.Directives{}, Tag: ""}},
+ {"V1sGaborTsr", >i.Field{Name: "V1sGaborTsr", Type: "goki.dev/etable/v2/etensor.Float32", LocalType: "etensor.Float32", Doc: "V1 simple gabor filter tensor", Directives: gti.Directives{}, Tag: "view:\"no-inline\""}},
+ {"ImgTsr", >i.Field{Name: "ImgTsr", Type: "goki.dev/etable/v2/etensor.Float32", LocalType: "etensor.Float32", Doc: "input image as tensor", Directives: gti.Directives{}, Tag: "view:\"no-inline\""}},
+ {"Img", >i.Field{Name: "Img", Type: "image.Image", LocalType: "image.Image", Doc: "current input image", Directives: gti.Directives{}, Tag: "view:\"-\""}},
+ {"V1sTsr", >i.Field{Name: "V1sTsr", Type: "goki.dev/etable/v2/etensor.Float32", LocalType: "etensor.Float32", Doc: "V1 simple gabor filter output tensor", Directives: gti.Directives{}, Tag: "view:\"no-inline\""}},
+ {"V1sExtGiTsr", >i.Field{Name: "V1sExtGiTsr", Type: "goki.dev/etable/v2/etensor.Float32", LocalType: "etensor.Float32", Doc: "V1 simple extra Gi from neighbor inhibition tensor", Directives: gti.Directives{}, Tag: "view:\"no-inline\""}},
+ {"V1sKwtaTsr", >i.Field{Name: "V1sKwtaTsr", Type: "goki.dev/etable/v2/etensor.Float32", LocalType: "etensor.Float32", Doc: "V1 simple gabor filter output, kwta output tensor", Directives: gti.Directives{}, Tag: "view:\"no-inline\""}},
+ {"V1sPoolTsr", >i.Field{Name: "V1sPoolTsr", Type: "goki.dev/etable/v2/etensor.Float32", LocalType: "etensor.Float32", Doc: "V1 simple gabor filter output, max-pooled 2x2 of V1sKwta tensor", Directives: gti.Directives{}, Tag: "view:\"no-inline\""}},
+ {"V1sUnPoolTsr", >i.Field{Name: "V1sUnPoolTsr", Type: "goki.dev/etable/v2/etensor.Float32", LocalType: "etensor.Float32", Doc: "V1 simple gabor filter output, un-max-pooled 2x2 of V1sPool tensor", Directives: gti.Directives{}, Tag: "view:\"no-inline\""}},
+ {"V1sAngOnlyTsr", >i.Field{Name: "V1sAngOnlyTsr", Type: "goki.dev/etable/v2/etensor.Float32", LocalType: "etensor.Float32", Doc: "V1 simple gabor filter output, angle-only features tensor", Directives: gti.Directives{}, Tag: "view:\"no-inline\""}},
+ {"V1sAngPoolTsr", >i.Field{Name: "V1sAngPoolTsr", Type: "goki.dev/etable/v2/etensor.Float32", LocalType: "etensor.Float32", Doc: "V1 simple gabor filter output, max-pooled 2x2 of AngOnly tensor", Directives: gti.Directives{}, Tag: "view:\"no-inline\""}},
+ {"V1cLenSumTsr", >i.Field{Name: "V1cLenSumTsr", Type: "goki.dev/etable/v2/etensor.Float32", LocalType: "etensor.Float32", Doc: "V1 complex length sum filter output tensor", Directives: gti.Directives{}, Tag: "view:\"no-inline\""}},
+ {"V1cEndStopTsr", >i.Field{Name: "V1cEndStopTsr", Type: "goki.dev/etable/v2/etensor.Float32", LocalType: "etensor.Float32", Doc: "V1 complex end stop filter output tensor", Directives: gti.Directives{}, Tag: "view:\"no-inline\""}},
+ {"V1AllTsr", >i.Field{Name: "V1AllTsr", Type: "goki.dev/etable/v2/etensor.Float32", LocalType: "etensor.Float32", Doc: "Combined V1 output tensor with V1s simple as first two rows, then length sum, then end stops = 5 rows total", Directives: gti.Directives{}, Tag: "view:\"no-inline\""}},
+ {"V1sInhibs", >i.Field{Name: "V1sInhibs", Type: "github.com/emer/vision/v2/fffb.Inhibs", LocalType: "fffb.Inhibs", Doc: "inhibition values for V1s KWTA", Directives: gti.Directives{}, Tag: "view:\"no-inline\""}},
+ }),
+ Embeds: ordmap.Make([]ordmap.KeyVal[string, *gti.Field]{}),
+ Methods: ordmap.Make([]ordmap.KeyVal[string, *gti.Method]{}),
+})
diff --git a/examples/bench_objrec/led_env.go b/examples/bench_objrec/led_env.go
index f36d67d48..0403f213b 100644
--- a/examples/bench_objrec/led_env.go
+++ b/examples/bench_objrec/led_env.go
@@ -8,10 +8,10 @@ import (
"fmt"
"math/rand"
- "github.com/emer/emergent/env"
- "github.com/emer/etable/etensor"
- "github.com/emer/vision/vfilter"
- "github.com/emer/vision/vxform"
+ "github.com/emer/emergent/v2/env"
+ "github.com/emer/vision/v2/vfilter"
+ "github.com/emer/vision/v2/vxform"
+ "goki.dev/etable/v2/etensor"
)
// LEDEnv generates images of old-school "LED" style "letters" composed of a set of horizontal
@@ -20,52 +20,52 @@ import (
type LEDEnv struct {
// name of this environment
- Nm string `desc:"name of this environment"`
+ Nm string
// description of this environment
- Dsc string `desc:"description of this environment"`
+ Dsc string
// draws LEDs onto image
- Draw LEDraw `desc:"draws LEDs onto image"`
+ Draw LEDraw
// visual processing params
- Vis Vis `desc:"visual processing params"`
+ Vis Vis
// number of output units per LED item -- spiking benefits from replication
- NOutPer int `desc:"number of output units per LED item -- spiking benefits from replication"`
+ NOutPer int
- // [min: 0] [max: 19] minimum LED number to draw (0-19)
- MinLED int `min:"0" max:"19" desc:"minimum LED number to draw (0-19)"`
+ // minimum LED number to draw (0-19)
+ MinLED int `min:"0" max:"19"`
- // [min: 0] [max: 19] maximum LED number to draw (0-19)
- MaxLED int `min:"0" max:"19" desc:"maximum LED number to draw (0-19)"`
+ // maximum LED number to draw (0-19)
+ MaxLED int `min:"0" max:"19"`
// current LED number that was drawn
- CurLED int `inactive:"+" desc:"current LED number that was drawn"`
+ CurLED int `inactive:"+"`
// previous LED number that was drawn
- PrvLED int `inactive:"+" desc:"previous LED number that was drawn"`
+ PrvLED int `inactive:"+"`
// random transform parameters
- XFormRand vxform.Rand `desc:"random transform parameters"`
+ XFormRand vxform.Rand
// current -- prev transforms
- XForm vxform.XForm `desc:"current -- prev transforms"`
+ XForm vxform.XForm
- // [view: inline] current run of model as provided during Init
- Run env.Ctr `view:"inline" desc:"current run of model as provided during Init"`
+ // current run of model as provided during Init
+ Run env.Ctr `view:"inline"`
- // [view: inline] number of times through Seq.Max number of sequences
- Epoch env.Ctr `view:"inline" desc:"number of times through Seq.Max number of sequences"`
+ // number of times through Seq.Max number of sequences
+ Epoch env.Ctr `view:"inline"`
- // [view: inline] trial is the step counter within epoch
- Trial env.Ctr `view:"inline" desc:"trial is the step counter within epoch"`
+ // trial is the step counter within epoch
+ Trial env.Ctr `view:"inline"`
// original image prior to random transforms
- OrigImg etensor.Float32 `desc:"original image prior to random transforms"`
+ OrigImg etensor.Float32
// CurLED one-hot output tensor
- Output etensor.Float32 `desc:"CurLED one-hot output tensor"`
+ Output etensor.Float32
}
func (ev *LEDEnv) Name() string { return ev.Nm }
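
Note on the pattern above, which repeats in every file touched by this PR: the desc:"..." struct tag is dropped and its text becomes an ordinary doc comment, which goki generate (invoked via the //go:generate directive added in objrec.go) extracts into the gtigen/enumgen registries. A minimal before/after sketch with an illustrative field name, not tied to any one file:

// Before (v1): documentation duplicated into a desc struct tag.
type BeforeStyle struct {
	NOutPer int `desc:"number of output units per LED item"`
}

// After (v2): the doc comment is the single source of truth; the
// gti registry entries seen in gtigen.go are generated from it.
type AfterStyle struct { //gti:add
	// number of output units per LED item
	NOutPer int
}
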
diff --git a/examples/bench_objrec/leds.go b/examples/bench_objrec/leds.go
index 092e4fd17..352759c50 100644
--- a/examples/bench_objrec/leds.go
+++ b/examples/bench_objrec/leds.go
@@ -6,47 +6,45 @@ package main
import (
"image"
+ "image/color"
- "github.com/goki/gi/gi"
- "github.com/goki/gi/girl"
+ "goki.dev/colors"
+ "goki.dev/girl/paint"
)
// LEDraw renders old-school "LED" style "letters" composed of a set of horizontal
// and vertical elements. All possible such combinations of 3 out of 6 line segments are created.
// Renders using SVG.
-type LEDraw struct {
+type LEDraw struct { //gti:add
- // [def: 4] line width of LEDraw as percent of display size
- Width float32 `def:"4" desc:"line width of LEDraw as percent of display size"`
+ // line width of LEDraw as percent of display size
+ Width float32 `def:"4"`
- // [def: 0.6] size of overall LED as proportion of overall image size
- Size float32 `def:"0.6" desc:"size of overall LED as proportion of overall image size"`
+ // size of overall LED as proportion of overall image size
+ Size float32 `def:"0.6"`
// color name for drawing lines
- LineColor gi.ColorName `desc:"color name for drawing lines"`
+ LineColor color.RGBA
// color name for background
- BgColor gi.ColorName `desc:"color name for background"`
+ BgColor color.RGBA
// size of image to render
- ImgSize image.Point `desc:"size of image to render"`
+ ImgSize image.Point
- // [view: -] rendered image
- Image *image.RGBA `view:"-" desc:"rendered image"`
+ // rendered image
+ Image *image.RGBA `view:"-"`
- // [view: +] painter object
- Paint girl.Paint `view:"+" desc:"painter object"`
-
- // [view: -] rendering state
- Render girl.State `view:"-" desc:"rendering state"`
+ // painting context object
+ Paint *paint.Context `view:"-"`
}
func (ld *LEDraw) Defaults() {
ld.ImgSize = image.Point{120, 120}
ld.Width = 4
ld.Size = 0.6
- ld.LineColor = "white"
- ld.BgColor = "black"
+ ld.LineColor = colors.White
+ ld.BgColor = colors.Black
}
// Init ensures that the image is created and of the right size, and renderer is initialized
@@ -63,11 +61,10 @@ func (ld *LEDraw) Init() {
if ld.Image == nil {
ld.Image = image.NewRGBA(image.Rectangle{Max: ld.ImgSize})
}
- ld.Render.Init(ld.ImgSize.X, ld.ImgSize.Y, ld.Image)
- ld.Paint.Defaults()
- ld.Paint.StrokeStyle.Width.SetPct(ld.Width)
- ld.Paint.StrokeStyle.Color.SetName(string(ld.LineColor))
- ld.Paint.FillStyle.Color.SetName(string(ld.BgColor))
+ ld.Paint = paint.NewContextFromImage(ld.Image)
+ ld.Paint.StrokeStyle.Width.Pw(ld.Width)
+ ld.Paint.StrokeStyle.Color = colors.C(ld.LineColor)
+ ld.Paint.FillStyle.Color = colors.C(ld.BgColor)
ld.Paint.SetUnitContextExt(ld.ImgSize)
}
@@ -76,12 +73,11 @@ func (ld *LEDraw) Clear() {
if ld.Image == nil {
ld.Init()
}
- ld.Paint.Clear(&ld.Render)
+ ld.Paint.Clear()
}
// DrawSeg draws one segment
func (ld *LEDraw) DrawSeg(seg LEDSegs) {
- rs := &ld.Render
ctrX := float32(ld.ImgSize.X) * 0.5
ctrY := float32(ld.ImgSize.Y) * 0.5
szX := ctrX * ld.Size
@@ -89,19 +85,19 @@ func (ld *LEDraw) DrawSeg(seg LEDSegs) {
// note: top-zero coordinates
switch seg {
case Bottom:
- ld.Paint.DrawLine(rs, ctrX-szX, ctrY+szY, ctrX+szX, ctrY+szY)
+ ld.Paint.DrawLine(ctrX-szX, ctrY+szY, ctrX+szX, ctrY+szY)
case Left:
- ld.Paint.DrawLine(rs, ctrX-szX, ctrY-szY, ctrX-szX, ctrY+szY)
+ ld.Paint.DrawLine(ctrX-szX, ctrY-szY, ctrX-szX, ctrY+szY)
case Right:
- ld.Paint.DrawLine(rs, ctrX+szX, ctrY-szY, ctrX+szX, ctrY+szY)
+ ld.Paint.DrawLine(ctrX+szX, ctrY-szY, ctrX+szX, ctrY+szY)
case Top:
- ld.Paint.DrawLine(rs, ctrX-szX, ctrY-szY, ctrX+szX, ctrY-szY)
+ ld.Paint.DrawLine(ctrX-szX, ctrY-szY, ctrX+szX, ctrY-szY)
case CenterH:
- ld.Paint.DrawLine(rs, ctrX-szX, ctrY, ctrX+szX, ctrY)
+ ld.Paint.DrawLine(ctrX-szX, ctrY, ctrX+szX, ctrY)
case CenterV:
- ld.Paint.DrawLine(rs, ctrX, ctrY-szY, ctrX, ctrY+szY)
+ ld.Paint.DrawLine(ctrX, ctrY-szY, ctrX, ctrY+szY)
}
- ld.Paint.Stroke(rs)
+ ld.Paint.Stroke()
}
// DrawLED draws one LED of given number, based on LEDdata
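
For orientation, the leds.go changes collapse the v1 pair of girl.Paint plus girl.State into a single paint.Context. Below is a minimal standalone sketch of the migrated drawing path, using only calls that appear in the hunks above; treat it as an approximation for illustration, not the full API surface:

package main

import (
	"image"

	"goki.dev/colors"
	"goki.dev/girl/paint"
)

func main() {
	img := image.NewRGBA(image.Rect(0, 0, 120, 120))
	pc := paint.NewContextFromImage(img) // replaces girl.State.Init + girl.Paint.Defaults
	pc.StrokeStyle.Width.Pw(4)           // line width as percent of display size
	pc.StrokeStyle.Color = colors.C(colors.White)
	pc.FillStyle.Color = colors.C(colors.Black)
	pc.SetUnitContextExt(img.Rect.Max)
	pc.Clear()                   // fills with the FillStyle (background) color
	pc.DrawLine(10, 60, 110, 60) // no render-state argument in v2
	pc.Stroke()
}
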
diff --git a/examples/bench_objrec/objrec.go b/examples/bench_objrec/objrec.go
index 98f1875b1..305844d54 100644
--- a/examples/bench_objrec/objrec.go
+++ b/examples/bench_objrec/objrec.go
@@ -10,36 +10,38 @@ input images.
*/
package main
+//go:generate goki generate -add-types
+
import (
"fmt"
"os"
"github.com/emer/axon/axon"
- "github.com/emer/emergent/econfig"
- "github.com/emer/emergent/egui"
- "github.com/emer/emergent/elog"
- "github.com/emer/emergent/emer"
- "github.com/emer/emergent/env"
- "github.com/emer/emergent/erand"
- "github.com/emer/emergent/estats"
- "github.com/emer/emergent/etime"
- "github.com/emer/emergent/looper"
- "github.com/emer/emergent/netview"
- "github.com/emer/emergent/params"
- "github.com/emer/emergent/prjn"
- "github.com/emer/emergent/relpos"
- "github.com/emer/empi/mpi"
- "github.com/emer/etable/agg"
- "github.com/emer/etable/etable"
- "github.com/emer/etable/etensor"
- "github.com/emer/etable/etview"
- "github.com/emer/etable/minmax"
- "github.com/emer/etable/split"
- "github.com/emer/etable/tsragg"
- "github.com/goki/gi/gi"
- "github.com/goki/gi/gimain"
- "github.com/goki/mat32"
- "github.com/goki/vgpu/vgpu"
+ "github.com/emer/emergent/v2/econfig"
+ "github.com/emer/emergent/v2/egui"
+ "github.com/emer/emergent/v2/elog"
+ "github.com/emer/emergent/v2/emer"
+ "github.com/emer/emergent/v2/env"
+ "github.com/emer/emergent/v2/erand"
+ "github.com/emer/emergent/v2/estats"
+ "github.com/emer/emergent/v2/etime"
+ "github.com/emer/emergent/v2/looper"
+ "github.com/emer/emergent/v2/netview"
+ "github.com/emer/emergent/v2/params"
+ "github.com/emer/emergent/v2/prjn"
+ "github.com/emer/emergent/v2/relpos"
+ "github.com/emer/empi/v2/mpi"
+ "goki.dev/etable/v2/agg"
+ "goki.dev/etable/v2/etable"
+ "goki.dev/etable/v2/etensor"
+ "goki.dev/etable/v2/etview"
+ "goki.dev/etable/v2/minmax"
+ "goki.dev/etable/v2/split"
+ "goki.dev/etable/v2/tsragg"
+ "goki.dev/gi/v2/gi"
+ "goki.dev/gi/v2/gimain"
+ "goki.dev/mat32/v2"
+ "goki.dev/vgpu/v2/vgpu"
)
func main() {
@@ -47,7 +49,7 @@ func main() {
sim.New()
sim.ConfigAll()
if sim.Config.GUI {
- gimain.Main(sim.RunGUI)
+ gimain.Run(sim.RunGUI)
} else {
sim.RunNoGUI()
}
@@ -63,37 +65,37 @@ func main() {
type Sim struct {
// simulation configuration parameters -- set by .toml config file and / or args
- Config Config `desc:"simulation configuration parameters -- set by .toml config file and / or args"`
+ Config Config
- // [view: no-inline] the network -- click to view / edit parameters for layers, prjns, etc
- Net *axon.Network `view:"no-inline" desc:"the network -- click to view / edit parameters for layers, prjns, etc"`
+ // the network -- click to view / edit parameters for layers, prjns, etc
+ Net *axon.Network `view:"no-inline"`
- // [view: inline] all parameter management
- Params emer.NetParams `view:"inline" desc:"all parameter management"`
+ // all parameter management
+ Params emer.NetParams `view:"inline"`
- // [view: no-inline] contains looper control loops for running sim
- Loops *looper.Manager `view:"no-inline" desc:"contains looper control loops for running sim"`
+ // contains looper control loops for running sim
+ Loops *looper.Manager `view:"no-inline"`
// contains computed statistic values
- Stats estats.Stats `desc:"contains computed statistic values"`
+ Stats estats.Stats
// Contains all the logs and information about the logs.
- Logs elog.Logs `desc:"Contains all the logs and information about the logs.'"`
+ Logs elog.Logs
- // [view: no-inline] Environments
- Envs env.Envs `view:"no-inline" desc:"Environments"`
+ // Environments
+ Envs env.Envs `view:"no-inline"`
// axon timing parameters and state
- Context axon.Context `desc:"axon timing parameters and state"`
+ Context axon.Context
- // [view: inline] netview update parameters
- ViewUpdt netview.ViewUpdt `view:"inline" desc:"netview update parameters"`
+ // netview update parameters
+ ViewUpdt netview.ViewUpdt `view:"inline"`
- // [view: -] manages all the gui elements
- GUI egui.GUI `view:"-" desc:"manages all the gui elements"`
+ // manages all the gui elements
+ GUI egui.GUI `view:"-"`
- // [view: -] a list of random seeds to use for each run
- RndSeeds erand.Seeds `view:"-" desc:"a list of random seeds to use for each run"`
+ // a list of random seeds to use for each run
+ RndSeeds erand.Seeds `view:"-"`
}
// New creates new blank elements and initializes defaults
@@ -349,10 +351,10 @@ func (ss *Sim) ConfigLoops() {
}
})
man.GetLoop(etime.Train, etime.Trial).OnStart.Add("UpdtImage", func() {
- ss.GUI.Grid("Image").UpdateSig()
+ ss.GUI.Grid("Image").SetNeedsRender(true)
})
man.GetLoop(etime.Test, etime.Trial).OnStart.Add("UpdtImage", func() {
- ss.GUI.Grid("Image").UpdateSig()
+ ss.GUI.Grid("Image").SetNeedsRender(true)
})
axon.LooperUpdtNetView(man, &ss.ViewUpdt, ss.Net, ss.NetViewCounters)
@@ -658,10 +660,10 @@ func (ss *Sim) ConfigActRFs() {
////////////////////////////////////////////////////////////////////////////////////////////
// Gui
-// ConfigGui configures the GoGi gui interface for this simulation,
-func (ss *Sim) ConfigGui() *gi.Window {
+// ConfigGUI configures the GoGi gui interface for this simulation.
+func (ss *Sim) ConfigGUI() {
title := "Object Recognition"
- ss.GUI.MakeWindow(ss, "objrec", title, `This simulation explores how a hierarchy of areas in the ventral stream of visual processing (up to inferotemporal (IT) cortex) can produce robust object recognition that is invariant to changes in position, size, etc of retinal input images. See README.md on GitHub.`)
+ ss.GUI.MakeBody(ss, "objrec", title, `This simulation explores how a hierarchy of areas in the ventral stream of visual processing (up to inferotemporal (IT) cortex) can produce robust object recognition that is invariant to changes in position, size, etc of retinal input images. See README.md on GitHub.`)
ss.GUI.CycleUpdateInterval = 10
nv := ss.GUI.AddNetView("NetView")
@@ -670,73 +672,74 @@ func (ss *Sim) ConfigGui() *gi.Window {
nv.SetNet(ss.Net)
ss.ViewUpdt.Config(nv, etime.Phase, etime.Phase)
- cam := &(nv.Scene().Camera)
+ cam := &(nv.SceneXYZ().Camera)
cam.Pose.Pos.Set(0.0, 1.733, 2.3)
- cam.LookAt(mat32.Vec3{0, 0, 0}, mat32.Vec3{0, 1, 0})
+ cam.LookAt(mat32.V3(0, 0, 0), mat32.V3(0, 1, 0))
ss.GUI.ViewUpdt = &ss.ViewUpdt
ss.GUI.AddPlots(title, &ss.Logs)
- tg := ss.GUI.TabView.AddNewTab(etview.KiT_TensorGrid, "Image").(*etview.TensorGrid)
- tg.SetStretchMax()
+ tg := etview.NewTensorGrid(ss.GUI.Tabs.NewTab("Image")).
+ SetTensor(&ss.Envs.ByMode(etime.Train).(*LEDEnv).Vis.ImgTsr)
ss.GUI.SetGrid("Image", tg)
- tg.SetTensor(&ss.Envs.ByMode(etime.Train).(*LEDEnv).Vis.ImgTsr)
ss.GUI.AddActRFGridTabs(&ss.Stats.ActRFs)
- ss.GUI.AddToolbarItem(egui.ToolbarItem{Label: "Init", Icon: "update",
- Tooltip: "Initialize everything including network weights, and start over. Also applies current params.",
- Active: egui.ActiveStopped,
- Func: func() {
- ss.Init()
- ss.GUI.UpdateWindow()
- },
- })
+ ss.GUI.Body.AddAppBar(func(tb *gi.Toolbar) {
+ ss.GUI.AddToolbarItem(tb, egui.ToolbarItem{Label: "Init", Icon: "update",
+ Tooltip: "Initialize everything including network weights, and start over. Also applies current params.",
+ Active: egui.ActiveStopped,
+ Func: func() {
+ ss.Init()
+ ss.GUI.UpdateWindow()
+ },
+ })
- ss.GUI.AddLooperCtrl(ss.Loops, []etime.Modes{etime.Train, etime.Test})
-
- ss.GUI.AddToolbarItem(egui.ToolbarItem{Label: "Test All",
- Icon: "step-fwd",
- Tooltip: "Tests a large same of testing items and records ActRFs.",
- Active: egui.ActiveStopped,
- Func: func() {
- if !ss.GUI.IsRunning {
- ss.GUI.IsRunning = true
- ss.GUI.ToolBar.UpdateActions()
- go ss.RunTestAll()
- }
- },
- })
+ ss.GUI.AddLooperCtrl(tb, ss.Loops, []etime.Modes{etime.Train, etime.Test})
+
+ ss.GUI.AddToolbarItem(tb, egui.ToolbarItem{Label: "Test All",
+ Icon: "step-fwd",
+ Tooltip: "Tests a large same of testing items and records ActRFs.",
+ Active: egui.ActiveStopped,
+ Func: func() {
+ if !ss.GUI.IsRunning {
+ ss.GUI.IsRunning = true
+ ss.GUI.UpdateWindow()
+ go ss.RunTestAll()
+ }
+ },
+ })
- ////////////////////////////////////////////////
- ss.GUI.ToolBar.AddSeparator("log")
- ss.GUI.AddToolbarItem(egui.ToolbarItem{Label: "Reset RunLog",
- Icon: "reset",
- Tooltip: "Reset the accumulated log of all Runs, which are tagged with the ParamSet used",
- Active: egui.ActiveAlways,
- Func: func() {
- ss.Logs.ResetLog(etime.Train, etime.Run)
- ss.GUI.UpdatePlot(etime.Train, etime.Run)
- },
- })
- ////////////////////////////////////////////////
- ss.GUI.ToolBar.AddSeparator("misc")
- ss.GUI.AddToolbarItem(egui.ToolbarItem{Label: "New Seed",
- Icon: "new",
- Tooltip: "Generate a new initial random seed to get different results. By default, Init re-establishes the same initial seed every time.",
- Active: egui.ActiveAlways,
- Func: func() {
- ss.RndSeeds.NewSeeds()
- },
- })
- ss.GUI.AddToolbarItem(egui.ToolbarItem{Label: "README",
- Icon: "file-markdown",
- Tooltip: "Opens your browser on the README file that contains instructions for how to run this model.",
- Active: egui.ActiveAlways,
- Func: func() {
- gi.OpenURL("https://github.com/emer/axon/blob/master/examples/bench_objrec/README.md")
- },
+ ////////////////////////////////////////////////
+ gi.NewSeparator(tb)
+ ss.GUI.AddToolbarItem(tb, egui.ToolbarItem{Label: "Reset RunLog",
+ Icon: "reset",
+ Tooltip: "Reset the accumulated log of all Runs, which are tagged with the ParamSet used",
+ Active: egui.ActiveAlways,
+ Func: func() {
+ ss.Logs.ResetLog(etime.Train, etime.Run)
+ ss.GUI.UpdatePlot(etime.Train, etime.Run)
+ },
+ })
+ ////////////////////////////////////////////////
+ gi.NewSeparator(tb)
+ ss.GUI.AddToolbarItem(tb, egui.ToolbarItem{Label: "New Seed",
+ Icon: "new",
+ Tooltip: "Generate a new initial random seed to get different results. By default, Init re-establishes the same initial seed every time.",
+ Active: egui.ActiveAlways,
+ Func: func() {
+ ss.RndSeeds.NewSeeds()
+ },
+ })
+ ss.GUI.AddToolbarItem(tb, egui.ToolbarItem{Label: "README",
+ Icon: "file-markdown",
+ Tooltip: "Opens your browser on the README file that contains instructions for how to run this model.",
+ Active: egui.ActiveAlways,
+ Func: func() {
+ gi.OpenURL("https://github.com/emer/axon/blob/master/examples/bench_objrec/README.md")
+ },
+ })
})
ss.GUI.FinalizeGUI(false)
if ss.Config.Run.GPU {
@@ -746,13 +749,12 @@ func (ss *Sim) ConfigGui() *gi.Window {
ss.Net.GPU.Destroy()
})
}
- return ss.GUI.Win
}
func (ss *Sim) RunGUI() {
ss.Init()
- win := ss.ConfigGui()
- win.StartEventLoop()
+ ss.ConfigGUI()
+ ss.GUI.Body.NewWindow().Run().Wait()
}
func (ss *Sim) RunNoGUI() {
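
The GUI lifecycle migration in objrec.go reduces to three substitutions: gimain.Main becomes gimain.Run, ConfigGUI no longer builds and returns a *gi.Window, and the event loop is started from the GUI body. Restated compactly (assembled directly from the hunks above, so only the juxtaposition is new):

func (ss *Sim) RunGUI() {
	ss.Init()
	ss.ConfigGUI()                        // builds ss.GUI.Body; no *gi.Window return in v2
	ss.GUI.Body.NewWindow().Run().Wait() // replaces win.StartEventLoop()
}
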
diff --git a/examples/bench_objrec/params.go b/examples/bench_objrec/params.go
index 0288f6060..abe58e51d 100644
--- a/examples/bench_objrec/params.go
+++ b/examples/bench_objrec/params.go
@@ -1,8 +1,8 @@
package main
import (
- "github.com/emer/emergent/netparams"
- "github.com/emer/emergent/params"
+ "github.com/emer/emergent/v2/netparams"
+ "github.com/emer/emergent/v2/params"
)
// ParamSets is the default set of parameters -- Base is always applied, and others can be optionally
diff --git a/examples/bench_objrec/params_good/config.toml b/examples/bench_objrec/params_good/config.toml
index 0bd576497..e4394c3a0 100644
--- a/examples/bench_objrec/params_good/config.toml
+++ b/examples/bench_objrec/params_good/config.toml
@@ -47,7 +47,7 @@ Debug = false
[Run]
GPU = true
- NData = 16
+ NData = 32
NThreads = 0
Run = 0
NRuns = 1
diff --git a/examples/bench_objrec/params_good/params_all.txt b/examples/bench_objrec/params_good/params_all.txt
index 11b3a963e..6b50a1812 100644
--- a/examples/bench_objrec/params_good/params_all.txt
+++ b/examples/bench_objrec/params_good/params_all.txt
@@ -11,13 +11,13 @@ Act: {
Clamp: { IsInput: true IsTarget: false Ge: 1.5 Add: false ErrThr: 0.5 }
Noise: { On: false GeHz: 100 Ge: 0.001 GiHz: 200 Gi: 0.001 }
VmRange: { Min: 0.1 Max: 1 }
- Mahp: { Gbar: 0.02 Voff: -30 Vslope: 9 TauMax: 1000 Tadj: 3.2093637 DtMax: 0.001 }
+ Mahp: { Gbar: 0.02 Voff: -30 Vslope: 9 TauMax: 1000 Tadj: 3.209364 DtMax: 0.001 }
Sahp: { Gbar: 0.05 CaTau: 5 Off: 0.8 Slope: 0.02 TauMax: 1 CaDt: 0.2 DtMax: 1 }
KNa: { On: true TrialSlow: false Med: { On: true Rise: 0.02 Max: 0.2 Tau: 200 Dt: 0.005 } Slow: { On: true Rise: 0.001 Max: 0.2 Tau: 1000 Dt: 0.001
} }
NMDA: { Gbar: 0.006 Tau: 100 ITau: 1 MgC: 1.4 Voff: 0 }
MaintNMDA: { Gbar: 0.007 Tau: 200 ITau: 1 MgC: 1.4 Voff: 0 }
- GabaB: { Gbar: 0.015 RiseTau: 45 DecayTau: 50 Gbase: 0.2 GiSpike: 10 MaxTime: 47.41225 TauFact: 2.5811758 RiseDt: 0.022222223 DecayDt: 0.02 }
+ GabaB: { Gbar: 0.015 RiseTau: 45 DecayTau: 50 Gbase: 0.2 GiSpike: 10 MaxTime: 47.41225 TauFact: 2.5811756 RiseDt: 0.022222223 DecayDt: 0.02 }
VGCC: { Gbar: 0.02 Ca: 25 }
AK: { Gbar: 0.1 Hf: 0.076 Mf: 0.075 Voff: 2 Vmax: -37 }
SKCa: { Gbar: 0 C50: 0.5 ActTau: 15 DeTau: 30 KCaR: 0.8 CaRDecayTau: 150 CaInThr: 0.01 CaInTau: 50 }
@@ -37,7 +37,7 @@ Learn: {
TrgAvgAct: { On: true GiBaseInit: 0 ErrLRate: 0.02 SynScaleRate: 0.0002 SubMean: 0 Permute: true Pool: true TrgRange: { Min: 0.5 Max: 2
} }
RLRate: { On: true SigmoidMin: 0.05 Diff: true SpkThr: 0.1 DiffThr: 0.02 Min: 0.001 }
- NeuroMod: { DAMod: 0 Valence: 0 DAModGain: 0.5 DALRateSign: false DALRateMod: 0 AChLRateMod: 0 AChDisInhib: 0 BurstGain: 1 DipGain: 1 }
+ NeuroMod: { DAMod: NoDAMod Valence: Positive DAModGain: 0.5 DALRateSign: false DALRateMod: 0 AChLRateMod: 0 AChDisInhib: 0 BurstGain: 1 DipGain: 1 }
}
/////////////////////////////////////////////////
Layer: V4
@@ -52,13 +52,13 @@ Act: {
Clamp: { IsInput: false IsTarget: false Ge: 0.8 Add: false ErrThr: 0.5 }
Noise: { On: false GeHz: 100 Ge: 0.001 GiHz: 200 Gi: 0.001 }
VmRange: { Min: 0.1 Max: 1 }
- Mahp: { Gbar: 0.02 Voff: -30 Vslope: 9 TauMax: 1000 Tadj: 3.2093637 DtMax: 0.001 }
+ Mahp: { Gbar: 0.02 Voff: -30 Vslope: 9 TauMax: 1000 Tadj: 3.209364 DtMax: 0.001 }
Sahp: { Gbar: 0.05 CaTau: 5 Off: 0.8 Slope: 0.02 TauMax: 1 CaDt: 0.2 DtMax: 1 }
KNa: { On: true TrialSlow: false Med: { On: true Rise: 0.02 Max: 0.2 Tau: 200 Dt: 0.005 } Slow: { On: true Rise: 0.001 Max: 0.2 Tau: 1000 Dt: 0.001
} }
NMDA: { Gbar: 0.006 Tau: 100 ITau: 1 MgC: 1.4 Voff: 0 }
MaintNMDA: { Gbar: 0.007 Tau: 200 ITau: 1 MgC: 1.4 Voff: 0 }
- GabaB: { Gbar: 0.015 RiseTau: 45 DecayTau: 50 Gbase: 0.2 GiSpike: 10 MaxTime: 47.41225 TauFact: 2.5811758 RiseDt: 0.022222223 DecayDt: 0.02 }
+ GabaB: { Gbar: 0.015 RiseTau: 45 DecayTau: 50 Gbase: 0.2 GiSpike: 10 MaxTime: 47.41225 TauFact: 2.5811756 RiseDt: 0.022222223 DecayDt: 0.02 }
VGCC: { Gbar: 0.02 Ca: 25 }
AK: { Gbar: 0.1 Hf: 0.076 Mf: 0.075 Voff: 2 Vmax: -37 }
SKCa: { Gbar: 0 C50: 0.5 ActTau: 15 DeTau: 30 KCaR: 0.8 CaRDecayTau: 150 CaInThr: 0.01 CaInTau: 50 }
@@ -78,7 +78,7 @@ Learn: {
TrgAvgAct: { On: true GiBaseInit: 0 ErrLRate: 0.02 SynScaleRate: 0.0002 SubMean: 1 Permute: true Pool: true TrgRange: { Min: 0.5 Max: 2
} }
RLRate: { On: true SigmoidMin: 0.05 Diff: true SpkThr: 0.1 DiffThr: 0.02 Min: 0.001 }
- NeuroMod: { DAMod: 0 Valence: 0 DAModGain: 0.5 DALRateSign: false DALRateMod: 0 AChLRateMod: 0 AChDisInhib: 0 BurstGain: 1 DipGain: 1 }
+ NeuroMod: { DAMod: NoDAMod Valence: Positive DAModGain: 0.5 DALRateSign: false DALRateMod: 0 AChLRateMod: 0 AChDisInhib: 0 BurstGain: 1 DipGain: 1 }
}
Burst: {
ThrRel: 0.1 ThrAbs: 0.1
@@ -136,13 +136,13 @@ Act: {
Clamp: { IsInput: false IsTarget: false Ge: 0.8 Add: false ErrThr: 0.5 }
Noise: { On: false GeHz: 100 Ge: 0.001 GiHz: 200 Gi: 0.001 }
VmRange: { Min: 0.1 Max: 1 }
- Mahp: { Gbar: 0.02 Voff: -30 Vslope: 9 TauMax: 1000 Tadj: 3.2093637 DtMax: 0.001 }
+ Mahp: { Gbar: 0.02 Voff: -30 Vslope: 9 TauMax: 1000 Tadj: 3.209364 DtMax: 0.001 }
Sahp: { Gbar: 0.05 CaTau: 5 Off: 0.8 Slope: 0.02 TauMax: 1 CaDt: 0.2 DtMax: 1 }
KNa: { On: true TrialSlow: false Med: { On: true Rise: 0.02 Max: 0.2 Tau: 200 Dt: 0.005 } Slow: { On: true Rise: 0.001 Max: 0.2 Tau: 1000 Dt: 0.001
} }
NMDA: { Gbar: 0.006 Tau: 100 ITau: 1 MgC: 1.4 Voff: 0 }
MaintNMDA: { Gbar: 0.007 Tau: 200 ITau: 1 MgC: 1.4 Voff: 0 }
- GabaB: { Gbar: 0.015 RiseTau: 45 DecayTau: 50 Gbase: 0.2 GiSpike: 10 MaxTime: 47.41225 TauFact: 2.5811758 RiseDt: 0.022222223 DecayDt: 0.02 }
+ GabaB: { Gbar: 0.015 RiseTau: 45 DecayTau: 50 Gbase: 0.2 GiSpike: 10 MaxTime: 47.41225 TauFact: 2.5811756 RiseDt: 0.022222223 DecayDt: 0.02 }
VGCC: { Gbar: 0.02 Ca: 25 }
AK: { Gbar: 0.1 Hf: 0.076 Mf: 0.075 Voff: 2 Vmax: -37 }
SKCa: { Gbar: 0 C50: 0.5 ActTau: 15 DeTau: 30 KCaR: 0.8 CaRDecayTau: 150 CaInThr: 0.01 CaInTau: 50 }
@@ -162,7 +162,7 @@ Learn: {
TrgAvgAct: { On: true GiBaseInit: 0 ErrLRate: 0.02 SynScaleRate: 0.0002 SubMean: 1 Permute: true Pool: true TrgRange: { Min: 0.5 Max: 2
} }
RLRate: { On: true SigmoidMin: 0.05 Diff: true SpkThr: 0.1 DiffThr: 0.02 Min: 0.001 }
- NeuroMod: { DAMod: 0 Valence: 0 DAModGain: 0.5 DALRateSign: false DALRateMod: 0 AChLRateMod: 0 AChDisInhib: 0 BurstGain: 1 DipGain: 1 }
+ NeuroMod: { DAMod: NoDAMod Valence: Positive DAModGain: 0.5 DALRateSign: false DALRateMod: 0 AChLRateMod: 0 AChDisInhib: 0 BurstGain: 1 DipGain: 1 }
}
Burst: {
ThrRel: 0.1 ThrAbs: 0.1
@@ -220,13 +220,13 @@ Act: {
Clamp: { IsInput: false IsTarget: true Ge: 0.8 Add: false ErrThr: 0.5 }
Noise: { On: false GeHz: 100 Ge: 0.001 GiHz: 200 Gi: 0.001 }
VmRange: { Min: 0.1 Max: 1 }
- Mahp: { Gbar: 0.02 Voff: -30 Vslope: 9 TauMax: 1000 Tadj: 3.2093637 DtMax: 0.001 }
+ Mahp: { Gbar: 0.02 Voff: -30 Vslope: 9 TauMax: 1000 Tadj: 3.209364 DtMax: 0.001 }
Sahp: { Gbar: 0.05 CaTau: 5 Off: 0.8 Slope: 0.02 TauMax: 1 CaDt: 0.2 DtMax: 1 }
KNa: { On: true TrialSlow: false Med: { On: true Rise: 0.02 Max: 0.2 Tau: 200 Dt: 0.005 } Slow: { On: true Rise: 0.001 Max: 0.2 Tau: 1000 Dt: 0.001
} }
NMDA: { Gbar: 0.006 Tau: 100 ITau: 1 MgC: 1.4 Voff: 0 }
MaintNMDA: { Gbar: 0.007 Tau: 200 ITau: 1 MgC: 1.4 Voff: 0 }
- GabaB: { Gbar: 0.015 RiseTau: 45 DecayTau: 50 Gbase: 0.2 GiSpike: 10 MaxTime: 47.41225 TauFact: 2.5811758 RiseDt: 0.022222223 DecayDt: 0.02 }
+ GabaB: { Gbar: 0.015 RiseTau: 45 DecayTau: 50 Gbase: 0.2 GiSpike: 10 MaxTime: 47.41225 TauFact: 2.5811756 RiseDt: 0.022222223 DecayDt: 0.02 }
VGCC: { Gbar: 0.02 Ca: 25 }
AK: { Gbar: 0.1 Hf: 0.076 Mf: 0.075 Voff: 2 Vmax: -37 }
SKCa: { Gbar: 0 C50: 0.5 ActTau: 15 DeTau: 30 KCaR: 0.8 CaRDecayTau: 150 CaInThr: 0.01 CaInTau: 50 }
@@ -246,7 +246,7 @@ Learn: {
TrgAvgAct: { On: true GiBaseInit: 0 ErrLRate: 0.02 SynScaleRate: 0.0002 SubMean: 0 Permute: true Pool: true TrgRange: { Min: 0.5 Max: 2
} }
RLRate: { On: true SigmoidMin: 0.05 Diff: true SpkThr: 0.1 DiffThr: 0.02 Min: 0.001 }
- NeuroMod: { DAMod: 0 Valence: 0 DAModGain: 0.5 DALRateSign: false DALRateMod: 0 AChLRateMod: 0 AChDisInhib: 0 BurstGain: 1 DipGain: 1 }
+ NeuroMod: { DAMod: NoDAMod Valence: Positive DAModGain: 0.5 DALRateSign: false DALRateMod: 0 AChLRateMod: 0 AChDisInhib: 0 BurstGain: 1 DipGain: 1 }
}
///////////////////////////////////////////////////
Prjn: ITToOutput
diff --git a/examples/bench_objrec/v1filter.go b/examples/bench_objrec/v1filter.go
index c64561066..bd3c11e08 100644
--- a/examples/bench_objrec/v1filter.go
+++ b/examples/bench_objrec/v1filter.go
@@ -8,78 +8,75 @@ import (
"image"
"github.com/anthonynsimon/bild/transform"
- "github.com/emer/etable/etensor"
- "github.com/emer/leabra/fffb"
- "github.com/emer/vision/gabor"
- "github.com/emer/vision/kwta"
- "github.com/emer/vision/v1complex"
- "github.com/emer/vision/vfilter"
- "github.com/goki/ki/kit"
+ "github.com/emer/vision/v2/fffb"
+ "github.com/emer/vision/v2/gabor"
+ "github.com/emer/vision/v2/kwta"
+ "github.com/emer/vision/v2/v1complex"
+ "github.com/emer/vision/v2/vfilter"
+ "goki.dev/etable/v2/etensor"
)
// Vis encapsulates specific visual processing pipeline for V1 filtering
-type Vis struct {
+type Vis struct { //gti:add
// V1 simple gabor filter parameters
- V1sGabor gabor.Filter `desc:"V1 simple gabor filter parameters"`
+ V1sGabor gabor.Filter
- // [view: inline] geometry of input, output for V1 simple-cell processing
- V1sGeom vfilter.Geom `inactive:"+" view:"inline" desc:"geometry of input, output for V1 simple-cell processing"`
+ // geometry of input, output for V1 simple-cell processing
+ V1sGeom vfilter.Geom `inactive:"+" view:"inline"`
// neighborhood inhibition for V1s -- each unit gets inhibition from same feature in nearest orthogonal neighbors -- reduces redundancy of feature code
- V1sNeighInhib kwta.NeighInhib `desc:"neighborhood inhibition for V1s -- each unit gets inhibition from same feature in nearest orthogonal neighbors -- reduces redundancy of feature code"`
+ V1sNeighInhib kwta.NeighInhib
// kwta parameters for V1s
- V1sKWTA kwta.KWTA `desc:"kwta parameters for V1s"`
+ V1sKWTA kwta.KWTA
// target image size to use -- images will be rescaled to this size
- ImgSize image.Point `desc:"target image size to use -- images will be rescaled to this size"`
+ ImgSize image.Point
- // [view: no-inline] V1 simple gabor filter tensor
- V1sGaborTsr etensor.Float32 `view:"no-inline" desc:"V1 simple gabor filter tensor"`
+ // V1 simple gabor filter tensor
+ V1sGaborTsr etensor.Float32 `view:"no-inline"`
- // [view: no-inline] input image as tensor
- ImgTsr etensor.Float32 `view:"no-inline" desc:"input image as tensor"`
+ // input image as tensor
+ ImgTsr etensor.Float32 `view:"no-inline"`
- // [view: -] current input image
- Img image.Image `view:"-" desc:"current input image"`
+ // current input image
+ Img image.Image `view:"-"`
- // [view: no-inline] V1 simple gabor filter output tensor
- V1sTsr etensor.Float32 `view:"no-inline" desc:"V1 simple gabor filter output tensor"`
+ // V1 simple gabor filter output tensor
+ V1sTsr etensor.Float32 `view:"no-inline"`
- // [view: no-inline] V1 simple extra Gi from neighbor inhibition tensor
- V1sExtGiTsr etensor.Float32 `view:"no-inline" desc:"V1 simple extra Gi from neighbor inhibition tensor"`
+ // V1 simple extra Gi from neighbor inhibition tensor
+ V1sExtGiTsr etensor.Float32 `view:"no-inline"`
- // [view: no-inline] V1 simple gabor filter output, kwta output tensor
- V1sKwtaTsr etensor.Float32 `view:"no-inline" desc:"V1 simple gabor filter output, kwta output tensor"`
+ // V1 simple gabor filter output, kwta output tensor
+ V1sKwtaTsr etensor.Float32 `view:"no-inline"`
- // [view: no-inline] V1 simple gabor filter output, max-pooled 2x2 of V1sKwta tensor
- V1sPoolTsr etensor.Float32 `view:"no-inline" desc:"V1 simple gabor filter output, max-pooled 2x2 of V1sKwta tensor"`
+ // V1 simple gabor filter output, max-pooled 2x2 of V1sKwta tensor
+ V1sPoolTsr etensor.Float32 `view:"no-inline"`
- // [view: no-inline] V1 simple gabor filter output, un-max-pooled 2x2 of V1sPool tensor
- V1sUnPoolTsr etensor.Float32 `view:"no-inline" desc:"V1 simple gabor filter output, un-max-pooled 2x2 of V1sPool tensor"`
+ // V1 simple gabor filter output, un-max-pooled 2x2 of V1sPool tensor
+ V1sUnPoolTsr etensor.Float32 `view:"no-inline"`
- // [view: no-inline] V1 simple gabor filter output, angle-only features tensor
- V1sAngOnlyTsr etensor.Float32 `view:"no-inline" desc:"V1 simple gabor filter output, angle-only features tensor"`
+ // V1 simple gabor filter output, angle-only features tensor
+ V1sAngOnlyTsr etensor.Float32 `view:"no-inline"`
- // [view: no-inline] V1 simple gabor filter output, max-pooled 2x2 of AngOnly tensor
- V1sAngPoolTsr etensor.Float32 `view:"no-inline" desc:"V1 simple gabor filter output, max-pooled 2x2 of AngOnly tensor"`
+ // V1 simple gabor filter output, max-pooled 2x2 of AngOnly tensor
+ V1sAngPoolTsr etensor.Float32 `view:"no-inline"`
- // [view: no-inline] V1 complex length sum filter output tensor
- V1cLenSumTsr etensor.Float32 `view:"no-inline" desc:"V1 complex length sum filter output tensor"`
+ // V1 complex length sum filter output tensor
+ V1cLenSumTsr etensor.Float32 `view:"no-inline"`
- // [view: no-inline] V1 complex end stop filter output tensor
- V1cEndStopTsr etensor.Float32 `view:"no-inline" desc:"V1 complex end stop filter output tensor"`
+ // V1 complex end stop filter output tensor
+ V1cEndStopTsr etensor.Float32 `view:"no-inline"`
- // [view: no-inline] Combined V1 output tensor with V1s simple as first two rows, then length sum, then end stops = 5 rows total
- V1AllTsr etensor.Float32 `view:"no-inline" desc:"Combined V1 output tensor with V1s simple as first two rows, then length sum, then end stops = 5 rows total"`
+ // Combined V1 output tensor with V1s simple as first two rows, then length sum, then end stops = 5 rows total
+ V1AllTsr etensor.Float32 `view:"no-inline"`
- // [view: no-inline] inhibition values for V1s KWTA
- V1sInhibs fffb.Inhibs `view:"no-inline" desc:"inhibition values for V1s KWTA"`
+ // inhibition values for V1s KWTA
+ V1sInhibs fffb.Inhibs `view:"no-inline"`
}
-var KiT_Vis = kit.Types.AddType(&Vis{}, nil)
-
func (vi *Vis) Defaults() {
vi.V1sGabor.Defaults()
sz := 6 // V1mF16 typically = 12, no border, spc = 4 -- using 1/2 that here
diff --git a/examples/boa/armaze/actions_string.go b/examples/boa/armaze/actions_string.go
deleted file mode 100644
index c092190f7..000000000
--- a/examples/boa/armaze/actions_string.go
+++ /dev/null
@@ -1,59 +0,0 @@
-// Code generated by "stringer -type=Actions"; DO NOT EDIT.
-
-package armaze
-
-import (
- "errors"
- "strconv"
-)
-
-var _ = errors.New("dummy error")
-
-func _() {
- // An "invalid array index" compiler error signifies that the constant values have changed.
- // Re-run the stringer command to generate them again.
- var x [1]struct{}
- _ = x[Forward-0]
- _ = x[Left-1]
- _ = x[Right-2]
- _ = x[Consume-3]
- _ = x[None-4]
- _ = x[ActionsN-5]
-}
-
-const _Actions_name = "ForwardLeftRightConsumeNoneActionsN"
-
-var _Actions_index = [...]uint8{0, 7, 11, 16, 23, 27, 35}
-
-func (i Actions) String() string {
- if i < 0 || i >= Actions(len(_Actions_index)-1) {
- return "Actions(" + strconv.FormatInt(int64(i), 10) + ")"
- }
- return _Actions_name[_Actions_index[i]:_Actions_index[i+1]]
-}
-
-func (i *Actions) FromString(s string) error {
- for j := 0; j < len(_Actions_index)-1; j++ {
- if s == _Actions_name[_Actions_index[j]:_Actions_index[j+1]] {
- *i = Actions(j)
- return nil
- }
- }
- return errors.New("String: " + s + " is not a valid option for type: Actions")
-}
-
-var _Actions_descMap = map[Actions]string{
- 0: ``,
- 1: ``,
- 2: ``,
- 3: ``,
- 4: ``,
- 5: ``,
-}
-
-func (i Actions) Desc() string {
- if str, ok := _Actions_descMap[i]; ok {
- return str
- }
- return "Actions(" + strconv.FormatInt(int64(i), 10) + ")"
-}
diff --git a/examples/boa/armaze/arm.go b/examples/boa/armaze/arm.go
index 84b58bd5b..ce3deb4c0 100644
--- a/examples/boa/armaze/arm.go
+++ b/examples/boa/armaze/arm.go
@@ -4,7 +4,7 @@
package armaze
-import "github.com/emer/etable/minmax"
+import "goki.dev/etable/v2/minmax"
// Arm represents the properties of a given arm of the N-maze.
// Arms have characteristic distance and effort factors for getting
@@ -14,32 +14,32 @@ import "github.com/emer/etable/minmax"
type Arm struct {
// length of arm: distance from CS start to US end for this arm
- Length int `desc:"length of arm: distance from CS start to US end for this arm"`
+ Length int
// range of different effort levels per step (uniformly randomly sampled per step) for going down this arm
- Effort minmax.F32 `desc:"range of different effort levels per step (uniformly randomly sampled per step) for going down this arm"`
+ Effort minmax.F32
// todo: later
// indexes of US[s] present at the end of this arm -- nil if none
// USs []int `desc:"indexes of US[s] present at the end of this arm -- nil if none"`
// index of US present at the end of this arm -- -1 if none
- US int `desc:"index of US present at the end of this arm -- -1 if none"`
+ US int
// index of CS visible at the start of this arm, -1 if none
- CS int `desc:"index of CS visible at the start of this arm, -1 if none"`
+ CS int
- // current expected value = US.Prob * US.Mag * Drives[US] -- computed at start of new approach
- ExValue float32 `inactive:"+" desc:"current expected value = US.Prob * US.Mag * Drives[US] -- computed at start of new approach"`
+ // current expected value = US.Prob * US.Mag * Drives[US] -- computed at start of new approach
+ ExValue float32 `inactive:"+"`
// current expected PVpos value = normalized ExValue -- computed at start of new approach
- ExPVpos float32 `inactive:"+" desc:"current expected PVpos value = normalized ExValue -- computed at start of new approach"`
+ ExPVpos float32 `inactive:"+"`
// current expected PVneg value = normalized time and effort costs
- ExPVneg float32 `inactive:"+" desc:"current expected PVneg value = normalized time and effort costs"`
+ ExPVneg float32 `inactive:"+"`
// current expected utility = effort discounted version of ExPVpos -- computed at start of new approach
- ExUtil float32 `inactive:"+" desc:"current expected utility = effort discounted version of ExPVpos -- computed at start of new approach"`
+ ExUtil float32 `inactive:"+"`
}
func (arm *Arm) Defaults() {
@@ -60,11 +60,11 @@ func (arm *Arm) Empty() {
type USParams struct {
// if true is a negative valence US -- these are after the first NDrives USs
- Negative bool `desc:"if true is a negative valence US -- these are after the first NDrives USs"`
+ Negative bool
// range of different magnitudes (uniformly sampled)
- Mag minmax.F32 `desc:"range of different magnitudes (uniformly sampled)"`
+ Mag minmax.F32
// probability of delivering the US
- Prob float32 `desc:"probability of delivering the US"`
+ Prob float32
}
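
The Arm fields above document the expected-value bookkeeping but not its arithmetic. As a purely hypothetical illustration (this helper is not part of the PR), the quantity described on ExValue could be computed from USParams roughly as:

// expectedValue sketches the ExValue doc comment above:
// US.Prob * US.Mag * Drives[US]. Mag is sampled uniformly from its
// range at runtime; its midpoint stands in here. Hypothetical helper.
func expectedValue(us *USParams, drive float32) float32 {
	mag := (us.Mag.Min + us.Mag.Max) / 2
	return us.Prob * mag * drive
}
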
diff --git a/examples/boa/armaze/config.go b/examples/boa/armaze/config.go
index 858a97f3b..2e8fd57ba 100644
--- a/examples/boa/armaze/config.go
+++ b/examples/boa/armaze/config.go
@@ -4,71 +4,71 @@
package armaze
-import "github.com/emer/etable/minmax"
+import "goki.dev/etable/v2/minmax"
// Params are misc environment parameters
type Params struct {
- // [def: {'Min':0.5, 'Max':0.5}] effort for turning
- TurnEffort minmax.F32 `nest:"+" def:"{'Min':0.5, 'Max':0.5}" desc:"effort for turning"`
+ // effort for turning
+ TurnEffort minmax.F32 `nest:"+" def:"{'Min':0.5, 'Max':0.5}"`
- // [def: {'Min':0.5, 'Max':0.5}] effort for consuming US
- ConsumeEffort minmax.F32 `nest:"+" def:"{'Min':0.5, 'Max':0.5}" desc:"effort for consuming US"`
+ // effort for consuming US
+ ConsumeEffort minmax.F32 `nest:"+" def:"{'Min':0.5, 'Max':0.5}"`
- // [def: true] always turn left -- zoolander style -- reduces degrees of freedom in evaluating behavior
- AlwaysLeft bool `def:"true" desc:"always turn left -- zoolander style -- reduces degrees of freedom in evaluating behavior"`
+ // always turn left -- zoolander style -- reduces degrees of freedom in evaluating behavior
+ AlwaysLeft bool `def:"true"`
- // [def: false] permute the order of CSs prior to applying them to arms -- having this off makes it easier to visually determine match between Drive and arm approach, and shouldn't make any difference to behavior (model doesn't know about this ordering).
- PermuteCSs bool `def:"false" desc:"permute the order of CSs prior to applying them to arms -- having this off makes it easier to visually determine match between Drive and arm approach, and shouldn't make any difference to behavior (model doesn't know about this ordering)."`
+ // permute the order of CSs prior to applying them to arms -- having this off makes it easier to visually determine match between Drive and arm approach, and shouldn't make any difference to behavior (model doesn't know about this ordering).
+ PermuteCSs bool `def:"false"`
- // [def: true] after running down an Arm, a new random starting location is selected (otherwise same arm as last run)
- RandomStart bool `def:"true" desc:"after running down an Arm, a new random starting location is selected (otherwise same arm as last run)"`
+ // after running down an Arm, a new random starting location is selected (otherwise same arm as last run)
+ RandomStart bool `def:"true"`
- // [def: true] if true, allow movement between arms just by going Left or Right -- otherwise once past the start, no switching is allowed
- OpenArms bool `def:"true" desc:"if true, allow movement between arms just by going Left or Right -- otherwise once past the start, no switching is allowed"`
+ // if true, allow movement between arms just by going Left or Right -- otherwise once past the start, no switching is allowed
+ OpenArms bool `def:"true"`
- // [def: {'Min':0, 'Max':0}] [view: inline] strength of inactive inputs (e.g., Drives in Approach paradigm)
- Inactive minmax.F32 `nest:"+" def:"{'Min':0, 'Max':0}" view:"inline" desc:"strength of inactive inputs (e.g., Drives in Approach paradigm)"`
+ // strength of inactive inputs (e.g., Drives in Approach paradigm)
+ Inactive minmax.F32 `nest:"+" def:"{'Min':0, 'Max':0}" view:"inline"`
- // [def: 4] number of Y-axis repetitions of localist stimuli -- for redundancy in spiking nets
- NYReps int `def:"4" desc:"number of Y-axis repetitions of localist stimuli -- for redundancy in spiking nets"`
+ // number of Y-axis repetitions of localist stimuli -- for redundancy in spiking nets
+ NYReps int `def:"4"`
}
// Config has environment configuration
type Config struct {
// experimental paradigm that governs the configuration and updating of environment state over time and the appropriate evaluation criteria.
- Paradigm Paradigms `desc:"experimental paradigm that governs the configuration and updating of environment state over time and the appropriate evaluation criteria."`
+ Paradigm Paradigms
// for debugging, print out key steps including a trace of the action generation logic
- Debug bool `desc:"for debugging, print out key steps including a trace of the action generation logic"`
+ Debug bool
// number of different drive-like body states (hunger, thirst, etc), that are satisfied by a corresponding positive US outcome -- this does not include the first curiosity drive
- NDrives int `desc:"number of different drive-like body states (hunger, thirst, etc), that are satisfied by a corresponding positive US outcome -- this does not include the first curiosity drive"`
+ NDrives int
// number of negative US outcomes -- these are added after NDrives positive USs to total US list
- NNegUSs int `desc:"number of negative US outcomes -- these are added after NDrives positive USs to total US list"`
+ NNegUSs int
// total number of USs = NDrives + NNegUSs
- NUSs int `inactive:"+" desc:"total number of USs = NDrives + NNegUSs"`
+ NUSs int `inactive:"+"`
// number of different arms
- NArms int `desc:"number of different arms"`
+ NArms int
// maximum arm length (distance)
- MaxArmLength int `desc:"maximum arm length (distance)"`
+ MaxArmLength int
// number of different CSs -- typically at least a unique CS per US -- relationship is determined in the US params
- NCSs int `desc:"number of different CSs -- typically at least a unique CS per US -- relationship is determined in the US params"`
+ NCSs int
// parameters associated with each US. The first NDrives are positive USs, and beyond that are negative USs
- USs []*USParams `desc:"parameters associated with each US. The first NDrives are positive USs, and beyond that are negative USs"`
+ USs []*USParams
// state of each arm: dist, effort, US, CS
- Arms []*Arm `desc:"state of each arm: dist, effort, US, CS"`
+ Arms []*Arm
- // [view: add-fields] misc params
- Params Params `view:"add-fields" desc:"misc params"`
+ // misc params
+ Params Params `view:"add-fields"`
}
func (cfg *Config) Defaults() {
diff --git a/examples/boa/armaze/enumgen.go b/examples/boa/armaze/enumgen.go
new file mode 100644
index 000000000..1fbc55daa
--- /dev/null
+++ b/examples/boa/armaze/enumgen.go
@@ -0,0 +1,379 @@
+// Code generated by "goki generate -add-types"; DO NOT EDIT.
+
+package armaze
+
+import (
+ "errors"
+ "log"
+ "strconv"
+ "strings"
+
+ "goki.dev/enums"
+)
+
+var _ActionsValues = []Actions{0, 1, 2, 3, 4}
+
+// ActionsN is the highest valid value
+// for type Actions, plus one.
+const ActionsN Actions = 5
+
+// An "invalid array index" compiler error signifies that the constant values have changed.
+// Re-run the enumgen command to generate them again.
+func _ActionsNoOp() {
+ var x [1]struct{}
+ _ = x[Forward-(0)]
+ _ = x[Left-(1)]
+ _ = x[Right-(2)]
+ _ = x[Consume-(3)]
+ _ = x[None-(4)]
+}
+
+var _ActionsNameToValueMap = map[string]Actions{
+ `Forward`: 0,
+ `forward`: 0,
+ `Left`: 1,
+ `left`: 1,
+ `Right`: 2,
+ `right`: 2,
+ `Consume`: 3,
+ `consume`: 3,
+ `None`: 4,
+ `none`: 4,
+}
+
+var _ActionsDescMap = map[Actions]string{
+ 0: ``,
+ 1: ``,
+ 2: ``,
+ 3: ``,
+ 4: ``,
+}
+
+var _ActionsMap = map[Actions]string{
+ 0: `Forward`,
+ 1: `Left`,
+ 2: `Right`,
+ 3: `Consume`,
+ 4: `None`,
+}
+
+// String returns the string representation
+// of this Actions value.
+func (i Actions) String() string {
+ if str, ok := _ActionsMap[i]; ok {
+ return str
+ }
+ return strconv.FormatInt(int64(i), 10)
+}
+
+// SetString sets the Actions value from its
+// string representation, and returns an
+// error if the string is invalid.
+func (i *Actions) SetString(s string) error {
+ if val, ok := _ActionsNameToValueMap[s]; ok {
+ *i = val
+ return nil
+ }
+ if val, ok := _ActionsNameToValueMap[strings.ToLower(s)]; ok {
+ *i = val
+ return nil
+ }
+ return errors.New(s + " is not a valid value for type Actions")
+}
+
+// Int64 returns the Actions value as an int64.
+func (i Actions) Int64() int64 {
+ return int64(i)
+}
+
+// SetInt64 sets the Actions value from an int64.
+func (i *Actions) SetInt64(in int64) {
+ *i = Actions(in)
+}
+
+// Desc returns the description of the Actions value.
+func (i Actions) Desc() string {
+ if str, ok := _ActionsDescMap[i]; ok {
+ return str
+ }
+ return i.String()
+}
+
+// ActionsValues returns all possible values
+// for the type Actions.
+func ActionsValues() []Actions {
+ return _ActionsValues
+}
+
+// Values returns all possible values
+// for the type Actions.
+func (i Actions) Values() []enums.Enum {
+ res := make([]enums.Enum, len(_ActionsValues))
+ for i, d := range _ActionsValues {
+ res[i] = d
+ }
+ return res
+}
+
+// IsValid returns whether the value is a
+// valid option for type Actions.
+func (i Actions) IsValid() bool {
+ _, ok := _ActionsMap[i]
+ return ok
+}
+
+// MarshalText implements the [encoding.TextMarshaler] interface.
+func (i Actions) MarshalText() ([]byte, error) {
+ return []byte(i.String()), nil
+}
+
+// UnmarshalText implements the [encoding.TextUnmarshaler] interface.
+func (i *Actions) UnmarshalText(text []byte) error {
+ if err := i.SetString(string(text)); err != nil {
+ log.Println(err)
+ }
+ return nil
+}
+
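The generated methods above give Actions the standard goki enums API; a brief usage sketch, using only the functions defined above (the surrounding setup is illustrative):

	var act Actions
	if err := act.SetString("forward"); err != nil { // lower-case also accepted
		log.Println(err)
	}
	fmt.Println(act.String(), act.Int64(), act.IsValid()) // Forward 0 true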
+var _ParadigmsValues = []Paradigms{0}
+
+// ParadigmsN is the highest valid value
+// for type Paradigms, plus one.
+const ParadigmsN Paradigms = 1
+
+// An "invalid array index" compiler error signifies that the constant values have changed.
+// Re-run the enumgen command to generate them again.
+func _ParadigmsNoOp() {
+ var x [1]struct{}
+ _ = x[Approach-(0)]
+}
+
+var _ParadigmsNameToValueMap = map[string]Paradigms{
+ `Approach`: 0,
+ `approach`: 0,
+}
+
+var _ParadigmsDescMap = map[Paradigms]string{
+	0: `Approach is a basic case where one Drive (chosen at random each trial) is fully active and others are at InactiveDrives levels -- goal is to approach the CS associated with the Drive-satisfying US, and avoid any negative USs. USs are always placed in the same Arms (NArms must be >= NUSs -- any additional Arms are filled at random with additional US copies)`,
+}
+
+var _ParadigmsMap = map[Paradigms]string{
+ 0: `Approach`,
+}
+
+// String returns the string representation
+// of this Paradigms value.
+func (i Paradigms) String() string {
+ if str, ok := _ParadigmsMap[i]; ok {
+ return str
+ }
+ return strconv.FormatInt(int64(i), 10)
+}
+
+// SetString sets the Paradigms value from its
+// string representation, and returns an
+// error if the string is invalid.
+func (i *Paradigms) SetString(s string) error {
+ if val, ok := _ParadigmsNameToValueMap[s]; ok {
+ *i = val
+ return nil
+ }
+ if val, ok := _ParadigmsNameToValueMap[strings.ToLower(s)]; ok {
+ *i = val
+ return nil
+ }
+ return errors.New(s + " is not a valid value for type Paradigms")
+}
+
+// Int64 returns the Paradigms value as an int64.
+func (i Paradigms) Int64() int64 {
+ return int64(i)
+}
+
+// SetInt64 sets the Paradigms value from an int64.
+func (i *Paradigms) SetInt64(in int64) {
+ *i = Paradigms(in)
+}
+
+// Desc returns the description of the Paradigms value.
+func (i Paradigms) Desc() string {
+ if str, ok := _ParadigmsDescMap[i]; ok {
+ return str
+ }
+ return i.String()
+}
+
+// ParadigmsValues returns all possible values
+// for the type Paradigms.
+func ParadigmsValues() []Paradigms {
+ return _ParadigmsValues
+}
+
+// Values returns all possible values
+// for the type Paradigms.
+func (i Paradigms) Values() []enums.Enum {
+ res := make([]enums.Enum, len(_ParadigmsValues))
+ for i, d := range _ParadigmsValues {
+ res[i] = d
+ }
+ return res
+}
+
+// IsValid returns whether the value is a
+// valid option for type Paradigms.
+func (i Paradigms) IsValid() bool {
+ _, ok := _ParadigmsMap[i]
+ return ok
+}
+
+// MarshalText implements the [encoding.TextMarshaler] interface.
+func (i Paradigms) MarshalText() ([]byte, error) {
+ return []byte(i.String()), nil
+}
+
+// UnmarshalText implements the [encoding.TextUnmarshaler] interface.
+func (i *Paradigms) UnmarshalText(text []byte) error {
+ if err := i.SetString(string(text)); err != nil {
+ log.Println(err)
+ }
+ return nil
+}
+
+var _TraceStatesValues = []TraceStates{0, 1, 2, 3, 4, 5, 6, 7}
+
+// TraceStatesN is the highest valid value
+// for type TraceStates, plus one.
+const TraceStatesN TraceStates = 8
+
+// An "invalid array index" compiler error signifies that the constant values have changed.
+// Re-run the enumgen command to generate them again.
+func _TraceStatesNoOp() {
+ var x [1]struct{}
+ _ = x[TrSearching-(0)]
+ _ = x[TrDeciding-(1)]
+ _ = x[TrJustEngaged-(2)]
+ _ = x[TrApproaching-(3)]
+ _ = x[TrConsuming-(4)]
+ _ = x[TrRewarded-(5)]
+ _ = x[TrGiveUp-(6)]
+ _ = x[TrBumping-(7)]
+}
+
+var _TraceStatesNameToValueMap = map[string]TraceStates{
+ `TrSearching`: 0,
+ `trsearching`: 0,
+ `TrDeciding`: 1,
+ `trdeciding`: 1,
+ `TrJustEngaged`: 2,
+ `trjustengaged`: 2,
+ `TrApproaching`: 3,
+ `trapproaching`: 3,
+ `TrConsuming`: 4,
+ `trconsuming`: 4,
+ `TrRewarded`: 5,
+ `trrewarded`: 5,
+ `TrGiveUp`: 6,
+ `trgiveup`: 6,
+ `TrBumping`: 7,
+ `trbumping`: 7,
+}
+
+var _TraceStatesDescMap = map[TraceStates]string{
+ 0: `Searching is not yet goal engaged, looking for a goal`,
+ 1: `Deciding is having some partial gating but not in time for action`,
+ 2: `JustEngaged means just decided to engage in a goal`,
+ 3: `Approaching is goal engaged, approaching the goal`,
+ 4: `Consuming is consuming the US, first step (prior to getting reward, step1)`,
+ 5: `Rewarded is just received reward from a US`,
+ 6: `GiveUp is when goal is abandoned`,
+ 7: `Bumping is bumping into a wall`,
+}
+
+var _TraceStatesMap = map[TraceStates]string{
+ 0: `TrSearching`,
+ 1: `TrDeciding`,
+ 2: `TrJustEngaged`,
+ 3: `TrApproaching`,
+ 4: `TrConsuming`,
+ 5: `TrRewarded`,
+ 6: `TrGiveUp`,
+ 7: `TrBumping`,
+}
+
+// String returns the string representation
+// of this TraceStates value.
+func (i TraceStates) String() string {
+ if str, ok := _TraceStatesMap[i]; ok {
+ return str
+ }
+ return strconv.FormatInt(int64(i), 10)
+}
+
+// SetString sets the TraceStates value from its
+// string representation, and returns an
+// error if the string is invalid.
+func (i *TraceStates) SetString(s string) error {
+ if val, ok := _TraceStatesNameToValueMap[s]; ok {
+ *i = val
+ return nil
+ }
+ if val, ok := _TraceStatesNameToValueMap[strings.ToLower(s)]; ok {
+ *i = val
+ return nil
+ }
+ return errors.New(s + " is not a valid value for type TraceStates")
+}
+
+// Int64 returns the TraceStates value as an int64.
+func (i TraceStates) Int64() int64 {
+ return int64(i)
+}
+
+// SetInt64 sets the TraceStates value from an int64.
+func (i *TraceStates) SetInt64(in int64) {
+ *i = TraceStates(in)
+}
+
+// Desc returns the description of the TraceStates value.
+func (i TraceStates) Desc() string {
+ if str, ok := _TraceStatesDescMap[i]; ok {
+ return str
+ }
+ return i.String()
+}
+
+// TraceStatesValues returns all possible values
+// for the type TraceStates.
+func TraceStatesValues() []TraceStates {
+ return _TraceStatesValues
+}
+
+// Values returns all possible values
+// for the type TraceStates.
+func (i TraceStates) Values() []enums.Enum {
+ res := make([]enums.Enum, len(_TraceStatesValues))
+ for i, d := range _TraceStatesValues {
+ res[i] = d
+ }
+ return res
+}
+
+// IsValid returns whether the value is a
+// valid option for type TraceStates.
+func (i TraceStates) IsValid() bool {
+ _, ok := _TraceStatesMap[i]
+ return ok
+}
+
+// MarshalText implements the [encoding.TextMarshaler] interface.
+func (i TraceStates) MarshalText() ([]byte, error) {
+ return []byte(i.String()), nil
+}
+
+// UnmarshalText implements the [encoding.TextUnmarshaler] interface.
+func (i *TraceStates) UnmarshalText(text []byte) error {
+ if err := i.SetString(string(text)); err != nil {
+ log.Println(err)
+ }
+ return nil
+}
diff --git a/examples/boa/armaze/gtigen.go b/examples/boa/armaze/gtigen.go
new file mode 100644
index 000000000..72c3cf579
--- /dev/null
+++ b/examples/boa/armaze/gtigen.go
@@ -0,0 +1,266 @@
+// Code generated by "goki generate -add-types"; DO NOT EDIT.
+
+package armaze
+
+import (
+ "goki.dev/gti"
+ "goki.dev/ordmap"
+)
+
+var _ = gti.AddType(&gti.Type{
+ Name: "github.com/emer/axon/examples/boa/armaze.Arm",
+ ShortName: "armaze.Arm",
+ IDName: "arm",
+ Doc: "Arm represents the properties of a given arm of the N-maze.\nArms have characteristic distance and effort factors for getting\ndown the arm, and typically have a distinctive CS visible at the start\nand a US at the end, which has US-specific parameters for\nactually delivering reward or punishment.",
+ Directives: gti.Directives{},
+ Fields: ordmap.Make([]ordmap.KeyVal[string, *gti.Field]{
+	{"Length", &gti.Field{Name: "Length", Type: "int", LocalType: "int", Doc: "length of arm: distance from CS start to US end for this arm", Directives: gti.Directives{}, Tag: ""}},
+	{"Effort", &gti.Field{Name: "Effort", Type: "goki.dev/etable/v2/minmax.F32", LocalType: "minmax.F32", Doc: "range of different effort levels per step (uniformly randomly sampled per step) for going down this arm", Directives: gti.Directives{}, Tag: ""}},
+	{"US", &gti.Field{Name: "US", Type: "int", LocalType: "int", Doc: "index of US present at the end of this arm -- -1 if none", Directives: gti.Directives{}, Tag: ""}},
+	{"CS", &gti.Field{Name: "CS", Type: "int", LocalType: "int", Doc: "index of CS visible at the start of this arm, -1 if none", Directives: gti.Directives{}, Tag: ""}},
+	{"ExValue", &gti.Field{Name: "ExValue", Type: "float32", LocalType: "float32", Doc: "current expected value = US.Prob * US.Mag * Drives -- computed at start of new approach", Directives: gti.Directives{}, Tag: "inactive:\"+\""}},
+	{"ExPVpos", &gti.Field{Name: "ExPVpos", Type: "float32", LocalType: "float32", Doc: "current expected PVpos value = normalized ExValue -- computed at start of new approach", Directives: gti.Directives{}, Tag: "inactive:\"+\""}},
+	{"ExPVneg", &gti.Field{Name: "ExPVneg", Type: "float32", LocalType: "float32", Doc: "current expected PVneg value = normalized time and effort costs", Directives: gti.Directives{}, Tag: "inactive:\"+\""}},
+	{"ExUtil", &gti.Field{Name: "ExUtil", Type: "float32", LocalType: "float32", Doc: "current expected utility = effort discounted version of ExPVpos -- computed at start of new approach", Directives: gti.Directives{}, Tag: "inactive:\"+\""}},
+ }),
+ Embeds: ordmap.Make([]ordmap.KeyVal[string, *gti.Field]{}),
+ Methods: ordmap.Make([]ordmap.KeyVal[string, *gti.Method]{}),
+})
+
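Because gti.AddType returns the *gti.Type it registers, the field metadata above can also be walked at runtime; a hedged sketch, assuming ordmap.Map exposes its insertion-ordered Order slice of key/value pairs:

	// printFieldDocs is a hypothetical helper, not part of this diff.
	func printFieldDocs(t *gti.Type) {
		for _, kv := range t.Fields.Order {
			fmt.Printf("%s: %s\n", kv.Key, kv.Val.Doc)
		}
	}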
+var _ = gti.AddType(&gti.Type{
+ Name: "github.com/emer/axon/examples/boa/armaze.USParams",
+ ShortName: "armaze.USParams",
+ IDName: "us-params",
+ Doc: "USParams has parameters for different USs",
+ Directives: gti.Directives{},
+ Fields: ordmap.Make([]ordmap.KeyVal[string, *gti.Field]{
+	{"Negative", &gti.Field{Name: "Negative", Type: "bool", LocalType: "bool", Doc: "if true is a negative valence US -- these are after the first NDrives USs", Directives: gti.Directives{}, Tag: ""}},
+	{"Mag", &gti.Field{Name: "Mag", Type: "goki.dev/etable/v2/minmax.F32", LocalType: "minmax.F32", Doc: "range of different magnitudes (uniformly sampled)", Directives: gti.Directives{}, Tag: ""}},
+	{"Prob", &gti.Field{Name: "Prob", Type: "float32", LocalType: "float32", Doc: "probability of delivering the US", Directives: gti.Directives{}, Tag: ""}},
+ }),
+ Embeds: ordmap.Make([]ordmap.KeyVal[string, *gti.Field]{}),
+ Methods: ordmap.Make([]ordmap.KeyVal[string, *gti.Method]{}),
+})
+
+var _ = gti.AddType(&gti.Type{
+ Name: "github.com/emer/axon/examples/boa/armaze.Params",
+ ShortName: "armaze.Params",
+ IDName: "params",
+ Doc: "Params are misc environment parameters",
+ Directives: gti.Directives{},
+ Fields: ordmap.Make([]ordmap.KeyVal[string, *gti.Field]{
+	{"TurnEffort", &gti.Field{Name: "TurnEffort", Type: "goki.dev/etable/v2/minmax.F32", LocalType: "minmax.F32", Doc: "effort for turning", Directives: gti.Directives{}, Tag: "nest:\"+\" def:\"{'Min':0.5, 'Max':0.5}\""}},
+	{"ConsumeEffort", &gti.Field{Name: "ConsumeEffort", Type: "goki.dev/etable/v2/minmax.F32", LocalType: "minmax.F32", Doc: "effort for consuming US", Directives: gti.Directives{}, Tag: "nest:\"+\" def:\"{'Min':0.5, 'Max':0.5}\""}},
+	{"AlwaysLeft", &gti.Field{Name: "AlwaysLeft", Type: "bool", LocalType: "bool", Doc: "always turn left -- zoolander style -- reduces degrees of freedom in evaluating behavior", Directives: gti.Directives{}, Tag: "def:\"true\""}},
+	{"PermuteCSs", &gti.Field{Name: "PermuteCSs", Type: "bool", LocalType: "bool", Doc: "permute the order of CSs prior to applying them to arms -- having this off makes it easier to visually determine match between Drive and arm approach, and shouldn't make any difference to behavior (model doesn't know about this ordering).", Directives: gti.Directives{}, Tag: "def:\"false\""}},
+	{"RandomStart", &gti.Field{Name: "RandomStart", Type: "bool", LocalType: "bool", Doc: "after running down an Arm, a new random starting location is selected (otherwise same arm as last run)", Directives: gti.Directives{}, Tag: "def:\"true\""}},
+	{"OpenArms", &gti.Field{Name: "OpenArms", Type: "bool", LocalType: "bool", Doc: "if true, allow movement between arms just by going Left or Right -- otherwise once past the start, no switching is allowed", Directives: gti.Directives{}, Tag: "def:\"true\""}},
+	{"Inactive", &gti.Field{Name: "Inactive", Type: "goki.dev/etable/v2/minmax.F32", LocalType: "minmax.F32", Doc: "strength of inactive inputs (e.g., Drives in Approach paradigm)", Directives: gti.Directives{}, Tag: "nest:\"+\" def:\"{'Min':0, 'Max':0}\" view:\"inline\""}},
+	{"NYReps", &gti.Field{Name: "NYReps", Type: "int", LocalType: "int", Doc: "number of Y-axis repetitions of localist stimuli -- for redundancy in spiking nets", Directives: gti.Directives{}, Tag: "def:\"4\""}},
+ }),
+ Embeds: ordmap.Make([]ordmap.KeyVal[string, *gti.Field]{}),
+ Methods: ordmap.Make([]ordmap.KeyVal[string, *gti.Method]{}),
+})
+
+var _ = gti.AddType(&gti.Type{
+ Name: "github.com/emer/axon/examples/boa/armaze.Config",
+ ShortName: "armaze.Config",
+ IDName: "config",
+ Doc: "Config has environment configuration",
+ Directives: gti.Directives{},
+ Fields: ordmap.Make([]ordmap.KeyVal[string, *gti.Field]{
+	{"Paradigm", &gti.Field{Name: "Paradigm", Type: "github.com/emer/axon/examples/boa/armaze.Paradigms", LocalType: "Paradigms", Doc: "experimental paradigm that governs the configuration and updating of environment state over time and the appropriate evaluation criteria.", Directives: gti.Directives{}, Tag: ""}},
+	{"Debug", &gti.Field{Name: "Debug", Type: "bool", LocalType: "bool", Doc: "for debugging, print out key steps including a trace of the action generation logic", Directives: gti.Directives{}, Tag: ""}},
+	{"NDrives", &gti.Field{Name: "NDrives", Type: "int", LocalType: "int", Doc: "number of different drive-like body states (hunger, thirst, etc), that are satisfied by a corresponding positive US outcome -- this does not include the first curiosity drive", Directives: gti.Directives{}, Tag: ""}},
+	{"NNegUSs", &gti.Field{Name: "NNegUSs", Type: "int", LocalType: "int", Doc: "number of negative US outcomes -- these are added after NDrives positive USs to total US list", Directives: gti.Directives{}, Tag: ""}},
+	{"NUSs", &gti.Field{Name: "NUSs", Type: "int", LocalType: "int", Doc: "total number of USs = NDrives + NNegUSs", Directives: gti.Directives{}, Tag: "inactive:\"+\""}},
+	{"NArms", &gti.Field{Name: "NArms", Type: "int", LocalType: "int", Doc: "number of different arms", Directives: gti.Directives{}, Tag: ""}},
+	{"MaxArmLength", &gti.Field{Name: "MaxArmLength", Type: "int", LocalType: "int", Doc: "maximum arm length (distance)", Directives: gti.Directives{}, Tag: ""}},
+	{"NCSs", &gti.Field{Name: "NCSs", Type: "int", LocalType: "int", Doc: "number of different CSs -- typically at least a unique CS per US -- relationship is determined in the US params", Directives: gti.Directives{}, Tag: ""}},
+	{"USs", &gti.Field{Name: "USs", Type: "[]*github.com/emer/axon/examples/boa/armaze.USParams", LocalType: "[]*USParams", Doc: "parameters associated with each US. The first NDrives are positive USs, and beyond that are negative USs", Directives: gti.Directives{}, Tag: ""}},
+	{"Arms", &gti.Field{Name: "Arms", Type: "[]*github.com/emer/axon/examples/boa/armaze.Arm", LocalType: "[]*Arm", Doc: "state of each arm: dist, effort, US, CS", Directives: gti.Directives{}, Tag: ""}},
+	{"Params", &gti.Field{Name: "Params", Type: "github.com/emer/axon/examples/boa/armaze.Params", LocalType: "Params", Doc: "misc params", Directives: gti.Directives{}, Tag: "view:\"add-fields\""}},
+ }),
+ Embeds: ordmap.Make([]ordmap.KeyVal[string, *gti.Field]{}),
+ Methods: ordmap.Make([]ordmap.KeyVal[string, *gti.Method]{}),
+})
+
+var _ = gti.AddType(&gti.Type{
+ Name: "github.com/emer/axon/examples/boa/armaze.Geom",
+ ShortName: "armaze.Geom",
+ IDName: "geom",
+ Doc: "Geom is overall geometry of the space",
+ Directives: gti.Directives{},
+ Fields: ordmap.Make([]ordmap.KeyVal[string, *gti.Field]{
+	{"ArmWidth", &gti.Field{Name: "ArmWidth", Type: "float32", LocalType: "float32", Doc: "width of arm -- emery rodent is 1 unit wide", Directives: gti.Directives{}, Tag: "def:\"2\""}},
+	{"ArmSpace", &gti.Field{Name: "ArmSpace", Type: "float32", LocalType: "float32", Doc: "total space between arms, ends up being divided on either side", Directives: gti.Directives{}, Tag: "def:\"1\""}},
+	{"LengthScale", &gti.Field{Name: "LengthScale", Type: "float32", LocalType: "float32", Doc: "multiplier per unit arm length -- keep square with width", Directives: gti.Directives{}, Tag: "def:\"2\""}},
+	{"Thick", &gti.Field{Name: "Thick", Type: "float32", LocalType: "float32", Doc: "thickness of walls, floor", Directives: gti.Directives{}, Tag: "def:\"0.1\""}},
+	{"Height", &gti.Field{Name: "Height", Type: "float32", LocalType: "float32", Doc: "height of walls", Directives: gti.Directives{}, Tag: "def:\"0.2\""}},
+	{"ArmWidthTot", &gti.Field{Name: "ArmWidthTot", Type: "float32", LocalType: "float32", Doc: "width + space", Directives: gti.Directives{}, Tag: "inactive:\"+\""}},
+	{"Depth", &gti.Field{Name: "Depth", Type: "float32", LocalType: "float32", Doc: "computed total depth, starts at 0 goes deep", Directives: gti.Directives{}, Tag: "inactive:\"+\""}},
+	{"Width", &gti.Field{Name: "Width", Type: "float32", LocalType: "float32", Doc: "computed total width", Directives: gti.Directives{}, Tag: "inactive:\"+\""}},
+	{"HalfWidth", &gti.Field{Name: "HalfWidth", Type: "float32", LocalType: "float32", Doc: "half width for centering on 0 X", Directives: gti.Directives{}, Tag: "inactive:\"+\""}},
+ }),
+ Embeds: ordmap.Make([]ordmap.KeyVal[string, *gti.Field]{}),
+ Methods: ordmap.Make([]ordmap.KeyVal[string, *gti.Method]{}),
+})
+
+var _ = gti.AddType(&gti.Type{
+ Name: "github.com/emer/axon/examples/boa/armaze.GUI",
+ ShortName: "armaze.GUI",
+ IDName: "gui",
+ Doc: "GUI renders multiple views of the flat world env",
+ Directives: gti.Directives{},
+ Fields: ordmap.Make([]ordmap.KeyVal[string, *gti.Field]{
+	{"Disp", &gti.Field{Name: "Disp", Type: "bool", LocalType: "bool", Doc: "update display -- turn off to make it faster", Directives: gti.Directives{}, Tag: ""}},
+	{"Env", &gti.Field{Name: "Env", Type: "*github.com/emer/axon/examples/boa/armaze.Env", LocalType: "*Env", Doc: "the env being visualized", Directives: gti.Directives{}, Tag: ""}},
+	{"EnvName", &gti.Field{Name: "EnvName", Type: "string", LocalType: "string", Doc: "name of current env -- number is NData index", Directives: gti.Directives{}, Tag: ""}},
+	{"Scene3D", &gti.Field{Name: "Scene3D", Type: "*goki.dev/gi/v2/xyzv.Scene3D", LocalType: "*xyzv.Scene3D", Doc: "3D visualization of the Scene", Directives: gti.Directives{}, Tag: ""}},
+	{"Scene2D", &gti.Field{Name: "Scene2D", Type: "*goki.dev/gi/v2/gi.SVG", LocalType: "*gi.SVG", Doc: "2D visualization of the Scene", Directives: gti.Directives{}, Tag: ""}},
+	{"MatColors", &gti.Field{Name: "MatColors", Type: "[]string", LocalType: "[]string", Doc: "list of material colors", Directives: gti.Directives{}, Tag: ""}},
+	{"StateColors", &gti.Field{Name: "StateColors", Type: "map[string]string", LocalType: "map[string]string", Doc: "internal state colors", Directives: gti.Directives{}, Tag: ""}},
+	{"WallSize", &gti.Field{Name: "WallSize", Type: "goki.dev/mat32/v2.Vec2", LocalType: "mat32.Vec2", Doc: "thickness (X) and height (Y) of walls", Directives: gti.Directives{}, Tag: ""}},
+	{"State", &gti.Field{Name: "State", Type: "github.com/emer/axon/examples/boa/armaze.TraceStates", LocalType: "TraceStates", Doc: "current internal / behavioral state", Directives: gti.Directives{}, Tag: ""}},
+	{"Trace", &gti.Field{Name: "Trace", Type: "github.com/emer/axon/examples/boa/armaze.StateTrace", LocalType: "StateTrace", Doc: "trace record of recent activity", Directives: gti.Directives{}, Tag: ""}},
+	{"StructView", &gti.Field{Name: "StructView", Type: "*goki.dev/gi/v2/giv.StructView", LocalType: "*giv.StructView", Doc: "view of the gui obj", Directives: gti.Directives{}, Tag: "view:\"-\""}},
+	{"WorldTabs", &gti.Field{Name: "WorldTabs", Type: "*goki.dev/gi/v2/gi.Tabs", LocalType: "*gi.Tabs", Doc: "ArmMaze TabView", Directives: gti.Directives{}, Tag: "view:\"-\""}},
+	{"IsRunning", &gti.Field{Name: "IsRunning", Type: "bool", LocalType: "bool", Doc: "ArmMaze is running", Directives: gti.Directives{}, Tag: "view:\"-\""}},
+	{"DepthVals", &gti.Field{Name: "DepthVals", Type: "[]float32", LocalType: "[]float32", Doc: "current depth map", Directives: gti.Directives{}, Tag: ""}},
+	{"Camera", &gti.Field{Name: "Camera", Type: "github.com/emer/eve/v2/evev.Camera", LocalType: "evev.Camera", Doc: "offscreen render camera settings", Directives: gti.Directives{}, Tag: ""}},
+	{"DepthMap", &gti.Field{Name: "DepthMap", Type: "goki.dev/gi/v2/giv.ColorMapName", LocalType: "giv.ColorMapName", Doc: "color map to use for rendering depth map", Directives: gti.Directives{}, Tag: ""}},
+	{"EyeRFullImg", &gti.Field{Name: "EyeRFullImg", Type: "*goki.dev/gi/v2/gi.Image", LocalType: "*gi.Image", Doc: "first-person right-eye full field view", Directives: gti.Directives{}, Tag: "view:\"-\""}},
+	{"EyeRFovImg", &gti.Field{Name: "EyeRFovImg", Type: "*goki.dev/gi/v2/gi.Image", LocalType: "*gi.Image", Doc: "first-person right-eye fovea view", Directives: gti.Directives{}, Tag: "view:\"-\""}},
+	{"DepthImg", &gti.Field{Name: "DepthImg", Type: "*goki.dev/gi/v2/gi.Image", LocalType: "*gi.Image", Doc: "depth map bitmap view", Directives: gti.Directives{}, Tag: "view:\"-\""}},
+	{"USposPlot", &gti.Field{Name: "USposPlot", Type: "*goki.dev/etable/v2/eplot.Plot2D", LocalType: "*eplot.Plot2D", Doc: "plot of positive valence drives, active OFC US state, and reward", Directives: gti.Directives{}, Tag: ""}},
+	{"USposData", &gti.Field{Name: "USposData", Type: "*goki.dev/etable/v2/etable.Table", LocalType: "*etable.Table", Doc: "data for USPlot", Directives: gti.Directives{}, Tag: ""}},
+	{"USnegPlot", &gti.Field{Name: "USnegPlot", Type: "*goki.dev/etable/v2/eplot.Plot2D", LocalType: "*eplot.Plot2D", Doc: "plot of negative valence active OFC US state, and outcomes", Directives: gti.Directives{}, Tag: ""}},
+	{"USnegData", &gti.Field{Name: "USnegData", Type: "*goki.dev/etable/v2/etable.Table", LocalType: "*etable.Table", Doc: "data for USPlot", Directives: gti.Directives{}, Tag: ""}},
+	{"Geom", &gti.Field{Name: "Geom", Type: "github.com/emer/axon/examples/boa/armaze.Geom", LocalType: "Geom", Doc: "geometry of world", Directives: gti.Directives{}, Tag: ""}},
+	{"World", &gti.Field{Name: "World", Type: "*github.com/emer/eve/v2/eve.Group", LocalType: "*eve.Group", Doc: "world", Directives: gti.Directives{}, Tag: ""}},
+	{"View3D", &gti.Field{Name: "View3D", Type: "*github.com/emer/eve/v2/evev.View", LocalType: "*evev.View", Doc: "3D view of world", Directives: gti.Directives{}, Tag: "view:\"-\""}},
+	{"Emery", &gti.Field{Name: "Emery", Type: "*github.com/emer/eve/v2/eve.Group", LocalType: "*eve.Group", Doc: "emer group", Directives: gti.Directives{}, Tag: "view:\"-\""}},
+	{"Arms", &gti.Field{Name: "Arms", Type: "*github.com/emer/eve/v2/eve.Group", LocalType: "*eve.Group", Doc: "arms group", Directives: gti.Directives{}, Tag: "view:\"-\""}},
+	{"Stims", &gti.Field{Name: "Stims", Type: "*github.com/emer/eve/v2/eve.Group", LocalType: "*eve.Group", Doc: "stims group", Directives: gti.Directives{}, Tag: "view:\"-\""}},
+	{"EyeR", &gti.Field{Name: "EyeR", Type: "github.com/emer/eve/v2/eve.Body", LocalType: "eve.Body", Doc: "Right eye of emery", Directives: gti.Directives{}, Tag: "view:\"-\""}},
+	{"Contacts", &gti.Field{Name: "Contacts", Type: "github.com/emer/eve/v2/eve.Contacts", LocalType: "eve.Contacts", Doc: "contacts from last step, for body", Directives: gti.Directives{}, Tag: "view:\"-\""}},
+ }),
+ Embeds: ordmap.Make([]ordmap.KeyVal[string, *gti.Field]{}),
+ Methods: ordmap.Make([]ordmap.KeyVal[string, *gti.Method]{
+	{"Left", &gti.Method{Name: "Left", Doc: "", Directives: gti.Directives{
+		&gti.Directive{Tool: "gti", Directive: "add", Args: []string{}},
+	}, Args: ordmap.Make([]ordmap.KeyVal[string, *gti.Field]{}), Returns: ordmap.Make([]ordmap.KeyVal[string, *gti.Field]{})}},
+	{"Right", &gti.Method{Name: "Right", Doc: "", Directives: gti.Directives{
+		&gti.Directive{Tool: "gti", Directive: "add", Args: []string{}},
+	}, Args: ordmap.Make([]ordmap.KeyVal[string, *gti.Field]{}), Returns: ordmap.Make([]ordmap.KeyVal[string, *gti.Field]{})}},
+	{"Forward", &gti.Method{Name: "Forward", Doc: "", Directives: gti.Directives{
+		&gti.Directive{Tool: "gti", Directive: "add", Args: []string{}},
+	}, Args: ordmap.Make([]ordmap.KeyVal[string, *gti.Field]{}), Returns: ordmap.Make([]ordmap.KeyVal[string, *gti.Field]{})}},
+	{"Consume", &gti.Method{Name: "Consume", Doc: "", Directives: gti.Directives{
+		&gti.Directive{Tool: "gti", Directive: "add", Args: []string{}},
+ }, Args: ordmap.Make([]ordmap.KeyVal[string, *gti.Field]{}), Returns: ordmap.Make([]ordmap.KeyVal[string, *gti.Field]{})}},
+ }),
+})
+
+var _ = gti.AddType(&gti.Type{
+ Name: "github.com/emer/axon/examples/boa/armaze.Actions",
+ ShortName: "armaze.Actions",
+ IDName: "actions",
+	Doc: "Actions is a list of the mutually exclusive actions\nthat Emery can take in the maze",
+ Directives: gti.Directives{
+		&gti.Directive{Tool: "enums", Directive: "enum", Args: []string{}},
+ },
+
+ Methods: ordmap.Make([]ordmap.KeyVal[string, *gti.Method]{}),
+})
+
+var _ = gti.AddType(&gti.Type{
+ Name: "github.com/emer/axon/examples/boa/armaze.Env",
+ ShortName: "armaze.Env",
+ IDName: "env",
+	Doc: "Env implements an N-armed maze (\"bandit\")\nwith each Arm having a distinctive CS stimulus visible at the start\n(could be one of multiple possibilities) and (some probability of)\na US outcome at the end of the maze (could be either positive\nor negative, with (variable) magnitude and probability).",
+ Directives: gti.Directives{},
+ Fields: ordmap.Make([]ordmap.KeyVal[string, *gti.Field]{
+	{"Nm", &gti.Field{Name: "Nm", Type: "string", LocalType: "string", Doc: "name of environment -- Train or Test", Directives: gti.Directives{}, Tag: ""}},
+	{"Di", &gti.Field{Name: "Di", Type: "int", LocalType: "int", Doc: "our data parallel index", Directives: gti.Directives{}, Tag: "inactive:\"+\""}},
+	{"Config", &gti.Field{Name: "Config", Type: "github.com/emer/axon/examples/boa/armaze.Config", LocalType: "Config", Doc: "configuration parameters", Directives: gti.Directives{}, Tag: ""}},
+	{"Drives", &gti.Field{Name: "Drives", Type: "[]float32", LocalType: "[]float32", Doc: "current drive strength for each of Config.NDrives in normalized 0-1 units of each drive: 0 = first sim drive, not curiosity", Directives: gti.Directives{}, Tag: ""}},
+	{"Arm", &gti.Field{Name: "Arm", Type: "int", LocalType: "int", Doc: "arm-wise location: either facing (Pos=0) or in (Pos > 0)", Directives: gti.Directives{}, Tag: "inactive:\"+\""}},
+	{"Pos", &gti.Field{Name: "Pos", Type: "int", LocalType: "int", Doc: "current position in the Arm: 0 = at start looking in, otherwise at given distance into the arm", Directives: gti.Directives{}, Tag: "inactive:\"+\""}},
+	{"Tick", &gti.Field{Name: "Tick", Type: "int", LocalType: "int", Doc: "current integer time step since last NewStart", Directives: gti.Directives{}, Tag: "inactive:\"+\""}},
+	{"TrgDrive", &gti.Field{Name: "TrgDrive", Type: "int", LocalType: "int", Doc: "current target drive, in paradigms where that is used (e.g., Approach)", Directives: gti.Directives{}, Tag: "inactive:\"+\""}},
+	{"USConsumed", &gti.Field{Name: "USConsumed", Type: "int", LocalType: "int", Doc: "Current US being consumed -- is -1 unless being consumed", Directives: gti.Directives{}, Tag: "inactive:\"+\""}},
+	{"USValue", &gti.Field{Name: "USValue", Type: "float32", LocalType: "float32", Doc: "reward or punishment value generated by the current US being consumed -- just the Magnitude of the US -- does NOT include any modulation by Drive", Directives: gti.Directives{}, Tag: "inactive:\"+\""}},
+	{"JustConsumed", &gti.Field{Name: "JustConsumed", Type: "bool", LocalType: "bool", Doc: "just finished consuming a US -- ready to start doing something new", Directives: gti.Directives{}, Tag: "inactive:\"+\""}},
+	{"ArmsMaxValue", &gti.Field{Name: "ArmsMaxValue", Type: "[]int", LocalType: "[]int", Doc: "arm(s) with maximum Drive * Mag * Prob US outcomes", Directives: gti.Directives{}, Tag: "inactive:\"+\""}},
+	{"MaxValue", &gti.Field{Name: "MaxValue", Type: "float32", LocalType: "float32", Doc: "maximum value for ArmsMaxValue arms", Directives: gti.Directives{}, Tag: "inactive:\"+\""}},
+	{"ArmsMaxUtil", &gti.Field{Name: "ArmsMaxUtil", Type: "[]int", LocalType: "[]int", Doc: "arm(s) with maximum Value outcome discounted by Effort", Directives: gti.Directives{}, Tag: "inactive:\"+\""}},
+	{"MaxUtil", &gti.Field{Name: "MaxUtil", Type: "float32", LocalType: "float32", Doc: "maximum utility for ArmsMaxUtil arms", Directives: gti.Directives{}, Tag: "inactive:\"+\""}},
+	{"ArmsNeg", &gti.Field{Name: "ArmsNeg", Type: "[]int", LocalType: "[]int", Doc: "arm(s) with negative US outcomes", Directives: gti.Directives{}, Tag: "inactive:\"+\""}},
+	{"LastAct", &gti.Field{Name: "LastAct", Type: "github.com/emer/axon/examples/boa/armaze.Actions", LocalType: "Actions", Doc: "last action taken", Directives: gti.Directives{}, Tag: "inactive:\"+\""}},
+	{"Effort", &gti.Field{Name: "Effort", Type: "float32", LocalType: "float32", Doc: "effort on current trial", Directives: gti.Directives{}, Tag: "inactive:\"+\""}},
+	{"LastCS", &gti.Field{Name: "LastCS", Type: "int", LocalType: "int", Doc: "last CS seen", Directives: gti.Directives{}, Tag: "inactive:\"+\""}},
+	{"LastUS", &gti.Field{Name: "LastUS", Type: "int", LocalType: "int", Doc: "last US -- previous trial", Directives: gti.Directives{}, Tag: "inactive:\"+\""}},
+	{"ShouldGate", &gti.Field{Name: "ShouldGate", Type: "bool", LocalType: "bool", Doc: "true if looking at correct CS for first time", Directives: gti.Directives{}, Tag: "inactive:\"+\""}},
+	{"JustGated", &gti.Field{Name: "JustGated", Type: "bool", LocalType: "bool", Doc: "just gated on this trial -- set by sim -- used for instinct", Directives: gti.Directives{}, Tag: "inactive:\"+\""}},
+	{"HasGated", &gti.Field{Name: "HasGated", Type: "bool", LocalType: "bool", Doc: "has gated at some point during sequence -- set by sim -- used for instinct", Directives: gti.Directives{}, Tag: "inactive:\"+\""}},
+	{"States", &gti.Field{Name: "States", Type: "map[string]*goki.dev/etable/v2/etensor.Float32", LocalType: "map[string]*etensor.Float32", Doc: "named states -- e.g., USs, CSs, etc", Directives: gti.Directives{}, Tag: ""}},
+	{"MaxLength", &gti.Field{Name: "MaxLength", Type: "int", LocalType: "int", Doc: "maximum length of any arm", Directives: gti.Directives{}, Tag: "inactive:\"+\""}},
+	{"Rand", &gti.Field{Name: "Rand", Type: "github.com/emer/emergent/v2/erand.SysRand", LocalType: "erand.SysRand", Doc: "random number generator for the env -- all random calls must use this", Directives: gti.Directives{}, Tag: "view:\"-\""}},
+	{"RndSeed", &gti.Field{Name: "RndSeed", Type: "int64", LocalType: "int64", Doc: "random seed", Directives: gti.Directives{}, Tag: "inactive:\"+\""}},
+ }),
+ Embeds: ordmap.Make([]ordmap.KeyVal[string, *gti.Field]{}),
+ Methods: ordmap.Make([]ordmap.KeyVal[string, *gti.Method]{}),
+})
+
+var _ = gti.AddType(&gti.Type{
+ Name: "github.com/emer/axon/examples/boa/armaze.Paradigms",
+ ShortName: "armaze.Paradigms",
+ IDName: "paradigms",
+ Doc: "Paradigms is a list of experimental paradigms that\ngovern the configuration and updating of environment\nstate over time and the appropriate evaluation criteria.",
+ Directives: gti.Directives{
+		&gti.Directive{Tool: "enums", Directive: "enum", Args: []string{}},
+ },
+
+ Methods: ordmap.Make([]ordmap.KeyVal[string, *gti.Method]{}),
+})
+
+var _ = gti.AddType(&gti.Type{
+ Name: "github.com/emer/axon/examples/boa/armaze.TraceStates",
+ ShortName: "armaze.TraceStates",
+ IDName: "trace-states",
+ Doc: "TraceStates is a list of mutually exclusive states\nfor tracing the behavior and internal state of Emery",
+ Directives: gti.Directives{
+		&gti.Directive{Tool: "enums", Directive: "enum", Args: []string{}},
+ },
+
+ Methods: ordmap.Make([]ordmap.KeyVal[string, *gti.Method]{}),
+})
+
+var _ = gti.AddType(&gti.Type{
+ Name: "github.com/emer/axon/examples/boa/armaze.TraceRec",
+ ShortName: "armaze.TraceRec",
+ IDName: "trace-rec",
+ Doc: "TraceRec holds record of info for tracing behavior, state",
+ Directives: gti.Directives{},
+ Fields: ordmap.Make([]ordmap.KeyVal[string, *gti.Field]{
+	{"Time", &gti.Field{Name: "Time", Type: "float32", LocalType: "float32", Doc: "absolute time", Directives: gti.Directives{}, Tag: ""}},
+	{"Trial", &gti.Field{Name: "Trial", Type: "int", LocalType: "int", Doc: "trial counter", Directives: gti.Directives{}, Tag: ""}},
+	{"Arm", &gti.Field{Name: "Arm", Type: "int", LocalType: "int", Doc: "current arm", Directives: gti.Directives{}, Tag: ""}},
+	{"Pos", &gti.Field{Name: "Pos", Type: "int", LocalType: "int", Doc: "position in arm", Directives: gti.Directives{}, Tag: ""}},
+	{"State", &gti.Field{Name: "State", Type: "github.com/emer/axon/examples/boa/armaze.TraceStates", LocalType: "TraceStates", Doc: "behavioral / internal state summary", Directives: gti.Directives{}, Tag: ""}},
+	{"Drives", &gti.Field{Name: "Drives", Type: "[]float32", LocalType: "[]float32", Doc: "NDrives current drive state level", Directives: gti.Directives{}, Tag: ""}},
+ }),
+ Embeds: ordmap.Make([]ordmap.KeyVal[string, *gti.Field]{}),
+ Methods: ordmap.Make([]ordmap.KeyVal[string, *gti.Method]{}),
+})
+
+var _ = gti.AddType(&gti.Type{
+ Name: "github.com/emer/axon/examples/boa/armaze.StateTrace",
+ ShortName: "armaze.StateTrace",
+ IDName: "state-trace",
+ Doc: "StateTrace holds trace records",
+ Directives: gti.Directives{},
+
+ Methods: ordmap.Make([]ordmap.KeyVal[string, *gti.Method]{}),
+})
diff --git a/examples/boa/armaze/gui.go b/examples/boa/armaze/gui.go
index 37808e162..51a719335 100644
--- a/examples/boa/armaze/gui.go
+++ b/examples/boa/armaze/gui.go
@@ -6,54 +6,58 @@ package armaze
import (
"fmt"
+ "image/color"
"log"
"github.com/emer/axon/axon"
- "github.com/emer/etable/eplot"
- "github.com/emer/etable/etable"
- "github.com/emer/etable/etensor"
- "github.com/emer/etable/etview"
- "github.com/emer/eve/eve"
- "github.com/emer/eve/evev"
- "github.com/goki/gi/colormap"
- "github.com/goki/gi/gi"
- "github.com/goki/gi/gi3d"
- "github.com/goki/gi/gist"
- "github.com/goki/gi/giv"
- "github.com/goki/gi/svg"
- "github.com/goki/ki/ki"
- "github.com/goki/mat32"
+ "github.com/emer/eve/v2/eve"
+ "github.com/emer/eve/v2/evev"
+ "goki.dev/colors"
+ "goki.dev/colors/colormap"
+ "goki.dev/etable/v2/eplot"
+ "goki.dev/etable/v2/etable"
+ "goki.dev/etable/v2/etensor"
+ "goki.dev/etable/v2/etview"
+ "goki.dev/gi/v2/gi"
+ "goki.dev/gi/v2/giv"
+ "goki.dev/gi/v2/xyzv"
+ "goki.dev/girl/styles"
+ "goki.dev/goosi/events"
+ "goki.dev/grr"
+ "goki.dev/icons"
+ "goki.dev/mat32/v2"
+ "goki.dev/xyz"
)
// Geom is overall geometry of the space
type Geom struct {
- // [def: 2] width of arm -- emery rodent is 1 unit wide
- ArmWidth float32 `def:"2" desc:"width of arm -- emery rodent is 1 unit wide"`
+ // width of arm -- emery rodent is 1 unit wide
+ ArmWidth float32 `def:"2"`
- // [def: 1] total space between arms, ends up being divided on either side
- ArmSpace float32 `def:"1" desc:"total space between arms, ends up being divided on either side"`
+ // total space between arms, ends up being divided on either side
+ ArmSpace float32 `def:"1"`
- // [def: 2] multiplier per unit arm length -- keep square with width
- LengthScale float32 `def:"2" desc:"multiplier per unit arm length -- keep square with width"`
+ // multiplier per unit arm length -- keep square with width
+ LengthScale float32 `def:"2"`
- // [def: 0.1] thickness of walls, floor
- Thick float32 `def:"0.1" desc:"thickness of walls, floor"`
+ // thickness of walls, floor
+ Thick float32 `def:"0.1"`
- // [def: 0.2] height of walls
- Height float32 `def:"0.2" desc:"height of walls"`
+ // height of walls
+ Height float32 `def:"0.2"`
// width + space
- ArmWidthTot float32 `inactive:"+" desc:"width + space"`
+ ArmWidthTot float32 `inactive:"+"`
// computed total depth, starts at 0 goes deep
- Depth float32 `inactive:"+" desc:"computed total depth, starts at 0 goes deep"`
+ Depth float32 `inactive:"+"`
// computed total width
- Width float32 `inactive:"+" desc:"computed total width"`
+ Width float32 `inactive:"+"`
// half width for centering on 0 X
- HalfWidth float32 `inactive:"+" desc:"half width for centering on 0 X"`
+ HalfWidth float32 `inactive:"+"`
}
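From the doc comments above, the computed fields follow from the declared ones; a hedged sketch of what Config below plausibly derives (exact formulas may differ in the actual implementation):

	ge.ArmWidthTot = ge.ArmWidth + ge.ArmSpace  // "width + space"
	ge.Width = float32(nArms) * ge.ArmWidthTot  // total width across arms
	ge.HalfWidth = 0.5 * ge.Width               // for centering on 0 X
	ge.Depth = float32(maxLen) * ge.LengthScale // scales with longest arm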
func (ge *Geom) Config(nArms int, maxLen int) {
@@ -79,102 +83,102 @@ func (ge *Geom) Pos(arm, pos int) (x, y float32) {
type GUI struct {
// update display -- turn off to make it faster
- Disp bool `desc:"update display -- turn off to make it faster"`
+ Disp bool
// the env being visualized
- Env *Env `desc:"the env being visualized"`
+ Env *Env
// name of current env -- number is NData index
- EnvName string `desc:"name of current env -- number is NData index"`
+ EnvName string
+
+ // 3D visualization of the Scene
+ SceneView *xyzv.SceneView
+
+ // 2D visualization of the Scene
+ Scene2D *gi.SVG
// list of material colors
- MatColors []string `desc:"list of material colors"`
+ MatColors []string
// internal state colors
- StateColors map[string]string `desc:"internal state colors"`
+ StateColors map[string]string
// thickness (X) and height (Y) of walls
- WallSize mat32.Vec2 `desc:"thickness (X) and height (Y) of walls"`
+ WallSize mat32.Vec2
// current internal / behavioral state
- State TraceStates `desc:"current internal / behavioral state"`
+ State TraceStates
// trace record of recent activity
- Trace StateTrace `desc:"trace record of recent activity"`
+ Trace StateTrace
- // [view: -] view of the gui obj
- StructView *giv.StructView `view:"-" desc:"view of the gui obj"`
+ // view of the gui obj
+ StructView *giv.StructView `view:"-"`
- // [view: -] ArmMaze GUI window
- WorldWin *gi.Window `view:"-" desc:"ArmMaze GUI window"`
+ // ArmMaze TabView
+ WorldTabs *gi.Tabs `view:"-"`
- // [view: -] ArmMaze TabView
- WorldTabs *gi.TabView `view:"-" desc:"ArmMaze TabView"`
-
- // [view: -] ArmMaze is running
- IsRunning bool `view:"-" desc:"ArmMaze is running"`
+ // ArmMaze is running
+ IsRunning bool `view:"-"`
// current depth map
- DepthVals []float32 `desc:"current depth map"`
+ DepthVals []float32
// offscreen render camera settings
- Camera evev.Camera `desc:"offscreen render camera settings"`
+ Camera evev.Camera
// color map to use for rendering depth map
- DepthMap giv.ColorMapName `desc:"color map to use for rendering depth map"`
+ DepthMap giv.ColorMapName
- // [view: -] first-person right-eye full field view
- EyeRFullImg *gi.Bitmap `view:"-" desc:"first-person right-eye full field view"`
+ // first-person right-eye full field view
+ EyeRFullImg *gi.Image `view:"-"`
- // [view: -] first-person right-eye fovea view
- EyeRFovImg *gi.Bitmap `view:"-" desc:"first-person right-eye fovea view"`
+ // first-person right-eye fovea view
+ EyeRFovImg *gi.Image `view:"-"`
- // [view: -] depth map bitmap view
- DepthImg *gi.Bitmap `view:"-" desc:"depth map bitmap view"`
+ // depth map bitmap view
+ DepthImg *gi.Image `view:"-"`
// plot of positive valence drives, active OFC US state, and reward
- USposPlot *eplot.Plot2D `desc:"plot of positive valence drives, active OFC US state, and reward"`
+ USposPlot *eplot.Plot2D
// data for USPlot
- USposData *etable.Table `desc:"data for USPlot"`
+ USposData *etable.Table
// plot of negative valence active OFC US state, and outcomes
- USnegPlot *eplot.Plot2D `desc:"plot of negative valence active OFC US state, and outcomes"`
+ USnegPlot *eplot.Plot2D
// data for USPlot
- USnegData *etable.Table `desc:"data for USPlot"`
+ USnegData *etable.Table
// geometry of world
- Geom Geom `desc:"geometry of world"`
+ Geom Geom
// world
- World *eve.Group `desc:"world"`
-
- // [view: -] 3D view of world
- View3D *evev.View `view:"-" desc:"3D view of world"`
+ World *eve.Group
- // [view: -] emer group
- Emery *eve.Group `view:"-" desc:"emer group"`
+ // 3D view of world
+ View3D *evev.View `view:"-"`
- // [view: -] arms group
- Arms *eve.Group `view:"-" desc:"arms group"`
+ // emer group
+ Emery *eve.Group `view:"-"`
- // [view: -] stims group
- Stims *eve.Group `view:"-" desc:"stims group"`
+ // arms group
+ Arms *eve.Group `view:"-"`
- // [view: -] Right eye of emery
- EyeR eve.Body `view:"-" desc:"Right eye of emery"`
+ // stims group
+ Stims *eve.Group `view:"-"`
- // [view: -] contacts from last step, for body
- Contacts eve.Contacts `view:"-" desc:"contacts from last step, for body"`
+ // Right eye of emery
+ EyeR eve.Body `view:"-"`
- // [view: -] gui window
- Win *gi.Window `view:"-" desc:"gui window"`
+ // contacts from last step, for body
+ Contacts eve.Contacts `view:"-"`
}
// ConfigWorldGUI configures all the world view GUI elements
// pass an initial env to use for configuring
-func (vw *GUI) ConfigWorldGUI(ev *Env) *gi.Window {
+func (vw *GUI) ConfigWorldGUI(ev *Env) *gi.Body {
vw.Disp = true
vw.Env = ev
vw.EnvName = ev.Nm
@@ -194,215 +198,108 @@ func (vw *GUI) ConfigWorldGUI(ev *Env) *gi.Window {
}
vw.MatColors = []string{"blue", "orange", "red", "violet", "navy", "brown", "pink", "purple", "olive", "chartreuse", "cyan", "magenta", "salmon", "goldenrod", "SykBlue"}
- width := 1600
- height := 1200
-
- win := gi.NewMainWindow("armaze", "Arm Maze", width, height)
- vw.WorldWin = win
-
- vp := win.WinViewport2D()
- updt := vp.UpdateStart()
-
- mfr := win.SetMainFrame()
-
- tbar := gi.AddNewToolBar(mfr, "tbar")
- tbar.SetStretchMaxWidth()
+ b := gi.NewBody("armaze").SetTitle("Arm Maze")
- split := gi.AddNewSplitView(mfr, "split")
- split.Dim = mat32.X
- split.SetStretchMax()
+ split := gi.NewSplits(b, "split")
- svfr := gi.AddNewFrame(split, "svfr", gi.LayoutVert)
- svfr.SetStretchMax()
- svfr.SetReRenderAnchor()
-
- sv := giv.AddNewStructView(svfr, "sv")
- sv.SetStruct(vw)
- vw.StructView = sv
-
- imgLay := gi.AddNewLayout(svfr, "img-lay", gi.LayoutGrid)
- imgLay.SetProp("columns", 2)
- imgLay.SetProp("spacing", 8)
+ svfr := gi.NewFrame(split, "svfr")
+ svfr.Style(func(s *styles.Style) {
+ s.Direction = styles.Column
+ })
- gi.AddNewLabel(imgLay, "lab-img-fov", "Eye-View, Fovea:")
- gi.AddNewLabel(imgLay, "lab-img-full", "Full Field:")
+ vw.StructView = giv.NewStructView(svfr, "sv").SetStruct(vw)
+ imfr := gi.NewFrame(svfr).Style(func(s *styles.Style) {
+ s.Display = styles.Grid
+ s.Columns = 2
+ s.Grow.Set(0, 0)
+ })
+ gi.NewLabel(imfr).SetText("Eye-View, Fovea:")
+ gi.NewLabel(imfr).SetText("Full Field:")
- vw.EyeRFovImg = gi.AddNewBitmap(imgLay, "eye-r-fov-img")
+ vw.EyeRFovImg = gi.NewImage(imfr, "eye-r-fov-img")
vw.EyeRFovImg.SetSize(vw.Camera.Size)
- vw.EyeRFovImg.LayoutToImgSize()
- // vw.EyeRFovImg.SetProp("vertical-align", gist.AlignTop)
- vw.EyeRFullImg = gi.AddNewBitmap(imgLay, "eye-r-full-img")
+ vw.EyeRFullImg = gi.NewImage(imfr, "eye-r-full-img")
vw.EyeRFullImg.SetSize(vw.Camera.Size)
- vw.EyeRFullImg.LayoutToImgSize()
- // vw.EyeRFullImg.SetProp("vertical-align", gist.AlignTop)
-
- // gi.AddNewLabel(imfr, "lab-depth", "Right Eye Depth:")
- // vw.DepthImg = gi.AddNewBitmap(imfr, "depth-img")
- // vw.DepthImg.SetSize(vw.Camera.Size)
- // vw.DepthImg.LayoutToImgSize()
- // vw.DepthImg.SetProp("vertical-align", gist.AlignTop)
-
- vw.USposPlot = eplot.AddNewPlot2D(svfr, "us-pos")
- vw.USnegPlot = eplot.AddNewPlot2D(svfr, "us-neg")
- wd := 700
- vw.USposPlot.SetProp("max-width", wd)
- vw.USnegPlot.SetProp("max-width", wd)
- ht := 160
- vw.USposPlot.SetProp("max-height", ht)
- vw.USnegPlot.SetProp("max-height", ht)
- vw.USposPlot.SetProp("height", ht)
- vw.USnegPlot.SetProp("height", ht)
+
+ wd := float32(300)
+ ht := float32(100)
+ vw.USposPlot = eplot.NewPlot2D(svfr, "us-pos")
+ vw.USposPlot.Style(func(s *styles.Style) {
+ s.Min.X.Px(wd)
+ s.Min.Y.Px(ht)
+ s.Grow.Set(0, 0)
+ })
+
+ vw.USnegPlot = eplot.NewPlot2D(svfr, "us-neg")
+ vw.USnegPlot.Style(func(s *styles.Style) {
+ s.Min.X.Px(wd)
+ s.Min.Y.Px(ht)
+ s.Grow.Set(0, 0)
+ })
vw.ConfigUSPlots()
- tv := gi.AddNewTabView(split, "tv")
+ tv := gi.NewTabs(split)
vw.WorldTabs = tv
- scfr := tv.AddNewTab(gi.KiT_Frame, "3D View").(*gi.Frame)
- twofr := tv.AddNewTab(gi.KiT_Frame, "2D View").(*gi.Frame)
-
- scfr.SetStretchMax()
- twofr.SetStretchMax()
+ scfr := tv.NewTab("3D View")
+ twofr := tv.NewTab("2D View")
//////////////////////////////////////////
// 3D Scene
vw.ConfigWorld()
- scvw := gi3d.AddNewSceneView(scfr, "sceneview")
- scvw.SetStretchMax()
- scvw.Config()
- sc := scvw.Scene()
-
- // first, add lights, set camera
- sc.BgColor.SetUInt8(230, 230, 255, 255) // sky blue-ish
- gi3d.AddNewAmbientLight(sc, "ambient", 0.3, gi3d.DirectSun)
-
- dir := gi3d.AddNewDirLight(sc, "dir", 1, gi3d.DirectSun)
- dir.Pos.Set(0, 2, 1) // default: 0,1,1 = above and behind us (we are at 0,0,X)
-
- vw.ConfigView3D(sc)
-
- // grtx := gi3d.AddNewTextureFile(sc, "ground", "ground.png")
- // wdtx := gi3d.AddNewTextureFile(sc, "wood", "wood.png")
+ vw.SceneView = xyzv.NewSceneView(scfr, "sceneview")
+ vw.SceneView.Config()
+ se := vw.SceneView.SceneXYZ()
+ vw.ConfigView3D(se)
- // floorp := gi3d.AddNewPlane(sc, "floor-plane", 100, 100)
- // floor := gi3d.AddNewSolid(sc, sc, "floor", floorp.Name())
- // floor.Pose.Pos.Set(0, -5, 0)
- // // floor.Mat.Color.SetName("tan")
- // // floor.Mat.Emissive.SetName("brown")
- // floor.Mat.Bright = 2 // .5 for wood / brown
- // floor.Mat.SetTexture(sc, grtx)
- // floor.Mat.Tiling.Reveat.Set(40, 40)
+ se.Camera.Pose.Pos = mat32.V3(0, 29, -4)
+ se.Camera.LookAt(mat32.V3(0, 4, -5), mat32.V3(0, 1, 0))
+ se.SaveCamera("2")
- // sc.Camera.Pose.Pos = mat32.Vec3{0, 100, 0}
- // sc.Camera.LookAt(mat32.Vec3{0, 5, 0}, mat32.Vec3Y)
- // sc.SaveCamera("3")
-
- sc.Camera.Pose.Pos = mat32.Vec3{0, 29, -4}
- sc.Camera.LookAt(mat32.Vec3{0, 4, -5}, mat32.Vec3Y)
- sc.SaveCamera("2")
-
- sc.Camera.Pose.Pos = mat32.Vec3{0, 17, 21}
- sc.Camera.LookAt(mat32.Vec3{0, 3.6, 0}, mat32.Vec3Y)
- sc.SaveCamera("1")
- sc.SaveCamera("default")
+ se.Camera.Pose.Pos = mat32.V3(0, 17, 21)
+ se.Camera.LookAt(mat32.V3(0, 3.6, 0), mat32.V3(0, 1, 0))
+ se.SaveCamera("1")
+ se.SaveCamera("default")
//////////////////////////////////////////
// 2D Scene
- twov := svg.AddNewEditor(twofr, "sceneview")
- twov.Fill = true
- twov.SetProp("background-color", "lightgrey")
- twov.SetStretchMax()
- twov.InitScale()
- twov.Trans.Set(620, 560)
- twov.Scale = 20
- twov.SetTransform()
+ twov := gi.NewSVG(twofr, "sceneview")
+ twov.Style(func(s *styles.Style) {
+ twov.SVG.Fill = true
+ twov.SVG.Norm = true
+ twov.SVG.Root.ViewBox.Size.Set(vw.Geom.Width+4, vw.Geom.Depth+4)
+ twov.SVG.Root.ViewBox.Min.Set(-0.5*(vw.Geom.Width+4), -0.5*(vw.Geom.Depth+4))
+ twov.SetReadOnly(false)
+ })
//////////////////////////////////////////
// Toolbar
split.SetSplits(.4, .6)
- tbar.AddAction(gi.ActOpts{Label: "Init", Icon: "reset", Tooltip: "Init env.", UpdateFunc: func(act *gi.Action) {
- act.SetActiveStateUpdt(!vw.IsRunning)
- }}, win.This(), func(recv, send ki.Ki, sig int64, data interface{}) {
- vw.Env.Init(0)
- vp.SetFullReRender()
- })
-
- tbar.AddAction(gi.ActOpts{Label: "Reset Trace", Icon: "reset", Tooltip: "Reset the trace of position, state etc, shown in the 2D View", UpdateFunc: func(act *gi.Action) {
- act.SetActiveStateUpdt(!vw.IsRunning)
- }}, win.This(), func(recv, send ki.Ki, sig int64, data interface{}) {
- vw.Trace = nil
- vp.SetFullReRender()
- })
-
- tbar.AddAction(gi.ActOpts{Label: "Left", Icon: "wedge-left", Tooltip: "Rotate Left", UpdateFunc: func(act *gi.Action) {
- act.SetActiveStateUpdt(!vw.IsRunning)
- }}, win.This(), func(recv, send ki.Ki, sig int64, data interface{}) {
- vw.Left()
- vp.SetFullReRender()
- })
-
- tbar.AddAction(gi.ActOpts{Label: "Right", Icon: "wedge-right", Tooltip: "Rotate Right", UpdateFunc: func(act *gi.Action) {
- act.SetActiveStateUpdt(!vw.IsRunning)
- }}, win.This(), func(recv, send ki.Ki, sig int64, data interface{}) {
- vw.Right()
- vp.SetFullReRender()
- })
-
- tbar.AddAction(gi.ActOpts{Label: "Forward", Icon: "wedge-up", Tooltip: "Step Forward", UpdateFunc: func(act *gi.Action) {
- act.SetActiveStateUpdt(!vw.IsRunning)
- }}, win.This(), func(recv, send ki.Ki, sig int64, data interface{}) {
- vw.Forward()
- vp.SetFullReRender()
- })
-
- // tbar.AddAction(gi.ActOpts{Label: "Backward", Icon: "wedge-down", Tooltip: "Step Backward", UpdateFunc: func(act *gi.Action) {
- // act.SetActiveStateUpdt(!vw.IsRunning)
- // }}, win.This(), func(recv, send ki.Ki, sig int64, data interface{}) {
- // vw.Backward()
- // vp.SetFullReRender()
- // })
-
- tbar.AddAction(gi.ActOpts{Label: "Consume", Icon: "svg", Tooltip: "Consume item -- only if directly in front", UpdateFunc: func(act *gi.Action) {
- act.SetActiveStateUpdt(!vw.IsRunning)
- }}, win.This(), func(recv, send ki.Ki, sig int64, data interface{}) {
- vw.Consume()
- vp.SetFullReRender()
+ b.AddAppBar(func(tb *gi.Toolbar) {
+ gi.NewButton(tb).SetText("Init").SetIcon(icons.ClearAll).
+ SetTooltip("Init env").
+ OnClick(func(e events.Event) {
+ vw.Env.Init(0)
+ })
+ gi.NewButton(tb).SetText("Reset Trace").SetIcon(icons.Undo).
+ SetTooltip("Reset trace of position, etc, shown in 2D View").
+ OnClick(func(e events.Event) {
+ vw.Trace = nil
+ })
+ giv.NewFuncButton(tb, vw.Forward).SetText("Fwd").SetIcon(icons.SkipNext)
+ giv.NewFuncButton(tb, vw.Left).SetText("Left").SetIcon(icons.KeyboardArrowLeft)
+ giv.NewFuncButton(tb, vw.Right).SetText("Right").SetIcon(icons.KeyboardArrowRight)
+		giv.NewFuncButton(tb, vw.Consume).SetText("Consume").SetIcon(icons.FoodBank)
+
+ gi.NewSeparator(tb)
})
-
- tbar.AddSeparator("sep-file")
-
- // tbar.AddAction(gi.ActOpts{Label: "Open Pats", Icon: "file-open", Tooltip: "Open bit patterns from .json file", UpdateFunc: func(act *gi.Action) {
- // act.SetActiveStateUpdt(!vw.IsRunning)
- // }}, win.This(), func(recv, send ki.Ki, sig int64, data interface{}) {
- // giv.CallMethod(vw.Env, "OpenPats", vp)
- // })
- //
- // tbar.AddAction(gi.ActOpts{Label: "Save Pats", Icon: "file-save", Tooltip: "Save bit patterns to .json file", UpdateFunc: func(act *gi.Action) {
- // act.SetActiveStateUpdt(!vw.IsRunning)
- // }}, win.This(), func(recv, send ki.Ki, sig int64, data interface{}) {
- // giv.CallMethod(vw.Env, "SavePats", vp)
- // })
-
- vp.UpdateEndNoSig(updt)
-
- // main menu
- appnm := gi.AppName()
- mmen := win.MainMenu
- mmen.ConfigMenus([]string{appnm, "File", "Edit", "Window"})
-
- amen := win.MainMenu.ChildByName(appnm, 0).(*gi.Action)
- amen.Menu.AddAppMenu(win)
-
- emen := win.MainMenu.ChildByName("Edit", 1).(*gi.Action)
- emen.Menu.AddCopyCutPaste(win)
-
- win.MainMenuUpdated()
- return win
+ return b
}
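With ConfigWorldGUI now returning a *gi.Body rather than a *gi.Window, the caller is responsible for showing it; a sketch assuming the gi v2 NewWindow/Run/Wait chain (hypothetical caller, not part of this diff):

	vw := &GUI{}
	b := vw.ConfigWorldGUI(ev) // ev is a previously configured *Env
	b.NewWindow().Run().Wait() // assumed gi v2 idiom for running a Body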
// ConfigWorld constructs a new virtual physics world for flat world
@@ -430,32 +327,32 @@ func (vw *GUI) ConfigWorld() {
func (vw *GUI) AddFloor(par *eve.Group, name string) *eve.Group {
ge := &vw.Geom
dp := ge.Depth + 3*ge.LengthScale
- rm := eve.AddNewGroup(par, name)
- floor := eve.AddNewBox(rm, "floor", mat32.Vec3{0, -ge.Thick / 2, -ge.Depth/2 - ge.LengthScale}, mat32.Vec3{ge.Width, ge.Thick, dp})
- floor.Color = "grey"
+ rm := eve.NewGroup(par, name)
+ eve.NewBox(rm, "floor").SetSize(mat32.V3(ge.Width, ge.Thick, dp)).
+ SetColor("grey").SetInitPos(mat32.V3(0, -ge.Thick/2, -ge.Depth/2-ge.LengthScale))
return rm
}
// ConfigArms adds all the arms
func (vw *GUI) ConfigArms(par *eve.Group) *eve.Group {
ev := vw.Env
- rm := eve.AddNewGroup(par, "arms")
+ rm := eve.NewGroup(par, "arms")
ge := &vw.Geom
exln := ge.LengthScale
halfarm := .5 * ge.ArmWidth
halfht := .5 * ge.Height
for i, arm := range ev.Config.Arms {
anm := fmt.Sprintf("arm_%d\n", i)
- agp := eve.AddNewGroup(rm, anm)
+ agp := eve.NewGroup(rm, anm)
x, _ := ge.Pos(i, 0)
ln := ge.LengthScale * float32(arm.Length)
halflen := .5*ln + exln
- // bwall := eve.AddNewBox(agp, "back-wall", mat32.Vec3{x, halfht, -ln - exln}, mat32.Vec3{ge.ArmWidth, ge.Height, ge.Thick})
- // bwall.Color = "blue"
- lwall := eve.AddNewBox(agp, "left-wall", mat32.Vec3{x - halfarm, halfht, -halflen}, mat32.Vec3{ge.Thick, ge.Height, ln})
- lwall.Color = "black" // "red"
- rwall := eve.AddNewBox(agp, "right-wall", mat32.Vec3{x + halfarm, halfht, -halflen}, mat32.Vec3{ge.Thick, ge.Height, ln})
- rwall.Color = "black" // "green"
+
+ eve.NewBox(agp, "left-wall").SetSize(mat32.V3(ge.Thick, ge.Height, ln)).
+ SetColor("black").SetInitPos(mat32.V3(x-halfarm, halfht, -halflen))
+
+ eve.NewBox(agp, "right-wall").SetSize(mat32.V3(ge.Thick, ge.Height, ln)).
+ SetColor("black").SetInitPos(mat32.V3(x+halfarm, halfht, -halflen))
}
return rm
}
@@ -464,7 +361,7 @@ func (vw *GUI) ConfigArms(par *eve.Group) *eve.Group {
func (vw *GUI) ConfigStims(par *eve.Group, name string, width, height float32) *eve.Group {
ev := vw.Env
ge := &vw.Geom
- stms := eve.AddNewGroup(par, name)
+ stms := eve.NewGroup(par, name)
exln := ge.LengthScale
// halfarm := .5 * ge.ArmWidth
usHt := ge.Height
@@ -476,10 +373,12 @@ func (vw *GUI) ConfigStims(par *eve.Group, name string, width, height float32) *
ln := ge.LengthScale * float32(arm.Length)
usnm := fmt.Sprintf("us_%d\n", i)
csnm := fmt.Sprintf("cs_%d\n", i)
- uso := eve.AddNewBox(stms, usnm, mat32.Vec3{float32(x), 0.5 * usHt, -ln - exln}, mat32.Vec3{ge.ArmWidth, usHt, usDp})
- uso.Color = vw.MatColors[arm.US]
- cso := eve.AddNewBox(stms, csnm, mat32.Vec3{float32(x), usHt + .5*csHt, -ln - 2*exln}, mat32.Vec3{ge.ArmWidth, csHt, ge.Thick})
- cso.Color = vw.MatColors[arm.CS]
+
+ eve.NewBox(stms, usnm).SetSize(mat32.V3(ge.ArmWidth, usHt, usDp)).
+ SetColor(vw.MatColors[arm.US]).SetInitPos(mat32.V3(x, 0.5*usHt, -ln-1.1*exln))
+
+ eve.NewBox(stms, csnm).SetSize(mat32.V3(ge.ArmWidth, csHt, ge.Thick)).
+ SetColor(vw.MatColors[arm.CS]).SetInitPos(mat32.V3(x, usHt+0.5*csHt, -ln-2*exln))
}
return stms
}
@@ -507,40 +406,46 @@ func (vw *GUI) UpdateStims() {
// ConfigEmery constructs a new Emery virtual hamster
func (vw *GUI) ConfigEmery(par *eve.Group, length float32) *eve.Group {
- emr := eve.AddNewGroup(par, "emery")
+ emr := eve.NewGroup(par, "emery")
height := length / 2
width := height
- body := eve.AddNewBox(emr, "body", mat32.Vec3{0, height / 2, 0}, mat32.Vec3{width, height, length})
- // body := eve.AddNewCapsule(emr, "body", mat32.Vec3{0, height / 2, 0}, height, width/2)
- // body := eve.AddNewCylinder(emr, "body", mat32.Vec3{0, height / 2, 0}, height, width/2)
- body.Color = "purple"
- body.SetDynamic()
+
+ eve.NewBox(emr, "body").SetSize(mat32.V3(width, height, length)).
+ SetColor("purple").SetDynamic().
+ SetInitPos(mat32.V3(0, height/2, 0))
headsz := height * 0.75
hhsz := .5 * headsz
- hgp := eve.AddNewGroup(emr, "head")
- hgp.Initial.Pos = mat32.Vec3{0, hhsz, -(length/2 + hhsz)}
+ hgp := eve.NewGroup(emr, "head").SetInitPos(mat32.V3(0, hhsz, -(length/2 + hhsz)))
+
+ eve.NewBox(hgp, "head").SetSize(mat32.V3(headsz, headsz, headsz)).
+ SetColor("tan").SetDynamic().SetInitPos(mat32.V3(0, 0, 0))
- head := eve.AddNewBox(hgp, "head", mat32.Vec3{0, 0, 0}, mat32.Vec3{headsz, headsz, headsz})
- head.Color = "tan"
- head.SetDynamic()
eyesz := headsz * .2
- eyel := eve.AddNewBox(hgp, "eye-l", mat32.Vec3{-hhsz * .6, headsz * .1, -(hhsz + eyesz*.3)}, mat32.Vec3{eyesz, eyesz * .5, eyesz * .2})
- eyel.Color = "green"
- eyel.SetDynamic()
+
+ eve.NewBox(hgp, "eye-l").SetSize(mat32.V3(eyesz, eyesz*.5, eyesz*.2)).
+ SetColor("green").SetDynamic().
+ SetInitPos(mat32.V3(-hhsz*.6, headsz*.1, -(hhsz + eyesz*.3)))
+
// note: centering this in head for now to get straight-on view
- eyer := eve.AddNewBox(hgp, "eye-r", mat32.Vec3{0, headsz * .1, -(hhsz + eyesz*.3)}, mat32.Vec3{eyesz, eyesz * .5, eyesz * .2})
- eyer.Color = "green"
- eyer.Initial.Quat.SetFromEuler(mat32.Vec3{-0.02, 0, 0}) // look a bit down
- eyer.SetDynamic()
+ eve.NewBox(hgp, "eye-r").SetSize(mat32.V3(eyesz, eyesz*.5, eyesz*.2)).
+ SetColor("green").SetDynamic().
+ SetInitPos(mat32.V3(0, headsz*.1, -(hhsz + eyesz*.3)))
+
return emr
}
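For reference, the pattern used throughout the Config methods above is the v2 builder style: eve constructors take only a parent and a name, and size, color, dynamics, and initial pose are applied via chained setters. A minimal sketch using only the calls that appear in this diff (par is a parent *eve.Group, as in the methods above):

	// sketch: a two-part dynamic object built with the chained-setter API
	obj := eve.NewGroup(par, "obj")
	eve.NewBox(obj, "base").SetSize(mat32.V3(1, 0.5, 1)).
		SetColor("gray").SetDynamic().
		SetInitPos(mat32.V3(0, 0.25, 0))
	eve.NewBox(obj, "top").SetSize(mat32.V3(0.5, 0.5, 0.5)).
		SetColor("red").SetDynamic().
		SetInitPos(mat32.V3(0, 0.75, 0))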
// ConfigView3D makes the 3D view
-func (vw *GUI) ConfigView3D(sc *gi3d.Scene) {
+func (vw *GUI) ConfigView3D(se *xyz.Scene) {
+ se.BackgroundColor = colors.FromRGB(230, 230, 255) // sky blue-ish
+ xyz.NewAmbientLight(se, "ambient", 0.3, xyz.DirectSun)
+
+ dir := xyz.NewDirLight(se, "dir", 1, xyz.DirectSun)
+ dir.Pos.Set(0, 2, 1) // default: 0,1,1 = above and behind us (we are at 0,0,X)
+
// sc.MultiSample = 1 // we are using depth grab so we need this = 1
- wgp := gi3d.AddNewGroup(sc, sc, "world")
- vw.View3D = evev.NewView(vw.World, sc, wgp)
+ wgp := xyz.NewGroup(se, "world")
+ vw.View3D = evev.NewView(vw.World, se, wgp)
vw.View3D.InitLibrary() // this makes a basic library based on body shapes, sizes
// at this point the library can be updated to configure custom visualizations
// for any of the named bodies.
@@ -600,7 +505,7 @@ func (vw *GUI) GrabEyeImg() {
}
img, err := vw.View3D.Image()
if err == nil && img != nil {
- vw.EyeRFullImg.SetImage(img, 0, 0)
+ vw.EyeRFullImg.SetImage(img)
} else {
log.Println(err)
}
@@ -613,7 +518,7 @@ func (vw *GUI) GrabEyeImg() {
}
img, err = vw.View3D.Image()
if err == nil && img != nil {
- vw.EyeRFovImg.SetImage(img, 0, 0)
+ vw.EyeRFovImg.SetImage(img)
} else {
log.Println(err)
}
@@ -623,8 +528,6 @@ func (vw *GUI) GrabEyeImg() {
// vw.DepthVals = depth
// vw.ViewDepth(depth)
// }
- vw.View3D.Scene.Render2D()
- vw.View3D.Scene.DirectWinUpload()
}
// ViewDepth updates depth bitmap with depth data
@@ -632,7 +535,6 @@ func (vw *GUI) ViewDepth(depth []float32) {
cmap := colormap.AvailMaps[string(vw.DepthMap)]
vw.DepthImg.SetSize(vw.Camera.Size)
evev.DepthImage(vw.DepthImg.Pixels, depth, cmap, &vw.Camera)
- vw.DepthImg.UpdateSig()
}
func (vw *GUI) ConfigWorldView(tg *etview.TensorGrid) {
@@ -644,23 +546,22 @@ func (vw *GUI) ConfigWorldView(tg *etview.TensorGrid) {
cm.Name = cnm
cm.Indexed = true
nc := ev.Config.NCSs
- cm.Colors = make([]gist.Color, nc)
- cm.NoColor = gist.Black
+ cm.Colors = make([]color.RGBA, nc)
+ cm.NoColor = colors.Black
for i, cnm := range vw.MatColors {
- cm.Colors[i].SetString(cnm, nil)
+ cm.Colors[i] = grr.Log1(colors.FromString(cnm))
}
colormap.AvailMaps[cnm] = cm
}
tg.Disp.Defaults()
tg.Disp.ColorMap = giv.ColorMapName(cnm)
tg.Disp.GridFill = 1
- tg.SetStretchMax()
}
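The colormap rebuild above also shows the v2 error-handling idiom: colors.FromString returns a value and an error, and grr.Log1 logs any error while passing the value through, so assignments stay on one line. A hedged sketch of just that idiom (only the two calls used above are assumed):

	// parse a named color; on failure the error is logged and the
	// zero color.RGBA value comes back
	c := grr.Log1(colors.FromString("tan"))
	_ = c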
func (vw *GUI) UpdateWorld(ctx *axon.Context, ev *Env, net *axon.Network, state TraceStates) {
vw.State = state
vw.Trace.AddRec(ctx, uint32(ev.Di), ev, net, state)
- if vw.WorldWin == nil || !vw.Disp {
+ if vw.SceneView == nil || !vw.Disp {
return
}
@@ -668,7 +569,7 @@ func (vw *GUI) UpdateWorld(ctx *axon.Context, ev *Env, net *axon.Network, state
vw.Env = ev
vw.EnvName = ev.Nm
vw.Trace = nil
- vw.StructView.UpdateSig()
+ vw.StructView.UpdateFields()
}
vw.UpdateWorldGUI()
@@ -683,22 +584,31 @@ func (vw *GUI) SetEmeryPose() {
}
func (vw *GUI) UpdateWorldGUI() {
- if vw.WorldWin == nil || !vw.Disp {
+ if vw.SceneView == nil || !vw.Disp {
return
}
+ updt := vw.SceneView.Sc.UpdateStartAsync()
+ defer vw.SceneView.Sc.UpdateEndAsyncRender(updt)
+
// update state:
vw.SetEmeryPose()
vw.UpdateStims()
vw.World.WorldRelToAbs()
vw.View3D.UpdatePose()
vw.View3D.UpdateBodyView([]string{"body"})
+ // vw.View2D.UpdatePose()
// update views:
vw.GrabEyeImg()
- vw.View3D.Scene.UpdateSig()
+ if vw.SceneView.IsVisible() {
+ vw.SceneView.SetNeedsRender(true)
+ }
+ // if vw.Scene2D.IsVisible() {
+ // vw.Scene2D.SetNeedsRender(true)
+ // }
}
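UpdateWorldGUI above follows the v2 protocol for mutating a scene from outside the GUI event loop: bracket the mutation with the async update calls, then request a render only if the view is visible. A condensed sketch of that bracket (mutateScene is a hypothetical helper; the method names are exactly those used above):

	func (vw *GUI) mutateScene() { // hypothetical helper
		updt := vw.SceneView.Sc.UpdateStartAsync()
		defer vw.SceneView.Sc.UpdateEndAsyncRender(updt)
		// ... change world / scene state here ...
		if vw.SceneView.IsVisible() {
			vw.SceneView.SetNeedsRender(true)
		}
	}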
-func (vw *GUI) Left() {
+func (vw *GUI) Left() { //gti:add
ev := vw.Env
ev.InstinctAct(ev.JustGated, ev.HasGated)
ev.Action("Left", nil)
@@ -706,7 +616,7 @@ func (vw *GUI) Left() {
vw.UpdateWorldGUI()
}
-func (vw *GUI) Right() {
+func (vw *GUI) Right() { //gti:add
ev := vw.Env
ev.InstinctAct(ev.JustGated, ev.HasGated)
ev.Action("Right", nil)
@@ -714,7 +624,7 @@ func (vw *GUI) Right() {
vw.UpdateWorldGUI()
}
-func (vw *GUI) Forward() {
+func (vw *GUI) Forward() { //gti:add
ev := vw.Env
ev.InstinctAct(ev.JustGated, ev.HasGated)
ev.Action("Forward", nil)
@@ -722,7 +632,7 @@ func (vw *GUI) Forward() {
vw.UpdateWorldGUI()
}
-func (vw *GUI) Consume() {
+func (vw *GUI) Consume() { //gti:add
ev := vw.Env
ev.InstinctAct(ev.JustGated, ev.HasGated)
ev.Action("Consume", nil)
diff --git a/examples/boa/armaze/maze.go b/examples/boa/armaze/maze.go
index ad74a69d9..945c14cb6 100644
--- a/examples/boa/armaze/maze.go
+++ b/examples/boa/armaze/maze.go
@@ -13,21 +13,22 @@
// only allow switching at the start.
package armaze
+//go:generate goki generate -add-types
+
import (
"log"
"github.com/emer/axon/axon"
- "github.com/emer/emergent/econfig"
- "github.com/emer/emergent/env"
- "github.com/emer/emergent/erand"
- "github.com/emer/etable/etensor"
- "github.com/emer/etable/minmax"
- "github.com/goki/ki/kit"
+ "github.com/emer/emergent/v2/econfig"
+ "github.com/emer/emergent/v2/env"
+ "github.com/emer/emergent/v2/erand"
+ "goki.dev/etable/v2/etensor"
+ "goki.dev/etable/v2/minmax"
)
// Actions is a list of mutually exclusive states
// for tracing the behavior and internal state of Emery
-type Actions int
+type Actions int32 //enums:enum
const (
Forward Actions = iota
@@ -35,13 +36,8 @@ const (
Right
Consume
None
- ActionsN
)
-//go:generate stringer -type=Actions
-
-var KiT_Actions = kit.Enums.AddEnum(ActionsN, kit.NotBitFlag, nil)
-
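With the //enums:enum directive replacing stringer, the String/SetString methods come from goki's enumgen rather than being hand-registered via kit.Enums. A short sketch of the generated round-trip, assuming the enumgen-standard signatures (SetString returning an error), consistent with the act.SetString call later in this diff:

	// round-trip an Actions value through its generated string form
	var act Actions
	if err := act.SetString("Left"); err != nil { // error on unknown name
		log.Println(err)
	}
	name := act.String() // "Left"
	_ = name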
// General note on US / Drive indexes:
// The env does _not_ represent any built-in drives or USs (curiosity, effort, urgency)
// 0 = start of the sim-specific USs and Drives
@@ -54,85 +50,85 @@ var KiT_Actions = kit.Enums.AddEnum(ActionsN, kit.NotBitFlag, nil)
type Env struct {
// name of environment -- Train or Test
- Nm string `desc:"name of environment -- Train or Test"`
+ Nm string
// our data parallel index
- Di int `inactive:"+" desc:"our data parallel index"`
+ Di int `inactive:"+"`
// configuration parameters
- Config Config `desc:"configuration parameters"`
+ Config Config
// current drive strength for each of Config.NDrives in normalized 0-1 units of each drive: 0 = first sim drive, not curiosity
- Drives []float32 `desc:"current drive strength for each of Config.NDrives in normalized 0-1 units of each drive: 0 = first sim drive, not curiosity"`
+ Drives []float32
// arm-wise location: either facing (Pos=0) or in (Pos > 0)
- Arm int `inactive:"+" desc:"arm-wise location: either facing (Pos=0) or in (Pos > 0)"`
+ Arm int `inactive:"+"`
// current position in the Arm: 0 = at start looking in, otherwise at given distance into the arm
- Pos int `inactive:"+" desc:"current position in the Arm: 0 = at start looking in, otherwise at given distance into the arm"`
+ Pos int `inactive:"+"`
// current integer time step since last NewStart
- Tick int `inactive:"+" desc:"current integer time step since last NewStart"`
+ Tick int `inactive:"+"`
// current target drive, in paradigms where that is used (e.g., Approach)
- TrgDrive int `inactive:"+" desc:"current target drive, in paradigms where that is used (e.g., Approach)"`
+ TrgDrive int `inactive:"+"`
// Current US being consumed -- is -1 unless being consumed
- USConsumed int `inactive:"+" desc:"Current US being consumed -- is -1 unless being consumed"`
+ USConsumed int `inactive:"+"`
// reward or punishment value generated by the current US being consumed -- just the Magnitude of the US -- does NOT include any modulation by Drive
- USValue float32 `inactive:"+" desc:"reward or punishment value generated by the current US being consumed -- just the Magnitude of the US -- does NOT include any modulation by Drive"`
+ USValue float32 `inactive:"+"`
// just finished consuming a US -- ready to start doing something new
- JustConsumed bool `inactive:"+" desc:"just finished consuming a US -- ready to start doing something new"`
+ JustConsumed bool `inactive:"+"`
// arm(s) with maximum Drive * Mag * Prob US outcomes
- ArmsMaxValue []int `inactive:"+" desc:"arm(s) with maximum Drive * Mag * Prob US outcomes"`
+ ArmsMaxValue []int `inactive:"+"`
// maximum value for ArmsMaxValue arms
- MaxValue float32 `inactive:"+" desc:"maximum value for ArmsMaxValue arms"`
+ MaxValue float32 `inactive:"+"`
// arm(s) with maximum Value outcome discounted by Effort
- ArmsMaxUtil []int `inactive:"+" desc:"arm(s) with maximum Value outcome discounted by Effort"`
+ ArmsMaxUtil []int `inactive:"+"`
// maximum utility for ArmsMaxUtil arms
- MaxUtil float32 `inactive:"+" desc:"maximum utility for ArmsMaxUtil arms"`
+ MaxUtil float32 `inactive:"+"`
// arm(s) with negative US outcomes
- ArmsNeg []int `inactive:"+" desc:"arm(s) with negative US outcomes"`
+ ArmsNeg []int `inactive:"+"`
// last action taken
- LastAct Actions `inactive:"+" desc:"last action taken"`
+ LastAct Actions `inactive:"+"`
// effort on current trial
- Effort float32 `inactive:"+" desc:"effort on current trial"`
+ Effort float32 `inactive:"+"`
// last CS seen
- LastCS int `inactive:"+" desc:"last CS seen"`
+ LastCS int `inactive:"+"`
// last US -- previous trial
- LastUS int `inactive:"+" desc:"last US -- previous trial"`
+ LastUS int `inactive:"+"`
// true if looking at correct CS for first time
- ShouldGate bool `inactive:"+" desc:"true if looking at correct CS for first time"`
+ ShouldGate bool `inactive:"+"`
	// just gated on this trial -- set by sim -- used for instinct
- JustGated bool `inactive:"+" desc:"just gated on this trial -- set by sim-- used for instinct"`
+ JustGated bool `inactive:"+"`
// has gated at some point during sequence -- set by sim -- used for instinct
- HasGated bool `inactive:"+" desc:"has gated at some point during sequence -- set by sim -- used for instinct"`
+ HasGated bool `inactive:"+"`
// named states -- e.g., USs, CSs, etc
- States map[string]*etensor.Float32 `desc:"named states -- e.g., USs, CSs, etc"`
+ States map[string]*etensor.Float32
// maximum length of any arm
- MaxLength int `inactive:"+" desc:"maximum length of any arm"`
+ MaxLength int `inactive:"+"`
- // [view: -] random number generator for the env -- all random calls must use this
- Rand erand.SysRand `view:"-" desc:"random number generator for the env -- all random calls must use this"`
+ // random number generator for the env -- all random calls must use this
+ Rand erand.SysRand `view:"-"`
// random seed
- RndSeed int64 `inactive:"+" desc:"random seed"`
+ RndSeed int64 `inactive:"+"`
}
const noUS = -1
@@ -428,7 +424,7 @@ func (ev *Env) DecodeLocalist(vt *etensor.Float32) int {
// update the state accordingly.
func (ev *Env) Action(action string, nop etensor.Tensor) {
act := None
- act.FromString(action)
+ act.SetString(action)
ev.LastAct = act
ev.RenderAction(act) // plus phase input is action
// note: action not taken via TakeAct until start of trial in Step()
diff --git a/examples/boa/armaze/paradigms.go b/examples/boa/armaze/paradigms.go
index 41e1ee3aa..5c104eb94 100644
--- a/examples/boa/armaze/paradigms.go
+++ b/examples/boa/armaze/paradigms.go
@@ -4,24 +4,16 @@
package armaze
-import "github.com/goki/ki/kit"
-
// Paradigms is a list of experimental paradigms that
// govern the configuration and updating of environment
// state over time and the appropriate evaluation criteria.
-type Paradigms int
+type Paradigms int32 //enums:enum
const (
	// Approach is a basic case where one Drive (chosen at random each trial) is fully active and others are at InactiveDrives levels -- goal is to approach the CS associated with the Drive-satisfying US, and avoid any negative USs. USs are always placed in the same Arms (NArms must be >= NUSs -- any additional Arms are filled at random with additional US copies)
Approach Paradigms = iota
-
- ParadigmsN
)
-//go:generate stringer -type=Paradigms
-
-var KiT_Paradigms = kit.Enums.AddEnum(ParadigmsN, kit.NotBitFlag, nil)
-
///////////////////////////////////////////////
// Approach
diff --git a/examples/boa/armaze/paradigms_string.go b/examples/boa/armaze/paradigms_string.go
deleted file mode 100644
index f9db013a1..000000000
--- a/examples/boa/armaze/paradigms_string.go
+++ /dev/null
@@ -1,51 +0,0 @@
-// Code generated by "stringer -type=Paradigms"; DO NOT EDIT.
-
-package armaze
-
-import (
- "errors"
- "strconv"
-)
-
-var _ = errors.New("dummy error")
-
-func _() {
- // An "invalid array index" compiler error signifies that the constant values have changed.
- // Re-run the stringer command to generate them again.
- var x [1]struct{}
- _ = x[Approach-0]
- _ = x[ParadigmsN-1]
-}
-
-const _Paradigms_name = "ApproachParadigmsN"
-
-var _Paradigms_index = [...]uint8{0, 8, 18}
-
-func (i Paradigms) String() string {
- if i < 0 || i >= Paradigms(len(_Paradigms_index)-1) {
- return "Paradigms(" + strconv.FormatInt(int64(i), 10) + ")"
- }
- return _Paradigms_name[_Paradigms_index[i]:_Paradigms_index[i+1]]
-}
-
-func (i *Paradigms) FromString(s string) error {
- for j := 0; j < len(_Paradigms_index)-1; j++ {
- if s == _Paradigms_name[_Paradigms_index[j]:_Paradigms_index[j+1]] {
- *i = Paradigms(j)
- return nil
- }
- }
- return errors.New("String: " + s + " is not a valid option for type: Paradigms")
-}
-
-var _Paradigms_descMap = map[Paradigms]string{
- 0: `Approach is a basic case where one Drive (chosen at random each trial) is fully active and others are at InactiveDrives levels -- goal is to approach the CS associated with the Drive-satisfying US, and avoid negative any negative USs. USs are always placed in same Arms (NArms must be >= NUSs -- any additional Arms are filled at random with additional US copies)`,
- 1: ``,
-}
-
-func (i Paradigms) Desc() string {
- if str, ok := _Paradigms_descMap[i]; ok {
- return str
- }
- return "Paradigms(" + strconv.FormatInt(int64(i), 10) + ")"
-}
diff --git a/examples/boa/armaze/trace.go b/examples/boa/armaze/trace.go
index e0cb69320..16abdce7f 100644
--- a/examples/boa/armaze/trace.go
+++ b/examples/boa/armaze/trace.go
@@ -6,12 +6,11 @@ package armaze
import (
"github.com/emer/axon/axon"
- "github.com/goki/ki/kit"
)
// TraceStates is a list of mutually exclusive states
// for tracing the behavior and internal state of Emery
-type TraceStates int
+type TraceStates int32 //enums:enum
const (
// Searching is not yet goal engaged, looking for a goal
@@ -37,34 +36,28 @@ const (
// Bumping is bumping into a wall
TrBumping
-
- TraceStatesN
)
-//go:generate stringer -type=TraceStates
-
-var KiT_TraceStates = kit.Enums.AddEnum(TraceStatesN, kit.NotBitFlag, nil)
-
// TraceRec holds record of info for tracing behavior, state
type TraceRec struct {
// absolute time
- Time float32 `desc:"absolute time"`
+ Time float32
// trial counter
- Trial int `desc:"trial counter"`
+ Trial int
// current arm
- Arm int `desc:"current arm"`
+ Arm int
// position in arm
- Pos int `desc:"position in arm"`
+ Pos int
// behavioral / internal state summary
- State TraceStates `desc:"behavioral / internal state summary"`
+ State TraceStates
// NDrives current drive state level
- Drives []float32 `desc:"NDrives current drive state level"`
+ Drives []float32
}
// StateTrace holds trace records
diff --git a/examples/boa/armaze/tracestates_string.go b/examples/boa/armaze/tracestates_string.go
deleted file mode 100644
index a0cebfb85..000000000
--- a/examples/boa/armaze/tracestates_string.go
+++ /dev/null
@@ -1,65 +0,0 @@
-// Code generated by "stringer -type=TraceStates"; DO NOT EDIT.
-
-package armaze
-
-import (
- "errors"
- "strconv"
-)
-
-var _ = errors.New("dummy error")
-
-func _() {
- // An "invalid array index" compiler error signifies that the constant values have changed.
- // Re-run the stringer command to generate them again.
- var x [1]struct{}
- _ = x[TrSearching-0]
- _ = x[TrDeciding-1]
- _ = x[TrJustEngaged-2]
- _ = x[TrApproaching-3]
- _ = x[TrConsuming-4]
- _ = x[TrRewarded-5]
- _ = x[TrGiveUp-6]
- _ = x[TrBumping-7]
- _ = x[TraceStatesN-8]
-}
-
-const _TraceStates_name = "TrSearchingTrDecidingTrJustEngagedTrApproachingTrConsumingTrRewardedTrGiveUpTrBumpingTraceStatesN"
-
-var _TraceStates_index = [...]uint8{0, 11, 21, 34, 47, 58, 68, 76, 85, 97}
-
-func (i TraceStates) String() string {
- if i < 0 || i >= TraceStates(len(_TraceStates_index)-1) {
- return "TraceStates(" + strconv.FormatInt(int64(i), 10) + ")"
- }
- return _TraceStates_name[_TraceStates_index[i]:_TraceStates_index[i+1]]
-}
-
-func (i *TraceStates) FromString(s string) error {
- for j := 0; j < len(_TraceStates_index)-1; j++ {
- if s == _TraceStates_name[_TraceStates_index[j]:_TraceStates_index[j+1]] {
- *i = TraceStates(j)
- return nil
- }
- }
- return errors.New("String: " + s + " is not a valid option for type: TraceStates")
-}
-
-var _TraceStates_descMap = map[TraceStates]string{
- 0: `Searching is not yet goal engaged, looking for a goal`,
- 1: `Deciding is having some partial gating but not in time for action`,
- 2: `JustEngaged means just decided to engage in a goal`,
- 3: `Approaching is goal engaged, approaching the goal`,
- 4: `Consuming is consuming the US, first step (prior to getting reward, step1)`,
- 5: `Rewarded is just received reward from a US`,
- 6: `GiveUp is when goal is abandoned`,
- 7: `Bumping is bumping into a wall`,
- 8: ``,
-}
-
-func (i TraceStates) Desc() string {
- if str, ok := _TraceStates_descMap[i]; ok {
- return str
- }
- return "TraceStates(" + strconv.FormatInt(int64(i), 10) + ")"
-}
diff --git a/examples/boa/boa.go b/examples/boa/boa.go
index 4dcdf6958..f888e64ad 100644
--- a/examples/boa/boa.go
+++ b/examples/boa/boa.go
@@ -7,6 +7,8 @@ boa: This project tests BG, OFC & ACC learning in a CS-driven approach task.
*/
package main
+//go:generate goki generate -add-types
+
import (
"fmt"
"log"
@@ -15,29 +17,29 @@ import (
"github.com/emer/axon/axon"
"github.com/emer/axon/examples/boa/armaze"
- "github.com/emer/emergent/econfig"
- "github.com/emer/emergent/egui"
- "github.com/emer/emergent/elog"
- "github.com/emer/emergent/emer"
- "github.com/emer/emergent/env"
- "github.com/emer/emergent/erand"
- "github.com/emer/emergent/estats"
- "github.com/emer/emergent/etime"
- "github.com/emer/emergent/looper"
- "github.com/emer/emergent/netview"
- "github.com/emer/emergent/params"
- "github.com/emer/emergent/prjn"
- "github.com/emer/emergent/timer"
- "github.com/emer/empi/mpi"
- "github.com/emer/etable/agg"
- "github.com/emer/etable/etable"
- "github.com/emer/etable/etensor"
- "github.com/emer/etable/minmax"
- "github.com/emer/etable/split"
- "github.com/goki/gi/gi"
- "github.com/goki/gi/gimain"
- "github.com/goki/ki/bools"
- "github.com/goki/mat32"
+ "github.com/emer/emergent/v2/econfig"
+ "github.com/emer/emergent/v2/egui"
+ "github.com/emer/emergent/v2/elog"
+ "github.com/emer/emergent/v2/emer"
+ "github.com/emer/emergent/v2/env"
+ "github.com/emer/emergent/v2/erand"
+ "github.com/emer/emergent/v2/estats"
+ "github.com/emer/emergent/v2/etime"
+ "github.com/emer/emergent/v2/looper"
+ "github.com/emer/emergent/v2/netview"
+ "github.com/emer/emergent/v2/params"
+ "github.com/emer/emergent/v2/prjn"
+ "github.com/emer/emergent/v2/timer"
+ "github.com/emer/empi/v2/mpi"
+ "goki.dev/etable/v2/agg"
+ "goki.dev/etable/v2/etable"
+ "goki.dev/etable/v2/etensor"
+ "goki.dev/etable/v2/minmax"
+ "goki.dev/etable/v2/split"
+ "goki.dev/gi/v2/gi"
+ "goki.dev/gi/v2/gimain"
+ "goki.dev/glop/num"
+ "goki.dev/mat32/v2"
)
func main() {
@@ -45,7 +47,7 @@ func main() {
sim.New()
sim.ConfigAll()
if sim.Config.GUI {
- gimain.Main(sim.RunGUI)
+ gimain.Run(sim.RunGUI)
} else {
sim.RunNoGUI()
}
@@ -61,49 +63,49 @@ func main() {
type Sim struct {
// simulation configuration parameters -- set by .toml config file and / or args
- Config Config `desc:"simulation configuration parameters -- set by .toml config file and / or args"`
+ Config Config
- // [view: no-inline] the network -- click to view / edit parameters for layers, prjns, etc
- Net *axon.Network `view:"no-inline" desc:"the network -- click to view / edit parameters for layers, prjns, etc"`
+ // the network -- click to view / edit parameters for layers, prjns, etc
+ Net *axon.Network `view:"no-inline"`
// if true, stop running at end of a sequence (for NetView Di data parallel index)
- StopOnSeq bool `desc:"if true, stop running at end of a sequence (for NetView Di data parallel index)"`
+ StopOnSeq bool
// if true, stop running when an error programmed into the code occurs
- StopOnErr bool `desc:"if true, stop running when an error programmed into the code occurs"`
+ StopOnErr bool
- // [view: inline] network parameter management
- Params emer.NetParams `view:"inline" desc:"network parameter management"`
+ // network parameter management
+ Params emer.NetParams `view:"inline"`
- // [view: no-inline] contains looper control loops for running sim
- Loops *looper.Manager `view:"no-inline" desc:"contains looper control loops for running sim"`
+ // contains looper control loops for running sim
+ Loops *looper.Manager `view:"no-inline"`
// contains computed statistic values
- Stats estats.Stats `desc:"contains computed statistic values"`
+ Stats estats.Stats
	// Contains all the logs and information about the logs.
- Logs elog.Logs `desc:"Contains all the logs and information about the logs.'"`
+ Logs elog.Logs
- // [view: no-inline] Environments
- Envs env.Envs `view:"no-inline" desc:"Environments"`
+ // Environments
+ Envs env.Envs `view:"no-inline"`
// axon timing parameters and state
- Context axon.Context `desc:"axon timing parameters and state"`
+ Context axon.Context
- // [view: inline] netview update parameters
- ViewUpdt netview.ViewUpdt `view:"inline" desc:"netview update parameters"`
+ // netview update parameters
+ ViewUpdt netview.ViewUpdt `view:"inline"`
- // [view: -] manages all the gui elements
- GUI egui.GUI `view:"-" desc:"manages all the gui elements"`
+ // manages all the gui elements
+ GUI egui.GUI `view:"-"`
- // [view: -] gui for viewing env
- EnvGUI *armaze.GUI `view:"-" desc:"gui for viewing env"`
+ // gui for viewing env
+ EnvGUI *armaze.GUI `view:"-"`
- // [view: -] a list of random seeds to use for each run
- RndSeeds erand.Seeds `view:"-" desc:"a list of random seeds to use for each run"`
+ // a list of random seeds to use for each run
+ RndSeeds erand.Seeds `view:"-"`
- // [view: -] testing data, from -test arg
- TestData map[string]float32 `view:"-" desc:"testing data, from -test arg"`
+ // testing data, from -test arg
+ TestData map[string]float32 `view:"-"`
}
// New creates new blank elements and initializes defaults
@@ -508,11 +510,11 @@ func (ss *Sim) TakeAction(net *axon.Network) {
ev.InstinctAct(justGated, hasGated)
csGated := (justGated && !pv.HasPosUS(ctx, diu))
deciding := !csGated && !hasGated && (axon.GlbV(ctx, diu, axon.GvACh) > threshold && mtxLy.Pool(0, diu).AvgMax.SpkMax.Cycle.Max > threshold) // give it time
- wasDeciding := bools.FromFloat32(ss.Stats.Float32Di("Deciding", di))
+ wasDeciding := num.ToBool(ss.Stats.Float32Di("Deciding", di))
if wasDeciding {
deciding = false // can't keep deciding!
}
- ss.Stats.SetFloat32Di("Deciding", di, bools.ToFloat32(deciding))
+ ss.Stats.SetFloat32Di("Deciding", di, num.FromBool[float32](deciding))
trSt := armaze.TrSearching
if hasGated {
@@ -607,7 +609,7 @@ func (ss *Sim) ApplyInputs() {
}
ev.Step()
if ev.Tick == 0 {
- ss.Stats.SetFloat32Di("CortexDriving", int(di), bools.ToFloat32(erand.BoolP32(ss.Config.Env.PctCortex, -1)))
+ ss.Stats.SetFloat32Di("CortexDriving", int(di), num.FromBool[float32](erand.BoolP32(ss.Config.Env.PctCortex, -1)))
ev.ExValueUtil(&ss.Net.PVLV, ctx)
}
for _, lnm := range lays {
@@ -880,9 +882,9 @@ func (ss *Sim) GatedStats(di int) {
ss.Stats.SetString("Debug", ss.Stats.StringDi("Debug", di))
ss.ActionStatsDi(di)
- ss.Stats.SetFloat32("JustGated", bools.ToFloat32(justGated))
- ss.Stats.SetFloat32("Should", bools.ToFloat32(ev.ShouldGate))
- ss.Stats.SetFloat32("HasGated", bools.ToFloat32(hasGated))
+ ss.Stats.SetFloat32("JustGated", num.FromBool[float32](justGated))
+ ss.Stats.SetFloat32("Should", num.FromBool[float32](ev.ShouldGate))
+ ss.Stats.SetFloat32("HasGated", num.FromBool[float32](hasGated))
ss.Stats.SetFloat32("GateUS", nan)
ss.Stats.SetFloat32("GateCS", nan)
ss.Stats.SetFloat32("GatedEarly", nan)
@@ -893,19 +895,19 @@ func (ss *Sim) GatedStats(di int) {
ss.Stats.SetFloat32("AChShouldnt", nan)
hasPos := pv.HasPosUS(ctx, diu)
if justGated {
- ss.Stats.SetFloat32("WrongCSGate", bools.ToFloat32(!ev.ArmIsMaxUtil(ev.Arm)))
+ ss.Stats.SetFloat32("WrongCSGate", num.FromBool[float32](!ev.ArmIsMaxUtil(ev.Arm)))
}
if ev.ShouldGate {
if hasPos {
- ss.Stats.SetFloat32("GateUS", bools.ToFloat32(justGated))
+ ss.Stats.SetFloat32("GateUS", num.FromBool[float32](justGated))
} else {
- ss.Stats.SetFloat32("GateCS", bools.ToFloat32(justGated))
+ ss.Stats.SetFloat32("GateCS", num.FromBool[float32](justGated))
}
} else {
if hasGated {
- ss.Stats.SetFloat32("GatedAgain", bools.ToFloat32(justGated))
+ ss.Stats.SetFloat32("GatedAgain", num.FromBool[float32](justGated))
	} else { // !ShouldGate means gated early
- ss.Stats.SetFloat32("GatedEarly", bools.ToFloat32(justGated))
+ ss.Stats.SetFloat32("GatedEarly", num.FromBool[float32](justGated))
}
}
	// We get ACh when a new CS or Rew occurs
@@ -952,13 +954,13 @@ func (ss *Sim) MaintStats(di int) {
ss.Stats.SetFloat32(fnm, mat32.NaN())
if isFwd {
ss.Stats.SetFloat32(mnm, mact)
- ss.Stats.SetFloat32(fnm, bools.ToFloat32(!overThr))
+ ss.Stats.SetFloat32(fnm, num.FromBool[float32](!overThr))
} else if !isCons {
- ss.Stats.SetFloat32(pnm, bools.ToFloat32(overThr))
+ ss.Stats.SetFloat32(pnm, num.FromBool[float32](overThr))
}
}
if hasMaint {
- ss.Stats.SetFloat32("MaintEarly", bools.ToFloat32(!ev.ArmIsMaxUtil(ev.Arm)))
+ ss.Stats.SetFloat32("MaintEarly", num.FromBool[float32](!ev.ArmIsMaxUtil(ev.Arm)))
}
}
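All of the bools.ToFloat32 / bools.FromFloat32 call sites are replaced by the generic conversions in goki.dev/glop/num, as above. The mapping in isolation (only the two functions used in this diff are assumed):

	// bool <-> number conversions with glop/num generics
	f := num.FromBool[float32](true) // 1
	b := num.ToBool(f)               // true
	_ = b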
@@ -1202,14 +1204,15 @@ func (ss *Sim) UpdateEnvGUI(mode etime.Modes) {
dn.SetCellFloat("USin", int(i), float64(us))
dn.SetCellFloat("OFC", int(i), float64(ofc))
}
- ss.EnvGUI.USposPlot.Update()
+ ss.EnvGUI.USposPlot.GoUpdatePlot()
+ ss.EnvGUI.USnegPlot.GoUpdatePlot()
ss.EnvGUI.UpdateWorld(ctx, ev, net, armaze.TraceStates(ss.Stats.IntDi("TraceStateInt", di)))
}
// ConfigGUI configures the GoGi gui interface for this simulation,
-func (ss *Sim) ConfigGUI() *gi.Window {
- title := "BOA = BG, OFC ACC"
- ss.GUI.MakeWindow(ss, "boa", title, `This project tests learning in the BG, OFC & ACC for basic approach learning to a CS associated with a US. See axon on GitHub.`)
+func (ss *Sim) ConfigGUI() {
+ title := "BOA: BG, OFC ACC"
+ ss.GUI.MakeBody(ss, "boa", title, `This project tests learning in the BG, OFC & ACC for basic approach learning to a CS associated with a US. See axon on GitHub.`)
ss.GUI.CycleUpdateInterval = 20
nv := ss.GUI.AddNetView("NetView")
@@ -1218,8 +1221,8 @@ func (ss *Sim) ConfigGUI() *gi.Window {
nv.SetNet(ss.Net)
ss.ViewUpdt.Config(nv, etime.Phase, etime.Phase)
- nv.Scene().Camera.Pose.Pos.Set(0, 1.4, 2.6)
- nv.Scene().Camera.LookAt(mat32.Vec3{X: 0, Y: 0, Z: 0}, mat32.Vec3{X: 0, Y: 1, Z: 0})
+ nv.SceneXYZ().Camera.Pose.Pos.Set(0, 1.4, 2.6)
+ nv.SceneXYZ().Camera.LookAt(mat32.Vec3{}, mat32.V3(0, 1, 0))
ss.GUI.ViewUpdt = &ss.ViewUpdt
@@ -1229,45 +1232,47 @@ func (ss *Sim) ConfigGUI() *gi.Window {
axon.LayerActsLogConfigGUI(&ss.Logs, &ss.GUI)
- ss.GUI.AddToolbarItem(egui.ToolbarItem{Label: "Init", Icon: "update",
- Tooltip: "Initialize everything including network weights, and start over. Also applies current params.",
- Active: egui.ActiveStopped,
- Func: func() {
- ss.Init()
- ss.GUI.UpdateWindow()
- },
- })
-
- ss.GUI.AddLooperCtrl(ss.Loops, []etime.Modes{etime.Train})
+ ss.GUI.Body.AddAppBar(func(tb *gi.Toolbar) {
+ ss.GUI.AddToolbarItem(tb, egui.ToolbarItem{Label: "Init", Icon: "update",
+ Tooltip: "Initialize everything including network weights, and start over. Also applies current params.",
+ Active: egui.ActiveStopped,
+ Func: func() {
+ ss.Init()
+ ss.GUI.UpdateWindow()
+ },
+ })
- ////////////////////////////////////////////////
- ss.GUI.ToolBar.AddSeparator("log")
- ss.GUI.AddToolbarItem(egui.ToolbarItem{Label: "Reset RunLog",
- Icon: "reset",
- Tooltip: "Reset the accumulated log of all NRuns, which are tagged with the ParamSet used",
- Active: egui.ActiveAlways,
- Func: func() {
- ss.Logs.ResetLog(etime.Train, etime.Run)
- ss.GUI.UpdatePlot(etime.Train, etime.Run)
- },
- })
- ////////////////////////////////////////////////
- ss.GUI.ToolBar.AddSeparator("misc")
- ss.GUI.AddToolbarItem(egui.ToolbarItem{Label: "New Seed",
- Icon: "new",
- Tooltip: "Generate a new initial random seed to get different results. By default, Init re-establishes the same initial seed every time.",
- Active: egui.ActiveAlways,
- Func: func() {
- ss.RndSeeds.NewSeeds()
- },
- })
- ss.GUI.AddToolbarItem(egui.ToolbarItem{Label: "README",
- Icon: "file-markdown",
- Tooltip: "Opens your browser on the README file that contains instructions for how to run this model.",
- Active: egui.ActiveAlways,
- Func: func() {
- gi.OpenURL("https://github.com/emer/axon/blob/master/examples/boa/README.md")
- },
+ ss.GUI.AddLooperCtrl(tb, ss.Loops, []etime.Modes{etime.Train})
+
+ ////////////////////////////////////////////////
+ gi.NewSeparator(tb)
+ ss.GUI.AddToolbarItem(tb, egui.ToolbarItem{Label: "Reset RunLog",
+ Icon: "reset",
+ Tooltip: "Reset the accumulated log of all NRuns, which are tagged with the ParamSet used",
+ Active: egui.ActiveAlways,
+ Func: func() {
+ ss.Logs.ResetLog(etime.Train, etime.Run)
+ ss.GUI.UpdatePlot(etime.Train, etime.Run)
+ },
+ })
+ ////////////////////////////////////////////////
+ gi.NewSeparator(tb)
+ ss.GUI.AddToolbarItem(tb, egui.ToolbarItem{Label: "New Seed",
+ Icon: "new",
+ Tooltip: "Generate a new initial random seed to get different results. By default, Init re-establishes the same initial seed every time.",
+ Active: egui.ActiveAlways,
+ Func: func() {
+ ss.RndSeeds.NewSeeds()
+ },
+ })
+ ss.GUI.AddToolbarItem(tb, egui.ToolbarItem{Label: "README",
+ Icon: "file-markdown",
+ Tooltip: "Opens your browser on the README file that contains instructions for how to run this model.",
+ Active: egui.ActiveAlways,
+ Func: func() {
+ gi.OpenURL("https://github.com/emer/axon/blob/master/examples/boa/README.md")
+ },
+ })
})
ss.GUI.FinalizeGUI(false)
if ss.Config.Run.GPU {
@@ -1276,17 +1281,17 @@ func (ss *Sim) ConfigGUI() *gi.Window {
ss.Net.GPU.Destroy()
})
}
- return ss.GUI.Win
}
func (ss *Sim) RunGUI() {
ss.Init()
- win := ss.ConfigGUI()
+ ss.ConfigGUI()
ev := ss.Envs.ByModeDi(etime.Train, 0).(*armaze.Env)
ss.EnvGUI = &armaze.GUI{}
- fwin := ss.EnvGUI.ConfigWorldGUI(ev)
- fwin.GoStartEventLoop()
- win.StartEventLoop()
+ eb := ss.EnvGUI.ConfigWorldGUI(ev)
+ eb.Sc.App = ss.GUI.Body.Sc.App
+ eb.NewWindow().Run()
+ ss.GUI.Body.NewWindow().Run().Wait()
}
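The RunGUI rewrite shows the v2 multi-window startup order: both Bodies share one App, each window is opened with Run() (which returns immediately here), and only the final Wait() blocks until the app quits. A condensed sketch under those assumptions, with envBody and mainBody as hypothetical already-configured bodies:

	// open a secondary window, then block on the main one
	envBody.Sc.App = mainBody.Sc.App  // share the single app instance
	envBody.NewWindow().Run()         // non-blocking
	mainBody.NewWindow().Run().Wait() // Wait blocks until close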
// RecordTestData returns key testing data from the network
diff --git a/examples/boa/boa_test.go b/examples/boa/boa_test.go
index 3ba4d7691..26b6ac80f 100644
--- a/examples/boa/boa_test.go
+++ b/examples/boa/boa_test.go
@@ -13,8 +13,8 @@ import (
"testing"
"github.com/alecthomas/assert/v2"
- "github.com/emer/emergent/etime"
- "github.com/goki/mat32"
+ "github.com/emer/emergent/v2/etime"
+ "goki.dev/mat32/v2"
"golang.org/x/exp/maps"
)
diff --git a/examples/boa/config.go b/examples/boa/config.go
index 5424f346f..2b3c1fc3f 100644
--- a/examples/boa/config.go
+++ b/examples/boa/config.go
@@ -4,7 +4,7 @@
package main
-import "github.com/emer/empi/mpi"
+import "github.com/emer/empi/v2/mpi"
// EnvConfig has config params for environment
// note: only adding fields for key Env params that matter for both Network and Env
@@ -14,20 +14,20 @@ type EnvConfig struct {
// name of config file that loads into Env.Config for setting environment parameters directly
Config string `desc:"name of config file that loads into Env.Config for setting environment parameters directly"`
- // [def: 4] number of different drive-like body states (hunger, thirst, etc), that are satisfied by a corresponding US outcome
- NDrives int `def:"4" desc:"number of different drive-like body states (hunger, thirst, etc), that are satisfied by a corresponding US outcome"`
+ // number of different drive-like body states (hunger, thirst, etc), that are satisfied by a corresponding US outcome
+ NDrives int `def:"4"`
- // [def: 10] epoch when PctCortex starts increasing
- PctCortexStEpc int `def:"10" desc:"epoch when PctCortex starts increasing"`
+ // epoch when PctCortex starts increasing
+ PctCortexStEpc int `def:"10"`
- // [def: 1] number of epochs over which PctCortexMax is reached
- PctCortexNEpc int `def:"1" desc:"number of epochs over which PctCortexMax is reached"`
+ // number of epochs over which PctCortexMax is reached
+ PctCortexNEpc int `def:"1"`
// proportion of behavioral approach sequences driven by the cortex vs. hard-coded reflexive subcortical
- PctCortex float32 `inactive:"+" desc:"proportion of behavioral approach sequences driven by the cortex vs. hard-coded reflexive subcortical"`
+ PctCortex float32 `inactive:"+"`
// for testing, force each env to use same seed
- SameSeed bool `desc:"for testing, force each env to use same seed"`
+ SameSeed bool
}
// CurPctCortex returns current PctCortex and updates field, based on epoch counter
@@ -47,106 +47,106 @@ func (cfg *EnvConfig) CurPctCortex(epc int) float32 {
type ParamConfig struct {
// PVLV parameters -- can set any field/subfield on Net.PVLV params, using standard TOML formatting
- PVLV map[string]any `desc:"PVLV parameters -- can set any field/subfield on Net.PVLV params, using standard TOML formatting"`
+ PVLV map[string]any
// network parameters
- Network map[string]any `desc:"network parameters"`
+ Network map[string]any
// Extra Param Sheet name(s) to use (space separated if multiple) -- must be valid name as listed in compiled-in params or loaded params
- Sheet string `desc:"Extra Param Sheet name(s) to use (space separated if multiple) -- must be valid name as listed in compiled-in params or loaded params"`
+ Sheet string
// extra tag to add to file names and logs saved from this run
- Tag string `desc:"extra tag to add to file names and logs saved from this run"`
+ Tag string
// user note -- describe the run params etc -- like a git commit message for the run
- Note string `desc:"user note -- describe the run params etc -- like a git commit message for the run"`
+ Note string
// Name of the JSON file to input saved parameters from.
- File string `nest:"+" desc:"Name of the JSON file to input saved parameters from."`
+ File string `nest:"+"`
// Save a snapshot of all current param and config settings in a directory named params_ (or _good if Good is true), then quit -- useful for comparing to later changes and seeing multiple views of current params
- SaveAll bool `nest:"+" desc:"Save a snapshot of all current param and config settings in a directory named params_ (or _good if Good is true), then quit -- useful for comparing to later changes and seeing multiple views of current params"`
+ SaveAll bool `nest:"+"`
// for SaveAll, save to params_good for a known good params state. This can be done prior to making a new release after all tests are passing -- add results to git to provide a full diff record of all params over time.
- Good bool `nest:"+" desc:"for SaveAll, save to params_good for a known good params state. This can be done prior to making a new release after all tests are passing -- add results to git to provide a full diff record of all params over time."`
+ Good bool `nest:"+"`
}
// RunConfig has config parameters related to running the sim
type RunConfig struct {
- // [def: true] use the GPU for computation -- generally faster even for small models if NData ~16
- GPU bool `def:"true" desc:"use the GPU for computation -- generally faster even for small models if NData ~16"`
+ // use the GPU for computation -- generally faster even for small models if NData ~16
+ GPU bool `def:"true"`
- // [def: 16] [min: 1] number of data-parallel items to process in parallel per trial -- works (and is significantly faster) for both CPU and GPU. Results in an effective mini-batch of learning.
- NData int `def:"16" min:"1" desc:"number of data-parallel items to process in parallel per trial -- works (and is significantly faster) for both CPU and GPU. Results in an effective mini-batch of learning."`
+ // number of data-parallel items to process in parallel per trial -- works (and is significantly faster) for both CPU and GPU. Results in an effective mini-batch of learning.
+ NData int `def:"16" min:"1"`
- // [def: 0] number of parallel threads for CPU computation -- 0 = use default
- NThreads int `def:"0" desc:"number of parallel threads for CPU computation -- 0 = use default"`
+ // number of parallel threads for CPU computation -- 0 = use default
+ NThreads int `def:"0"`
- // [def: 0] starting run number -- determines the random seed -- runs counts from there -- can do all runs in parallel by launching separate jobs with each run, runs = 1
- Run int `def:"0" desc:"starting run number -- determines the random seed -- runs counts from there -- can do all runs in parallel by launching separate jobs with each run, runs = 1"`
+	// starting run number -- determines the random seed -- runs count from there -- can do all runs in parallel by launching separate jobs with each run, runs = 1
+ Run int `def:"0"`
- // [def: 5] [min: 1] total number of runs to do when running Train
- NRuns int `def:"5" min:"1" desc:"total number of runs to do when running Train"`
+ // total number of runs to do when running Train
+ NRuns int `def:"5" min:"1"`
- // [def: 100] total number of epochs per run
- NEpochs int `def:"100" desc:"total number of epochs per run"`
+ // total number of epochs per run
+ NEpochs int `def:"100"`
- // [def: 128] total number of trials per epoch. Should be an even multiple of NData.
- NTrials int `def:"128" desc:"total number of trials per epoch. Should be an even multiple of NData."`
+ // total number of trials per epoch. Should be an even multiple of NData.
+ NTrials int `def:"128"`
- // [def: 10] how frequently (in epochs) to compute PCA on hidden representations to measure variance?
- PCAInterval int `def:"10" desc:"how frequently (in epochs) to compute PCA on hidden representations to measure variance?"`
+ // how frequently (in epochs) to compute PCA on hidden representations to measure variance?
+ PCAInterval int `def:"10"`
}
// LogConfig has config parameters related to logging data
type LogConfig struct {
// if true, save final weights after each run
- SaveWts bool `desc:"if true, save final weights after each run"`
+ SaveWts bool
- // [def: true] if true, save train epoch log to file, as .epc.tsv typically
- Epoch bool `def:"true" nest:"+" desc:"if true, save train epoch log to file, as .epc.tsv typically"`
+ // if true, save train epoch log to file, as .epc.tsv typically
+ Epoch bool `def:"true" nest:"+"`
- // [def: true] if true, save run log to file, as .run.tsv typically
- Run bool `def:"true" nest:"+" desc:"if true, save run log to file, as .run.tsv typically"`
+ // if true, save run log to file, as .run.tsv typically
+ Run bool `def:"true" nest:"+"`
- // [def: false] if true, save train trial log to file, as .trl.tsv typically. May be large.
- Trial bool `def:"false" nest:"+" desc:"if true, save train trial log to file, as .trl.tsv typically. May be large."`
+ // if true, save train trial log to file, as .trl.tsv typically. May be large.
+ Trial bool `def:"false" nest:"+"`
// if true, save network activation etc data from testing trials, for later viewing in netview
- NetData bool `desc:"if true, save network activation etc data from testing trials, for later viewing in netview"`
+ NetData bool
// activates testing mode -- records detailed data for Go CI tests (not the same as running test mode on network, via Looper)
- Testing bool `desc:"activates testing mode -- records detailed data for Go CI tests (not the same as running test mode on network, via Looper)"`
+ Testing bool
}
// Config is a standard Sim config -- use as a starting point.
type Config struct {
// specify include files here, and after configuration, it contains list of include files added
- Includes []string `desc:"specify include files here, and after configuration, it contains list of include files added"`
+ Includes []string
- // [def: true] open the GUI -- does not automatically run -- if false, then runs automatically and quits
- GUI bool `def:"true" desc:"open the GUI -- does not automatically run -- if false, then runs automatically and quits"`
+ // open the GUI -- does not automatically run -- if false, then runs automatically and quits
+ GUI bool `def:"true"`
// log debugging information
- Debug bool `desc:"log debugging information"`
+ Debug bool
// if set, open given weights file at start of training
- OpenWts string `desc:"if set, open given weights file at start of training"`
+ OpenWts string
- // [view: add-fields] environment configuration options
- Env EnvConfig `view:"add-fields" desc:"environment configuration options"`
+ // environment configuration options
+ Env EnvConfig `view:"add-fields"`
- // [view: add-fields] parameter related configuration options
- Params ParamConfig `view:"add-fields" desc:"parameter related configuration options"`
+ // parameter related configuration options
+ Params ParamConfig `view:"add-fields"`
- // [view: add-fields] sim running related configuration options
- Run RunConfig `view:"add-fields" desc:"sim running related configuration options"`
+ // sim running related configuration options
+ Run RunConfig `view:"add-fields"`
- // [view: add-fields] data logging related configuration options
- Log LogConfig `view:"add-fields" desc:"data logging related configuration options"`
+ // data logging related configuration options
+ Log LogConfig `view:"add-fields"`
}
func (cfg *Config) IncludesPtr() *[]string { return &cfg.Includes }
diff --git a/examples/boa/gtigen.go b/examples/boa/gtigen.go
new file mode 100644
index 000000000..037ae61fb
--- /dev/null
+++ b/examples/boa/gtigen.go
@@ -0,0 +1,131 @@
+// Code generated by "goki generate -add-types"; DO NOT EDIT.
+
+package main
+
+import (
+ "goki.dev/gti"
+ "goki.dev/ordmap"
+)
+
+var _ = gti.AddType(&gti.Type{
+ Name: "main.Sim",
+ ShortName: "main.Sim",
+ IDName: "sim",
+ Doc: "Sim encapsulates the entire simulation model, and we define all the\nfunctionality as methods on this struct. This structure keeps all relevant\nstate information organized and available without having to pass everything around\nas arguments to methods, and provides the core GUI interface (note the view tags\nfor the fields which provide hints to how things should be displayed).",
+ Directives: gti.Directives{},
+ Fields: ordmap.Make([]ordmap.KeyVal[string, *gti.Field]{
+	{"Config", &gti.Field{Name: "Config", Type: "github.com/emer/axon/examples/boa.Config", LocalType: "Config", Doc: "simulation configuration parameters -- set by .toml config file and / or args", Directives: gti.Directives{}, Tag: ""}},
+	{"Net", &gti.Field{Name: "Net", Type: "*github.com/emer/axon/axon.Network", LocalType: "*axon.Network", Doc: "the network -- click to view / edit parameters for layers, prjns, etc", Directives: gti.Directives{}, Tag: "view:\"no-inline\""}},
+	{"StopOnSeq", &gti.Field{Name: "StopOnSeq", Type: "bool", LocalType: "bool", Doc: "if true, stop running at end of a sequence (for NetView Di data parallel index)", Directives: gti.Directives{}, Tag: ""}},
+	{"StopOnErr", &gti.Field{Name: "StopOnErr", Type: "bool", LocalType: "bool", Doc: "if true, stop running when an error programmed into the code occurs", Directives: gti.Directives{}, Tag: ""}},
+	{"Params", &gti.Field{Name: "Params", Type: "github.com/emer/emergent/v2/emer.NetParams", LocalType: "emer.NetParams", Doc: "network parameter management", Directives: gti.Directives{}, Tag: "view:\"inline\""}},
+	{"Loops", &gti.Field{Name: "Loops", Type: "*github.com/emer/emergent/v2/looper.Manager", LocalType: "*looper.Manager", Doc: "contains looper control loops for running sim", Directives: gti.Directives{}, Tag: "view:\"no-inline\""}},
+	{"Stats", &gti.Field{Name: "Stats", Type: "github.com/emer/emergent/v2/estats.Stats", LocalType: "estats.Stats", Doc: "contains computed statistic values", Directives: gti.Directives{}, Tag: ""}},
+	{"Logs", &gti.Field{Name: "Logs", Type: "github.com/emer/emergent/v2/elog.Logs", LocalType: "elog.Logs", Doc: "Contains all the logs and information about the logs.", Directives: gti.Directives{}, Tag: ""}},
+	{"Envs", &gti.Field{Name: "Envs", Type: "github.com/emer/emergent/v2/env.Envs", LocalType: "env.Envs", Doc: "Environments", Directives: gti.Directives{}, Tag: "view:\"no-inline\""}},
+	{"Context", &gti.Field{Name: "Context", Type: "github.com/emer/axon/axon.Context", LocalType: "axon.Context", Doc: "axon timing parameters and state", Directives: gti.Directives{}, Tag: ""}},
+	{"ViewUpdt", &gti.Field{Name: "ViewUpdt", Type: "github.com/emer/emergent/v2/netview.ViewUpdt", LocalType: "netview.ViewUpdt", Doc: "netview update parameters", Directives: gti.Directives{}, Tag: "view:\"inline\""}},
+	{"GUI", &gti.Field{Name: "GUI", Type: "github.com/emer/emergent/v2/egui.GUI", LocalType: "egui.GUI", Doc: "manages all the gui elements", Directives: gti.Directives{}, Tag: "view:\"-\""}},
+	{"EnvGUI", &gti.Field{Name: "EnvGUI", Type: "*github.com/emer/axon/examples/boa/armaze.GUI", LocalType: "*armaze.GUI", Doc: "gui for viewing env", Directives: gti.Directives{}, Tag: "view:\"-\""}},
+	{"RndSeeds", &gti.Field{Name: "RndSeeds", Type: "github.com/emer/emergent/v2/erand.Seeds", LocalType: "erand.Seeds", Doc: "a list of random seeds to use for each run", Directives: gti.Directives{}, Tag: "view:\"-\""}},
+	{"TestData", &gti.Field{Name: "TestData", Type: "map[string]float32", LocalType: "map[string]float32", Doc: "testing data, from -test arg", Directives: gti.Directives{}, Tag: "view:\"-\""}},
+ }),
+ Embeds: ordmap.Make([]ordmap.KeyVal[string, *gti.Field]{}),
+ Methods: ordmap.Make([]ordmap.KeyVal[string, *gti.Method]{}),
+})
+
+var _ = gti.AddType(&gti.Type{
+ Name: "main.EnvConfig",
+ ShortName: "main.EnvConfig",
+ IDName: "env-config",
+ Doc: "EnvConfig has config params for environment\nnote: only adding fields for key Env params that matter for both Network and Env\nother params are set via the Env map data mechanism.",
+ Directives: gti.Directives{},
+ Fields: ordmap.Make([]ordmap.KeyVal[string, *gti.Field]{
+	{"Env", &gti.Field{Name: "Env", Type: "map[string]any", LocalType: "map[string]any", Doc: "env parameters -- can set any field/subfield on Env struct, using standard TOML formatting", Directives: gti.Directives{}, Tag: ""}},
+	{"NDrives", &gti.Field{Name: "NDrives", Type: "int", LocalType: "int", Doc: "number of different drive-like body states (hunger, thirst, etc), that are satisfied by a corresponding US outcome", Directives: gti.Directives{}, Tag: "def:\"4\""}},
+	{"PctCortexStEpc", &gti.Field{Name: "PctCortexStEpc", Type: "int", LocalType: "int", Doc: "epoch when PctCortex starts increasing", Directives: gti.Directives{}, Tag: "def:\"10\""}},
+	{"PctCortexNEpc", &gti.Field{Name: "PctCortexNEpc", Type: "int", LocalType: "int", Doc: "number of epochs over which PctCortexMax is reached", Directives: gti.Directives{}, Tag: "def:\"1\""}},
+	{"PctCortex", &gti.Field{Name: "PctCortex", Type: "float32", LocalType: "float32", Doc: "proportion of behavioral approach sequences driven by the cortex vs. hard-coded reflexive subcortical", Directives: gti.Directives{}, Tag: "inactive:\"+\""}},
+	{"SameSeed", &gti.Field{Name: "SameSeed", Type: "bool", LocalType: "bool", Doc: "for testing, force each env to use same seed", Directives: gti.Directives{}, Tag: ""}},
+ }),
+ Embeds: ordmap.Make([]ordmap.KeyVal[string, *gti.Field]{}),
+ Methods: ordmap.Make([]ordmap.KeyVal[string, *gti.Method]{}),
+})
+
+var _ = gti.AddType(&gti.Type{
+ Name: "main.ParamConfig",
+ ShortName: "main.ParamConfig",
+ IDName: "param-config",
+ Doc: "ParamConfig has config parameters related to sim params",
+ Directives: gti.Directives{},
+ Fields: ordmap.Make([]ordmap.KeyVal[string, *gti.Field]{
+	{"PVLV", &gti.Field{Name: "PVLV", Type: "map[string]any", LocalType: "map[string]any", Doc: "PVLV parameters -- can set any field/subfield on Net.PVLV params, using standard TOML formatting", Directives: gti.Directives{}, Tag: ""}},
+	{"Network", &gti.Field{Name: "Network", Type: "map[string]any", LocalType: "map[string]any", Doc: "network parameters", Directives: gti.Directives{}, Tag: ""}},
+	{"Sheet", &gti.Field{Name: "Sheet", Type: "string", LocalType: "string", Doc: "Extra Param Sheet name(s) to use (space separated if multiple) -- must be valid name as listed in compiled-in params or loaded params", Directives: gti.Directives{}, Tag: ""}},
+	{"Tag", &gti.Field{Name: "Tag", Type: "string", LocalType: "string", Doc: "extra tag to add to file names and logs saved from this run", Directives: gti.Directives{}, Tag: ""}},
+	{"Note", &gti.Field{Name: "Note", Type: "string", LocalType: "string", Doc: "user note -- describe the run params etc -- like a git commit message for the run", Directives: gti.Directives{}, Tag: ""}},
+	{"File", &gti.Field{Name: "File", Type: "string", LocalType: "string", Doc: "Name of the JSON file to input saved parameters from.", Directives: gti.Directives{}, Tag: "nest:\"+\""}},
+	{"SaveAll", &gti.Field{Name: "SaveAll", Type: "bool", LocalType: "bool", Doc: "Save a snapshot of all current param and config settings in a directory named params_ (or _good if Good is true), then quit -- useful for comparing to later changes and seeing multiple views of current params", Directives: gti.Directives{}, Tag: "nest:\"+\""}},
+	{"Good", &gti.Field{Name: "Good", Type: "bool", LocalType: "bool", Doc: "for SaveAll, save to params_good for a known good params state. This can be done prior to making a new release after all tests are passing -- add results to git to provide a full diff record of all params over time.", Directives: gti.Directives{}, Tag: "nest:\"+\""}},
+ }),
+ Embeds: ordmap.Make([]ordmap.KeyVal[string, *gti.Field]{}),
+ Methods: ordmap.Make([]ordmap.KeyVal[string, *gti.Method]{}),
+})
+
+var _ = gti.AddType(&gti.Type{
+ Name: "main.RunConfig",
+ ShortName: "main.RunConfig",
+ IDName: "run-config",
+ Doc: "RunConfig has config parameters related to running the sim",
+ Directives: gti.Directives{},
+ Fields: ordmap.Make([]ordmap.KeyVal[string, *gti.Field]{
+	{"GPU", &gti.Field{Name: "GPU", Type: "bool", LocalType: "bool", Doc: "use the GPU for computation -- generally faster even for small models if NData ~16", Directives: gti.Directives{}, Tag: "def:\"true\""}},
+	{"NData", &gti.Field{Name: "NData", Type: "int", LocalType: "int", Doc: "number of data-parallel items to process in parallel per trial -- works (and is significantly faster) for both CPU and GPU. Results in an effective mini-batch of learning.", Directives: gti.Directives{}, Tag: "def:\"16\" min:\"1\""}},
+	{"NThreads", &gti.Field{Name: "NThreads", Type: "int", LocalType: "int", Doc: "number of parallel threads for CPU computation -- 0 = use default", Directives: gti.Directives{}, Tag: "def:\"0\""}},
+	{"Run", &gti.Field{Name: "Run", Type: "int", LocalType: "int", Doc: "starting run number -- determines the random seed -- runs count from there -- can do all runs in parallel by launching separate jobs with each run, runs = 1", Directives: gti.Directives{}, Tag: "def:\"0\""}},
+	{"NRuns", &gti.Field{Name: "NRuns", Type: "int", LocalType: "int", Doc: "total number of runs to do when running Train", Directives: gti.Directives{}, Tag: "def:\"5\" min:\"1\""}},
+	{"NEpochs", &gti.Field{Name: "NEpochs", Type: "int", LocalType: "int", Doc: "total number of epochs per run", Directives: gti.Directives{}, Tag: "def:\"100\""}},
+	{"NTrials", &gti.Field{Name: "NTrials", Type: "int", LocalType: "int", Doc: "total number of trials per epoch. Should be an even multiple of NData.", Directives: gti.Directives{}, Tag: "def:\"128\""}},
+	{"PCAInterval", &gti.Field{Name: "PCAInterval", Type: "int", LocalType: "int", Doc: "how frequently (in epochs) to compute PCA on hidden representations to measure variance?", Directives: gti.Directives{}, Tag: "def:\"10\""}},
+ }),
+ Embeds: ordmap.Make([]ordmap.KeyVal[string, *gti.Field]{}),
+ Methods: ordmap.Make([]ordmap.KeyVal[string, *gti.Method]{}),
+})
+
+var _ = gti.AddType(&gti.Type{
+ Name: "main.LogConfig",
+ ShortName: "main.LogConfig",
+ IDName: "log-config",
+ Doc: "LogConfig has config parameters related to logging data",
+ Directives: gti.Directives{},
+ Fields: ordmap.Make([]ordmap.KeyVal[string, *gti.Field]{
+	{"SaveWts", &gti.Field{Name: "SaveWts", Type: "bool", LocalType: "bool", Doc: "if true, save final weights after each run", Directives: gti.Directives{}, Tag: ""}},
+	{"Epoch", &gti.Field{Name: "Epoch", Type: "bool", LocalType: "bool", Doc: "if true, save train epoch log to file, as .epc.tsv typically", Directives: gti.Directives{}, Tag: "def:\"true\" nest:\"+\""}},
+	{"Run", &gti.Field{Name: "Run", Type: "bool", LocalType: "bool", Doc: "if true, save run log to file, as .run.tsv typically", Directives: gti.Directives{}, Tag: "def:\"true\" nest:\"+\""}},
+	{"Trial", &gti.Field{Name: "Trial", Type: "bool", LocalType: "bool", Doc: "if true, save train trial log to file, as .trl.tsv typically. May be large.", Directives: gti.Directives{}, Tag: "def:\"false\" nest:\"+\""}},
+	{"NetData", &gti.Field{Name: "NetData", Type: "bool", LocalType: "bool", Doc: "if true, save network activation etc data from testing trials, for later viewing in netview", Directives: gti.Directives{}, Tag: ""}},
+	{"Testing", &gti.Field{Name: "Testing", Type: "bool", LocalType: "bool", Doc: "activates testing mode -- records detailed data for Go CI tests (not the same as running test mode on network, via Looper)", Directives: gti.Directives{}, Tag: ""}},
+ }),
+ Embeds: ordmap.Make([]ordmap.KeyVal[string, *gti.Field]{}),
+ Methods: ordmap.Make([]ordmap.KeyVal[string, *gti.Method]{}),
+})
+
+var _ = gti.AddType(&gti.Type{
+ Name: "main.Config",
+ ShortName: "main.Config",
+ IDName: "config",
+ Doc: "Config is a standard Sim config -- use as a starting point.",
+ Directives: gti.Directives{},
+ Fields: ordmap.Make([]ordmap.KeyVal[string, *gti.Field]{
+	{"Includes", &gti.Field{Name: "Includes", Type: "[]string", LocalType: "[]string", Doc: "specify include files here, and after configuration, it contains list of include files added", Directives: gti.Directives{}, Tag: ""}},
+	{"GUI", &gti.Field{Name: "GUI", Type: "bool", LocalType: "bool", Doc: "open the GUI -- does not automatically run -- if false, then runs automatically and quits", Directives: gti.Directives{}, Tag: "def:\"true\""}},
+	{"Debug", &gti.Field{Name: "Debug", Type: "bool", LocalType: "bool", Doc: "log debugging information", Directives: gti.Directives{}, Tag: ""}},
+	{"OpenWts", &gti.Field{Name: "OpenWts", Type: "string", LocalType: "string", Doc: "if set, open given weights file at start of training", Directives: gti.Directives{}, Tag: ""}},
+	{"Env", &gti.Field{Name: "Env", Type: "github.com/emer/axon/examples/boa.EnvConfig", LocalType: "EnvConfig", Doc: "environment configuration options", Directives: gti.Directives{}, Tag: "view:\"add-fields\""}},
+	{"Params", &gti.Field{Name: "Params", Type: "github.com/emer/axon/examples/boa.ParamConfig", LocalType: "ParamConfig", Doc: "parameter related configuration options", Directives: gti.Directives{}, Tag: "view:\"add-fields\""}},
+	{"Run", &gti.Field{Name: "Run", Type: "github.com/emer/axon/examples/boa.RunConfig", LocalType: "RunConfig", Doc: "sim running related configuration options", Directives: gti.Directives{}, Tag: "view:\"add-fields\""}},
+	{"Log", &gti.Field{Name: "Log", Type: "github.com/emer/axon/examples/boa.LogConfig", LocalType: "LogConfig", Doc: "data logging related configuration options", Directives: gti.Directives{}, Tag: "view:\"add-fields\""}},
+ }),
+ Embeds: ordmap.Make([]ordmap.KeyVal[string, *gti.Field]{}),
+ Methods: ordmap.Make([]ordmap.KeyVal[string, *gti.Method]{}),
+})
diff --git a/examples/boa/params.go b/examples/boa/params.go
index acb226181..d8f772a5a 100644
--- a/examples/boa/params.go
+++ b/examples/boa/params.go
@@ -5,8 +5,8 @@
package main
import (
- "github.com/emer/emergent/netparams"
- "github.com/emer/emergent/params"
+ "github.com/emer/emergent/v2/netparams"
+ "github.com/emer/emergent/v2/params"
)
// ParamSets is the active set of parameters -- Base is always applied,
diff --git a/examples/boa/params_good/config.toml b/examples/boa/params_good/config.toml
index cd5222839..cebaa6c71 100644
--- a/examples/boa/params_good/config.toml
+++ b/examples/boa/params_good/config.toml
@@ -3,14 +3,12 @@ Debug = false
OpenWts = ""
[Env]
+ Config = "4.uncertain_cs.toml"
NDrives = 4
- CSPerDrive = 1
PctCortexStEpc = 10
PctCortexNEpc = 1
PctCortex = 0.0
SameSeed = false
- [Env.Env]
- NYReps = 4
[Params]
Sheet = ""
@@ -21,8 +19,8 @@ OpenWts = ""
Good = true
[Run]
- GPU = true
- NData = 16
+ GPU = false
+ NData = 1
NThreads = 0
Run = 0
NRuns = 5
diff --git a/examples/boa/params_good/params.toml b/examples/boa/params_good/params.toml
index 14f05a390..226f9870c 100644
--- a/examples/boa/params_good/params.toml
+++ b/examples/boa/params_good/params.toml
@@ -112,7 +112,7 @@
"Layer.Learn.TrgAvgAct.GiBaseInit" = "0.5"
"Layer.VSPatch.Gain" = "3"
"Layer.VSPatch.ThrInit" = "0.15"
- "Layer.VSPatch.ThrLRate" = "0.002"
+ "Layer.VSPatch.ThrLRate" = "0.001"
"Layer.VSPatch.ThrNonRew" = "10"
[[Base]]
@@ -187,6 +187,7 @@
Sel = ".MatrixPrjn"
Desc = ""
[Base.Params]
+ "Prjn.Learn.Trace.LearnThr" = "0.0"
"Prjn.Matrix.NoGateLRate" = "1"
[[Base]]
@@ -211,7 +212,7 @@
Sel = ".BLAAcqToGo"
Desc = "must dominate"
[Base.Params]
- "Prjn.PrjnScale.Abs" = "3"
+ "Prjn.PrjnScale.Abs" = "2"
"Prjn.PrjnScale.Rel" = "1"
[[Base]]
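Note: in code form, these TOML entries correspond to params.Sel selectors in params.go, roughly as sketched below per the emergent params API (the real Base sheet is much larger; values shown are the post-change ones from this patch):

    var matrixTuning = params.Sheet{
        {Sel: ".MatrixPrjn", Desc: "",
            Params: params.Params{
                "Prjn.Learn.Trace.LearnThr": "0.0", // newly added: no trace learning threshold
                "Prjn.Matrix.NoGateLRate":   "1",
            }},
        {Sel: ".BLAAcqToGo", Desc: "must dominate",
            Params: params.Params{
                "Prjn.PrjnScale.Abs": "2", // reduced from 3
                "Prjn.PrjnScale.Rel": "1",
            }},
    }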
diff --git a/examples/boa/params_good/params_all.txt b/examples/boa/params_good/params_all.txt
index 090fff526..af31c1d0f 100644
--- a/examples/boa/params_good/params_all.txt
+++ b/examples/boa/params_good/params_all.txt
@@ -11,13 +11,13 @@ Act: {
Clamp: { IsInput: false IsTarget: false Ge: 1.5 Add: false ErrThr: 0.5 }
Noise: { On: false GeHz: 100 Ge: 0.001 GiHz: 200 Gi: 0.001 }
VmRange: { Min: 0.1 Max: 1 }
- Mahp: { Gbar: 0.02 Voff: -30 Vslope: 9 TauMax: 1000 Tadj: 3.2093637 DtMax: 0.001 }
+ Mahp: { Gbar: 0.02 Voff: -30 Vslope: 9 TauMax: 1000 Tadj: 3.209364 DtMax: 0.001 }
Sahp: { Gbar: 0.05 CaTau: 5 Off: 0.8 Slope: 0.02 TauMax: 1 CaDt: 0.2 DtMax: 1 }
KNa: { On: true TrialSlow: false Med: { On: true Rise: 0.02 Max: 0.2 Tau: 200 Dt: 0.005 } Slow: { On: true Rise: 0.001 Max: 0.2 Tau: 1000 Dt: 0.001
} }
NMDA: { Gbar: 0.006 Tau: 100 ITau: 1 MgC: 1.4 Voff: 0 }
MaintNMDA: { Gbar: 0.007 Tau: 200 ITau: 1 MgC: 1.4 Voff: 0 }
- GabaB: { Gbar: 0.015 RiseTau: 45 DecayTau: 50 Gbase: 0.2 GiSpike: 10 MaxTime: 47.41225 TauFact: 2.5811758 RiseDt: 0.022222223 DecayDt: 0.02 }
+ GabaB: { Gbar: 0.015 RiseTau: 45 DecayTau: 50 Gbase: 0.2 GiSpike: 10 MaxTime: 47.41225 TauFact: 2.5811756 RiseDt: 0.022222223 DecayDt: 0.02 }
VGCC: { Gbar: 0.02 Ca: 25 }
AK: { Gbar: 0.1 Hf: 0.076 Mf: 0.075 Voff: 2 Vmax: -37 }
SKCa: { Gbar: 0 C50: 0.5 ActTau: 15 DeTau: 30 KCaR: 0.8 CaRDecayTau: 150 CaInThr: 0.01 CaInTau: 50 }
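Note: the Tadj and TauFact deltas in this regenerated dump are not hand-tuned parameter changes: each old/new string pair parses to adjacent float32 values, one ulp apart, so they most likely reflect a tiny difference in how the derived quantity is computed or formatted under the new mat32/v2 package. A small check, runnable as-is:

    package main

    import (
        "fmt"
        "math"
        "strconv"
    )

    func main() {
        prev := float32(3.2093637) // Tadj as printed by the old dump
        cur := float32(3.209364)   // Tadj as printed by the new dump
        fmt.Println(prev == cur)   // false
        fmt.Println(math.Float32bits(cur) - math.Float32bits(prev)) // 1: adjacent float32 values
        // Each printed string is the shortest decimal that round-trips its float32:
        fmt.Println(strconv.FormatFloat(float64(prev), 'g', -1, 32)) // 3.2093637
        fmt.Println(strconv.FormatFloat(float64(cur), 'g', -1, 32))  // 3.209364
    }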
@@ -37,7 +37,7 @@ Learn: {
TrgAvgAct: { On: true GiBaseInit: 0 ErrLRate: 0.02 SynScaleRate: 0.005 SubMean: 1 Permute: true Pool: true TrgRange: { Min: 0.5 Max: 2
} }
RLRate: { On: true SigmoidMin: 0.05 Diff: true SpkThr: 0.1 DiffThr: 0.02 Min: 0.001 }
- NeuroMod: { DAMod: 0 Valence: 0 DAModGain: 0.5 DALRateSign: false DALRateMod: 0 AChLRateMod: 0 AChDisInhib: 0 BurstGain: 1 DipGain: 1 }
+ NeuroMod: { DAMod: NoDAMod Valence: Positive DAModGain: 0.5 DALRateSign: false DALRateMod: 0 AChLRateMod: 0 AChDisInhib: 0 BurstGain: 1 DipGain: 1 }
}
VTA: {
CeMGain: 0.75 LHbGain: 1.25
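Note: similarly, the DAMod and Valence fields now print symbolic names (NoDAMod/D1Mod/D2Mod, Positive/Negative) instead of raw ints, consistent with the enumgen-generated String methods from the goki migration configured in .goki/config.toml. A hypothetical minimal version of that pattern (the real constants live in axon's generated enum code; only the three DAMod names visible in this dump are shown):

    // DAModTypes is a minimal stand-in for the dopamine-modulation enum.
    type DAModTypes int32

    const (
        NoDAMod DAModTypes = iota // no dopamine modulation
        D1Mod                     // D1-receptor-dominant modulation
        D2Mod                     // D2-receptor-dominant modulation
    )

    // String is the kind of method enumgen generates, so %v prints names, not ints.
    func (dm DAModTypes) String() string {
        switch dm {
        case NoDAMod:
            return "NoDAMod"
        case D1Mod:
            return "D1Mod"
        case D2Mod:
            return "D2Mod"
        }
        return "DAModTypes(invalid)"
    }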
@@ -55,13 +55,13 @@ Act: {
Clamp: { IsInput: false IsTarget: false Ge: 1.5 Add: false ErrThr: 0.5 }
Noise: { On: false GeHz: 100 Ge: 0.001 GiHz: 200 Gi: 0.001 }
VmRange: { Min: 0.1 Max: 1 }
- Mahp: { Gbar: 0.02 Voff: -30 Vslope: 9 TauMax: 1000 Tadj: 3.2093637 DtMax: 0.001 }
+ Mahp: { Gbar: 0.02 Voff: -30 Vslope: 9 TauMax: 1000 Tadj: 3.209364 DtMax: 0.001 }
Sahp: { Gbar: 0.05 CaTau: 5 Off: 0.8 Slope: 0.02 TauMax: 1 CaDt: 0.2 DtMax: 1 }
KNa: { On: true TrialSlow: false Med: { On: true Rise: 0.02 Max: 0.2 Tau: 200 Dt: 0.005 } Slow: { On: true Rise: 0.001 Max: 0.2 Tau: 1000 Dt: 0.001
} }
NMDA: { Gbar: 0.006 Tau: 100 ITau: 1 MgC: 1.4 Voff: 0 }
MaintNMDA: { Gbar: 0.007 Tau: 200 ITau: 1 MgC: 1.4 Voff: 0 }
- GabaB: { Gbar: 0.015 RiseTau: 45 DecayTau: 50 Gbase: 0.2 GiSpike: 10 MaxTime: 47.41225 TauFact: 2.5811758 RiseDt: 0.022222223 DecayDt: 0.02 }
+ GabaB: { Gbar: 0.015 RiseTau: 45 DecayTau: 50 Gbase: 0.2 GiSpike: 10 MaxTime: 47.41225 TauFact: 2.5811756 RiseDt: 0.022222223 DecayDt: 0.02 }
VGCC: { Gbar: 0.02 Ca: 25 }
AK: { Gbar: 0.1 Hf: 0.076 Mf: 0.075 Voff: 2 Vmax: -37 }
SKCa: { Gbar: 0 C50: 0.5 ActTau: 15 DeTau: 30 KCaR: 0.8 CaRDecayTau: 150 CaInThr: 0.01 CaInTau: 50 }
@@ -81,7 +81,7 @@ Learn: {
TrgAvgAct: { On: true GiBaseInit: 0 ErrLRate: 0.02 SynScaleRate: 0.005 SubMean: 1 Permute: true Pool: true TrgRange: { Min: 0.5 Max: 2
} }
RLRate: { On: true SigmoidMin: 0.05 Diff: true SpkThr: 0.1 DiffThr: 0.02 Min: 0.001 }
- NeuroMod: { DAMod: 0 Valence: 0 DAModGain: 0.5 DALRateSign: false DALRateMod: 0 AChLRateMod: 0 AChDisInhib: 0 BurstGain: 1 DipGain: 1 }
+ NeuroMod: { DAMod: NoDAMod Valence: Positive DAModGain: 0.5 DALRateSign: false DALRateMod: 0 AChLRateMod: 0 AChDisInhib: 0 BurstGain: 1 DipGain: 1 }
}
/////////////////////////////////////////////////
Layer: LDT
@@ -96,13 +96,13 @@ Act: {
Clamp: { IsInput: false IsTarget: false Ge: 1.5 Add: false ErrThr: 0.5 }
Noise: { On: false GeHz: 100 Ge: 0.001 GiHz: 200 Gi: 0.001 }
VmRange: { Min: 0.1 Max: 1 }
- Mahp: { Gbar: 0.02 Voff: -30 Vslope: 9 TauMax: 1000 Tadj: 3.2093637 DtMax: 0.001 }
+ Mahp: { Gbar: 0.02 Voff: -30 Vslope: 9 TauMax: 1000 Tadj: 3.209364 DtMax: 0.001 }
Sahp: { Gbar: 0.05 CaTau: 5 Off: 0.8 Slope: 0.02 TauMax: 1 CaDt: 0.2 DtMax: 1 }
KNa: { On: true TrialSlow: false Med: { On: true Rise: 0.02 Max: 0.2 Tau: 200 Dt: 0.005 } Slow: { On: true Rise: 0.001 Max: 0.2 Tau: 1000 Dt: 0.001
} }
NMDA: { Gbar: 0.006 Tau: 100 ITau: 1 MgC: 1.4 Voff: 0 }
MaintNMDA: { Gbar: 0.007 Tau: 200 ITau: 1 MgC: 1.4 Voff: 0 }
- GabaB: { Gbar: 0.015 RiseTau: 45 DecayTau: 50 Gbase: 0.2 GiSpike: 10 MaxTime: 47.41225 TauFact: 2.5811758 RiseDt: 0.022222223 DecayDt: 0.02 }
+ GabaB: { Gbar: 0.015 RiseTau: 45 DecayTau: 50 Gbase: 0.2 GiSpike: 10 MaxTime: 47.41225 TauFact: 2.5811756 RiseDt: 0.022222223 DecayDt: 0.02 }
VGCC: { Gbar: 0.02 Ca: 25 }
AK: { Gbar: 0.1 Hf: 0.076 Mf: 0.075 Voff: 2 Vmax: -37 }
SKCa: { Gbar: 0 C50: 0.5 ActTau: 15 DeTau: 30 KCaR: 0.8 CaRDecayTau: 150 CaInThr: 0.01 CaInTau: 50 }
@@ -122,7 +122,7 @@ Learn: {
TrgAvgAct: { On: false GiBaseInit: 0 ErrLRate: 0.02 SynScaleRate: 0.005 SubMean: 1 Permute: true Pool: true TrgRange: { Min: 0.5 Max: 2
} }
RLRate: { On: true SigmoidMin: 0.05 Diff: true SpkThr: 0.1 DiffThr: 0.02 Min: 0.001 }
- NeuroMod: { DAMod: 0 Valence: 0 DAModGain: 0.5 DALRateSign: false DALRateMod: 0 AChLRateMod: 0 AChDisInhib: 0 BurstGain: 1 DipGain: 1 }
+ NeuroMod: { DAMod: NoDAMod Valence: Positive DAModGain: 0.5 DALRateSign: false DALRateMod: 0 AChLRateMod: 0 AChDisInhib: 0 BurstGain: 1 DipGain: 1 }
}
LDT: {
SrcThr: 0.05 Rew: true MaintInhib: 2 NotMaintMax: 0.4 SrcLay1Idx: 24 SrcLay2Idx: -1 SrcLay3Idx: -1 SrcLay4Idx: -1
@@ -140,13 +140,13 @@ Act: {
Clamp: { IsInput: false IsTarget: false Ge: 1.5 Add: false ErrThr: 0.5 }
Noise: { On: false GeHz: 100 Ge: 0.001 GiHz: 200 Gi: 0.001 }
VmRange: { Min: 0.1 Max: 1 }
- Mahp: { Gbar: 0.02 Voff: -30 Vslope: 9 TauMax: 1000 Tadj: 3.2093637 DtMax: 0.001 }
+ Mahp: { Gbar: 0.02 Voff: -30 Vslope: 9 TauMax: 1000 Tadj: 3.209364 DtMax: 0.001 }
Sahp: { Gbar: 0.05 CaTau: 5 Off: 0.8 Slope: 0.02 TauMax: 1 CaDt: 0.2 DtMax: 1 }
KNa: { On: true TrialSlow: false Med: { On: true Rise: 0.02 Max: 0.2 Tau: 200 Dt: 0.005 } Slow: { On: true Rise: 0.001 Max: 0.2 Tau: 1000 Dt: 0.001
} }
NMDA: { Gbar: 0.006 Tau: 100 ITau: 1 MgC: 1.4 Voff: 0 }
MaintNMDA: { Gbar: 0.007 Tau: 200 ITau: 1 MgC: 1.4 Voff: 0 }
- GabaB: { Gbar: 0.015 RiseTau: 45 DecayTau: 50 Gbase: 0.2 GiSpike: 10 MaxTime: 47.41225 TauFact: 2.5811758 RiseDt: 0.022222223 DecayDt: 0.02 }
+ GabaB: { Gbar: 0.015 RiseTau: 45 DecayTau: 50 Gbase: 0.2 GiSpike: 10 MaxTime: 47.41225 TauFact: 2.5811756 RiseDt: 0.022222223 DecayDt: 0.02 }
VGCC: { Gbar: 0.02 Ca: 25 }
AK: { Gbar: 0.1 Hf: 0.076 Mf: 0.075 Voff: 2 Vmax: -37 }
SKCa: { Gbar: 0 C50: 0.5 ActTau: 15 DeTau: 30 KCaR: 0.8 CaRDecayTau: 150 CaInThr: 0.01 CaInTau: 50 }
@@ -166,7 +166,7 @@ Learn: {
TrgAvgAct: { On: false GiBaseInit: 0 ErrLRate: 0.02 SynScaleRate: 0.005 SubMean: 1 Permute: true Pool: true TrgRange: { Min: 0.5 Max: 2
} }
RLRate: { On: true SigmoidMin: 0.05 Diff: true SpkThr: 0.1 DiffThr: 0.02 Min: 0.001 }
- NeuroMod: { DAMod: 1 Valence: 0 DAModGain: 0.5 DALRateSign: false DALRateMod: 0 AChLRateMod: 0 AChDisInhib: 0 BurstGain: 1 DipGain: 1 }
+ NeuroMod: { DAMod: D1Mod Valence: Positive DAModGain: 0.5 DALRateSign: false DALRateMod: 0 AChLRateMod: 0 AChDisInhib: 0 BurstGain: 1 DipGain: 1 }
}
/////////////////////////////////////////////////
Layer: USneg
@@ -181,13 +181,13 @@ Act: {
Clamp: { IsInput: false IsTarget: false Ge: 1.5 Add: false ErrThr: 0.5 }
Noise: { On: false GeHz: 100 Ge: 0.001 GiHz: 200 Gi: 0.001 }
VmRange: { Min: 0.1 Max: 1 }
- Mahp: { Gbar: 0.02 Voff: -30 Vslope: 9 TauMax: 1000 Tadj: 3.2093637 DtMax: 0.001 }
+ Mahp: { Gbar: 0.02 Voff: -30 Vslope: 9 TauMax: 1000 Tadj: 3.209364 DtMax: 0.001 }
Sahp: { Gbar: 0.05 CaTau: 5 Off: 0.8 Slope: 0.02 TauMax: 1 CaDt: 0.2 DtMax: 1 }
KNa: { On: true TrialSlow: false Med: { On: true Rise: 0.02 Max: 0.2 Tau: 200 Dt: 0.005 } Slow: { On: true Rise: 0.001 Max: 0.2 Tau: 1000 Dt: 0.001
} }
NMDA: { Gbar: 0.006 Tau: 100 ITau: 1 MgC: 1.4 Voff: 0 }
MaintNMDA: { Gbar: 0.007 Tau: 200 ITau: 1 MgC: 1.4 Voff: 0 }
- GabaB: { Gbar: 0.015 RiseTau: 45 DecayTau: 50 Gbase: 0.2 GiSpike: 10 MaxTime: 47.41225 TauFact: 2.5811758 RiseDt: 0.022222223 DecayDt: 0.02 }
+ GabaB: { Gbar: 0.015 RiseTau: 45 DecayTau: 50 Gbase: 0.2 GiSpike: 10 MaxTime: 47.41225 TauFact: 2.5811756 RiseDt: 0.022222223 DecayDt: 0.02 }
VGCC: { Gbar: 0.02 Ca: 25 }
AK: { Gbar: 0.1 Hf: 0.076 Mf: 0.075 Voff: 2 Vmax: -37 }
SKCa: { Gbar: 0 C50: 0.5 ActTau: 15 DeTau: 30 KCaR: 0.8 CaRDecayTau: 150 CaInThr: 0.01 CaInTau: 50 }
@@ -207,7 +207,7 @@ Learn: {
TrgAvgAct: { On: false GiBaseInit: 0 ErrLRate: 0.02 SynScaleRate: 0.005 SubMean: 1 Permute: true Pool: true TrgRange: { Min: 0.5 Max: 2
} }
RLRate: { On: true SigmoidMin: 0.05 Diff: true SpkThr: 0.1 DiffThr: 0.02 Min: 0.001 }
- NeuroMod: { DAMod: 2 Valence: 1 DAModGain: 0.5 DALRateSign: false DALRateMod: 0 AChLRateMod: 0 AChDisInhib: 0 BurstGain: 1 DipGain: 1 }
+ NeuroMod: { DAMod: D2Mod Valence: Negative DAModGain: 0.5 DALRateSign: false DALRateMod: 0 AChLRateMod: 0 AChDisInhib: 0 BurstGain: 1 DipGain: 1 }
}
/////////////////////////////////////////////////
Layer: USposP
@@ -222,13 +222,13 @@ Act: {
Clamp: { IsInput: false IsTarget: true Ge: 1.5 Add: false ErrThr: 0.5 }
Noise: { On: false GeHz: 100 Ge: 0.001 GiHz: 200 Gi: 0.001 }
VmRange: { Min: 0.1 Max: 1 }
- Mahp: { Gbar: 0.02 Voff: -30 Vslope: 9 TauMax: 1000 Tadj: 3.2093637 DtMax: 0.001 }
+ Mahp: { Gbar: 0.02 Voff: -30 Vslope: 9 TauMax: 1000 Tadj: 3.209364 DtMax: 0.001 }
Sahp: { Gbar: 0.05 CaTau: 5 Off: 0.8 Slope: 0.02 TauMax: 1 CaDt: 0.2 DtMax: 1 }
KNa: { On: true TrialSlow: false Med: { On: true Rise: 0.02 Max: 0.2 Tau: 200 Dt: 0.005 } Slow: { On: true Rise: 0.001 Max: 0.2 Tau: 1000 Dt: 0.001
} }
NMDA: { Gbar: 0.006 Tau: 100 ITau: 1 MgC: 1.4 Voff: 0 }
MaintNMDA: { Gbar: 0.007 Tau: 200 ITau: 1 MgC: 1.4 Voff: 0 }
- GabaB: { Gbar: 0.015 RiseTau: 45 DecayTau: 50 Gbase: 0.2 GiSpike: 10 MaxTime: 47.41225 TauFact: 2.5811758 RiseDt: 0.022222223 DecayDt: 0.02 }
+ GabaB: { Gbar: 0.015 RiseTau: 45 DecayTau: 50 Gbase: 0.2 GiSpike: 10 MaxTime: 47.41225 TauFact: 2.5811756 RiseDt: 0.022222223 DecayDt: 0.02 }
VGCC: { Gbar: 0.02 Ca: 25 }
AK: { Gbar: 0.1 Hf: 0.076 Mf: 0.075 Voff: 2 Vmax: -37 }
SKCa: { Gbar: 0 C50: 0.5 ActTau: 15 DeTau: 30 KCaR: 0.8 CaRDecayTau: 150 CaInThr: 0.01 CaInTau: 50 }
@@ -248,7 +248,7 @@ Learn: {
TrgAvgAct: { On: true GiBaseInit: 0 ErrLRate: 0.02 SynScaleRate: 0.005 SubMean: 1 Permute: true Pool: true TrgRange: { Min: 0.5 Max: 2
} }
RLRate: { On: true SigmoidMin: 1 Diff: true SpkThr: 0.1 DiffThr: 0.02 Min: 0.001 }
- NeuroMod: { DAMod: 0 Valence: 0 DAModGain: 0.5 DALRateSign: false DALRateMod: 0 AChLRateMod: 0 AChDisInhib: 0 BurstGain: 1 DipGain: 1 }
+ NeuroMod: { DAMod: NoDAMod Valence: Positive DAModGain: 0.5 DALRateSign: false DALRateMod: 0 AChLRateMod: 0 AChDisInhib: 0 BurstGain: 1 DipGain: 1 }
}
Pulv: {
DriveScale: 0.1 FullDriveAct: 0.6 DriveLayIdx: 3
@@ -306,13 +306,13 @@ Act: {
Clamp: { IsInput: false IsTarget: true Ge: 1.5 Add: false ErrThr: 0.5 }
Noise: { On: false GeHz: 100 Ge: 0.001 GiHz: 200 Gi: 0.001 }
VmRange: { Min: 0.1 Max: 1 }
- Mahp: { Gbar: 0.02 Voff: -30 Vslope: 9 TauMax: 1000 Tadj: 3.2093637 DtMax: 0.001 }
+ Mahp: { Gbar: 0.02 Voff: -30 Vslope: 9 TauMax: 1000 Tadj: 3.209364 DtMax: 0.001 }
Sahp: { Gbar: 0.05 CaTau: 5 Off: 0.8 Slope: 0.02 TauMax: 1 CaDt: 0.2 DtMax: 1 }
KNa: { On: true TrialSlow: false Med: { On: true Rise: 0.02 Max: 0.2 Tau: 200 Dt: 0.005 } Slow: { On: true Rise: 0.001 Max: 0.2 Tau: 1000 Dt: 0.001
} }
NMDA: { Gbar: 0.006 Tau: 100 ITau: 1 MgC: 1.4 Voff: 0 }
MaintNMDA: { Gbar: 0.007 Tau: 200 ITau: 1 MgC: 1.4 Voff: 0 }
- GabaB: { Gbar: 0.015 RiseTau: 45 DecayTau: 50 Gbase: 0.2 GiSpike: 10 MaxTime: 47.41225 TauFact: 2.5811758 RiseDt: 0.022222223 DecayDt: 0.02 }
+ GabaB: { Gbar: 0.015 RiseTau: 45 DecayTau: 50 Gbase: 0.2 GiSpike: 10 MaxTime: 47.41225 TauFact: 2.5811756 RiseDt: 0.022222223 DecayDt: 0.02 }
VGCC: { Gbar: 0.02 Ca: 25 }
AK: { Gbar: 0.1 Hf: 0.076 Mf: 0.075 Voff: 2 Vmax: -37 }
SKCa: { Gbar: 0 C50: 0.5 ActTau: 15 DeTau: 30 KCaR: 0.8 CaRDecayTau: 150 CaInThr: 0.01 CaInTau: 50 }
@@ -332,7 +332,7 @@ Learn: {
TrgAvgAct: { On: true GiBaseInit: 0 ErrLRate: 0.02 SynScaleRate: 0.005 SubMean: 1 Permute: true Pool: true TrgRange: { Min: 0.5 Max: 2
} }
RLRate: { On: true SigmoidMin: 1 Diff: true SpkThr: 0.1 DiffThr: 0.02 Min: 0.001 }
- NeuroMod: { DAMod: 0 Valence: 0 DAModGain: 0.5 DALRateSign: false DALRateMod: 0 AChLRateMod: 0 AChDisInhib: 0 BurstGain: 1 DipGain: 1 }
+ NeuroMod: { DAMod: NoDAMod Valence: Positive DAModGain: 0.5 DALRateSign: false DALRateMod: 0 AChLRateMod: 0 AChDisInhib: 0 BurstGain: 1 DipGain: 1 }
}
Pulv: {
DriveScale: 0.1 FullDriveAct: 0.6 DriveLayIdx: 4
@@ -390,13 +390,13 @@ Act: {
Clamp: { IsInput: false IsTarget: false Ge: 1.5 Add: false ErrThr: 0.5 }
Noise: { On: false GeHz: 100 Ge: 0.001 GiHz: 200 Gi: 0.001 }
VmRange: { Min: 0.1 Max: 1 }
- Mahp: { Gbar: 0.02 Voff: -30 Vslope: 9 TauMax: 1000 Tadj: 3.2093637 DtMax: 0.001 }
+ Mahp: { Gbar: 0.02 Voff: -30 Vslope: 9 TauMax: 1000 Tadj: 3.209364 DtMax: 0.001 }
Sahp: { Gbar: 0.05 CaTau: 5 Off: 0.8 Slope: 0.02 TauMax: 1 CaDt: 0.2 DtMax: 1 }
KNa: { On: true TrialSlow: false Med: { On: true Rise: 0.02 Max: 0.2 Tau: 200 Dt: 0.005 } Slow: { On: true Rise: 0.001 Max: 0.2 Tau: 1000 Dt: 0.001
} }
NMDA: { Gbar: 0.006 Tau: 100 ITau: 1 MgC: 1.4 Voff: 0 }
MaintNMDA: { Gbar: 0.007 Tau: 200 ITau: 1 MgC: 1.4 Voff: 0 }
- GabaB: { Gbar: 0.015 RiseTau: 45 DecayTau: 50 Gbase: 0.2 GiSpike: 10 MaxTime: 47.41225 TauFact: 2.5811758 RiseDt: 0.022222223 DecayDt: 0.02 }
+ GabaB: { Gbar: 0.015 RiseTau: 45 DecayTau: 50 Gbase: 0.2 GiSpike: 10 MaxTime: 47.41225 TauFact: 2.5811756 RiseDt: 0.022222223 DecayDt: 0.02 }
VGCC: { Gbar: 0.02 Ca: 25 }
AK: { Gbar: 0.1 Hf: 0.076 Mf: 0.075 Voff: 2 Vmax: -37 }
SKCa: { Gbar: 0 C50: 0.5 ActTau: 15 DeTau: 30 KCaR: 0.8 CaRDecayTau: 150 CaInThr: 0.01 CaInTau: 50 }
@@ -416,7 +416,7 @@ Learn: {
TrgAvgAct: { On: false GiBaseInit: 0 ErrLRate: 0.02 SynScaleRate: 0.005 SubMean: 1 Permute: true Pool: true TrgRange: { Min: 0.5 Max: 2
} }
RLRate: { On: true SigmoidMin: 0.05 Diff: true SpkThr: 0.1 DiffThr: 0.02 Min: 0.001 }
- NeuroMod: { DAMod: 1 Valence: 0 DAModGain: 0.5 DALRateSign: false DALRateMod: 0 AChLRateMod: 0 AChDisInhib: 0 BurstGain: 1 DipGain: 1 }
+ NeuroMod: { DAMod: D1Mod Valence: Positive DAModGain: 0.5 DALRateSign: false DALRateMod: 0 AChLRateMod: 0 AChDisInhib: 0 BurstGain: 1 DipGain: 1 }
}
/////////////////////////////////////////////////
Layer: PVneg
@@ -431,13 +431,13 @@ Act: {
Clamp: { IsInput: false IsTarget: false Ge: 1.5 Add: false ErrThr: 0.5 }
Noise: { On: false GeHz: 100 Ge: 0.001 GiHz: 200 Gi: 0.001 }
VmRange: { Min: 0.1 Max: 1 }
- Mahp: { Gbar: 0.02 Voff: -30 Vslope: 9 TauMax: 1000 Tadj: 3.2093637 DtMax: 0.001 }
+ Mahp: { Gbar: 0.02 Voff: -30 Vslope: 9 TauMax: 1000 Tadj: 3.209364 DtMax: 0.001 }
Sahp: { Gbar: 0.05 CaTau: 5 Off: 0.8 Slope: 0.02 TauMax: 1 CaDt: 0.2 DtMax: 1 }
KNa: { On: true TrialSlow: false Med: { On: true Rise: 0.02 Max: 0.2 Tau: 200 Dt: 0.005 } Slow: { On: true Rise: 0.001 Max: 0.2 Tau: 1000 Dt: 0.001
} }
NMDA: { Gbar: 0.006 Tau: 100 ITau: 1 MgC: 1.4 Voff: 0 }
MaintNMDA: { Gbar: 0.007 Tau: 200 ITau: 1 MgC: 1.4 Voff: 0 }
- GabaB: { Gbar: 0.015 RiseTau: 45 DecayTau: 50 Gbase: 0.2 GiSpike: 10 MaxTime: 47.41225 TauFact: 2.5811758 RiseDt: 0.022222223 DecayDt: 0.02 }
+ GabaB: { Gbar: 0.015 RiseTau: 45 DecayTau: 50 Gbase: 0.2 GiSpike: 10 MaxTime: 47.41225 TauFact: 2.5811756 RiseDt: 0.022222223 DecayDt: 0.02 }
VGCC: { Gbar: 0.02 Ca: 25 }
AK: { Gbar: 0.1 Hf: 0.076 Mf: 0.075 Voff: 2 Vmax: -37 }
SKCa: { Gbar: 0 C50: 0.5 ActTau: 15 DeTau: 30 KCaR: 0.8 CaRDecayTau: 150 CaInThr: 0.01 CaInTau: 50 }
@@ -457,7 +457,7 @@ Learn: {
TrgAvgAct: { On: false GiBaseInit: 0 ErrLRate: 0.02 SynScaleRate: 0.005 SubMean: 1 Permute: true Pool: true TrgRange: { Min: 0.5 Max: 2
} }
RLRate: { On: true SigmoidMin: 0.05 Diff: true SpkThr: 0.1 DiffThr: 0.02 Min: 0.001 }
- NeuroMod: { DAMod: 2 Valence: 1 DAModGain: 0.5 DALRateSign: false DALRateMod: 0 AChLRateMod: 0 AChDisInhib: 0 BurstGain: 1 DipGain: 1 }
+ NeuroMod: { DAMod: D2Mod Valence: Negative DAModGain: 0.5 DALRateSign: false DALRateMod: 0 AChLRateMod: 0 AChDisInhib: 0 BurstGain: 1 DipGain: 1 }
}
/////////////////////////////////////////////////
Layer: PVposP
@@ -472,13 +472,13 @@ Act: {
Clamp: { IsInput: false IsTarget: true Ge: 1.5 Add: false ErrThr: 0.5 }
Noise: { On: false GeHz: 100 Ge: 0.001 GiHz: 200 Gi: 0.001 }
VmRange: { Min: 0.1 Max: 1 }
- Mahp: { Gbar: 0.02 Voff: -30 Vslope: 9 TauMax: 1000 Tadj: 3.2093637 DtMax: 0.001 }
+ Mahp: { Gbar: 0.02 Voff: -30 Vslope: 9 TauMax: 1000 Tadj: 3.209364 DtMax: 0.001 }
Sahp: { Gbar: 0.05 CaTau: 5 Off: 0.8 Slope: 0.02 TauMax: 1 CaDt: 0.2 DtMax: 1 }
KNa: { On: true TrialSlow: false Med: { On: true Rise: 0.02 Max: 0.2 Tau: 200 Dt: 0.005 } Slow: { On: true Rise: 0.001 Max: 0.2 Tau: 1000 Dt: 0.001
} }
NMDA: { Gbar: 0.006 Tau: 100 ITau: 1 MgC: 1.4 Voff: 0 }
MaintNMDA: { Gbar: 0.007 Tau: 200 ITau: 1 MgC: 1.4 Voff: 0 }
- GabaB: { Gbar: 0.015 RiseTau: 45 DecayTau: 50 Gbase: 0.2 GiSpike: 10 MaxTime: 47.41225 TauFact: 2.5811758 RiseDt: 0.022222223 DecayDt: 0.02 }
+ GabaB: { Gbar: 0.015 RiseTau: 45 DecayTau: 50 Gbase: 0.2 GiSpike: 10 MaxTime: 47.41225 TauFact: 2.5811756 RiseDt: 0.022222223 DecayDt: 0.02 }
VGCC: { Gbar: 0.02 Ca: 25 }
AK: { Gbar: 0.1 Hf: 0.076 Mf: 0.075 Voff: 2 Vmax: -37 }
SKCa: { Gbar: 0 C50: 0.5 ActTau: 15 DeTau: 30 KCaR: 0.8 CaRDecayTau: 150 CaInThr: 0.01 CaInTau: 50 }
@@ -498,7 +498,7 @@ Learn: {
TrgAvgAct: { On: true GiBaseInit: 0 ErrLRate: 0.02 SynScaleRate: 0.005 SubMean: 1 Permute: true Pool: true TrgRange: { Min: 0.5 Max: 2
} }
RLRate: { On: true SigmoidMin: 1 Diff: true SpkThr: 0.1 DiffThr: 0.02 Min: 0.001 }
- NeuroMod: { DAMod: 0 Valence: 0 DAModGain: 0.5 DALRateSign: false DALRateMod: 0 AChLRateMod: 0 AChDisInhib: 0 BurstGain: 1 DipGain: 1 }
+ NeuroMod: { DAMod: NoDAMod Valence: Positive DAModGain: 0.5 DALRateSign: false DALRateMod: 0 AChLRateMod: 0 AChDisInhib: 0 BurstGain: 1 DipGain: 1 }
}
Pulv: {
DriveScale: 0.1 FullDriveAct: 0.6 DriveLayIdx: 7
@@ -616,13 +616,13 @@ Act: {
Clamp: { IsInput: false IsTarget: true Ge: 1.5 Add: false ErrThr: 0.5 }
Noise: { On: false GeHz: 100 Ge: 0.001 GiHz: 200 Gi: 0.001 }
VmRange: { Min: 0.1 Max: 1 }
- Mahp: { Gbar: 0.02 Voff: -30 Vslope: 9 TauMax: 1000 Tadj: 3.2093637 DtMax: 0.001 }
+ Mahp: { Gbar: 0.02 Voff: -30 Vslope: 9 TauMax: 1000 Tadj: 3.209364 DtMax: 0.001 }
Sahp: { Gbar: 0.05 CaTau: 5 Off: 0.8 Slope: 0.02 TauMax: 1 CaDt: 0.2 DtMax: 1 }
KNa: { On: true TrialSlow: false Med: { On: true Rise: 0.02 Max: 0.2 Tau: 200 Dt: 0.005 } Slow: { On: true Rise: 0.001 Max: 0.2 Tau: 1000 Dt: 0.001
} }
NMDA: { Gbar: 0.006 Tau: 100 ITau: 1 MgC: 1.4 Voff: 0 }
MaintNMDA: { Gbar: 0.007 Tau: 200 ITau: 1 MgC: 1.4 Voff: 0 }
- GabaB: { Gbar: 0.015 RiseTau: 45 DecayTau: 50 Gbase: 0.2 GiSpike: 10 MaxTime: 47.41225 TauFact: 2.5811758 RiseDt: 0.022222223 DecayDt: 0.02 }
+ GabaB: { Gbar: 0.015 RiseTau: 45 DecayTau: 50 Gbase: 0.2 GiSpike: 10 MaxTime: 47.41225 TauFact: 2.5811756 RiseDt: 0.022222223 DecayDt: 0.02 }
VGCC: { Gbar: 0.02 Ca: 25 }
AK: { Gbar: 0.1 Hf: 0.076 Mf: 0.075 Voff: 2 Vmax: -37 }
SKCa: { Gbar: 0 C50: 0.5 ActTau: 15 DeTau: 30 KCaR: 0.8 CaRDecayTau: 150 CaInThr: 0.01 CaInTau: 50 }
@@ -642,7 +642,7 @@ Learn: {
TrgAvgAct: { On: true GiBaseInit: 0 ErrLRate: 0.02 SynScaleRate: 0.005 SubMean: 1 Permute: true Pool: true TrgRange: { Min: 0.5 Max: 2
} }
RLRate: { On: true SigmoidMin: 1 Diff: true SpkThr: 0.1 DiffThr: 0.02 Min: 0.001 }
- NeuroMod: { DAMod: 0 Valence: 0 DAModGain: 0.5 DALRateSign: false DALRateMod: 0 AChLRateMod: 0 AChDisInhib: 0 BurstGain: 1 DipGain: 1 }
+ NeuroMod: { DAMod: NoDAMod Valence: Positive DAModGain: 0.5 DALRateSign: false DALRateMod: 0 AChLRateMod: 0 AChDisInhib: 0 BurstGain: 1 DipGain: 1 }
}
Pulv: {
DriveScale: 0.1 FullDriveAct: 0.6 DriveLayIdx: 8
@@ -760,13 +760,13 @@ Act: {
Clamp: { IsInput: false IsTarget: false Ge: 1.5 Add: false ErrThr: 0.5 }
Noise: { On: false GeHz: 100 Ge: 0.001 GiHz: 200 Gi: 0.001 }
VmRange: { Min: 0.1 Max: 1 }
- Mahp: { Gbar: 0.02 Voff: -30 Vslope: 9 TauMax: 1000 Tadj: 3.2093637 DtMax: 0.001 }
+ Mahp: { Gbar: 0.02 Voff: -30 Vslope: 9 TauMax: 1000 Tadj: 3.209364 DtMax: 0.001 }
Sahp: { Gbar: 0.05 CaTau: 5 Off: 0.8 Slope: 0.02 TauMax: 1 CaDt: 0.2 DtMax: 1 }
KNa: { On: true TrialSlow: false Med: { On: true Rise: 0.02 Max: 0.2 Tau: 200 Dt: 0.005 } Slow: { On: true Rise: 0.001 Max: 0.2 Tau: 1000 Dt: 0.001
} }
NMDA: { Gbar: 0.006 Tau: 100 ITau: 1 MgC: 1.4 Voff: 0 }
MaintNMDA: { Gbar: 0.007 Tau: 200 ITau: 1 MgC: 1.4 Voff: 0 }
- GabaB: { Gbar: 0.015 RiseTau: 45 DecayTau: 50 Gbase: 0.2 GiSpike: 10 MaxTime: 47.41225 TauFact: 2.5811758 RiseDt: 0.022222223 DecayDt: 0.02 }
+ GabaB: { Gbar: 0.015 RiseTau: 45 DecayTau: 50 Gbase: 0.2 GiSpike: 10 MaxTime: 47.41225 TauFact: 2.5811756 RiseDt: 0.022222223 DecayDt: 0.02 }
VGCC: { Gbar: 0.02 Ca: 25 }
AK: { Gbar: 0.1 Hf: 0.076 Mf: 0.075 Voff: 2 Vmax: -37 }
SKCa: { Gbar: 0 C50: 0.5 ActTau: 15 DeTau: 30 KCaR: 0.8 CaRDecayTau: 150 CaInThr: 0.01 CaInTau: 50 }
@@ -786,7 +786,7 @@ Learn: {
TrgAvgAct: { On: false GiBaseInit: 0 ErrLRate: 0.02 SynScaleRate: 0.005 SubMean: 1 Permute: true Pool: true TrgRange: { Min: 0.5 Max: 2
} }
RLRate: { On: true SigmoidMin: 0.05 Diff: true SpkThr: 0.1 DiffThr: 0.02 Min: 0.001 }
- NeuroMod: { DAMod: 0 Valence: 0 DAModGain: 0.5 DALRateSign: false DALRateMod: 0 AChLRateMod: 0 AChDisInhib: 0 BurstGain: 1 DipGain: 1 }
+ NeuroMod: { DAMod: NoDAMod Valence: Positive DAModGain: 0.5 DALRateSign: false DALRateMod: 0 AChLRateMod: 0 AChDisInhib: 0 BurstGain: 1 DipGain: 1 }
}
/////////////////////////////////////////////////
Layer: DrivesP
@@ -801,13 +801,13 @@ Act: {
Clamp: { IsInput: false IsTarget: true Ge: 1.5 Add: false ErrThr: 0.5 }
Noise: { On: false GeHz: 100 Ge: 0.001 GiHz: 200 Gi: 0.001 }
VmRange: { Min: 0.1 Max: 1 }
- Mahp: { Gbar: 0.02 Voff: -30 Vslope: 9 TauMax: 1000 Tadj: 3.2093637 DtMax: 0.001 }
+ Mahp: { Gbar: 0.02 Voff: -30 Vslope: 9 TauMax: 1000 Tadj: 3.209364 DtMax: 0.001 }
Sahp: { Gbar: 0.05 CaTau: 5 Off: 0.8 Slope: 0.02 TauMax: 1 CaDt: 0.2 DtMax: 1 }
KNa: { On: true TrialSlow: false Med: { On: true Rise: 0.02 Max: 0.2 Tau: 200 Dt: 0.005 } Slow: { On: true Rise: 0.001 Max: 0.2 Tau: 1000 Dt: 0.001
} }
NMDA: { Gbar: 0.006 Tau: 100 ITau: 1 MgC: 1.4 Voff: 0 }
MaintNMDA: { Gbar: 0.007 Tau: 200 ITau: 1 MgC: 1.4 Voff: 0 }
- GabaB: { Gbar: 0.015 RiseTau: 45 DecayTau: 50 Gbase: 0.2 GiSpike: 10 MaxTime: 47.41225 TauFact: 2.5811758 RiseDt: 0.022222223 DecayDt: 0.02 }
+ GabaB: { Gbar: 0.015 RiseTau: 45 DecayTau: 50 Gbase: 0.2 GiSpike: 10 MaxTime: 47.41225 TauFact: 2.5811756 RiseDt: 0.022222223 DecayDt: 0.02 }
VGCC: { Gbar: 0.02 Ca: 25 }
AK: { Gbar: 0.1 Hf: 0.076 Mf: 0.075 Voff: 2 Vmax: -37 }
SKCa: { Gbar: 0 C50: 0.5 ActTau: 15 DeTau: 30 KCaR: 0.8 CaRDecayTau: 150 CaInThr: 0.01 CaInTau: 50 }
@@ -827,7 +827,7 @@ Learn: {
TrgAvgAct: { On: true GiBaseInit: 0 ErrLRate: 0.02 SynScaleRate: 0.005 SubMean: 1 Permute: true Pool: true TrgRange: { Min: 0.5 Max: 2
} }
RLRate: { On: true SigmoidMin: 1 Diff: true SpkThr: 0.1 DiffThr: 0.02 Min: 0.001 }
- NeuroMod: { DAMod: 0 Valence: 0 DAModGain: 0.5 DALRateSign: false DALRateMod: 0 AChLRateMod: 0 AChDisInhib: 0 BurstGain: 1 DipGain: 1 }
+ NeuroMod: { DAMod: NoDAMod Valence: Positive DAModGain: 0.5 DALRateSign: false DALRateMod: 0 AChLRateMod: 0 AChDisInhib: 0 BurstGain: 1 DipGain: 1 }
}
Pulv: {
DriveScale: 0.1 FullDriveAct: 0.6 DriveLayIdx: 11
@@ -885,13 +885,13 @@ Act: {
Clamp: { IsInput: false IsTarget: false Ge: 1.5 Add: false ErrThr: 0.5 }
Noise: { On: false GeHz: 100 Ge: 0.001 GiHz: 200 Gi: 0.001 }
VmRange: { Min: 0.1 Max: 1 }
- Mahp: { Gbar: 0.02 Voff: -30 Vslope: 9 TauMax: 1000 Tadj: 3.2093637 DtMax: 0.001 }
+ Mahp: { Gbar: 0.02 Voff: -30 Vslope: 9 TauMax: 1000 Tadj: 3.209364 DtMax: 0.001 }
Sahp: { Gbar: 0.05 CaTau: 5 Off: 0.8 Slope: 0.02 TauMax: 1 CaDt: 0.2 DtMax: 1 }
KNa: { On: true TrialSlow: false Med: { On: true Rise: 0.02 Max: 0.2 Tau: 200 Dt: 0.005 } Slow: { On: true Rise: 0.001 Max: 0.2 Tau: 1000 Dt: 0.001
} }
NMDA: { Gbar: 0.006 Tau: 100 ITau: 1 MgC: 1.4 Voff: 0 }
MaintNMDA: { Gbar: 0.007 Tau: 200 ITau: 1 MgC: 1.4 Voff: 0 }
- GabaB: { Gbar: 0.015 RiseTau: 45 DecayTau: 50 Gbase: 0.2 GiSpike: 10 MaxTime: 47.41225 TauFact: 2.5811758 RiseDt: 0.022222223 DecayDt: 0.02 }
+ GabaB: { Gbar: 0.015 RiseTau: 45 DecayTau: 50 Gbase: 0.2 GiSpike: 10 MaxTime: 47.41225 TauFact: 2.5811756 RiseDt: 0.022222223 DecayDt: 0.02 }
VGCC: { Gbar: 0.02 Ca: 25 }
AK: { Gbar: 0.1 Hf: 0.076 Mf: 0.075 Voff: 2 Vmax: -37 }
SKCa: { Gbar: 0 C50: 0.5 ActTau: 15 DeTau: 30 KCaR: 0.8 CaRDecayTau: 150 CaInThr: 0.01 CaInTau: 50 }
@@ -911,7 +911,7 @@ Learn: {
TrgAvgAct: { On: false GiBaseInit: 0 ErrLRate: 0.02 SynScaleRate: 0.005 SubMean: 1 Permute: true Pool: true TrgRange: { Min: 0.5 Max: 2
} }
RLRate: { On: true SigmoidMin: 0.05 Diff: true SpkThr: 0.1 DiffThr: 0.02 Min: 0.001 }
- NeuroMod: { DAMod: 0 Valence: 0 DAModGain: 0.5 DALRateSign: false DALRateMod: 0 AChLRateMod: 0 AChDisInhib: 0 BurstGain: 1 DipGain: 1 }
+ NeuroMod: { DAMod: NoDAMod Valence: Positive DAModGain: 0.5 DALRateSign: false DALRateMod: 0 AChLRateMod: 0 AChDisInhib: 0 BurstGain: 1 DipGain: 1 }
}
/////////////////////////////////////////////////
Layer: VsGPi
@@ -926,13 +926,13 @@ Act: {
Clamp: { IsInput: false IsTarget: false Ge: 1.5 Add: false ErrThr: 0.5 }
Noise: { On: false GeHz: 100 Ge: 0.001 GiHz: 200 Gi: 0.001 }
VmRange: { Min: 0.1 Max: 1 }
- Mahp: { Gbar: 0.02 Voff: -30 Vslope: 9 TauMax: 1000 Tadj: 3.2093637 DtMax: 0.001 }
+ Mahp: { Gbar: 0.02 Voff: -30 Vslope: 9 TauMax: 1000 Tadj: 3.209364 DtMax: 0.001 }
Sahp: { Gbar: 0.05 CaTau: 5 Off: 0.8 Slope: 0.02 TauMax: 1 CaDt: 0.2 DtMax: 1 }
KNa: { On: true TrialSlow: false Med: { On: true Rise: 0.02 Max: 0.2 Tau: 200 Dt: 0.005 } Slow: { On: true Rise: 0.001 Max: 0.2 Tau: 1000 Dt: 0.001
} }
NMDA: { Gbar: 0 Tau: 100 ITau: 1 MgC: 1.4 Voff: 0 }
MaintNMDA: { Gbar: 0.007 Tau: 200 ITau: 1 MgC: 1.4 Voff: 0 }
- GabaB: { Gbar: 0 RiseTau: 45 DecayTau: 50 Gbase: 0.2 GiSpike: 10 MaxTime: 47.41225 TauFact: 2.5811758 RiseDt: 0.022222223 DecayDt: 0.02 }
+ GabaB: { Gbar: 0 RiseTau: 45 DecayTau: 50 Gbase: 0.2 GiSpike: 10 MaxTime: 47.41225 TauFact: 2.5811756 RiseDt: 0.022222223 DecayDt: 0.02 }
VGCC: { Gbar: 0.02 Ca: 25 }
AK: { Gbar: 0.1 Hf: 0.076 Mf: 0.075 Voff: 2 Vmax: -37 }
SKCa: { Gbar: 0 C50: 0.5 ActTau: 15 DeTau: 30 KCaR: 0.8 CaRDecayTau: 150 CaInThr: 0.01 CaInTau: 50 }
@@ -952,7 +952,7 @@ Learn: {
TrgAvgAct: { On: true GiBaseInit: 0 ErrLRate: 0.02 SynScaleRate: 0.005 SubMean: 1 Permute: true Pool: true TrgRange: { Min: 0.5 Max: 2
} }
RLRate: { On: true SigmoidMin: 0.05 Diff: true SpkThr: 0.1 DiffThr: 0.02 Min: 0.001 }
- NeuroMod: { DAMod: 0 Valence: 0 DAModGain: 0.5 DALRateSign: false DALRateMod: 0 AChLRateMod: 0 AChDisInhib: 0 BurstGain: 1 DipGain: 1 }
+ NeuroMod: { DAMod: NoDAMod Valence: Positive DAModGain: 0.5 DALRateSign: false DALRateMod: 0 AChLRateMod: 0 AChDisInhib: 0 BurstGain: 1 DipGain: 1 }
}
GP: {
GPType: GPi
@@ -1050,13 +1050,13 @@ Act: {
Clamp: { IsInput: false IsTarget: false Ge: 1.5 Add: false ErrThr: 0.5 }
Noise: { On: false GeHz: 100 Ge: 0.001 GiHz: 200 Gi: 0.001 }
VmRange: { Min: 0.1 Max: 1 }
- Mahp: { Gbar: 0.02 Voff: -30 Vslope: 9 TauMax: 1000 Tadj: 3.2093637 DtMax: 0.001 }
+ Mahp: { Gbar: 0.02 Voff: -30 Vslope: 9 TauMax: 1000 Tadj: 3.209364 DtMax: 0.001 }
Sahp: { Gbar: 0.05 CaTau: 5 Off: 0.8 Slope: 0.02 TauMax: 1 CaDt: 0.2 DtMax: 1 }
KNa: { On: true TrialSlow: false Med: { On: true Rise: 0.02 Max: 0.2 Tau: 200 Dt: 0.005 } Slow: { On: true Rise: 0.001 Max: 0.2 Tau: 1000 Dt: 0.001
} }
NMDA: { Gbar: 0 Tau: 100 ITau: 1 MgC: 1.4 Voff: 0 }
MaintNMDA: { Gbar: 0.007 Tau: 200 ITau: 1 MgC: 1.4 Voff: 0 }
- GabaB: { Gbar: 0 RiseTau: 45 DecayTau: 50 Gbase: 0.2 GiSpike: 10 MaxTime: 47.41225 TauFact: 2.5811758 RiseDt: 0.022222223 DecayDt: 0.02 }
+ GabaB: { Gbar: 0 RiseTau: 45 DecayTau: 50 Gbase: 0.2 GiSpike: 10 MaxTime: 47.41225 TauFact: 2.5811756 RiseDt: 0.022222223 DecayDt: 0.02 }
VGCC: { Gbar: 0.02 Ca: 25 }
AK: { Gbar: 0.1 Hf: 0.076 Mf: 0.075 Voff: 2 Vmax: -37 }
SKCa: { Gbar: 0 C50: 0.5 ActTau: 15 DeTau: 30 KCaR: 0.8 CaRDecayTau: 150 CaInThr: 0.01 CaInTau: 50 }
@@ -1076,7 +1076,7 @@ Learn: {
TrgAvgAct: { On: true GiBaseInit: 0 ErrLRate: 0.02 SynScaleRate: 0.005 SubMean: 1 Permute: true Pool: true TrgRange: { Min: 0.5 Max: 2
} }
RLRate: { On: true SigmoidMin: 0.05 Diff: true SpkThr: 0.1 DiffThr: 0.02 Min: 0.001 }
- NeuroMod: { DAMod: 0 Valence: 0 DAModGain: 0.5 DALRateSign: false DALRateMod: 0 AChLRateMod: 0 AChDisInhib: 0 BurstGain: 1 DipGain: 1 }
+ NeuroMod: { DAMod: NoDAMod Valence: Positive DAModGain: 0.5 DALRateSign: false DALRateMod: 0 AChLRateMod: 0 AChDisInhib: 0 BurstGain: 1 DipGain: 1 }
}
GP: {
GPType: GPeOut
@@ -1134,13 +1134,13 @@ Act: {
Clamp: { IsInput: false IsTarget: false Ge: 1.5 Add: false ErrThr: 0.5 }
Noise: { On: false GeHz: 100 Ge: 0.001 GiHz: 200 Gi: 0.001 }
VmRange: { Min: 0.1 Max: 1 }
- Mahp: { Gbar: 0.02 Voff: -30 Vslope: 9 TauMax: 1000 Tadj: 3.2093637 DtMax: 0.001 }
+ Mahp: { Gbar: 0.02 Voff: -30 Vslope: 9 TauMax: 1000 Tadj: 3.209364 DtMax: 0.001 }
Sahp: { Gbar: 0.05 CaTau: 5 Off: 0.8 Slope: 0.02 TauMax: 1 CaDt: 0.2 DtMax: 1 }
KNa: { On: true TrialSlow: false Med: { On: true Rise: 0.02 Max: 0.2 Tau: 200 Dt: 0.005 } Slow: { On: true Rise: 0.001 Max: 0.2 Tau: 1000 Dt: 0.001
} }
NMDA: { Gbar: 0 Tau: 100 ITau: 1 MgC: 1.4 Voff: 0 }
MaintNMDA: { Gbar: 0.007 Tau: 200 ITau: 1 MgC: 1.4 Voff: 0 }
- GabaB: { Gbar: 0 RiseTau: 45 DecayTau: 50 Gbase: 0.2 GiSpike: 10 MaxTime: 47.41225 TauFact: 2.5811758 RiseDt: 0.022222223 DecayDt: 0.02 }
+ GabaB: { Gbar: 0 RiseTau: 45 DecayTau: 50 Gbase: 0.2 GiSpike: 10 MaxTime: 47.41225 TauFact: 2.5811756 RiseDt: 0.022222223 DecayDt: 0.02 }
VGCC: { Gbar: 0.02 Ca: 25 }
AK: { Gbar: 0.1 Hf: 0.076 Mf: 0.075 Voff: 2 Vmax: -37 }
SKCa: { Gbar: 0 C50: 0.5 ActTau: 15 DeTau: 30 KCaR: 0.8 CaRDecayTau: 150 CaInThr: 0.01 CaInTau: 50 }
@@ -1160,7 +1160,7 @@ Learn: {
TrgAvgAct: { On: true GiBaseInit: 0 ErrLRate: 0.02 SynScaleRate: 0.005 SubMean: 1 Permute: true Pool: true TrgRange: { Min: 0.5 Max: 2
} }
RLRate: { On: true SigmoidMin: 0.05 Diff: true SpkThr: 0.1 DiffThr: 0.02 Min: 0.001 }
- NeuroMod: { DAMod: 0 Valence: 0 DAModGain: 0.5 DALRateSign: false DALRateMod: 0 AChLRateMod: 0 AChDisInhib: 0 BurstGain: 1 DipGain: 1 }
+ NeuroMod: { DAMod: NoDAMod Valence: Positive DAModGain: 0.5 DALRateSign: false DALRateMod: 0 AChLRateMod: 0 AChDisInhib: 0 BurstGain: 1 DipGain: 1 }
}
GP: {
GPType: GPeIn
@@ -1238,13 +1238,13 @@ Act: {
Clamp: { IsInput: false IsTarget: false Ge: 1.5 Add: false ErrThr: 0.5 }
Noise: { On: false GeHz: 100 Ge: 0.001 GiHz: 200 Gi: 0.001 }
VmRange: { Min: 0.1 Max: 1 }
- Mahp: { Gbar: 0.02 Voff: -30 Vslope: 9 TauMax: 1000 Tadj: 3.2093637 DtMax: 0.001 }
+ Mahp: { Gbar: 0.02 Voff: -30 Vslope: 9 TauMax: 1000 Tadj: 3.209364 DtMax: 0.001 }
Sahp: { Gbar: 0.05 CaTau: 5 Off: 0.8 Slope: 0.02 TauMax: 1 CaDt: 0.2 DtMax: 1 }
KNa: { On: true TrialSlow: false Med: { On: true Rise: 0.02 Max: 0.2 Tau: 200 Dt: 0.005 } Slow: { On: true Rise: 0.001 Max: 0.2 Tau: 1000 Dt: 0.001
} }
NMDA: { Gbar: 0 Tau: 100 ITau: 1 MgC: 1.4 Voff: 0 }
MaintNMDA: { Gbar: 0.007 Tau: 200 ITau: 1 MgC: 1.4 Voff: 0 }
- GabaB: { Gbar: 0 RiseTau: 45 DecayTau: 50 Gbase: 0.2 GiSpike: 10 MaxTime: 47.41225 TauFact: 2.5811758 RiseDt: 0.022222223 DecayDt: 0.02 }
+ GabaB: { Gbar: 0 RiseTau: 45 DecayTau: 50 Gbase: 0.2 GiSpike: 10 MaxTime: 47.41225 TauFact: 2.5811756 RiseDt: 0.022222223 DecayDt: 0.02 }
VGCC: { Gbar: 0.02 Ca: 25 }
AK: { Gbar: 0.1 Hf: 0.076 Mf: 0.075 Voff: 2 Vmax: -37 }
SKCa: { Gbar: 0 C50: 0.5 ActTau: 15 DeTau: 30 KCaR: 0.8 CaRDecayTau: 150 CaInThr: 0.01 CaInTau: 50 }
@@ -1264,7 +1264,7 @@ Learn: {
TrgAvgAct: { On: true GiBaseInit: 0 ErrLRate: 0.02 SynScaleRate: 0.005 SubMean: 1 Permute: true Pool: true TrgRange: { Min: 0.5 Max: 2
} }
RLRate: { On: true SigmoidMin: 0.05 Diff: true SpkThr: 0.1 DiffThr: 0.02 Min: 0.001 }
- NeuroMod: { DAMod: 0 Valence: 0 DAModGain: 0.5 DALRateSign: false DALRateMod: 0 AChLRateMod: 0 AChDisInhib: 0 BurstGain: 1 DipGain: 1 }
+ NeuroMod: { DAMod: NoDAMod Valence: Positive DAModGain: 0.5 DALRateSign: false DALRateMod: 0 AChLRateMod: 0 AChDisInhib: 0 BurstGain: 1 DipGain: 1 }
}
GP: {
GPType: GPeTA
@@ -1322,13 +1322,13 @@ Act: {
Clamp: { IsInput: false IsTarget: false Ge: 1.5 Add: false ErrThr: 0.5 }
Noise: { On: false GeHz: 100 Ge: 0.001 GiHz: 200 Gi: 0.001 }
VmRange: { Min: 0.1 Max: 1 }
- Mahp: { Gbar: 0.02 Voff: -30 Vslope: 9 TauMax: 1000 Tadj: 3.2093637 DtMax: 0.001 }
+ Mahp: { Gbar: 0.02 Voff: -30 Vslope: 9 TauMax: 1000 Tadj: 3.209364 DtMax: 0.001 }
Sahp: { Gbar: 0.05 CaTau: 5 Off: 0.8 Slope: 0.02 TauMax: 1 CaDt: 0.2 DtMax: 1 }
KNa: { On: true TrialSlow: false Med: { On: true Rise: 0.02 Max: 0.2 Tau: 200 Dt: 0.005 } Slow: { On: true Rise: 0.001 Max: 0.2 Tau: 1000 Dt: 0.001
} }
NMDA: { Gbar: 0 Tau: 100 ITau: 1 MgC: 1.4 Voff: 0 }
MaintNMDA: { Gbar: 0.007 Tau: 200 ITau: 1 MgC: 1.4 Voff: 0 }
- GabaB: { Gbar: 0 RiseTau: 45 DecayTau: 50 Gbase: 0.2 GiSpike: 10 MaxTime: 47.41225 TauFact: 2.5811758 RiseDt: 0.022222223 DecayDt: 0.02 }
+ GabaB: { Gbar: 0 RiseTau: 45 DecayTau: 50 Gbase: 0.2 GiSpike: 10 MaxTime: 47.41225 TauFact: 2.5811756 RiseDt: 0.022222223 DecayDt: 0.02 }
VGCC: { Gbar: 0.02 Ca: 25 }
AK: { Gbar: 0.1 Hf: 0.076 Mf: 0.075 Voff: 2 Vmax: -37 }
SKCa: { Gbar: 3 C50: 0.5 ActTau: 15 DeTau: 30 KCaR: 0.8 CaRDecayTau: 150 CaInThr: 0.01 CaInTau: 50 }
@@ -1348,7 +1348,7 @@ Learn: {
TrgAvgAct: { On: true GiBaseInit: 0 ErrLRate: 0.02 SynScaleRate: 0.005 SubMean: 1 Permute: true Pool: true TrgRange: { Min: 0.5 Max: 2
} }
RLRate: { On: true SigmoidMin: 0.05 Diff: true SpkThr: 0.1 DiffThr: 0.02 Min: 0.001 }
- NeuroMod: { DAMod: 0 Valence: 0 DAModGain: 0.5 DALRateSign: false DALRateMod: 0 AChLRateMod: 0 AChDisInhib: 2 BurstGain: 1 DipGain: 1 }
+ NeuroMod: { DAMod: NoDAMod Valence: Positive DAModGain: 0.5 DALRateSign: false DALRateMod: 0 AChLRateMod: 0 AChDisInhib: 2 BurstGain: 1 DipGain: 1 }
}
///////////////////////////////////////////////////
Prjn: VsGPeInToVsSTNp
@@ -1423,13 +1423,13 @@ Act: {
Clamp: { IsInput: false IsTarget: false Ge: 1.5 Add: false ErrThr: 0.5 }
Noise: { On: false GeHz: 100 Ge: 0.001 GiHz: 200 Gi: 0.001 }
VmRange: { Min: 0.1 Max: 1 }
- Mahp: { Gbar: 0.02 Voff: -30 Vslope: 9 TauMax: 1000 Tadj: 3.2093637 DtMax: 0.001 }
+ Mahp: { Gbar: 0.02 Voff: -30 Vslope: 9 TauMax: 1000 Tadj: 3.209364 DtMax: 0.001 }
Sahp: { Gbar: 0.05 CaTau: 5 Off: 0.8 Slope: 0.02 TauMax: 1 CaDt: 0.2 DtMax: 1 }
KNa: { On: true TrialSlow: false Med: { On: true Rise: 0.02 Max: 0.2 Tau: 200 Dt: 0.005 } Slow: { On: true Rise: 0.001 Max: 0.2 Tau: 1000 Dt: 0.001
} }
NMDA: { Gbar: 0 Tau: 100 ITau: 1 MgC: 1.4 Voff: 0 }
MaintNMDA: { Gbar: 0.007 Tau: 200 ITau: 1 MgC: 1.4 Voff: 0 }
- GabaB: { Gbar: 0 RiseTau: 45 DecayTau: 50 Gbase: 0.2 GiSpike: 10 MaxTime: 47.41225 TauFact: 2.5811758 RiseDt: 0.022222223 DecayDt: 0.02 }
+ GabaB: { Gbar: 0 RiseTau: 45 DecayTau: 50 Gbase: 0.2 GiSpike: 10 MaxTime: 47.41225 TauFact: 2.5811756 RiseDt: 0.022222223 DecayDt: 0.02 }
VGCC: { Gbar: 0.02 Ca: 25 }
AK: { Gbar: 0.1 Hf: 0.076 Mf: 0.075 Voff: 2 Vmax: -37 }
SKCa: { Gbar: 3 C50: 0.4 ActTau: 15 DeTau: 30 KCaR: 0.4 CaRDecayTau: 200 CaInThr: 0.01 CaInTau: 50 }
@@ -1449,7 +1449,7 @@ Learn: {
TrgAvgAct: { On: true GiBaseInit: 0 ErrLRate: 0.02 SynScaleRate: 0.005 SubMean: 1 Permute: true Pool: true TrgRange: { Min: 0.5 Max: 2
} }
RLRate: { On: true SigmoidMin: 0.05 Diff: true SpkThr: 0.1 DiffThr: 0.02 Min: 0.001 }
- NeuroMod: { DAMod: 0 Valence: 0 DAModGain: 0.5 DALRateSign: false DALRateMod: 0 AChLRateMod: 0 AChDisInhib: 2 BurstGain: 1 DipGain: 1 }
+ NeuroMod: { DAMod: NoDAMod Valence: Positive DAModGain: 0.5 DALRateSign: false DALRateMod: 0 AChLRateMod: 0 AChDisInhib: 2 BurstGain: 1 DipGain: 1 }
}
///////////////////////////////////////////////////
Prjn: BLAPosAcqD1ToVsSTNs
@@ -1504,13 +1504,13 @@ Act: {
Clamp: { IsInput: false IsTarget: false Ge: 1.5 Add: false ErrThr: 0.5 }
Noise: { On: false GeHz: 100 Ge: 0.001 GiHz: 200 Gi: 0.001 }
VmRange: { Min: 0.1 Max: 1 }
- Mahp: { Gbar: 0.02 Voff: -30 Vslope: 9 TauMax: 1000 Tadj: 3.2093637 DtMax: 0.001 }
+ Mahp: { Gbar: 0.02 Voff: -30 Vslope: 9 TauMax: 1000 Tadj: 3.209364 DtMax: 0.001 }
Sahp: { Gbar: 0.05 CaTau: 5 Off: 0.8 Slope: 0.02 TauMax: 1 CaDt: 0.2 DtMax: 1 }
KNa: { On: true TrialSlow: false Med: { On: true Rise: 0.02 Max: 0.2 Tau: 200 Dt: 0.005 } Slow: { On: true Rise: 0.001 Max: 0.2 Tau: 1000 Dt: 0.001
} }
NMDA: { Gbar: 0.006 Tau: 100 ITau: 1 MgC: 1.4 Voff: 0 }
MaintNMDA: { Gbar: 0.007 Tau: 200 ITau: 1 MgC: 1.4 Voff: 0 }
- GabaB: { Gbar: 0.015 RiseTau: 45 DecayTau: 50 Gbase: 0.2 GiSpike: 10 MaxTime: 47.41225 TauFact: 2.5811758 RiseDt: 0.022222223 DecayDt: 0.02 }
+ GabaB: { Gbar: 0.015 RiseTau: 45 DecayTau: 50 Gbase: 0.2 GiSpike: 10 MaxTime: 47.41225 TauFact: 2.5811756 RiseDt: 0.022222223 DecayDt: 0.02 }
VGCC: { Gbar: 0.02 Ca: 25 }
AK: { Gbar: 0.1 Hf: 0.076 Mf: 0.075 Voff: 2 Vmax: -37 }
SKCa: { Gbar: 0 C50: 0.5 ActTau: 15 DeTau: 30 KCaR: 0.8 CaRDecayTau: 150 CaInThr: 0.01 CaInTau: 50 }
@@ -1530,7 +1530,7 @@ Learn: {
TrgAvgAct: { On: true GiBaseInit: 0 ErrLRate: 0.02 SynScaleRate: 0.005 SubMean: 1 Permute: true Pool: true TrgRange: { Min: 0.5 Max: 2
} }
RLRate: { On: false SigmoidMin: 0.05 Diff: true SpkThr: 0.1 DiffThr: 0.02 Min: 0.001 }
- NeuroMod: { DAMod: 1 Valence: 0 DAModGain: 0.5 DALRateSign: true DALRateMod: 1 AChLRateMod: 1 AChDisInhib: 5 BurstGain: 1 DipGain: 1 }
+ NeuroMod: { DAMod: D1Mod Valence: Positive DAModGain: 0.5 DALRateSign: true DALRateMod: 1 AChLRateMod: 1 AChDisInhib: 5 BurstGain: 1 DipGain: 1 }
}
Matrix: {
GateThr: 0.05 IsVS: true OtherMatrixIdx: 21 ThalLay1Idx: 35 ThalLay2Idx: 41 ThalLay3Idx: 46 ThalLay4Idx: 51 ThalLay5Idx: 58 ThalLay6Idx: -1
@@ -1591,7 +1591,7 @@ SWt: {
Learn: {
Learn: true
LRate: { Base: 0.02 Sched: 1 Mod: 1 Eff: 0.02 }
- Trace: { Tau: 1 SubMean: 0 LearnThr: 0.75 }
+ Trace: { Tau: 1 SubMean: 0 LearnThr: 0 }
KinaseCa: { SpikeG: 12 MaxISI: 100 Dt: { MTau: 5 PTau: 39 DTau: 41 ExpAdj: true
} }
}
@@ -1604,7 +1604,7 @@ Com: {
GType: ExcitatoryG Delay: 2 MaxDelay: 2 PFail: 0 PFailSWt: false DelLen: 3
}
PrjnScale: {
- Rel: 1 Abs: 3
+ Rel: 1 Abs: 2
}
SWt: {
Init: { SPct: 0 Mean: 0.5 Var: 0.4 Sym: false }
@@ -1614,7 +1614,7 @@ SWt: {
Learn: {
Learn: true
LRate: { Base: 0.02 Sched: 1 Mod: 1 Eff: 0.02 }
- Trace: { Tau: 1 SubMean: 0 LearnThr: 0.75 }
+ Trace: { Tau: 1 SubMean: 0 LearnThr: 0 }
KinaseCa: { SpikeG: 12 MaxISI: 100 Dt: { MTau: 5 PTau: 39 DTau: 41 ExpAdj: true
} }
}
@@ -1637,7 +1637,7 @@ SWt: {
Learn: {
Learn: false
LRate: { Base: 0.02 Sched: 1 Mod: 1 Eff: 0.02 }
- Trace: { Tau: 1 SubMean: 0 LearnThr: 0.75 }
+ Trace: { Tau: 1 SubMean: 0 LearnThr: 0 }
KinaseCa: { SpikeG: 12 MaxISI: 100 Dt: { MTau: 5 PTau: 39 DTau: 41 ExpAdj: true
} }
}
@@ -1660,7 +1660,7 @@ SWt: {
Learn: {
Learn: true
LRate: { Base: 0.02 Sched: 1 Mod: 1 Eff: 0.02 }
- Trace: { Tau: 1 SubMean: 0 LearnThr: 0.75 }
+ Trace: { Tau: 1 SubMean: 0 LearnThr: 0 }
KinaseCa: { SpikeG: 12 MaxISI: 100 Dt: { MTau: 5 PTau: 39 DTau: 41 ExpAdj: true
} }
}
@@ -1683,7 +1683,7 @@ SWt: {
Learn: {
Learn: true
LRate: { Base: 0.02 Sched: 1 Mod: 1 Eff: 0.02 }
- Trace: { Tau: 1 SubMean: 0 LearnThr: 0.75 }
+ Trace: { Tau: 1 SubMean: 0 LearnThr: 0 }
KinaseCa: { SpikeG: 12 MaxISI: 100 Dt: { MTau: 5 PTau: 39 DTau: 41 ExpAdj: true
} }
}
@@ -1706,7 +1706,7 @@ SWt: {
Learn: {
Learn: false
LRate: { Base: 0.02 Sched: 1 Mod: 1 Eff: 0.02 }
- Trace: { Tau: 1 SubMean: 0 LearnThr: 0.75 }
+ Trace: { Tau: 1 SubMean: 0 LearnThr: 0 }
KinaseCa: { SpikeG: 12 MaxISI: 100 Dt: { MTau: 5 PTau: 39 DTau: 41 ExpAdj: true
} }
}
@@ -1729,7 +1729,7 @@ SWt: {
Learn: {
Learn: true
LRate: { Base: 0.02 Sched: 1 Mod: 1 Eff: 0.02 }
- Trace: { Tau: 1 SubMean: 0 LearnThr: 0.75 }
+ Trace: { Tau: 1 SubMean: 0 LearnThr: 0 }
KinaseCa: { SpikeG: 12 MaxISI: 100 Dt: { MTau: 5 PTau: 39 DTau: 41 ExpAdj: true
} }
}
@@ -1749,13 +1749,13 @@ Act: {
Clamp: { IsInput: false IsTarget: false Ge: 1.5 Add: false ErrThr: 0.5 }
Noise: { On: false GeHz: 100 Ge: 0.001 GiHz: 200 Gi: 0.001 }
VmRange: { Min: 0.1 Max: 1 }
- Mahp: { Gbar: 0.02 Voff: -30 Vslope: 9 TauMax: 1000 Tadj: 3.2093637 DtMax: 0.001 }
+ Mahp: { Gbar: 0.02 Voff: -30 Vslope: 9 TauMax: 1000 Tadj: 3.209364 DtMax: 0.001 }
Sahp: { Gbar: 0.05 CaTau: 5 Off: 0.8 Slope: 0.02 TauMax: 1 CaDt: 0.2 DtMax: 1 }
KNa: { On: true TrialSlow: false Med: { On: true Rise: 0.02 Max: 0.2 Tau: 200 Dt: 0.005 } Slow: { On: true Rise: 0.001 Max: 0.2 Tau: 1000 Dt: 0.001
} }
NMDA: { Gbar: 0.006 Tau: 100 ITau: 1 MgC: 1.4 Voff: 0 }
MaintNMDA: { Gbar: 0.007 Tau: 200 ITau: 1 MgC: 1.4 Voff: 0 }
- GabaB: { Gbar: 0.015 RiseTau: 45 DecayTau: 50 Gbase: 0.2 GiSpike: 10 MaxTime: 47.41225 TauFact: 2.5811758 RiseDt: 0.022222223 DecayDt: 0.02 }
+ GabaB: { Gbar: 0.015 RiseTau: 45 DecayTau: 50 Gbase: 0.2 GiSpike: 10 MaxTime: 47.41225 TauFact: 2.5811756 RiseDt: 0.022222223 DecayDt: 0.02 }
VGCC: { Gbar: 0.02 Ca: 25 }
AK: { Gbar: 0.1 Hf: 0.076 Mf: 0.075 Voff: 2 Vmax: -37 }
SKCa: { Gbar: 0 C50: 0.5 ActTau: 15 DeTau: 30 KCaR: 0.8 CaRDecayTau: 150 CaInThr: 0.01 CaInTau: 50 }
@@ -1775,7 +1775,7 @@ Learn: {
TrgAvgAct: { On: true GiBaseInit: 0 ErrLRate: 0.02 SynScaleRate: 0.005 SubMean: 1 Permute: true Pool: true TrgRange: { Min: 0.5 Max: 2
} }
RLRate: { On: false SigmoidMin: 0.05 Diff: true SpkThr: 0.1 DiffThr: 0.02 Min: 0.001 }
- NeuroMod: { DAMod: 2 Valence: 0 DAModGain: 0.5 DALRateSign: true DALRateMod: 1 AChLRateMod: 1 AChDisInhib: 5 BurstGain: 1 DipGain: 1 }
+ NeuroMod: { DAMod: D2Mod Valence: Positive DAModGain: 0.5 DALRateSign: true DALRateMod: 1 AChLRateMod: 1 AChDisInhib: 5 BurstGain: 1 DipGain: 1 }
}
Matrix: {
GateThr: 0.05 IsVS: true OtherMatrixIdx: 20 ThalLay1Idx: 35 ThalLay2Idx: 41 ThalLay3Idx: 46 ThalLay4Idx: 51 ThalLay5Idx: 58 ThalLay6Idx: -1
@@ -1836,7 +1836,7 @@ SWt: {
Learn: {
Learn: true
LRate: { Base: 0.02 Sched: 1 Mod: 1 Eff: 0.02 }
- Trace: { Tau: 1 SubMean: 0 LearnThr: 0.75 }
+ Trace: { Tau: 1 SubMean: 0 LearnThr: 0 }
KinaseCa: { SpikeG: 12 MaxISI: 100 Dt: { MTau: 5 PTau: 39 DTau: 41 ExpAdj: true
} }
}
@@ -1849,7 +1849,7 @@ Com: {
GType: ExcitatoryG Delay: 2 MaxDelay: 2 PFail: 0 PFailSWt: false DelLen: 3
}
PrjnScale: {
- Rel: 1 Abs: 3
+ Rel: 1 Abs: 2
}
SWt: {
Init: { SPct: 0 Mean: 0.5 Var: 0.4 Sym: false }
@@ -1859,7 +1859,7 @@ SWt: {
Learn: {
Learn: true
LRate: { Base: 0.02 Sched: 1 Mod: 1 Eff: 0.02 }
- Trace: { Tau: 1 SubMean: 0 LearnThr: 0.75 }
+ Trace: { Tau: 1 SubMean: 0 LearnThr: 0 }
KinaseCa: { SpikeG: 12 MaxISI: 100 Dt: { MTau: 5 PTau: 39 DTau: 41 ExpAdj: true
} }
}
@@ -1882,7 +1882,7 @@ SWt: {
Learn: {
Learn: true
LRate: { Base: 0.02 Sched: 1 Mod: 1 Eff: 0.02 }
- Trace: { Tau: 1 SubMean: 0 LearnThr: 0.75 }
+ Trace: { Tau: 1 SubMean: 0 LearnThr: 0 }
KinaseCa: { SpikeG: 12 MaxISI: 100 Dt: { MTau: 5 PTau: 39 DTau: 41 ExpAdj: true
} }
}
@@ -1905,7 +1905,7 @@ SWt: {
Learn: {
Learn: false
LRate: { Base: 0.02 Sched: 1 Mod: 1 Eff: 0.02 }
- Trace: { Tau: 1 SubMean: 0 LearnThr: 0.75 }
+ Trace: { Tau: 1 SubMean: 0 LearnThr: 0 }
KinaseCa: { SpikeG: 12 MaxISI: 100 Dt: { MTau: 5 PTau: 39 DTau: 41 ExpAdj: true
} }
}
@@ -1928,7 +1928,7 @@ SWt: {
Learn: {
Learn: true
LRate: { Base: 0.02 Sched: 1 Mod: 1 Eff: 0.02 }
- Trace: { Tau: 1 SubMean: 0 LearnThr: 0.75 }
+ Trace: { Tau: 1 SubMean: 0 LearnThr: 0 }
KinaseCa: { SpikeG: 12 MaxISI: 100 Dt: { MTau: 5 PTau: 39 DTau: 41 ExpAdj: true
} }
}
@@ -1951,7 +1951,7 @@ SWt: {
Learn: {
Learn: true
LRate: { Base: 0.02 Sched: 1 Mod: 1 Eff: 0.02 }
- Trace: { Tau: 1 SubMean: 0 LearnThr: 0.75 }
+ Trace: { Tau: 1 SubMean: 0 LearnThr: 0 }
KinaseCa: { SpikeG: 12 MaxISI: 100 Dt: { MTau: 5 PTau: 39 DTau: 41 ExpAdj: true
} }
}
@@ -1974,7 +1974,7 @@ SWt: {
Learn: {
Learn: true
LRate: { Base: 0.02 Sched: 1 Mod: 1 Eff: 0.02 }
- Trace: { Tau: 1 SubMean: 0 LearnThr: 0.75 }
+ Trace: { Tau: 1 SubMean: 0 LearnThr: 0 }
KinaseCa: { SpikeG: 12 MaxISI: 100 Dt: { MTau: 5 PTau: 39 DTau: 41 ExpAdj: true
} }
}
@@ -1997,7 +1997,7 @@ SWt: {
Learn: {
Learn: true
LRate: { Base: 0.02 Sched: 1 Mod: 1 Eff: 0.02 }
- Trace: { Tau: 1 SubMean: 0 LearnThr: 0.75 }
+ Trace: { Tau: 1 SubMean: 0 LearnThr: 0 }
KinaseCa: { SpikeG: 12 MaxISI: 100 Dt: { MTau: 5 PTau: 39 DTau: 41 ExpAdj: true
} }
}
@@ -2020,7 +2020,7 @@ SWt: {
Learn: {
Learn: true
LRate: { Base: 0.02 Sched: 1 Mod: 1 Eff: 0.02 }
- Trace: { Tau: 1 SubMean: 0 LearnThr: 0.75 }
+ Trace: { Tau: 1 SubMean: 0 LearnThr: 0 }
KinaseCa: { SpikeG: 12 MaxISI: 100 Dt: { MTau: 5 PTau: 39 DTau: 41 ExpAdj: true
} }
}
@@ -2040,13 +2040,13 @@ Act: {
Clamp: { IsInput: false IsTarget: false Ge: 1.5 Add: false ErrThr: 0.5 }
Noise: { On: false GeHz: 100 Ge: 0.001 GiHz: 200 Gi: 0.001 }
VmRange: { Min: 0.1 Max: 1 }
- Mahp: { Gbar: 0.02 Voff: -30 Vslope: 9 TauMax: 1000 Tadj: 3.2093637 DtMax: 0.001 }
+ Mahp: { Gbar: 0.02 Voff: -30 Vslope: 9 TauMax: 1000 Tadj: 3.209364 DtMax: 0.001 }
Sahp: { Gbar: 0.05 CaTau: 5 Off: 0.8 Slope: 0.02 TauMax: 1 CaDt: 0.2 DtMax: 1 }
KNa: { On: true TrialSlow: false Med: { On: true Rise: 0.02 Max: 0.2 Tau: 200 Dt: 0.005 } Slow: { On: true Rise: 0.001 Max: 0.2 Tau: 1000 Dt: 0.001
} }
NMDA: { Gbar: 0.006 Tau: 100 ITau: 1 MgC: 1.4 Voff: 0 }
MaintNMDA: { Gbar: 0.007 Tau: 200 ITau: 1 MgC: 1.4 Voff: 0 }
- GabaB: { Gbar: 0.015 RiseTau: 45 DecayTau: 50 Gbase: 0.2 GiSpike: 10 MaxTime: 47.41225 TauFact: 2.5811758 RiseDt: 0.022222223 DecayDt: 0.02 }
+ GabaB: { Gbar: 0.015 RiseTau: 45 DecayTau: 50 Gbase: 0.2 GiSpike: 10 MaxTime: 47.41225 TauFact: 2.5811756 RiseDt: 0.022222223 DecayDt: 0.02 }
VGCC: { Gbar: 0.02 Ca: 25 }
AK: { Gbar: 0.1 Hf: 0.076 Mf: 0.075 Voff: 2 Vmax: -37 }
SKCa: { Gbar: 0 C50: 0.5 ActTau: 15 DeTau: 30 KCaR: 0.8 CaRDecayTau: 150 CaInThr: 0.01 CaInTau: 50 }
@@ -2066,7 +2066,7 @@ Learn: {
TrgAvgAct: { On: true GiBaseInit: 0 ErrLRate: 0.02 SynScaleRate: 0.005 SubMean: 1 Permute: true Pool: true TrgRange: { Min: 0.5 Max: 2
} }
RLRate: { On: true SigmoidMin: 0.05 Diff: true SpkThr: 0.1 DiffThr: 0.02 Min: 0.001 }
- NeuroMod: { DAMod: 0 Valence: 0 DAModGain: 0.5 DALRateSign: false DALRateMod: 0 AChLRateMod: 0 AChDisInhib: 0 BurstGain: 1 DipGain: 1 }
+ NeuroMod: { DAMod: NoDAMod Valence: Positive DAModGain: 0.5 DALRateSign: false DALRateMod: 0 AChLRateMod: 0 AChDisInhib: 0 BurstGain: 1 DipGain: 1 }
}
/////////////////////////////////////////////////
Layer: VsPatch
@@ -2081,13 +2081,13 @@ Act: {
Clamp: { IsInput: false IsTarget: false Ge: 1.5 Add: false ErrThr: 0.5 }
Noise: { On: false GeHz: 100 Ge: 0.001 GiHz: 200 Gi: 0.001 }
VmRange: { Min: 0.1 Max: 1 }
- Mahp: { Gbar: 0.02 Voff: -30 Vslope: 9 TauMax: 1000 Tadj: 3.2093637 DtMax: 0.001 }
+ Mahp: { Gbar: 0.02 Voff: -30 Vslope: 9 TauMax: 1000 Tadj: 3.209364 DtMax: 0.001 }
Sahp: { Gbar: 0.05 CaTau: 5 Off: 0.8 Slope: 0.02 TauMax: 1 CaDt: 0.2 DtMax: 1 }
KNa: { On: true TrialSlow: false Med: { On: true Rise: 0.02 Max: 0.2 Tau: 200 Dt: 0.005 } Slow: { On: true Rise: 0.001 Max: 0.2 Tau: 1000 Dt: 0.001
} }
NMDA: { Gbar: 0.006 Tau: 100 ITau: 1 MgC: 1.4 Voff: 0 }
MaintNMDA: { Gbar: 0.007 Tau: 200 ITau: 1 MgC: 1.4 Voff: 0 }
- GabaB: { Gbar: 0.015 RiseTau: 45 DecayTau: 50 Gbase: 0.2 GiSpike: 10 MaxTime: 47.41225 TauFact: 2.5811758 RiseDt: 0.022222223 DecayDt: 0.02 }
+ GabaB: { Gbar: 0.015 RiseTau: 45 DecayTau: 50 Gbase: 0.2 GiSpike: 10 MaxTime: 47.41225 TauFact: 2.5811756 RiseDt: 0.022222223 DecayDt: 0.02 }
VGCC: { Gbar: 0.02 Ca: 25 }
AK: { Gbar: 0.1 Hf: 0.076 Mf: 0.075 Voff: 2 Vmax: -37 }
SKCa: { Gbar: 0 C50: 0.5 ActTau: 15 DeTau: 30 KCaR: 0.8 CaRDecayTau: 150 CaInThr: 0.01 CaInTau: 50 }
@@ -2107,10 +2107,10 @@ Learn: {
TrgAvgAct: { On: false GiBaseInit: 0.5 ErrLRate: 0.02 SynScaleRate: 0.005 SubMean: 1 Permute: true Pool: true TrgRange: { Min: 0.5 Max: 2
} }
RLRate: { On: true SigmoidMin: 0.01 Diff: false SpkThr: 0.1 DiffThr: 0.02 Min: 0.001 }
- NeuroMod: { DAMod: 1 Valence: 0 DAModGain: 0.5 DALRateSign: true DALRateMod: 0 AChLRateMod: 0.8 AChDisInhib: 0 BurstGain: 1 DipGain: 1 }
+ NeuroMod: { DAMod: D1Mod Valence: Positive DAModGain: 0.5 DALRateSign: true DALRateMod: 0 AChLRateMod: 0.8 AChDisInhib: 0 BurstGain: 1 DipGain: 1 }
}
VSPatch: {
- Gain: 3 ThrInit: 0.15 ThrLRate: 0.002 ThrNonRew: 10
+ Gain: 3 ThrInit: 0.15 ThrLRate: 0.001 ThrNonRew: 10
}
///////////////////////////////////////////////////
Prjn: ACCnegValPTpToVsPatch
@@ -2265,13 +2265,13 @@ Act: {
Clamp: { IsInput: false IsTarget: false Ge: 1.5 Add: false ErrThr: 0.5 }
Noise: { On: false GeHz: 100 Ge: 0.001 GiHz: 200 Gi: 0.001 }
VmRange: { Min: 0.1 Max: 1 }
- Mahp: { Gbar: 0.02 Voff: -30 Vslope: 9 TauMax: 1000 Tadj: 3.2093637 DtMax: 0.001 }
+ Mahp: { Gbar: 0.02 Voff: -30 Vslope: 9 TauMax: 1000 Tadj: 3.209364 DtMax: 0.001 }
Sahp: { Gbar: 0.05 CaTau: 5 Off: 0.8 Slope: 0.02 TauMax: 1 CaDt: 0.2 DtMax: 1 }
KNa: { On: true TrialSlow: true Med: { On: true Rise: 0.02 Max: 0.2 Tau: 200 Dt: 0.005 } Slow: { On: true Rise: 0.001 Max: 0.05 Tau: 1000 Dt: 0.001
} }
NMDA: { Gbar: 0.006 Tau: 100 ITau: 1 MgC: 1.4 Voff: 0 }
MaintNMDA: { Gbar: 0.007 Tau: 200 ITau: 1 MgC: 1.4 Voff: 0 }
- GabaB: { Gbar: 0.015 RiseTau: 45 DecayTau: 50 Gbase: 0.2 GiSpike: 10 MaxTime: 47.41225 TauFact: 2.5811758 RiseDt: 0.022222223 DecayDt: 0.02 }
+ GabaB: { Gbar: 0.015 RiseTau: 45 DecayTau: 50 Gbase: 0.2 GiSpike: 10 MaxTime: 47.41225 TauFact: 2.5811756 RiseDt: 0.022222223 DecayDt: 0.02 }
VGCC: { Gbar: 0.02 Ca: 25 }
AK: { Gbar: 0.1 Hf: 0.076 Mf: 0.075 Voff: 2 Vmax: -37 }
SKCa: { Gbar: 0 C50: 0.5 ActTau: 15 DeTau: 30 KCaR: 0.8 CaRDecayTau: 150 CaInThr: 0.01 CaInTau: 50 }
@@ -2291,7 +2291,7 @@ Learn: {
TrgAvgAct: { On: true GiBaseInit: 0 ErrLRate: 0.02 SynScaleRate: 0.005 SubMean: 1 Permute: true Pool: true TrgRange: { Min: 0.5 Max: 2
} }
RLRate: { On: true SigmoidMin: 0.05 Diff: true SpkThr: 0.1 DiffThr: 0.02 Min: 0.001 }
- NeuroMod: { DAMod: 0 Valence: 0 DAModGain: 0.5 DALRateSign: false DALRateMod: 0 AChLRateMod: 0 AChDisInhib: 0 BurstGain: 1 DipGain: 1 }
+ NeuroMod: { DAMod: NoDAMod Valence: Positive DAModGain: 0.5 DALRateSign: false DALRateMod: 0 AChLRateMod: 0 AChDisInhib: 0 BurstGain: 1 DipGain: 1 }
}
Burst: {
ThrRel: 0.1 ThrAbs: 0.1
@@ -2329,13 +2329,13 @@ Act: {
Clamp: { IsInput: false IsTarget: false Ge: 1.5 Add: false ErrThr: 0.5 }
Noise: { On: false GeHz: 100 Ge: 0.001 GiHz: 200 Gi: 0.001 }
VmRange: { Min: 0.1 Max: 1 }
- Mahp: { Gbar: 0.02 Voff: -30 Vslope: 9 TauMax: 1000 Tadj: 3.2093637 DtMax: 0.001 }
+ Mahp: { Gbar: 0.02 Voff: -30 Vslope: 9 TauMax: 1000 Tadj: 3.209364 DtMax: 0.001 }
Sahp: { Gbar: 0.05 CaTau: 5 Off: 0.8 Slope: 0.02 TauMax: 1 CaDt: 0.2 DtMax: 1 }
KNa: { On: true TrialSlow: false Med: { On: true Rise: 0.02 Max: 0.2 Tau: 200 Dt: 0.005 } Slow: { On: true Rise: 0.001 Max: 0.2 Tau: 1000 Dt: 0.001
} }
NMDA: { Gbar: 0.006 Tau: 100 ITau: 1 MgC: 1.4 Voff: 0 }
MaintNMDA: { Gbar: 0.007 Tau: 200 ITau: 1 MgC: 1.4 Voff: 0 }
- GabaB: { Gbar: 0.015 RiseTau: 45 DecayTau: 50 Gbase: 0.2 GiSpike: 10 MaxTime: 47.41225 TauFact: 2.5811758 RiseDt: 0.022222223 DecayDt: 0.02 }
+ GabaB: { Gbar: 0.015 RiseTau: 45 DecayTau: 50 Gbase: 0.2 GiSpike: 10 MaxTime: 47.41225 TauFact: 2.5811756 RiseDt: 0.022222223 DecayDt: 0.02 }
VGCC: { Gbar: 0.02 Ca: 25 }
AK: { Gbar: 0.1 Hf: 0.076 Mf: 0.075 Voff: 2 Vmax: -37 }
SKCa: { Gbar: 0 C50: 0.5 ActTau: 15 DeTau: 30 KCaR: 0.8 CaRDecayTau: 150 CaInThr: 0.01 CaInTau: 50 }
@@ -2355,7 +2355,7 @@ Learn: {
TrgAvgAct: { On: false GiBaseInit: 0 ErrLRate: 0.02 SynScaleRate: 0.005 SubMean: 1 Permute: true Pool: true TrgRange: { Min: 0.5 Max: 2
} }
RLRate: { On: true SigmoidMin: 1 Diff: true SpkThr: 0.1 DiffThr: 0.01 Min: 0.001 }
- NeuroMod: { DAMod: 1 Valence: 0 DAModGain: 0.5 DALRateSign: false DALRateMod: 0.5 AChLRateMod: 1 AChDisInhib: 0 BurstGain: 0.2 DipGain: 0 }
+ NeuroMod: { DAMod: D1Mod Valence: Positive DAModGain: 0.5 DALRateSign: false DALRateMod: 0.5 AChLRateMod: 1 AChDisInhib: 0 BurstGain: 0.2 DipGain: 0 }
}
CT: {
GeGain: 0.1 DecayTau: 0
@@ -2386,7 +2386,7 @@ Com: {
GType: ExcitatoryG Delay: 2 MaxDelay: 2 PFail: 0 PFailSWt: false DelLen: 3
}
PrjnScale: {
- Rel: 0.1 Abs: 2.5
+ Rel: 0.1 Abs: 3
}
SWt: {
Init: { SPct: 0 Mean: 0.5 Var: 0.4 Sym: true }
@@ -2459,13 +2459,13 @@ Act: {
Clamp: { IsInput: false IsTarget: false Ge: 1.5 Add: false ErrThr: 0.5 }
Noise: { On: false GeHz: 100 Ge: 0.001 GiHz: 200 Gi: 0.001 }
VmRange: { Min: 0.1 Max: 1 }
- Mahp: { Gbar: 0.02 Voff: -30 Vslope: 9 TauMax: 1000 Tadj: 3.2093637 DtMax: 0.001 }
+ Mahp: { Gbar: 0.02 Voff: -30 Vslope: 9 TauMax: 1000 Tadj: 3.209364 DtMax: 0.001 }
Sahp: { Gbar: 0.05 CaTau: 5 Off: 0.8 Slope: 0.02 TauMax: 1 CaDt: 0.2 DtMax: 1 }
KNa: { On: true TrialSlow: false Med: { On: true Rise: 0.02 Max: 0.2 Tau: 200 Dt: 0.005 } Slow: { On: true Rise: 0.001 Max: 0.2 Tau: 1000 Dt: 0.001
} }
NMDA: { Gbar: 0.006 Tau: 100 ITau: 1 MgC: 1.4 Voff: 0 }
MaintNMDA: { Gbar: 0.007 Tau: 200 ITau: 1 MgC: 1.4 Voff: 0 }
- GabaB: { Gbar: 0.015 RiseTau: 45 DecayTau: 50 Gbase: 0.2 GiSpike: 10 MaxTime: 47.41225 TauFact: 2.5811758 RiseDt: 0.022222223 DecayDt: 0.02 }
+ GabaB: { Gbar: 0.015 RiseTau: 45 DecayTau: 50 Gbase: 0.2 GiSpike: 10 MaxTime: 47.41225 TauFact: 2.5811756 RiseDt: 0.022222223 DecayDt: 0.02 }
VGCC: { Gbar: 0.02 Ca: 25 }
AK: { Gbar: 0.1 Hf: 0.076 Mf: 0.075 Voff: 2 Vmax: -37 }
SKCa: { Gbar: 0 C50: 0.5 ActTau: 15 DeTau: 30 KCaR: 0.8 CaRDecayTau: 150 CaInThr: 0.01 CaInTau: 50 }
@@ -2485,7 +2485,7 @@ Learn: {
TrgAvgAct: { On: false GiBaseInit: 0 ErrLRate: 0.02 SynScaleRate: 0.005 SubMean: 1 Permute: true Pool: true TrgRange: { Min: 0.5 Max: 2
} }
RLRate: { On: true SigmoidMin: 1 Diff: true SpkThr: 0.1 DiffThr: 0.01 Min: 0.001 }
- NeuroMod: { DAMod: 2 Valence: 0 DAModGain: 0.5 DALRateSign: false DALRateMod: 0 AChLRateMod: 1 AChDisInhib: 0 BurstGain: 1 DipGain: 1 }
+ NeuroMod: { DAMod: D2Mod Valence: Positive DAModGain: 0.5 DALRateSign: false DALRateMod: 0 AChLRateMod: 1 AChDisInhib: 0 BurstGain: 1 DipGain: 1 }
}
CT: {
GeGain: 0.1 DecayTau: 0
@@ -2589,13 +2589,13 @@ Act: {
Clamp: { IsInput: false IsTarget: false Ge: 1.5 Add: false ErrThr: 0.5 }
Noise: { On: false GeHz: 100 Ge: 0.001 GiHz: 200 Gi: 0.001 }
VmRange: { Min: 0.1 Max: 1 }
- Mahp: { Gbar: 0.02 Voff: -30 Vslope: 9 TauMax: 1000 Tadj: 3.2093637 DtMax: 0.001 }
+ Mahp: { Gbar: 0.02 Voff: -30 Vslope: 9 TauMax: 1000 Tadj: 3.209364 DtMax: 0.001 }
Sahp: { Gbar: 0.05 CaTau: 5 Off: 0.8 Slope: 0.02 TauMax: 1 CaDt: 0.2 DtMax: 1 }
KNa: { On: true TrialSlow: false Med: { On: true Rise: 0.02 Max: 0.2 Tau: 200 Dt: 0.005 } Slow: { On: true Rise: 0.001 Max: 0.2 Tau: 1000 Dt: 0.001
} }
NMDA: { Gbar: 0.006 Tau: 100 ITau: 1 MgC: 1.4 Voff: 0 }
MaintNMDA: { Gbar: 0.007 Tau: 200 ITau: 1 MgC: 1.4 Voff: 0 }
- GabaB: { Gbar: 0.015 RiseTau: 45 DecayTau: 50 Gbase: 0.2 GiSpike: 10 MaxTime: 47.41225 TauFact: 2.5811758 RiseDt: 0.022222223 DecayDt: 0.02 }
+ GabaB: { Gbar: 0.015 RiseTau: 45 DecayTau: 50 Gbase: 0.2 GiSpike: 10 MaxTime: 47.41225 TauFact: 2.5811756 RiseDt: 0.022222223 DecayDt: 0.02 }
VGCC: { Gbar: 0.02 Ca: 25 }
AK: { Gbar: 0.1 Hf: 0.076 Mf: 0.075 Voff: 2 Vmax: -37 }
SKCa: { Gbar: 0 C50: 0.5 ActTau: 15 DeTau: 30 KCaR: 0.8 CaRDecayTau: 150 CaInThr: 0.01 CaInTau: 50 }
@@ -2615,7 +2615,7 @@ Learn: {
TrgAvgAct: { On: false GiBaseInit: 0 ErrLRate: 0.02 SynScaleRate: 0.005 SubMean: 1 Permute: true Pool: true TrgRange: { Min: 0.5 Max: 2
} }
RLRate: { On: true SigmoidMin: 1 Diff: true SpkThr: 0.1 DiffThr: 0.01 Min: 0.001 }
- NeuroMod: { DAMod: 1 Valence: 1 DAModGain: 0.5 DALRateSign: false DALRateMod: 0 AChLRateMod: 1 AChDisInhib: 0 BurstGain: 1 DipGain: 1 }
+ NeuroMod: { DAMod: D1Mod Valence: Negative DAModGain: 0.5 DALRateSign: false DALRateMod: 0 AChLRateMod: 1 AChDisInhib: 0 BurstGain: 1 DipGain: 1 }
}
CT: {
GeGain: 0.1 DecayTau: 0
@@ -2696,13 +2696,13 @@ Act: {
Clamp: { IsInput: false IsTarget: false Ge: 1.5 Add: false ErrThr: 0.5 }
Noise: { On: false GeHz: 100 Ge: 0.001 GiHz: 200 Gi: 0.001 }
VmRange: { Min: 0.1 Max: 1 }
- Mahp: { Gbar: 0.02 Voff: -30 Vslope: 9 TauMax: 1000 Tadj: 3.2093637 DtMax: 0.001 }
+ Mahp: { Gbar: 0.02 Voff: -30 Vslope: 9 TauMax: 1000 Tadj: 3.209364 DtMax: 0.001 }
Sahp: { Gbar: 0.05 CaTau: 5 Off: 0.8 Slope: 0.02 TauMax: 1 CaDt: 0.2 DtMax: 1 }
KNa: { On: true TrialSlow: false Med: { On: true Rise: 0.02 Max: 0.2 Tau: 200 Dt: 0.005 } Slow: { On: true Rise: 0.001 Max: 0.2 Tau: 1000 Dt: 0.001
} }
NMDA: { Gbar: 0.006 Tau: 100 ITau: 1 MgC: 1.4 Voff: 0 }
MaintNMDA: { Gbar: 0.007 Tau: 200 ITau: 1 MgC: 1.4 Voff: 0 }
- GabaB: { Gbar: 0.015 RiseTau: 45 DecayTau: 50 Gbase: 0.2 GiSpike: 10 MaxTime: 47.41225 TauFact: 2.5811758 RiseDt: 0.022222223 DecayDt: 0.02 }
+ GabaB: { Gbar: 0.015 RiseTau: 45 DecayTau: 50 Gbase: 0.2 GiSpike: 10 MaxTime: 47.41225 TauFact: 2.5811756 RiseDt: 0.022222223 DecayDt: 0.02 }
VGCC: { Gbar: 0.02 Ca: 25 }
AK: { Gbar: 0.1 Hf: 0.076 Mf: 0.075 Voff: 2 Vmax: -37 }
SKCa: { Gbar: 0 C50: 0.5 ActTau: 15 DeTau: 30 KCaR: 0.8 CaRDecayTau: 150 CaInThr: 0.01 CaInTau: 50 }
@@ -2722,7 +2722,7 @@ Learn: {
TrgAvgAct: { On: false GiBaseInit: 0 ErrLRate: 0.02 SynScaleRate: 0.005 SubMean: 1 Permute: true Pool: true TrgRange: { Min: 0.5 Max: 2
} }
RLRate: { On: true SigmoidMin: 1 Diff: true SpkThr: 0.1 DiffThr: 0.01 Min: 0.001 }
- NeuroMod: { DAMod: 2 Valence: 1 DAModGain: 0.5 DALRateSign: false DALRateMod: 0.5 AChLRateMod: 1 AChDisInhib: 0 BurstGain: 0.2 DipGain: 0 }
+ NeuroMod: { DAMod: D2Mod Valence: Negative DAModGain: 0.5 DALRateSign: false DALRateMod: 0.5 AChLRateMod: 1 AChDisInhib: 0 BurstGain: 0.2 DipGain: 0 }
}
CT: {
GeGain: 0.1 DecayTau: 0
@@ -2806,13 +2806,13 @@ Act: {
Clamp: { IsInput: false IsTarget: false Ge: 1.5 Add: false ErrThr: 0.5 }
Noise: { On: false GeHz: 100 Ge: 0.001 GiHz: 200 Gi: 0.001 }
VmRange: { Min: 0.1 Max: 1 }
- Mahp: { Gbar: 0.02 Voff: -30 Vslope: 9 TauMax: 1000 Tadj: 3.2093637 DtMax: 0.001 }
+ Mahp: { Gbar: 0.02 Voff: -30 Vslope: 9 TauMax: 1000 Tadj: 3.209364 DtMax: 0.001 }
Sahp: { Gbar: 0.05 CaTau: 5 Off: 0.8 Slope: 0.02 TauMax: 1 CaDt: 0.2 DtMax: 1 }
KNa: { On: true TrialSlow: false Med: { On: true Rise: 0.02 Max: 0.2 Tau: 200 Dt: 0.005 } Slow: { On: true Rise: 0.001 Max: 0.2 Tau: 1000 Dt: 0.001
} }
NMDA: { Gbar: 0.006 Tau: 100 ITau: 1 MgC: 1.4 Voff: 0 }
MaintNMDA: { Gbar: 0.007 Tau: 200 ITau: 1 MgC: 1.4 Voff: 0 }
- GabaB: { Gbar: 0.015 RiseTau: 45 DecayTau: 50 Gbase: 0.2 GiSpike: 10 MaxTime: 47.41225 TauFact: 2.5811758 RiseDt: 0.022222223 DecayDt: 0.02 }
+ GabaB: { Gbar: 0.015 RiseTau: 45 DecayTau: 50 Gbase: 0.2 GiSpike: 10 MaxTime: 47.41225 TauFact: 2.5811756 RiseDt: 0.022222223 DecayDt: 0.02 }
VGCC: { Gbar: 0.02 Ca: 25 }
AK: { Gbar: 0.1 Hf: 0.076 Mf: 0.075 Voff: 2 Vmax: -37 }
SKCa: { Gbar: 0 C50: 0.5 ActTau: 15 DeTau: 30 KCaR: 0.8 CaRDecayTau: 150 CaInThr: 0.01 CaInTau: 50 }
@@ -2832,7 +2832,7 @@ Learn: {
TrgAvgAct: { On: false GiBaseInit: 0 ErrLRate: 0.02 SynScaleRate: 0.005 SubMean: 1 Permute: true Pool: true TrgRange: { Min: 0.5 Max: 2
} }
RLRate: { On: true SigmoidMin: 1 Diff: true SpkThr: 0.1 DiffThr: 0.02 Min: 0.001 }
- NeuroMod: { DAMod: 1 Valence: 0 DAModGain: 0.5 DALRateSign: false DALRateMod: 0 AChLRateMod: 0 AChDisInhib: 0 BurstGain: 1 DipGain: 1 }
+ NeuroMod: { DAMod: D1Mod Valence: Positive DAModGain: 0.5 DALRateSign: false DALRateMod: 0 AChLRateMod: 0 AChDisInhib: 0 BurstGain: 1 DipGain: 1 }
}
///////////////////////////////////////////////////
Prjn: BLAPosAcqD1ToCeMPos
@@ -2887,13 +2887,13 @@ Act: {
Clamp: { IsInput: false IsTarget: false Ge: 1.5 Add: false ErrThr: 0.5 }
Noise: { On: false GeHz: 100 Ge: 0.001 GiHz: 200 Gi: 0.001 }
VmRange: { Min: 0.1 Max: 1 }
- Mahp: { Gbar: 0.02 Voff: -30 Vslope: 9 TauMax: 1000 Tadj: 3.2093637 DtMax: 0.001 }
+ Mahp: { Gbar: 0.02 Voff: -30 Vslope: 9 TauMax: 1000 Tadj: 3.209364 DtMax: 0.001 }
Sahp: { Gbar: 0.05 CaTau: 5 Off: 0.8 Slope: 0.02 TauMax: 1 CaDt: 0.2 DtMax: 1 }
KNa: { On: true TrialSlow: false Med: { On: true Rise: 0.02 Max: 0.2 Tau: 200 Dt: 0.005 } Slow: { On: true Rise: 0.001 Max: 0.2 Tau: 1000 Dt: 0.001
} }
NMDA: { Gbar: 0.006 Tau: 100 ITau: 1 MgC: 1.4 Voff: 0 }
MaintNMDA: { Gbar: 0.007 Tau: 200 ITau: 1 MgC: 1.4 Voff: 0 }
- GabaB: { Gbar: 0.015 RiseTau: 45 DecayTau: 50 Gbase: 0.2 GiSpike: 10 MaxTime: 47.41225 TauFact: 2.5811758 RiseDt: 0.022222223 DecayDt: 0.02 }
+ GabaB: { Gbar: 0.015 RiseTau: 45 DecayTau: 50 Gbase: 0.2 GiSpike: 10 MaxTime: 47.41225 TauFact: 2.5811756 RiseDt: 0.022222223 DecayDt: 0.02 }
VGCC: { Gbar: 0.02 Ca: 25 }
AK: { Gbar: 0.1 Hf: 0.076 Mf: 0.075 Voff: 2 Vmax: -37 }
SKCa: { Gbar: 0 C50: 0.5 ActTau: 15 DeTau: 30 KCaR: 0.8 CaRDecayTau: 150 CaInThr: 0.01 CaInTau: 50 }
@@ -2913,7 +2913,7 @@ Learn: {
TrgAvgAct: { On: false GiBaseInit: 0 ErrLRate: 0.02 SynScaleRate: 0.005 SubMean: 1 Permute: true Pool: true TrgRange: { Min: 0.5 Max: 2
} }
RLRate: { On: true SigmoidMin: 1 Diff: true SpkThr: 0.1 DiffThr: 0.02 Min: 0.001 }
- NeuroMod: { DAMod: 2 Valence: 1 DAModGain: 0.5 DALRateSign: false DALRateMod: 0 AChLRateMod: 0 AChDisInhib: 0 BurstGain: 1 DipGain: 1 }
+ NeuroMod: { DAMod: D2Mod Valence: Negative DAModGain: 0.5 DALRateSign: false DALRateMod: 0 AChLRateMod: 0 AChDisInhib: 0 BurstGain: 1 DipGain: 1 }
}
///////////////////////////////////////////////////
Prjn: BLANegAcqD2ToCeMNeg
@@ -2968,13 +2968,13 @@ Act: {
Clamp: { IsInput: false IsTarget: false Ge: 1.5 Add: false ErrThr: 0.5 }
Noise: { On: false GeHz: 100 Ge: 0.001 GiHz: 200 Gi: 0.001 }
VmRange: { Min: 0.1 Max: 1 }
- Mahp: { Gbar: 0.02 Voff: -30 Vslope: 9 TauMax: 1000 Tadj: 3.2093637 DtMax: 0.001 }
+ Mahp: { Gbar: 0.02 Voff: -30 Vslope: 9 TauMax: 1000 Tadj: 3.209364 DtMax: 0.001 }
Sahp: { Gbar: 0.05 CaTau: 5 Off: 0.8 Slope: 0.02 TauMax: 1 CaDt: 0.2 DtMax: 1 }
KNa: { On: true TrialSlow: false Med: { On: true Rise: 0.02 Max: 0.2 Tau: 200 Dt: 0.005 } Slow: { On: true Rise: 0.001 Max: 0.2 Tau: 1000 Dt: 0.001
} }
NMDA: { Gbar: 0.006 Tau: 100 ITau: 1 MgC: 1.4 Voff: 0 }
MaintNMDA: { Gbar: 0.007 Tau: 200 ITau: 1 MgC: 1.4 Voff: 0 }
- GabaB: { Gbar: 0.015 RiseTau: 45 DecayTau: 50 Gbase: 0.2 GiSpike: 10 MaxTime: 47.41225 TauFact: 2.5811758 RiseDt: 0.022222223 DecayDt: 0.02 }
+ GabaB: { Gbar: 0.015 RiseTau: 45 DecayTau: 50 Gbase: 0.2 GiSpike: 10 MaxTime: 47.41225 TauFact: 2.5811756 RiseDt: 0.022222223 DecayDt: 0.02 }
VGCC: { Gbar: 0.02 Ca: 25 }
AK: { Gbar: 0.1 Hf: 0.076 Mf: 0.075 Voff: 2 Vmax: -37 }
SKCa: { Gbar: 0 C50: 0.5 ActTau: 15 DeTau: 30 KCaR: 0.8 CaRDecayTau: 150 CaInThr: 0.01 CaInTau: 50 }
@@ -2994,7 +2994,7 @@ Learn: {
TrgAvgAct: { On: false GiBaseInit: 0 ErrLRate: 0.02 SynScaleRate: 0.005 SubMean: 1 Permute: true Pool: true TrgRange: { Min: 0.5 Max: 2
} }
RLRate: { On: true SigmoidMin: 1 Diff: true SpkThr: 0.1 DiffThr: 0.01 Min: 0.001 }
- NeuroMod: { DAMod: 1 Valence: 0 DAModGain: 0.5 DALRateSign: false DALRateMod: 0.5 AChLRateMod: 1 AChDisInhib: 0 BurstGain: 0.2 DipGain: 0 }
+ NeuroMod: { DAMod: D1Mod Valence: Positive DAModGain: 0.5 DALRateSign: false DALRateMod: 0.5 AChLRateMod: 1 AChDisInhib: 0 BurstGain: 0.2 DipGain: 0 }
}
CT: {
GeGain: 0.1 DecayTau: 0
@@ -3035,13 +3035,13 @@ Act: {
Clamp: { IsInput: false IsTarget: false Ge: 1.5 Add: false ErrThr: 0.5 }
Noise: { On: false GeHz: 100 Ge: 0.001 GiHz: 200 Gi: 0.001 }
VmRange: { Min: 0.1 Max: 1 }
- Mahp: { Gbar: 0.02 Voff: -30 Vslope: 9 TauMax: 1000 Tadj: 3.2093637 DtMax: 0.001 }
+ Mahp: { Gbar: 0.02 Voff: -30 Vslope: 9 TauMax: 1000 Tadj: 3.209364 DtMax: 0.001 }
Sahp: { Gbar: 0.05 CaTau: 5 Off: 0.8 Slope: 0.02 TauMax: 1 CaDt: 0.2 DtMax: 1 }
KNa: { On: true TrialSlow: false Med: { On: true Rise: 0.02 Max: 0.2 Tau: 200 Dt: 0.005 } Slow: { On: true Rise: 0.001 Max: 0.2 Tau: 1000 Dt: 0.001
} }
NMDA: { Gbar: 0.006 Tau: 100 ITau: 1 MgC: 1.4 Voff: 0 }
MaintNMDA: { Gbar: 0.007 Tau: 200 ITau: 1 MgC: 1.4 Voff: 0 }
- GabaB: { Gbar: 0.015 RiseTau: 45 DecayTau: 50 Gbase: 0.2 GiSpike: 10 MaxTime: 47.41225 TauFact: 2.5811758 RiseDt: 0.022222223 DecayDt: 0.02 }
+ GabaB: { Gbar: 0.015 RiseTau: 45 DecayTau: 50 Gbase: 0.2 GiSpike: 10 MaxTime: 47.41225 TauFact: 2.5811756 RiseDt: 0.022222223 DecayDt: 0.02 }
VGCC: { Gbar: 0.02 Ca: 25 }
AK: { Gbar: 0.1 Hf: 0.076 Mf: 0.075 Voff: 2 Vmax: -37 }
SKCa: { Gbar: 0 C50: 0.5 ActTau: 15 DeTau: 30 KCaR: 0.8 CaRDecayTau: 150 CaInThr: 0.01 CaInTau: 50 }
@@ -3061,7 +3061,7 @@ Learn: {
TrgAvgAct: { On: true GiBaseInit: 0 ErrLRate: 0.02 SynScaleRate: 0.0002 SubMean: 1 Permute: true Pool: true TrgRange: { Min: 0.5 Max: 2
} }
RLRate: { On: true SigmoidMin: 0.05 Diff: true SpkThr: 0.1 DiffThr: 0.02 Min: 0.001 }
- NeuroMod: { DAMod: 0 Valence: 0 DAModGain: 0.5 DALRateSign: false DALRateMod: 0 AChLRateMod: 0 AChDisInhib: 0 BurstGain: 1 DipGain: 1 }
+ NeuroMod: { DAMod: NoDAMod Valence: Positive DAModGain: 0.5 DALRateSign: false DALRateMod: 0 AChLRateMod: 0 AChDisInhib: 0 BurstGain: 1 DipGain: 1 }
}
Burst: {
ThrRel: 0.1 ThrAbs: 0.1
@@ -3267,7 +3267,7 @@ Learn: {
} }
}
///////////////////////////////////////////////////
-Prjn: DistToOFCposUS
+Prjn: PosToOFCposUS
Com: {
GType: ExcitatoryG Delay: 2 MaxDelay: 2 PFail: 0 PFailSWt: false DelLen: 3
}
@@ -3287,7 +3287,7 @@ Learn: {
} }
}
///////////////////////////////////////////////////
-Prjn: DistPToOFCposUS
+Prjn: PosPToOFCposUS
Com: {
GType: ExcitatoryG Delay: 2 MaxDelay: 2 PFail: 0 PFailSWt: false DelLen: 3
}
@@ -3319,13 +3319,13 @@ Act: {
Clamp: { IsInput: false IsTarget: false Ge: 1.5 Add: false ErrThr: 0.5 }
Noise: { On: false GeHz: 100 Ge: 0.001 GiHz: 200 Gi: 0.001 }
VmRange: { Min: 0.1 Max: 1 }
- Mahp: { Gbar: 0.02 Voff: -30 Vslope: 9 TauMax: 1000 Tadj: 3.2093637 DtMax: 0.001 }
+ Mahp: { Gbar: 0.02 Voff: -30 Vslope: 9 TauMax: 1000 Tadj: 3.209364 DtMax: 0.001 }
Sahp: { Gbar: 1 CaTau: 5 Off: 0.8 Slope: 0.02 TauMax: 1 CaDt: 0.2 DtMax: 1 }
KNa: { On: true TrialSlow: false Med: { On: true Rise: 0.02 Max: 0.2 Tau: 200 Dt: 0.005 } Slow: { On: true Rise: 0.001 Max: 0.2 Tau: 1000 Dt: 0.001
} }
NMDA: { Gbar: 0.008 Tau: 200 ITau: 1 MgC: 1.4 Voff: 0 }
MaintNMDA: { Gbar: 0.007 Tau: 200 ITau: 1 MgC: 1.4 Voff: 0 }
- GabaB: { Gbar: 0.009 RiseTau: 45 DecayTau: 50 Gbase: 0.2 GiSpike: 10 MaxTime: 47.41225 TauFact: 2.5811758 RiseDt: 0.022222223 DecayDt: 0.02 }
+ GabaB: { Gbar: 0.009 RiseTau: 45 DecayTau: 50 Gbase: 0.2 GiSpike: 10 MaxTime: 47.41225 TauFact: 2.5811756 RiseDt: 0.022222223 DecayDt: 0.02 }
VGCC: { Gbar: 0.02 Ca: 25 }
AK: { Gbar: 0.1 Hf: 0.076 Mf: 0.075 Voff: 2 Vmax: -37 }
SKCa: { Gbar: 0 C50: 0.5 ActTau: 15 DeTau: 30 KCaR: 0.8 CaRDecayTau: 150 CaInThr: 0.01 CaInTau: 50 }
@@ -3345,7 +3345,7 @@ Learn: {
TrgAvgAct: { On: true GiBaseInit: 0 ErrLRate: 0.02 SynScaleRate: 0.0002 SubMean: 1 Permute: true Pool: true TrgRange: { Min: 0.5 Max: 2
} }
RLRate: { On: true SigmoidMin: 0.05 Diff: true SpkThr: 0.1 DiffThr: 0.02 Min: 0.001 }
- NeuroMod: { DAMod: 0 Valence: 0 DAModGain: 0.5 DALRateSign: false DALRateMod: 0 AChLRateMod: 0 AChDisInhib: 0 BurstGain: 1 DipGain: 1 }
+ NeuroMod: { DAMod: NoDAMod Valence: Positive DAModGain: 0.5 DALRateSign: false DALRateMod: 0 AChLRateMod: 0 AChDisInhib: 0 BurstGain: 1 DipGain: 1 }
}
CT: {
GeGain: 2 DecayTau: 50
@@ -3471,7 +3471,7 @@ Learn: {
} }
}
///////////////////////////////////////////////////
-Prjn: DistPToOFCposUSCT
+Prjn: PosPToOFCposUSCT
Com: {
GType: ExcitatoryG Delay: 2 MaxDelay: 2 PFail: 0 PFailSWt: false DelLen: 3
}
@@ -3503,13 +3503,13 @@ Act: {
Clamp: { IsInput: false IsTarget: false Ge: 1.5 Add: false ErrThr: 0.5 }
Noise: { On: false GeHz: 100 Ge: 0.001 GiHz: 200 Gi: 0.001 }
VmRange: { Min: 0.1 Max: 1 }
- Mahp: { Gbar: 0.02 Voff: -30 Vslope: 9 TauMax: 1000 Tadj: 3.2093637 DtMax: 0.001 }
+ Mahp: { Gbar: 0.02 Voff: -30 Vslope: 9 TauMax: 1000 Tadj: 3.209364 DtMax: 0.001 }
Sahp: { Gbar: 0.01 CaTau: 5 Off: 0.8 Slope: 0.02 TauMax: 1 CaDt: 0.2 DtMax: 1 }
KNa: { On: true TrialSlow: false Med: { On: true Rise: 0.02 Max: 0.2 Tau: 200 Dt: 0.005 } Slow: { On: true Rise: 0.001 Max: 0.2 Tau: 1000 Dt: 0.001
} }
NMDA: { Gbar: 0.006 Tau: 100 ITau: 1 MgC: 1.4 Voff: 0 }
MaintNMDA: { Gbar: 0.007 Tau: 200 ITau: 1 MgC: 1.4 Voff: 0 }
- GabaB: { Gbar: 0.01 RiseTau: 45 DecayTau: 50 Gbase: 0.2 GiSpike: 10 MaxTime: 47.41225 TauFact: 2.5811758 RiseDt: 0.022222223 DecayDt: 0.02 }
+ GabaB: { Gbar: 0.01 RiseTau: 45 DecayTau: 50 Gbase: 0.2 GiSpike: 10 MaxTime: 47.41225 TauFact: 2.5811756 RiseDt: 0.022222223 DecayDt: 0.02 }
VGCC: { Gbar: 0.02 Ca: 25 }
AK: { Gbar: 0.1 Hf: 0.076 Mf: 0.075 Voff: 2 Vmax: -37 }
SKCa: { Gbar: 0 C50: 0.5 ActTau: 15 DeTau: 30 KCaR: 0.8 CaRDecayTau: 150 CaInThr: 0.01 CaInTau: 50 }
@@ -3529,7 +3529,7 @@ Learn: {
TrgAvgAct: { On: false GiBaseInit: 0 ErrLRate: 0.02 SynScaleRate: 0.0002 SubMean: 1 Permute: true Pool: true TrgRange: { Min: 0.5 Max: 2
} }
RLRate: { On: true SigmoidMin: 0.05 Diff: true SpkThr: 0.1 DiffThr: 0.02 Min: 0.001 }
- NeuroMod: { DAMod: 0 Valence: 0 DAModGain: 0.5 DALRateSign: false DALRateMod: 0 AChLRateMod: 0 AChDisInhib: 0 BurstGain: 1 DipGain: 1 }
+ NeuroMod: { DAMod: NoDAMod Valence: Positive DAModGain: 0.5 DALRateSign: false DALRateMod: 0 AChLRateMod: 0 AChDisInhib: 0 BurstGain: 1 DipGain: 1 }
}
///////////////////////////////////////////////////
Prjn: OFCposUSMDToOFCposUSPT
@@ -3604,13 +3604,13 @@ Act: {
Clamp: { IsInput: false IsTarget: false Ge: 1.5 Add: false ErrThr: 0.5 }
Noise: { On: false GeHz: 100 Ge: 0.001 GiHz: 200 Gi: 0.001 }
VmRange: { Min: 0.1 Max: 1 }
- Mahp: { Gbar: 0.02 Voff: -30 Vslope: 9 TauMax: 1000 Tadj: 3.2093637 DtMax: 0.001 }
+ Mahp: { Gbar: 0.02 Voff: -30 Vslope: 9 TauMax: 1000 Tadj: 3.209364 DtMax: 0.001 }
Sahp: { Gbar: 0.05 CaTau: 5 Off: 0.8 Slope: 0.02 TauMax: 1 CaDt: 0.2 DtMax: 1 }
KNa: { On: true TrialSlow: false Med: { On: true Rise: 0.02 Max: 0.2 Tau: 200 Dt: 0.005 } Slow: { On: true Rise: 0.001 Max: 0.2 Tau: 1000 Dt: 0.001
} }
NMDA: { Gbar: 0.006 Tau: 100 ITau: 1 MgC: 1.4 Voff: 0 }
MaintNMDA: { Gbar: 0.007 Tau: 200 ITau: 1 MgC: 1.4 Voff: 0 }
- GabaB: { Gbar: 0.015 RiseTau: 45 DecayTau: 50 Gbase: 0.2 GiSpike: 10 MaxTime: 47.41225 TauFact: 2.5811758 RiseDt: 0.022222223 DecayDt: 0.02 }
+ GabaB: { Gbar: 0.015 RiseTau: 45 DecayTau: 50 Gbase: 0.2 GiSpike: 10 MaxTime: 47.41225 TauFact: 2.5811756 RiseDt: 0.022222223 DecayDt: 0.02 }
VGCC: { Gbar: 0.02 Ca: 25 }
AK: { Gbar: 0.1 Hf: 0.076 Mf: 0.075 Voff: 2 Vmax: -37 }
SKCa: { Gbar: 0 C50: 0.5 ActTau: 15 DeTau: 30 KCaR: 0.8 CaRDecayTau: 150 CaInThr: 0.01 CaInTau: 50 }
@@ -3630,7 +3630,7 @@ Learn: {
TrgAvgAct: { On: true GiBaseInit: 0 ErrLRate: 0.02 SynScaleRate: 0.0002 SubMean: 1 Permute: true Pool: true TrgRange: { Min: 0.5 Max: 2
} }
RLRate: { On: true SigmoidMin: 0.05 Diff: true SpkThr: 0.1 DiffThr: 0.02 Min: 0.001 }
- NeuroMod: { DAMod: 0 Valence: 0 DAModGain: 0.5 DALRateSign: false DALRateMod: 0 AChLRateMod: 0 AChDisInhib: 1 BurstGain: 1 DipGain: 1 }
+ NeuroMod: { DAMod: NoDAMod Valence: Positive DAModGain: 0.5 DALRateSign: false DALRateMod: 0 AChLRateMod: 0 AChDisInhib: 1 BurstGain: 1 DipGain: 1 }
}
///////////////////////////////////////////////////
Prjn: OFCposUSPTToOFCposUSMD
@@ -3705,13 +3705,13 @@ Act: {
Clamp: { IsInput: false IsTarget: false Ge: 1.5 Add: false ErrThr: 0.5 }
Noise: { On: false GeHz: 100 Ge: 0.001 GiHz: 200 Gi: 0.001 }
VmRange: { Min: 0.1 Max: 1 }
- Mahp: { Gbar: 0.02 Voff: -30 Vslope: 9 TauMax: 1000 Tadj: 3.2093637 DtMax: 0.001 }
+ Mahp: { Gbar: 0.02 Voff: -30 Vslope: 9 TauMax: 1000 Tadj: 3.209364 DtMax: 0.001 }
Sahp: { Gbar: 0.1 CaTau: 5 Off: 0.8 Slope: 0.02 TauMax: 1 CaDt: 0.2 DtMax: 1 }
KNa: { On: true TrialSlow: false Med: { On: true Rise: 0.02 Max: 0.2 Tau: 200 Dt: 0.005 } Slow: { On: true Rise: 0.001 Max: 0.2 Tau: 1000 Dt: 0.001
} }
NMDA: { Gbar: 0.006 Tau: 100 ITau: 1 MgC: 1.4 Voff: 0 }
MaintNMDA: { Gbar: 0.007 Tau: 200 ITau: 1 MgC: 1.4 Voff: 0 }
- GabaB: { Gbar: 0.015 RiseTau: 45 DecayTau: 50 Gbase: 0.2 GiSpike: 10 MaxTime: 47.41225 TauFact: 2.5811758 RiseDt: 0.022222223 DecayDt: 0.02 }
+ GabaB: { Gbar: 0.015 RiseTau: 45 DecayTau: 50 Gbase: 0.2 GiSpike: 10 MaxTime: 47.41225 TauFact: 2.5811756 RiseDt: 0.022222223 DecayDt: 0.02 }
VGCC: { Gbar: 0.02 Ca: 25 }
AK: { Gbar: 0.1 Hf: 0.076 Mf: 0.075 Voff: 2 Vmax: -37 }
SKCa: { Gbar: 0 C50: 0.5 ActTau: 15 DeTau: 30 KCaR: 0.8 CaRDecayTau: 150 CaInThr: 0.01 CaInTau: 50 }
@@ -3731,7 +3731,7 @@ Learn: {
TrgAvgAct: { On: true GiBaseInit: 0 ErrLRate: 0.02 SynScaleRate: 0.0002 SubMean: 1 Permute: true Pool: true TrgRange: { Min: 0.5 Max: 2
} }
RLRate: { On: true SigmoidMin: 0.05 Diff: true SpkThr: 0.1 DiffThr: 0.02 Min: 0.001 }
- NeuroMod: { DAMod: 0 Valence: 0 DAModGain: 0.5 DALRateSign: false DALRateMod: 0 AChLRateMod: 0 AChDisInhib: 0 BurstGain: 1 DipGain: 1 }
+ NeuroMod: { DAMod: NoDAMod Valence: Positive DAModGain: 0.5 DALRateSign: false DALRateMod: 0 AChLRateMod: 0 AChDisInhib: 0 BurstGain: 1 DipGain: 1 }
}
CT: {
GeGain: 0.05 DecayTau: 50
@@ -3877,7 +3877,7 @@ Learn: {
} }
}
///////////////////////////////////////////////////
-Prjn: DistPToOFCposUSPTp
+Prjn: PosPToOFCposUSPTp
Com: {
GType: ExcitatoryG Delay: 2 MaxDelay: 2 PFail: 0 PFailSWt: false DelLen: 3
}
@@ -3897,7 +3897,7 @@ Learn: {
} }
}
///////////////////////////////////////////////////
-Prjn: DistToOFCposUSPTp
+Prjn: PosToOFCposUSPTp
Com: {
GType: ExcitatoryG Delay: 2 MaxDelay: 2 PFail: 0 PFailSWt: false DelLen: 3
}
@@ -3929,13 +3929,13 @@ Act: {
Clamp: { IsInput: false IsTarget: false Ge: 1.5 Add: false ErrThr: 0.5 }
Noise: { On: false GeHz: 100 Ge: 0.001 GiHz: 200 Gi: 0.001 }
VmRange: { Min: 0.1 Max: 1 }
- Mahp: { Gbar: 0.02 Voff: -30 Vslope: 9 TauMax: 1000 Tadj: 3.2093637 DtMax: 0.001 }
+ Mahp: { Gbar: 0.02 Voff: -30 Vslope: 9 TauMax: 1000 Tadj: 3.209364 DtMax: 0.001 }
Sahp: { Gbar: 0.05 CaTau: 5 Off: 0.8 Slope: 0.02 TauMax: 1 CaDt: 0.2 DtMax: 1 }
KNa: { On: true TrialSlow: false Med: { On: true Rise: 0.02 Max: 0.2 Tau: 200 Dt: 0.005 } Slow: { On: true Rise: 0.001 Max: 0.2 Tau: 1000 Dt: 0.001
} }
NMDA: { Gbar: 0.006 Tau: 100 ITau: 1 MgC: 1.4 Voff: 0 }
MaintNMDA: { Gbar: 0.007 Tau: 200 ITau: 1 MgC: 1.4 Voff: 0 }
- GabaB: { Gbar: 0.015 RiseTau: 45 DecayTau: 50 Gbase: 0.2 GiSpike: 10 MaxTime: 47.41225 TauFact: 2.5811758 RiseDt: 0.022222223 DecayDt: 0.02 }
+ GabaB: { Gbar: 0.015 RiseTau: 45 DecayTau: 50 Gbase: 0.2 GiSpike: 10 MaxTime: 47.41225 TauFact: 2.5811756 RiseDt: 0.022222223 DecayDt: 0.02 }
VGCC: { Gbar: 0.02 Ca: 25 }
AK: { Gbar: 0.1 Hf: 0.076 Mf: 0.075 Voff: 2 Vmax: -37 }
SKCa: { Gbar: 0 C50: 0.5 ActTau: 15 DeTau: 30 KCaR: 0.8 CaRDecayTau: 150 CaInThr: 0.01 CaInTau: 50 }
@@ -3955,7 +3955,7 @@ Learn: {
TrgAvgAct: { On: false GiBaseInit: 0 ErrLRate: 0.02 SynScaleRate: 0.005 SubMean: 1 Permute: true Pool: true TrgRange: { Min: 0.5 Max: 2
} }
RLRate: { On: true SigmoidMin: 0.05 Diff: true SpkThr: 0.1 DiffThr: 0.02 Min: 0.001 }
- NeuroMod: { DAMod: 0 Valence: 0 DAModGain: 0.5 DALRateSign: false DALRateMod: 0 AChLRateMod: 0 AChDisInhib: 0 BurstGain: 1 DipGain: 1 }
+ NeuroMod: { DAMod: NoDAMod Valence: Positive DAModGain: 0.5 DALRateSign: false DALRateMod: 0 AChLRateMod: 0 AChDisInhib: 0 BurstGain: 1 DipGain: 1 }
}
CT: {
GeGain: 0.4 DecayTau: 0
@@ -4073,13 +4073,13 @@ Act: {
Clamp: { IsInput: false IsTarget: false Ge: 1.5 Add: false ErrThr: 0.5 }
Noise: { On: false GeHz: 100 Ge: 0.001 GiHz: 200 Gi: 0.001 }
VmRange: { Min: 0.1 Max: 1 }
- Mahp: { Gbar: 0.02 Voff: -30 Vslope: 9 TauMax: 1000 Tadj: 3.2093637 DtMax: 0.001 }
+ Mahp: { Gbar: 0.02 Voff: -30 Vslope: 9 TauMax: 1000 Tadj: 3.209364 DtMax: 0.001 }
Sahp: { Gbar: 0.05 CaTau: 5 Off: 0.8 Slope: 0.02 TauMax: 1 CaDt: 0.2 DtMax: 1 }
KNa: { On: true TrialSlow: false Med: { On: true Rise: 0.02 Max: 0.2 Tau: 200 Dt: 0.005 } Slow: { On: true Rise: 0.001 Max: 0.2 Tau: 1000 Dt: 0.001
} }
NMDA: { Gbar: 0.006 Tau: 100 ITau: 1 MgC: 1.4 Voff: 0 }
MaintNMDA: { Gbar: 0.007 Tau: 200 ITau: 1 MgC: 1.4 Voff: 0 }
- GabaB: { Gbar: 0.015 RiseTau: 45 DecayTau: 50 Gbase: 0.2 GiSpike: 10 MaxTime: 47.41225 TauFact: 2.5811758 RiseDt: 0.022222223 DecayDt: 0.02 }
+ GabaB: { Gbar: 0.015 RiseTau: 45 DecayTau: 50 Gbase: 0.2 GiSpike: 10 MaxTime: 47.41225 TauFact: 2.5811756 RiseDt: 0.022222223 DecayDt: 0.02 }
VGCC: { Gbar: 0.02 Ca: 25 }
AK: { Gbar: 0.1 Hf: 0.076 Mf: 0.075 Voff: 2 Vmax: -37 }
SKCa: { Gbar: 0 C50: 0.5 ActTau: 15 DeTau: 30 KCaR: 0.8 CaRDecayTau: 150 CaInThr: 0.01 CaInTau: 50 }
@@ -4099,7 +4099,7 @@ Learn: {
TrgAvgAct: { On: true GiBaseInit: 0 ErrLRate: 0.02 SynScaleRate: 0.0002 SubMean: 1 Permute: true Pool: true TrgRange: { Min: 0.5 Max: 2
} }
RLRate: { On: true SigmoidMin: 0.05 Diff: true SpkThr: 0.1 DiffThr: 0.02 Min: 0.001 }
- NeuroMod: { DAMod: 0 Valence: 0 DAModGain: 0.5 DALRateSign: false DALRateMod: 0 AChLRateMod: 0 AChDisInhib: 0 BurstGain: 1 DipGain: 1 }
+ NeuroMod: { DAMod: NoDAMod Valence: Positive DAModGain: 0.5 DALRateSign: false DALRateMod: 0 AChLRateMod: 0 AChDisInhib: 0 BurstGain: 1 DipGain: 1 }
}
Burst: {
ThrRel: 0.1 ThrAbs: 0.1
@@ -4225,7 +4225,7 @@ Learn: {
} }
}
///////////////////////////////////////////////////
-Prjn: DistToOFCnegUS
+Prjn: PosToOFCnegUS
Com: {
GType: ExcitatoryG Delay: 2 MaxDelay: 2 PFail: 0 PFailSWt: false DelLen: 3
}
@@ -4245,7 +4245,7 @@ Learn: {
} }
}
///////////////////////////////////////////////////
-Prjn: DistPToOFCnegUS
+Prjn: PosPToOFCnegUS
Com: {
GType: ExcitatoryG Delay: 2 MaxDelay: 2 PFail: 0 PFailSWt: false DelLen: 3
}
@@ -4277,13 +4277,13 @@ Act: {
Clamp: { IsInput: false IsTarget: false Ge: 1.5 Add: false ErrThr: 0.5 }
Noise: { On: false GeHz: 100 Ge: 0.001 GiHz: 200 Gi: 0.001 }
VmRange: { Min: 0.1 Max: 1 }
- Mahp: { Gbar: 0.02 Voff: -30 Vslope: 9 TauMax: 1000 Tadj: 3.2093637 DtMax: 0.001 }
+ Mahp: { Gbar: 0.02 Voff: -30 Vslope: 9 TauMax: 1000 Tadj: 3.209364 DtMax: 0.001 }
Sahp: { Gbar: 1 CaTau: 5 Off: 0.8 Slope: 0.02 TauMax: 1 CaDt: 0.2 DtMax: 1 }
KNa: { On: true TrialSlow: false Med: { On: true Rise: 0.02 Max: 0.2 Tau: 200 Dt: 0.005 } Slow: { On: true Rise: 0.001 Max: 0.2 Tau: 1000 Dt: 0.001
} }
NMDA: { Gbar: 0.008 Tau: 200 ITau: 1 MgC: 1.4 Voff: 0 }
MaintNMDA: { Gbar: 0.007 Tau: 200 ITau: 1 MgC: 1.4 Voff: 0 }
- GabaB: { Gbar: 0.009 RiseTau: 45 DecayTau: 50 Gbase: 0.2 GiSpike: 10 MaxTime: 47.41225 TauFact: 2.5811758 RiseDt: 0.022222223 DecayDt: 0.02 }
+ GabaB: { Gbar: 0.009 RiseTau: 45 DecayTau: 50 Gbase: 0.2 GiSpike: 10 MaxTime: 47.41225 TauFact: 2.5811756 RiseDt: 0.022222223 DecayDt: 0.02 }
VGCC: { Gbar: 0.02 Ca: 25 }
AK: { Gbar: 0.1 Hf: 0.076 Mf: 0.075 Voff: 2 Vmax: -37 }
SKCa: { Gbar: 0 C50: 0.5 ActTau: 15 DeTau: 30 KCaR: 0.8 CaRDecayTau: 150 CaInThr: 0.01 CaInTau: 50 }
@@ -4303,7 +4303,7 @@ Learn: {
TrgAvgAct: { On: true GiBaseInit: 0 ErrLRate: 0.02 SynScaleRate: 0.0002 SubMean: 1 Permute: true Pool: true TrgRange: { Min: 0.5 Max: 2
} }
RLRate: { On: true SigmoidMin: 0.05 Diff: true SpkThr: 0.1 DiffThr: 0.02 Min: 0.001 }
- NeuroMod: { DAMod: 0 Valence: 0 DAModGain: 0.5 DALRateSign: false DALRateMod: 0 AChLRateMod: 0 AChDisInhib: 0 BurstGain: 1 DipGain: 1 }
+ NeuroMod: { DAMod: NoDAMod Valence: Positive DAModGain: 0.5 DALRateSign: false DALRateMod: 0 AChLRateMod: 0 AChDisInhib: 0 BurstGain: 1 DipGain: 1 }
}
CT: {
GeGain: 2 DecayTau: 50
@@ -4389,7 +4389,7 @@ Learn: {
} }
}
///////////////////////////////////////////////////
-Prjn: DistPToOFCnegUSCT
+Prjn: PosPToOFCnegUSCT
Com: {
GType: ExcitatoryG Delay: 2 MaxDelay: 2 PFail: 0 PFailSWt: false DelLen: 3
}
@@ -4421,13 +4421,13 @@ Act: {
Clamp: { IsInput: false IsTarget: false Ge: 1.5 Add: false ErrThr: 0.5 }
Noise: { On: false GeHz: 100 Ge: 0.001 GiHz: 200 Gi: 0.001 }
VmRange: { Min: 0.1 Max: 1 }
- Mahp: { Gbar: 0.02 Voff: -30 Vslope: 9 TauMax: 1000 Tadj: 3.2093637 DtMax: 0.001 }
+ Mahp: { Gbar: 0.02 Voff: -30 Vslope: 9 TauMax: 1000 Tadj: 3.209364 DtMax: 0.001 }
Sahp: { Gbar: 0.01 CaTau: 5 Off: 0.8 Slope: 0.02 TauMax: 1 CaDt: 0.2 DtMax: 1 }
KNa: { On: true TrialSlow: false Med: { On: true Rise: 0.02 Max: 0.2 Tau: 200 Dt: 0.005 } Slow: { On: true Rise: 0.001 Max: 0.2 Tau: 1000 Dt: 0.001
} }
NMDA: { Gbar: 0.006 Tau: 100 ITau: 1 MgC: 1.4 Voff: 0 }
MaintNMDA: { Gbar: 0.007 Tau: 200 ITau: 1 MgC: 1.4 Voff: 0 }
- GabaB: { Gbar: 0.01 RiseTau: 45 DecayTau: 50 Gbase: 0.2 GiSpike: 10 MaxTime: 47.41225 TauFact: 2.5811758 RiseDt: 0.022222223 DecayDt: 0.02 }
+ GabaB: { Gbar: 0.01 RiseTau: 45 DecayTau: 50 Gbase: 0.2 GiSpike: 10 MaxTime: 47.41225 TauFact: 2.5811756 RiseDt: 0.022222223 DecayDt: 0.02 }
VGCC: { Gbar: 0.02 Ca: 25 }
AK: { Gbar: 0.1 Hf: 0.076 Mf: 0.075 Voff: 2 Vmax: -37 }
SKCa: { Gbar: 0 C50: 0.5 ActTau: 15 DeTau: 30 KCaR: 0.8 CaRDecayTau: 150 CaInThr: 0.01 CaInTau: 50 }
@@ -4447,7 +4447,7 @@ Learn: {
TrgAvgAct: { On: false GiBaseInit: 0 ErrLRate: 0.02 SynScaleRate: 0.0002 SubMean: 1 Permute: true Pool: true TrgRange: { Min: 0.5 Max: 2
} }
RLRate: { On: true SigmoidMin: 0.05 Diff: true SpkThr: 0.1 DiffThr: 0.02 Min: 0.001 }
- NeuroMod: { DAMod: 0 Valence: 0 DAModGain: 0.5 DALRateSign: false DALRateMod: 0 AChLRateMod: 0 AChDisInhib: 0 BurstGain: 1 DipGain: 1 }
+ NeuroMod: { DAMod: NoDAMod Valence: Positive DAModGain: 0.5 DALRateSign: false DALRateMod: 0 AChLRateMod: 0 AChDisInhib: 0 BurstGain: 1 DipGain: 1 }
}
///////////////////////////////////////////////////
Prjn: OFCnegUSMDToOFCnegUSPT
@@ -4522,13 +4522,13 @@ Act: {
Clamp: { IsInput: false IsTarget: false Ge: 1.5 Add: false ErrThr: 0.5 }
Noise: { On: false GeHz: 100 Ge: 0.001 GiHz: 200 Gi: 0.001 }
VmRange: { Min: 0.1 Max: 1 }
- Mahp: { Gbar: 0.02 Voff: -30 Vslope: 9 TauMax: 1000 Tadj: 3.2093637 DtMax: 0.001 }
+ Mahp: { Gbar: 0.02 Voff: -30 Vslope: 9 TauMax: 1000 Tadj: 3.209364 DtMax: 0.001 }
Sahp: { Gbar: 0.05 CaTau: 5 Off: 0.8 Slope: 0.02 TauMax: 1 CaDt: 0.2 DtMax: 1 }
KNa: { On: true TrialSlow: false Med: { On: true Rise: 0.02 Max: 0.2 Tau: 200 Dt: 0.005 } Slow: { On: true Rise: 0.001 Max: 0.2 Tau: 1000 Dt: 0.001
} }
NMDA: { Gbar: 0.006 Tau: 100 ITau: 1 MgC: 1.4 Voff: 0 }
MaintNMDA: { Gbar: 0.007 Tau: 200 ITau: 1 MgC: 1.4 Voff: 0 }
- GabaB: { Gbar: 0.015 RiseTau: 45 DecayTau: 50 Gbase: 0.2 GiSpike: 10 MaxTime: 47.41225 TauFact: 2.5811758 RiseDt: 0.022222223 DecayDt: 0.02 }
+ GabaB: { Gbar: 0.015 RiseTau: 45 DecayTau: 50 Gbase: 0.2 GiSpike: 10 MaxTime: 47.41225 TauFact: 2.5811756 RiseDt: 0.022222223 DecayDt: 0.02 }
VGCC: { Gbar: 0.02 Ca: 25 }
AK: { Gbar: 0.1 Hf: 0.076 Mf: 0.075 Voff: 2 Vmax: -37 }
SKCa: { Gbar: 0 C50: 0.5 ActTau: 15 DeTau: 30 KCaR: 0.8 CaRDecayTau: 150 CaInThr: 0.01 CaInTau: 50 }
@@ -4548,7 +4548,7 @@ Learn: {
TrgAvgAct: { On: true GiBaseInit: 0 ErrLRate: 0.02 SynScaleRate: 0.0002 SubMean: 1 Permute: true Pool: true TrgRange: { Min: 0.5 Max: 2
} }
RLRate: { On: true SigmoidMin: 0.05 Diff: true SpkThr: 0.1 DiffThr: 0.02 Min: 0.001 }
- NeuroMod: { DAMod: 0 Valence: 0 DAModGain: 0.5 DALRateSign: false DALRateMod: 0 AChLRateMod: 0 AChDisInhib: 1 BurstGain: 1 DipGain: 1 }
+ NeuroMod: { DAMod: NoDAMod Valence: Positive DAModGain: 0.5 DALRateSign: false DALRateMod: 0 AChLRateMod: 0 AChDisInhib: 1 BurstGain: 1 DipGain: 1 }
}
///////////////////////////////////////////////////
Prjn: OFCnegUSPTToOFCnegUSMD
@@ -4623,13 +4623,13 @@ Act: {
Clamp: { IsInput: false IsTarget: false Ge: 1.5 Add: false ErrThr: 0.5 }
Noise: { On: false GeHz: 100 Ge: 0.001 GiHz: 200 Gi: 0.001 }
VmRange: { Min: 0.1 Max: 1 }
- Mahp: { Gbar: 0.02 Voff: -30 Vslope: 9 TauMax: 1000 Tadj: 3.2093637 DtMax: 0.001 }
+ Mahp: { Gbar: 0.02 Voff: -30 Vslope: 9 TauMax: 1000 Tadj: 3.209364 DtMax: 0.001 }
Sahp: { Gbar: 0.1 CaTau: 5 Off: 0.8 Slope: 0.02 TauMax: 1 CaDt: 0.2 DtMax: 1 }
KNa: { On: true TrialSlow: false Med: { On: true Rise: 0.02 Max: 0.2 Tau: 200 Dt: 0.005 } Slow: { On: true Rise: 0.001 Max: 0.2 Tau: 1000 Dt: 0.001
} }
NMDA: { Gbar: 0.006 Tau: 100 ITau: 1 MgC: 1.4 Voff: 0 }
MaintNMDA: { Gbar: 0.007 Tau: 200 ITau: 1 MgC: 1.4 Voff: 0 }
- GabaB: { Gbar: 0.015 RiseTau: 45 DecayTau: 50 Gbase: 0.2 GiSpike: 10 MaxTime: 47.41225 TauFact: 2.5811758 RiseDt: 0.022222223 DecayDt: 0.02 }
+ GabaB: { Gbar: 0.015 RiseTau: 45 DecayTau: 50 Gbase: 0.2 GiSpike: 10 MaxTime: 47.41225 TauFact: 2.5811756 RiseDt: 0.022222223 DecayDt: 0.02 }
VGCC: { Gbar: 0.02 Ca: 25 }
AK: { Gbar: 0.1 Hf: 0.076 Mf: 0.075 Voff: 2 Vmax: -37 }
SKCa: { Gbar: 0 C50: 0.5 ActTau: 15 DeTau: 30 KCaR: 0.8 CaRDecayTau: 150 CaInThr: 0.01 CaInTau: 50 }
@@ -4649,7 +4649,7 @@ Learn: {
TrgAvgAct: { On: true GiBaseInit: 0 ErrLRate: 0.02 SynScaleRate: 0.0002 SubMean: 1 Permute: true Pool: true TrgRange: { Min: 0.5 Max: 2
} }
RLRate: { On: true SigmoidMin: 0.05 Diff: true SpkThr: 0.1 DiffThr: 0.02 Min: 0.001 }
- NeuroMod: { DAMod: 0 Valence: 0 DAModGain: 0.5 DALRateSign: false DALRateMod: 0 AChLRateMod: 0 AChDisInhib: 0 BurstGain: 1 DipGain: 1 }
+ NeuroMod: { DAMod: NoDAMod Valence: Positive DAModGain: 0.5 DALRateSign: false DALRateMod: 0 AChLRateMod: 0 AChDisInhib: 0 BurstGain: 1 DipGain: 1 }
}
CT: {
GeGain: 0.05 DecayTau: 50
@@ -4735,7 +4735,7 @@ Learn: {
} }
}
///////////////////////////////////////////////////
-Prjn: DistToOFCnegUSPTp
+Prjn: PosToOFCnegUSPTp
Com: {
GType: ExcitatoryG Delay: 2 MaxDelay: 2 PFail: 0 PFailSWt: false DelLen: 3
}
@@ -4755,7 +4755,7 @@ Learn: {
} }
}
///////////////////////////////////////////////////
-Prjn: DistPToOFCnegUSPTp
+Prjn: PosPToOFCnegUSPTp
Com: {
GType: ExcitatoryG Delay: 2 MaxDelay: 2 PFail: 0 PFailSWt: false DelLen: 3
}
@@ -4787,13 +4787,13 @@ Act: {
Clamp: { IsInput: false IsTarget: false Ge: 1.5 Add: false ErrThr: 0.5 }
Noise: { On: false GeHz: 100 Ge: 0.001 GiHz: 200 Gi: 0.001 }
VmRange: { Min: 0.1 Max: 1 }
- Mahp: { Gbar: 0.02 Voff: -30 Vslope: 9 TauMax: 1000 Tadj: 3.2093637 DtMax: 0.001 }
+ Mahp: { Gbar: 0.02 Voff: -30 Vslope: 9 TauMax: 1000 Tadj: 3.209364 DtMax: 0.001 }
Sahp: { Gbar: 0.05 CaTau: 5 Off: 0.8 Slope: 0.02 TauMax: 1 CaDt: 0.2 DtMax: 1 }
KNa: { On: true TrialSlow: false Med: { On: true Rise: 0.02 Max: 0.2 Tau: 200 Dt: 0.005 } Slow: { On: true Rise: 0.001 Max: 0.2 Tau: 1000 Dt: 0.001
} }
NMDA: { Gbar: 0.006 Tau: 100 ITau: 1 MgC: 1.4 Voff: 0 }
MaintNMDA: { Gbar: 0.007 Tau: 200 ITau: 1 MgC: 1.4 Voff: 0 }
- GabaB: { Gbar: 0.015 RiseTau: 45 DecayTau: 50 Gbase: 0.2 GiSpike: 10 MaxTime: 47.41225 TauFact: 2.5811758 RiseDt: 0.022222223 DecayDt: 0.02 }
+ GabaB: { Gbar: 0.015 RiseTau: 45 DecayTau: 50 Gbase: 0.2 GiSpike: 10 MaxTime: 47.41225 TauFact: 2.5811756 RiseDt: 0.022222223 DecayDt: 0.02 }
VGCC: { Gbar: 0.02 Ca: 25 }
AK: { Gbar: 0.1 Hf: 0.076 Mf: 0.075 Voff: 2 Vmax: -37 }
SKCa: { Gbar: 0 C50: 0.5 ActTau: 15 DeTau: 30 KCaR: 0.8 CaRDecayTau: 150 CaInThr: 0.01 CaInTau: 50 }
@@ -4813,7 +4813,7 @@ Learn: {
TrgAvgAct: { On: true GiBaseInit: 0 ErrLRate: 0.02 SynScaleRate: 0.0002 SubMean: 1 Permute: true Pool: true TrgRange: { Min: 0.5 Max: 2
} }
RLRate: { On: true SigmoidMin: 0.05 Diff: true SpkThr: 0.1 DiffThr: 0.02 Min: 0.001 }
- NeuroMod: { DAMod: 0 Valence: 0 DAModGain: 0.5 DALRateSign: false DALRateMod: 0 AChLRateMod: 0 AChDisInhib: 0 BurstGain: 1 DipGain: 1 }
+ NeuroMod: { DAMod: NoDAMod Valence: Positive DAModGain: 0.5 DALRateSign: false DALRateMod: 0 AChLRateMod: 0 AChDisInhib: 0 BurstGain: 1 DipGain: 1 }
}
Burst: {
ThrRel: 0.1 ThrAbs: 0.1
@@ -4899,7 +4899,7 @@ Learn: {
} }
}
///////////////////////////////////////////////////
-Prjn: DistToOFCposVal
+Prjn: PosToOFCposVal
Com: {
GType: ExcitatoryG Delay: 2 MaxDelay: 2 PFail: 0 PFailSWt: false DelLen: 3
}
@@ -4919,7 +4919,7 @@ Learn: {
} }
}
///////////////////////////////////////////////////
-Prjn: DistPToOFCposVal
+Prjn: PosPToOFCposVal
Com: {
GType: ExcitatoryG Delay: 2 MaxDelay: 2 PFail: 0 PFailSWt: false DelLen: 3
}
@@ -4951,13 +4951,13 @@ Act: {
Clamp: { IsInput: false IsTarget: false Ge: 1.5 Add: false ErrThr: 0.5 }
Noise: { On: false GeHz: 100 Ge: 0.001 GiHz: 200 Gi: 0.001 }
VmRange: { Min: 0.1 Max: 1 }
- Mahp: { Gbar: 0.02 Voff: -30 Vslope: 9 TauMax: 1000 Tadj: 3.2093637 DtMax: 0.001 }
+ Mahp: { Gbar: 0.02 Voff: -30 Vslope: 9 TauMax: 1000 Tadj: 3.209364 DtMax: 0.001 }
Sahp: { Gbar: 1 CaTau: 5 Off: 0.8 Slope: 0.02 TauMax: 1 CaDt: 0.2 DtMax: 1 }
KNa: { On: true TrialSlow: false Med: { On: true Rise: 0.02 Max: 0.2 Tau: 200 Dt: 0.005 } Slow: { On: true Rise: 0.001 Max: 0.2 Tau: 1000 Dt: 0.001
} }
NMDA: { Gbar: 0.008 Tau: 200 ITau: 1 MgC: 1.4 Voff: 0 }
MaintNMDA: { Gbar: 0.007 Tau: 200 ITau: 1 MgC: 1.4 Voff: 0 }
- GabaB: { Gbar: 0.009 RiseTau: 45 DecayTau: 50 Gbase: 0.2 GiSpike: 10 MaxTime: 47.41225 TauFact: 2.5811758 RiseDt: 0.022222223 DecayDt: 0.02 }
+ GabaB: { Gbar: 0.009 RiseTau: 45 DecayTau: 50 Gbase: 0.2 GiSpike: 10 MaxTime: 47.41225 TauFact: 2.5811756 RiseDt: 0.022222223 DecayDt: 0.02 }
VGCC: { Gbar: 0.02 Ca: 25 }
AK: { Gbar: 0.1 Hf: 0.076 Mf: 0.075 Voff: 2 Vmax: -37 }
SKCa: { Gbar: 0 C50: 0.5 ActTau: 15 DeTau: 30 KCaR: 0.8 CaRDecayTau: 150 CaInThr: 0.01 CaInTau: 50 }
@@ -4977,7 +4977,7 @@ Learn: {
TrgAvgAct: { On: true GiBaseInit: 0 ErrLRate: 0.02 SynScaleRate: 0.0002 SubMean: 1 Permute: true Pool: true TrgRange: { Min: 0.5 Max: 2
} }
RLRate: { On: true SigmoidMin: 0.05 Diff: true SpkThr: 0.1 DiffThr: 0.02 Min: 0.001 }
- NeuroMod: { DAMod: 0 Valence: 0 DAModGain: 0.5 DALRateSign: false DALRateMod: 0 AChLRateMod: 0 AChDisInhib: 0 BurstGain: 1 DipGain: 1 }
+ NeuroMod: { DAMod: NoDAMod Valence: Positive DAModGain: 0.5 DALRateSign: false DALRateMod: 0 AChLRateMod: 0 AChDisInhib: 0 BurstGain: 1 DipGain: 1 }
}
CT: {
GeGain: 2 DecayTau: 50
@@ -5043,7 +5043,7 @@ Learn: {
} }
}
///////////////////////////////////////////////////
-Prjn: DistPToOFCposValCT
+Prjn: PosPToOFCposValCT
Com: {
GType: ExcitatoryG Delay: 2 MaxDelay: 2 PFail: 0 PFailSWt: false DelLen: 3
}
@@ -5075,13 +5075,13 @@ Act: {
Clamp: { IsInput: false IsTarget: false Ge: 1.5 Add: false ErrThr: 0.5 }
Noise: { On: false GeHz: 100 Ge: 0.001 GiHz: 200 Gi: 0.001 }
VmRange: { Min: 0.1 Max: 1 }
- Mahp: { Gbar: 0.02 Voff: -30 Vslope: 9 TauMax: 1000 Tadj: 3.2093637 DtMax: 0.001 }
+ Mahp: { Gbar: 0.02 Voff: -30 Vslope: 9 TauMax: 1000 Tadj: 3.209364 DtMax: 0.001 }
Sahp: { Gbar: 0.01 CaTau: 5 Off: 0.8 Slope: 0.02 TauMax: 1 CaDt: 0.2 DtMax: 1 }
KNa: { On: true TrialSlow: false Med: { On: true Rise: 0.02 Max: 0.2 Tau: 200 Dt: 0.005 } Slow: { On: true Rise: 0.001 Max: 0.2 Tau: 1000 Dt: 0.001
} }
NMDA: { Gbar: 0.006 Tau: 100 ITau: 1 MgC: 1.4 Voff: 0 }
MaintNMDA: { Gbar: 0.007 Tau: 200 ITau: 1 MgC: 1.4 Voff: 0 }
- GabaB: { Gbar: 0.01 RiseTau: 45 DecayTau: 50 Gbase: 0.2 GiSpike: 10 MaxTime: 47.41225 TauFact: 2.5811758 RiseDt: 0.022222223 DecayDt: 0.02 }
+ GabaB: { Gbar: 0.01 RiseTau: 45 DecayTau: 50 Gbase: 0.2 GiSpike: 10 MaxTime: 47.41225 TauFact: 2.5811756 RiseDt: 0.022222223 DecayDt: 0.02 }
VGCC: { Gbar: 0.02 Ca: 25 }
AK: { Gbar: 0.1 Hf: 0.076 Mf: 0.075 Voff: 2 Vmax: -37 }
SKCa: { Gbar: 0 C50: 0.5 ActTau: 15 DeTau: 30 KCaR: 0.8 CaRDecayTau: 150 CaInThr: 0.01 CaInTau: 50 }
@@ -5101,7 +5101,7 @@ Learn: {
TrgAvgAct: { On: false GiBaseInit: 0 ErrLRate: 0.02 SynScaleRate: 0.0002 SubMean: 1 Permute: true Pool: true TrgRange: { Min: 0.5 Max: 2
} }
RLRate: { On: true SigmoidMin: 0.05 Diff: true SpkThr: 0.1 DiffThr: 0.02 Min: 0.001 }
- NeuroMod: { DAMod: 0 Valence: 0 DAModGain: 0.5 DALRateSign: false DALRateMod: 0 AChLRateMod: 0 AChDisInhib: 0 BurstGain: 1 DipGain: 1 }
+ NeuroMod: { DAMod: NoDAMod Valence: Positive DAModGain: 0.5 DALRateSign: false DALRateMod: 0 AChLRateMod: 0 AChDisInhib: 0 BurstGain: 1 DipGain: 1 }
}
///////////////////////////////////////////////////
Prjn: OFCposValMDToOFCposValPT
@@ -5176,13 +5176,13 @@ Act: {
Clamp: { IsInput: false IsTarget: false Ge: 1.5 Add: false ErrThr: 0.5 }
Noise: { On: false GeHz: 100 Ge: 0.001 GiHz: 200 Gi: 0.001 }
VmRange: { Min: 0.1 Max: 1 }
- Mahp: { Gbar: 0.02 Voff: -30 Vslope: 9 TauMax: 1000 Tadj: 3.2093637 DtMax: 0.001 }
+ Mahp: { Gbar: 0.02 Voff: -30 Vslope: 9 TauMax: 1000 Tadj: 3.209364 DtMax: 0.001 }
Sahp: { Gbar: 0.05 CaTau: 5 Off: 0.8 Slope: 0.02 TauMax: 1 CaDt: 0.2 DtMax: 1 }
KNa: { On: true TrialSlow: false Med: { On: true Rise: 0.02 Max: 0.2 Tau: 200 Dt: 0.005 } Slow: { On: true Rise: 0.001 Max: 0.2 Tau: 1000 Dt: 0.001
} }
NMDA: { Gbar: 0.006 Tau: 100 ITau: 1 MgC: 1.4 Voff: 0 }
MaintNMDA: { Gbar: 0.007 Tau: 200 ITau: 1 MgC: 1.4 Voff: 0 }
- GabaB: { Gbar: 0.015 RiseTau: 45 DecayTau: 50 Gbase: 0.2 GiSpike: 10 MaxTime: 47.41225 TauFact: 2.5811758 RiseDt: 0.022222223 DecayDt: 0.02 }
+ GabaB: { Gbar: 0.015 RiseTau: 45 DecayTau: 50 Gbase: 0.2 GiSpike: 10 MaxTime: 47.41225 TauFact: 2.5811756 RiseDt: 0.022222223 DecayDt: 0.02 }
VGCC: { Gbar: 0.02 Ca: 25 }
AK: { Gbar: 0.1 Hf: 0.076 Mf: 0.075 Voff: 2 Vmax: -37 }
SKCa: { Gbar: 0 C50: 0.5 ActTau: 15 DeTau: 30 KCaR: 0.8 CaRDecayTau: 150 CaInThr: 0.01 CaInTau: 50 }
@@ -5202,7 +5202,7 @@ Learn: {
TrgAvgAct: { On: true GiBaseInit: 0 ErrLRate: 0.02 SynScaleRate: 0.005 SubMean: 1 Permute: true Pool: true TrgRange: { Min: 0.5 Max: 2
} }
RLRate: { On: true SigmoidMin: 0.05 Diff: true SpkThr: 0.1 DiffThr: 0.02 Min: 0.001 }
- NeuroMod: { DAMod: 0 Valence: 0 DAModGain: 0.5 DALRateSign: false DALRateMod: 0 AChLRateMod: 0 AChDisInhib: 1 BurstGain: 1 DipGain: 1 }
+ NeuroMod: { DAMod: NoDAMod Valence: Positive DAModGain: 0.5 DALRateSign: false DALRateMod: 0 AChLRateMod: 0 AChDisInhib: 1 BurstGain: 1 DipGain: 1 }
}
///////////////////////////////////////////////////
Prjn: OFCposValPTToOFCposValMD
@@ -5277,13 +5277,13 @@ Act: {
Clamp: { IsInput: false IsTarget: false Ge: 1.5 Add: false ErrThr: 0.5 }
Noise: { On: false GeHz: 100 Ge: 0.001 GiHz: 200 Gi: 0.001 }
VmRange: { Min: 0.1 Max: 1 }
- Mahp: { Gbar: 0.02 Voff: -30 Vslope: 9 TauMax: 1000 Tadj: 3.2093637 DtMax: 0.001 }
+ Mahp: { Gbar: 0.02 Voff: -30 Vslope: 9 TauMax: 1000 Tadj: 3.209364 DtMax: 0.001 }
Sahp: { Gbar: 0.1 CaTau: 5 Off: 0.8 Slope: 0.02 TauMax: 1 CaDt: 0.2 DtMax: 1 }
KNa: { On: true TrialSlow: false Med: { On: true Rise: 0.02 Max: 0.2 Tau: 200 Dt: 0.005 } Slow: { On: true Rise: 0.001 Max: 0.2 Tau: 1000 Dt: 0.001
} }
NMDA: { Gbar: 0.006 Tau: 100 ITau: 1 MgC: 1.4 Voff: 0 }
MaintNMDA: { Gbar: 0.007 Tau: 200 ITau: 1 MgC: 1.4 Voff: 0 }
- GabaB: { Gbar: 0.015 RiseTau: 45 DecayTau: 50 Gbase: 0.2 GiSpike: 10 MaxTime: 47.41225 TauFact: 2.5811758 RiseDt: 0.022222223 DecayDt: 0.02 }
+ GabaB: { Gbar: 0.015 RiseTau: 45 DecayTau: 50 Gbase: 0.2 GiSpike: 10 MaxTime: 47.41225 TauFact: 2.5811756 RiseDt: 0.022222223 DecayDt: 0.02 }
VGCC: { Gbar: 0.02 Ca: 25 }
AK: { Gbar: 0.1 Hf: 0.076 Mf: 0.075 Voff: 2 Vmax: -37 }
SKCa: { Gbar: 0 C50: 0.5 ActTau: 15 DeTau: 30 KCaR: 0.8 CaRDecayTau: 150 CaInThr: 0.01 CaInTau: 50 }
@@ -5303,7 +5303,7 @@ Learn: {
TrgAvgAct: { On: true GiBaseInit: 0 ErrLRate: 0.02 SynScaleRate: 0.0002 SubMean: 1 Permute: true Pool: true TrgRange: { Min: 0.5 Max: 2
} }
RLRate: { On: true SigmoidMin: 0.05 Diff: true SpkThr: 0.1 DiffThr: 0.02 Min: 0.001 }
- NeuroMod: { DAMod: 0 Valence: 0 DAModGain: 0.5 DALRateSign: false DALRateMod: 0 AChLRateMod: 0 AChDisInhib: 0 BurstGain: 1 DipGain: 1 }
+ NeuroMod: { DAMod: NoDAMod Valence: Positive DAModGain: 0.5 DALRateSign: false DALRateMod: 0 AChLRateMod: 0 AChDisInhib: 0 BurstGain: 1 DipGain: 1 }
}
CT: {
GeGain: 0.05 DecayTau: 50
@@ -5409,7 +5409,7 @@ Learn: {
} }
}
///////////////////////////////////////////////////
-Prjn: DistPToOFCposValPTp
+Prjn: PosPToOFCposValPTp
Com: {
GType: ExcitatoryG Delay: 2 MaxDelay: 2 PFail: 0 PFailSWt: false DelLen: 3
}
@@ -5429,7 +5429,7 @@ Learn: {
} }
}
///////////////////////////////////////////////////
-Prjn: DistToOFCposValPTp
+Prjn: PosToOFCposValPTp
Com: {
GType: ExcitatoryG Delay: 2 MaxDelay: 2 PFail: 0 PFailSWt: false DelLen: 3
}
@@ -5461,13 +5461,13 @@ Act: {
Clamp: { IsInput: false IsTarget: false Ge: 1.5 Add: false ErrThr: 0.5 }
Noise: { On: false GeHz: 100 Ge: 0.001 GiHz: 200 Gi: 0.001 }
VmRange: { Min: 0.1 Max: 1 }
- Mahp: { Gbar: 0.02 Voff: -30 Vslope: 9 TauMax: 1000 Tadj: 3.2093637 DtMax: 0.001 }
+ Mahp: { Gbar: 0.02 Voff: -30 Vslope: 9 TauMax: 1000 Tadj: 3.209364 DtMax: 0.001 }
Sahp: { Gbar: 0.05 CaTau: 5 Off: 0.8 Slope: 0.02 TauMax: 1 CaDt: 0.2 DtMax: 1 }
KNa: { On: true TrialSlow: false Med: { On: true Rise: 0.02 Max: 0.2 Tau: 200 Dt: 0.005 } Slow: { On: true Rise: 0.001 Max: 0.2 Tau: 1000 Dt: 0.001
} }
NMDA: { Gbar: 0.006 Tau: 100 ITau: 1 MgC: 1.4 Voff: 0 }
MaintNMDA: { Gbar: 0.007 Tau: 200 ITau: 1 MgC: 1.4 Voff: 0 }
- GabaB: { Gbar: 0.015 RiseTau: 45 DecayTau: 50 Gbase: 0.2 GiSpike: 10 MaxTime: 47.41225 TauFact: 2.5811758 RiseDt: 0.022222223 DecayDt: 0.02 }
+ GabaB: { Gbar: 0.015 RiseTau: 45 DecayTau: 50 Gbase: 0.2 GiSpike: 10 MaxTime: 47.41225 TauFact: 2.5811756 RiseDt: 0.022222223 DecayDt: 0.02 }
VGCC: { Gbar: 0.02 Ca: 25 }
AK: { Gbar: 0.1 Hf: 0.076 Mf: 0.075 Voff: 2 Vmax: -37 }
SKCa: { Gbar: 0 C50: 0.5 ActTau: 15 DeTau: 30 KCaR: 0.8 CaRDecayTau: 150 CaInThr: 0.01 CaInTau: 50 }
@@ -5487,7 +5487,7 @@ Learn: {
TrgAvgAct: { On: true GiBaseInit: 0 ErrLRate: 0.02 SynScaleRate: 0.0002 SubMean: 1 Permute: true Pool: true TrgRange: { Min: 0.5 Max: 2
} }
RLRate: { On: true SigmoidMin: 0.05 Diff: true SpkThr: 0.1 DiffThr: 0.02 Min: 0.001 }
- NeuroMod: { DAMod: 0 Valence: 0 DAModGain: 0.5 DALRateSign: false DALRateMod: 0 AChLRateMod: 0 AChDisInhib: 0 BurstGain: 1 DipGain: 1 }
+ NeuroMod: { DAMod: NoDAMod Valence: Positive DAModGain: 0.5 DALRateSign: false DALRateMod: 0 AChLRateMod: 0 AChDisInhib: 0 BurstGain: 1 DipGain: 1 }
}
Burst: {
ThrRel: 0.1 ThrAbs: 0.1
@@ -5573,7 +5573,7 @@ Learn: {
} }
}
///////////////////////////////////////////////////
-Prjn: DistToACCnegVal
+Prjn: PosToACCnegVal
Com: {
GType: ExcitatoryG Delay: 2 MaxDelay: 2 PFail: 0 PFailSWt: false DelLen: 3
}
@@ -5593,7 +5593,7 @@ Learn: {
} }
}
///////////////////////////////////////////////////
-Prjn: DistPToACCnegVal
+Prjn: PosPToACCnegVal
Com: {
GType: ExcitatoryG Delay: 2 MaxDelay: 2 PFail: 0 PFailSWt: false DelLen: 3
}
@@ -5625,13 +5625,13 @@ Act: {
Clamp: { IsInput: false IsTarget: false Ge: 1.5 Add: false ErrThr: 0.5 }
Noise: { On: false GeHz: 100 Ge: 0.001 GiHz: 200 Gi: 0.001 }
VmRange: { Min: 0.1 Max: 1 }
- Mahp: { Gbar: 0.02 Voff: -30 Vslope: 9 TauMax: 1000 Tadj: 3.2093637 DtMax: 0.001 }
+ Mahp: { Gbar: 0.02 Voff: -30 Vslope: 9 TauMax: 1000 Tadj: 3.209364 DtMax: 0.001 }
Sahp: { Gbar: 1 CaTau: 5 Off: 0.8 Slope: 0.02 TauMax: 1 CaDt: 0.2 DtMax: 1 }
KNa: { On: true TrialSlow: false Med: { On: true Rise: 0.02 Max: 0.2 Tau: 200 Dt: 0.005 } Slow: { On: true Rise: 0.001 Max: 0.2 Tau: 1000 Dt: 0.001
} }
NMDA: { Gbar: 0.008 Tau: 200 ITau: 1 MgC: 1.4 Voff: 0 }
MaintNMDA: { Gbar: 0.007 Tau: 200 ITau: 1 MgC: 1.4 Voff: 0 }
- GabaB: { Gbar: 0.009 RiseTau: 45 DecayTau: 50 Gbase: 0.2 GiSpike: 10 MaxTime: 47.41225 TauFact: 2.5811758 RiseDt: 0.022222223 DecayDt: 0.02 }
+ GabaB: { Gbar: 0.009 RiseTau: 45 DecayTau: 50 Gbase: 0.2 GiSpike: 10 MaxTime: 47.41225 TauFact: 2.5811756 RiseDt: 0.022222223 DecayDt: 0.02 }
VGCC: { Gbar: 0.02 Ca: 25 }
AK: { Gbar: 0.1 Hf: 0.076 Mf: 0.075 Voff: 2 Vmax: -37 }
SKCa: { Gbar: 0 C50: 0.5 ActTau: 15 DeTau: 30 KCaR: 0.8 CaRDecayTau: 150 CaInThr: 0.01 CaInTau: 50 }
@@ -5651,7 +5651,7 @@ Learn: {
TrgAvgAct: { On: true GiBaseInit: 0 ErrLRate: 0.02 SynScaleRate: 0.0002 SubMean: 1 Permute: true Pool: true TrgRange: { Min: 0.5 Max: 2
} }
RLRate: { On: true SigmoidMin: 0.05 Diff: true SpkThr: 0.1 DiffThr: 0.02 Min: 0.001 }
- NeuroMod: { DAMod: 0 Valence: 0 DAModGain: 0.5 DALRateSign: false DALRateMod: 0 AChLRateMod: 0 AChDisInhib: 0 BurstGain: 1 DipGain: 1 }
+ NeuroMod: { DAMod: NoDAMod Valence: Positive DAModGain: 0.5 DALRateSign: false DALRateMod: 0 AChLRateMod: 0 AChDisInhib: 0 BurstGain: 1 DipGain: 1 }
}
CT: {
GeGain: 2 DecayTau: 50
@@ -5717,7 +5717,7 @@ Learn: {
} }
}
///////////////////////////////////////////////////
-Prjn: DistPToACCnegValCT
+Prjn: PosPToACCnegValCT
Com: {
GType: ExcitatoryG Delay: 2 MaxDelay: 2 PFail: 0 PFailSWt: false DelLen: 3
}
@@ -5749,13 +5749,13 @@ Act: {
Clamp: { IsInput: false IsTarget: false Ge: 1.5 Add: false ErrThr: 0.5 }
Noise: { On: false GeHz: 100 Ge: 0.001 GiHz: 200 Gi: 0.001 }
VmRange: { Min: 0.1 Max: 1 }
- Mahp: { Gbar: 0.02 Voff: -30 Vslope: 9 TauMax: 1000 Tadj: 3.2093637 DtMax: 0.001 }
+ Mahp: { Gbar: 0.02 Voff: -30 Vslope: 9 TauMax: 1000 Tadj: 3.209364 DtMax: 0.001 }
Sahp: { Gbar: 0.01 CaTau: 5 Off: 0.8 Slope: 0.02 TauMax: 1 CaDt: 0.2 DtMax: 1 }
KNa: { On: true TrialSlow: false Med: { On: true Rise: 0.02 Max: 0.2 Tau: 200 Dt: 0.005 } Slow: { On: true Rise: 0.001 Max: 0.2 Tau: 1000 Dt: 0.001
} }
NMDA: { Gbar: 0.006 Tau: 100 ITau: 1 MgC: 1.4 Voff: 0 }
MaintNMDA: { Gbar: 0.007 Tau: 200 ITau: 1 MgC: 1.4 Voff: 0 }
- GabaB: { Gbar: 0.01 RiseTau: 45 DecayTau: 50 Gbase: 0.2 GiSpike: 10 MaxTime: 47.41225 TauFact: 2.5811758 RiseDt: 0.022222223 DecayDt: 0.02 }
+ GabaB: { Gbar: 0.01 RiseTau: 45 DecayTau: 50 Gbase: 0.2 GiSpike: 10 MaxTime: 47.41225 TauFact: 2.5811756 RiseDt: 0.022222223 DecayDt: 0.02 }
VGCC: { Gbar: 0.02 Ca: 25 }
AK: { Gbar: 0.1 Hf: 0.076 Mf: 0.075 Voff: 2 Vmax: -37 }
SKCa: { Gbar: 0 C50: 0.5 ActTau: 15 DeTau: 30 KCaR: 0.8 CaRDecayTau: 150 CaInThr: 0.01 CaInTau: 50 }
@@ -5775,7 +5775,7 @@ Learn: {
TrgAvgAct: { On: false GiBaseInit: 0 ErrLRate: 0.02 SynScaleRate: 0.0002 SubMean: 1 Permute: true Pool: true TrgRange: { Min: 0.5 Max: 2
} }
RLRate: { On: true SigmoidMin: 0.05 Diff: true SpkThr: 0.1 DiffThr: 0.02 Min: 0.001 }
- NeuroMod: { DAMod: 0 Valence: 0 DAModGain: 0.5 DALRateSign: false DALRateMod: 0 AChLRateMod: 0 AChDisInhib: 0 BurstGain: 1 DipGain: 1 }
+ NeuroMod: { DAMod: NoDAMod Valence: Positive DAModGain: 0.5 DALRateSign: false DALRateMod: 0 AChLRateMod: 0 AChDisInhib: 0 BurstGain: 1 DipGain: 1 }
}
///////////////////////////////////////////////////
Prjn: ACCnegValMDToACCnegValPT
@@ -5850,13 +5850,13 @@ Act: {
Clamp: { IsInput: false IsTarget: false Ge: 1.5 Add: false ErrThr: 0.5 }
Noise: { On: false GeHz: 100 Ge: 0.001 GiHz: 200 Gi: 0.001 }
VmRange: { Min: 0.1 Max: 1 }
- Mahp: { Gbar: 0.02 Voff: -30 Vslope: 9 TauMax: 1000 Tadj: 3.2093637 DtMax: 0.001 }
+ Mahp: { Gbar: 0.02 Voff: -30 Vslope: 9 TauMax: 1000 Tadj: 3.209364 DtMax: 0.001 }
Sahp: { Gbar: 0.05 CaTau: 5 Off: 0.8 Slope: 0.02 TauMax: 1 CaDt: 0.2 DtMax: 1 }
KNa: { On: true TrialSlow: false Med: { On: true Rise: 0.02 Max: 0.2 Tau: 200 Dt: 0.005 } Slow: { On: true Rise: 0.001 Max: 0.2 Tau: 1000 Dt: 0.001
} }
NMDA: { Gbar: 0.006 Tau: 100 ITau: 1 MgC: 1.4 Voff: 0 }
MaintNMDA: { Gbar: 0.007 Tau: 200 ITau: 1 MgC: 1.4 Voff: 0 }
- GabaB: { Gbar: 0.015 RiseTau: 45 DecayTau: 50 Gbase: 0.2 GiSpike: 10 MaxTime: 47.41225 TauFact: 2.5811758 RiseDt: 0.022222223 DecayDt: 0.02 }
+ GabaB: { Gbar: 0.015 RiseTau: 45 DecayTau: 50 Gbase: 0.2 GiSpike: 10 MaxTime: 47.41225 TauFact: 2.5811756 RiseDt: 0.022222223 DecayDt: 0.02 }
VGCC: { Gbar: 0.02 Ca: 25 }
AK: { Gbar: 0.1 Hf: 0.076 Mf: 0.075 Voff: 2 Vmax: -37 }
SKCa: { Gbar: 0 C50: 0.5 ActTau: 15 DeTau: 30 KCaR: 0.8 CaRDecayTau: 150 CaInThr: 0.01 CaInTau: 50 }
@@ -5876,7 +5876,7 @@ Learn: {
TrgAvgAct: { On: true GiBaseInit: 0 ErrLRate: 0.02 SynScaleRate: 0.005 SubMean: 1 Permute: true Pool: true TrgRange: { Min: 0.5 Max: 2
} }
RLRate: { On: true SigmoidMin: 0.05 Diff: true SpkThr: 0.1 DiffThr: 0.02 Min: 0.001 }
- NeuroMod: { DAMod: 0 Valence: 0 DAModGain: 0.5 DALRateSign: false DALRateMod: 0 AChLRateMod: 0 AChDisInhib: 1 BurstGain: 1 DipGain: 1 }
+ NeuroMod: { DAMod: NoDAMod Valence: Positive DAModGain: 0.5 DALRateSign: false DALRateMod: 0 AChLRateMod: 0 AChDisInhib: 1 BurstGain: 1 DipGain: 1 }
}
///////////////////////////////////////////////////
Prjn: ACCnegValPTToACCnegValMD
@@ -5951,13 +5951,13 @@ Act: {
Clamp: { IsInput: false IsTarget: false Ge: 1.5 Add: false ErrThr: 0.5 }
Noise: { On: false GeHz: 100 Ge: 0.001 GiHz: 200 Gi: 0.001 }
VmRange: { Min: 0.1 Max: 1 }
- Mahp: { Gbar: 0.02 Voff: -30 Vslope: 9 TauMax: 1000 Tadj: 3.2093637 DtMax: 0.001 }
+ Mahp: { Gbar: 0.02 Voff: -30 Vslope: 9 TauMax: 1000 Tadj: 3.209364 DtMax: 0.001 }
Sahp: { Gbar: 0.1 CaTau: 5 Off: 0.8 Slope: 0.02 TauMax: 1 CaDt: 0.2 DtMax: 1 }
KNa: { On: true TrialSlow: false Med: { On: true Rise: 0.02 Max: 0.2 Tau: 200 Dt: 0.005 } Slow: { On: true Rise: 0.001 Max: 0.2 Tau: 1000 Dt: 0.001
} }
NMDA: { Gbar: 0.006 Tau: 100 ITau: 1 MgC: 1.4 Voff: 0 }
MaintNMDA: { Gbar: 0.007 Tau: 200 ITau: 1 MgC: 1.4 Voff: 0 }
- GabaB: { Gbar: 0.015 RiseTau: 45 DecayTau: 50 Gbase: 0.2 GiSpike: 10 MaxTime: 47.41225 TauFact: 2.5811758 RiseDt: 0.022222223 DecayDt: 0.02 }
+ GabaB: { Gbar: 0.015 RiseTau: 45 DecayTau: 50 Gbase: 0.2 GiSpike: 10 MaxTime: 47.41225 TauFact: 2.5811756 RiseDt: 0.022222223 DecayDt: 0.02 }
VGCC: { Gbar: 0.02 Ca: 25 }
AK: { Gbar: 0.1 Hf: 0.076 Mf: 0.075 Voff: 2 Vmax: -37 }
SKCa: { Gbar: 0 C50: 0.5 ActTau: 15 DeTau: 30 KCaR: 0.8 CaRDecayTau: 150 CaInThr: 0.01 CaInTau: 50 }
@@ -5977,7 +5977,7 @@ Learn: {
TrgAvgAct: { On: true GiBaseInit: 0 ErrLRate: 0.02 SynScaleRate: 0.0002 SubMean: 1 Permute: true Pool: true TrgRange: { Min: 0.5 Max: 2
} }
RLRate: { On: true SigmoidMin: 0.05 Diff: true SpkThr: 0.1 DiffThr: 0.02 Min: 0.001 }
- NeuroMod: { DAMod: 0 Valence: 0 DAModGain: 0.5 DALRateSign: false DALRateMod: 0 AChLRateMod: 0 AChDisInhib: 0 BurstGain: 1 DipGain: 1 }
+ NeuroMod: { DAMod: NoDAMod Valence: Positive DAModGain: 0.5 DALRateSign: false DALRateMod: 0 AChLRateMod: 0 AChDisInhib: 0 BurstGain: 1 DipGain: 1 }
}
CT: {
GeGain: 0.05 DecayTau: 50
@@ -6083,7 +6083,7 @@ Learn: {
} }
}
///////////////////////////////////////////////////
-Prjn: DistToACCnegValPTp
+Prjn: PosToACCnegValPTp
Com: {
GType: ExcitatoryG Delay: 2 MaxDelay: 2 PFail: 0 PFailSWt: false DelLen: 3
}
@@ -6103,7 +6103,7 @@ Learn: {
} }
}
///////////////////////////////////////////////////
-Prjn: DistPToACCnegValPTp
+Prjn: PosPToACCnegValPTp
Com: {
GType: ExcitatoryG Delay: 2 MaxDelay: 2 PFail: 0 PFailSWt: false DelLen: 3
}
@@ -6135,13 +6135,13 @@ Act: {
Clamp: { IsInput: false IsTarget: true Ge: 1.5 Add: false ErrThr: 0.5 }
Noise: { On: false GeHz: 100 Ge: 0.001 GiHz: 200 Gi: 0.001 }
VmRange: { Min: 0.1 Max: 1 }
- Mahp: { Gbar: 0.02 Voff: -30 Vslope: 9 TauMax: 1000 Tadj: 3.2093637 DtMax: 0.001 }
+ Mahp: { Gbar: 0.02 Voff: -30 Vslope: 9 TauMax: 1000 Tadj: 3.209364 DtMax: 0.001 }
Sahp: { Gbar: 0.05 CaTau: 5 Off: 0.8 Slope: 0.02 TauMax: 1 CaDt: 0.2 DtMax: 1 }
KNa: { On: true TrialSlow: false Med: { On: true Rise: 0.02 Max: 0.2 Tau: 200 Dt: 0.005 } Slow: { On: true Rise: 0.001 Max: 0.2 Tau: 1000 Dt: 0.001
} }
NMDA: { Gbar: 0.006 Tau: 100 ITau: 1 MgC: 1.4 Voff: 0 }
MaintNMDA: { Gbar: 0.007 Tau: 200 ITau: 1 MgC: 1.4 Voff: 0 }
- GabaB: { Gbar: 0.015 RiseTau: 45 DecayTau: 50 Gbase: 0.2 GiSpike: 10 MaxTime: 47.41225 TauFact: 2.5811758 RiseDt: 0.022222223 DecayDt: 0.02 }
+ GabaB: { Gbar: 0.015 RiseTau: 45 DecayTau: 50 Gbase: 0.2 GiSpike: 10 MaxTime: 47.41225 TauFact: 2.5811756 RiseDt: 0.022222223 DecayDt: 0.02 }
VGCC: { Gbar: 0.02 Ca: 25 }
AK: { Gbar: 0.1 Hf: 0.076 Mf: 0.075 Voff: 2 Vmax: -37 }
SKCa: { Gbar: 0 C50: 0.5 ActTau: 15 DeTau: 30 KCaR: 0.8 CaRDecayTau: 150 CaInThr: 0.01 CaInTau: 50 }
@@ -6161,7 +6161,7 @@ Learn: {
TrgAvgAct: { On: true GiBaseInit: 0 ErrLRate: 0.02 SynScaleRate: 0.005 SubMean: 1 Permute: true Pool: true TrgRange: { Min: 0.5 Max: 2
} }
RLRate: { On: true SigmoidMin: 1 Diff: true SpkThr: 0.1 DiffThr: 0.02 Min: 0.001 }
- NeuroMod: { DAMod: 0 Valence: 0 DAModGain: 0.5 DALRateSign: false DALRateMod: 0 AChLRateMod: 0 AChDisInhib: 0 BurstGain: 1 DipGain: 1 }
+ NeuroMod: { DAMod: NoDAMod Valence: Positive DAModGain: 0.5 DALRateSign: false DALRateMod: 0 AChLRateMod: 0 AChDisInhib: 0 BurstGain: 1 DipGain: 1 }
}
Pulv: {
DriveScale: 0.1 FullDriveAct: 0.6 DriveLayIdx: 43
@@ -6219,13 +6219,13 @@ Act: {
Clamp: { IsInput: false IsTarget: true Ge: 1.5 Add: false ErrThr: 0.5 }
Noise: { On: false GeHz: 100 Ge: 0.001 GiHz: 200 Gi: 0.001 }
VmRange: { Min: 0.1 Max: 1 }
- Mahp: { Gbar: 0.02 Voff: -30 Vslope: 9 TauMax: 1000 Tadj: 3.2093637 DtMax: 0.001 }
+ Mahp: { Gbar: 0.02 Voff: -30 Vslope: 9 TauMax: 1000 Tadj: 3.209364 DtMax: 0.001 }
Sahp: { Gbar: 0.05 CaTau: 5 Off: 0.8 Slope: 0.02 TauMax: 1 CaDt: 0.2 DtMax: 1 }
KNa: { On: true TrialSlow: false Med: { On: true Rise: 0.02 Max: 0.2 Tau: 200 Dt: 0.005 } Slow: { On: true Rise: 0.001 Max: 0.2 Tau: 1000 Dt: 0.001
} }
NMDA: { Gbar: 0.006 Tau: 100 ITau: 1 MgC: 1.4 Voff: 0 }
MaintNMDA: { Gbar: 0.007 Tau: 200 ITau: 1 MgC: 1.4 Voff: 0 }
- GabaB: { Gbar: 0.015 RiseTau: 45 DecayTau: 50 Gbase: 0.2 GiSpike: 10 MaxTime: 47.41225 TauFact: 2.5811758 RiseDt: 0.022222223 DecayDt: 0.02 }
+ GabaB: { Gbar: 0.015 RiseTau: 45 DecayTau: 50 Gbase: 0.2 GiSpike: 10 MaxTime: 47.41225 TauFact: 2.5811756 RiseDt: 0.022222223 DecayDt: 0.02 }
VGCC: { Gbar: 0.02 Ca: 25 }
AK: { Gbar: 0.1 Hf: 0.076 Mf: 0.075 Voff: 2 Vmax: -37 }
SKCa: { Gbar: 0 C50: 0.5 ActTau: 15 DeTau: 30 KCaR: 0.8 CaRDecayTau: 150 CaInThr: 0.01 CaInTau: 50 }
@@ -6245,7 +6245,7 @@ Learn: {
TrgAvgAct: { On: true GiBaseInit: 0 ErrLRate: 0.02 SynScaleRate: 0.005 SubMean: 1 Permute: true Pool: true TrgRange: { Min: 0.5 Max: 2
} }
RLRate: { On: true SigmoidMin: 1 Diff: true SpkThr: 0.1 DiffThr: 0.02 Min: 0.001 }
- NeuroMod: { DAMod: 0 Valence: 0 DAModGain: 0.5 DALRateSign: false DALRateMod: 0 AChLRateMod: 0 AChDisInhib: 0 BurstGain: 1 DipGain: 1 }
+ NeuroMod: { DAMod: NoDAMod Valence: Positive DAModGain: 0.5 DALRateSign: false DALRateMod: 0 AChLRateMod: 0 AChDisInhib: 0 BurstGain: 1 DipGain: 1 }
}
Pulv: {
DriveScale: 0.1 FullDriveAct: 0.6 DriveLayIdx: 48
@@ -6303,13 +6303,13 @@ Act: {
Clamp: { IsInput: false IsTarget: false Ge: 1.5 Add: false ErrThr: 0.5 }
Noise: { On: false GeHz: 100 Ge: 0.001 GiHz: 200 Gi: 0.001 }
VmRange: { Min: 0.1 Max: 1 }
- Mahp: { Gbar: 0.02 Voff: -30 Vslope: 9 TauMax: 1000 Tadj: 3.2093637 DtMax: 0.001 }
+ Mahp: { Gbar: 0.02 Voff: -30 Vslope: 9 TauMax: 1000 Tadj: 3.209364 DtMax: 0.001 }
Sahp: { Gbar: 0.05 CaTau: 5 Off: 0.8 Slope: 0.02 TauMax: 1 CaDt: 0.2 DtMax: 1 }
KNa: { On: true TrialSlow: false Med: { On: true Rise: 0.02 Max: 0.2 Tau: 200 Dt: 0.005 } Slow: { On: true Rise: 0.001 Max: 0.2 Tau: 1000 Dt: 0.001
} }
NMDA: { Gbar: 0.006 Tau: 100 ITau: 1 MgC: 1.4 Voff: 0 }
MaintNMDA: { Gbar: 0.007 Tau: 200 ITau: 1 MgC: 1.4 Voff: 0 }
- GabaB: { Gbar: 0.015 RiseTau: 45 DecayTau: 50 Gbase: 0.2 GiSpike: 10 MaxTime: 47.41225 TauFact: 2.5811758 RiseDt: 0.022222223 DecayDt: 0.02 }
+ GabaB: { Gbar: 0.015 RiseTau: 45 DecayTau: 50 Gbase: 0.2 GiSpike: 10 MaxTime: 47.41225 TauFact: 2.5811756 RiseDt: 0.022222223 DecayDt: 0.02 }
VGCC: { Gbar: 0.02 Ca: 25 }
AK: { Gbar: 0.1 Hf: 0.076 Mf: 0.075 Voff: 2 Vmax: -37 }
SKCa: { Gbar: 0 C50: 0.5 ActTau: 15 DeTau: 30 KCaR: 0.8 CaRDecayTau: 150 CaInThr: 0.01 CaInTau: 50 }
@@ -6329,7 +6329,7 @@ Learn: {
TrgAvgAct: { On: true GiBaseInit: 0 ErrLRate: 0.02 SynScaleRate: 0.0002 SubMean: 1 Permute: true Pool: true TrgRange: { Min: 0.5 Max: 2
} }
RLRate: { On: true SigmoidMin: 0.05 Diff: true SpkThr: 0.1 DiffThr: 0.02 Min: 0.001 }
- NeuroMod: { DAMod: 0 Valence: 0 DAModGain: 0.5 DALRateSign: false DALRateMod: 0 AChLRateMod: 0 AChDisInhib: 0 BurstGain: 1 DipGain: 1 }
+ NeuroMod: { DAMod: NoDAMod Valence: Positive DAModGain: 0.5 DALRateSign: false DALRateMod: 0 AChLRateMod: 0 AChDisInhib: 0 BurstGain: 1 DipGain: 1 }
}
Burst: {
ThrRel: 0.1 ThrAbs: 0.1
@@ -6427,13 +6427,13 @@ Act: {
Clamp: { IsInput: false IsTarget: false Ge: 1.5 Add: false ErrThr: 0.5 }
Noise: { On: false GeHz: 100 Ge: 0.001 GiHz: 200 Gi: 0.001 }
VmRange: { Min: 0.1 Max: 1 }
- Mahp: { Gbar: 0.02 Voff: -30 Vslope: 9 TauMax: 1000 Tadj: 3.2093637 DtMax: 0.001 }
+ Mahp: { Gbar: 0.02 Voff: -30 Vslope: 9 TauMax: 1000 Tadj: 3.209364 DtMax: 0.001 }
Sahp: { Gbar: 1 CaTau: 5 Off: 0.8 Slope: 0.02 TauMax: 1 CaDt: 0.2 DtMax: 1 }
KNa: { On: true TrialSlow: false Med: { On: true Rise: 0.02 Max: 0.2 Tau: 200 Dt: 0.005 } Slow: { On: true Rise: 0.001 Max: 0.2 Tau: 1000 Dt: 0.001
} }
NMDA: { Gbar: 0.008 Tau: 200 ITau: 1 MgC: 1.4 Voff: 0 }
MaintNMDA: { Gbar: 0.007 Tau: 200 ITau: 1 MgC: 1.4 Voff: 0 }
- GabaB: { Gbar: 0.009 RiseTau: 45 DecayTau: 50 Gbase: 0.2 GiSpike: 10 MaxTime: 47.41225 TauFact: 2.5811758 RiseDt: 0.022222223 DecayDt: 0.02 }
+ GabaB: { Gbar: 0.009 RiseTau: 45 DecayTau: 50 Gbase: 0.2 GiSpike: 10 MaxTime: 47.41225 TauFact: 2.5811756 RiseDt: 0.022222223 DecayDt: 0.02 }
VGCC: { Gbar: 0.02 Ca: 25 }
AK: { Gbar: 0.1 Hf: 0.076 Mf: 0.075 Voff: 2 Vmax: -37 }
SKCa: { Gbar: 0 C50: 0.5 ActTau: 15 DeTau: 30 KCaR: 0.8 CaRDecayTau: 150 CaInThr: 0.01 CaInTau: 50 }
@@ -6453,7 +6453,7 @@ Learn: {
TrgAvgAct: { On: true GiBaseInit: 0 ErrLRate: 0.02 SynScaleRate: 0.0002 SubMean: 1 Permute: true Pool: true TrgRange: { Min: 0.5 Max: 2
} }
RLRate: { On: true SigmoidMin: 0.05 Diff: true SpkThr: 0.1 DiffThr: 0.02 Min: 0.001 }
- NeuroMod: { DAMod: 0 Valence: 0 DAModGain: 0.5 DALRateSign: false DALRateMod: 0 AChLRateMod: 0 AChDisInhib: 0 BurstGain: 1 DipGain: 1 }
+ NeuroMod: { DAMod: NoDAMod Valence: Positive DAModGain: 0.5 DALRateSign: false DALRateMod: 0 AChLRateMod: 0 AChDisInhib: 0 BurstGain: 1 DipGain: 1 }
}
CT: {
GeGain: 2 DecayTau: 50
@@ -6551,13 +6551,13 @@ Act: {
Clamp: { IsInput: false IsTarget: false Ge: 1.5 Add: false ErrThr: 0.5 }
Noise: { On: false GeHz: 100 Ge: 0.001 GiHz: 200 Gi: 0.001 }
VmRange: { Min: 0.1 Max: 1 }
- Mahp: { Gbar: 0.02 Voff: -30 Vslope: 9 TauMax: 1000 Tadj: 3.2093637 DtMax: 0.001 }
+ Mahp: { Gbar: 0.02 Voff: -30 Vslope: 9 TauMax: 1000 Tadj: 3.209364 DtMax: 0.001 }
Sahp: { Gbar: 0.01 CaTau: 5 Off: 0.8 Slope: 0.02 TauMax: 1 CaDt: 0.2 DtMax: 1 }
KNa: { On: true TrialSlow: false Med: { On: true Rise: 0.02 Max: 0.2 Tau: 200 Dt: 0.005 } Slow: { On: true Rise: 0.001 Max: 0.2 Tau: 1000 Dt: 0.001
} }
NMDA: { Gbar: 0.006 Tau: 100 ITau: 1 MgC: 1.4 Voff: 0 }
MaintNMDA: { Gbar: 0.007 Tau: 200 ITau: 1 MgC: 1.4 Voff: 0 }
- GabaB: { Gbar: 0.01 RiseTau: 45 DecayTau: 50 Gbase: 0.2 GiSpike: 10 MaxTime: 47.41225 TauFact: 2.5811758 RiseDt: 0.022222223 DecayDt: 0.02 }
+ GabaB: { Gbar: 0.01 RiseTau: 45 DecayTau: 50 Gbase: 0.2 GiSpike: 10 MaxTime: 47.41225 TauFact: 2.5811756 RiseDt: 0.022222223 DecayDt: 0.02 }
VGCC: { Gbar: 0.02 Ca: 25 }
AK: { Gbar: 0.1 Hf: 0.076 Mf: 0.075 Voff: 2 Vmax: -37 }
SKCa: { Gbar: 0 C50: 0.5 ActTau: 15 DeTau: 30 KCaR: 0.8 CaRDecayTau: 150 CaInThr: 0.01 CaInTau: 50 }
@@ -6577,7 +6577,7 @@ Learn: {
TrgAvgAct: { On: false GiBaseInit: 0 ErrLRate: 0.02 SynScaleRate: 0.0002 SubMean: 1 Permute: true Pool: true TrgRange: { Min: 0.5 Max: 2
} }
RLRate: { On: true SigmoidMin: 0.05 Diff: true SpkThr: 0.1 DiffThr: 0.02 Min: 0.001 }
- NeuroMod: { DAMod: 0 Valence: 0 DAModGain: 0.5 DALRateSign: false DALRateMod: 0 AChLRateMod: 0 AChDisInhib: 0 BurstGain: 1 DipGain: 1 }
+ NeuroMod: { DAMod: NoDAMod Valence: Positive DAModGain: 0.5 DALRateSign: false DALRateMod: 0 AChLRateMod: 0 AChDisInhib: 0 BurstGain: 1 DipGain: 1 }
}
///////////////////////////////////////////////////
Prjn: ACCutilMDToACCutilPT
@@ -6652,13 +6652,13 @@ Act: {
Clamp: { IsInput: false IsTarget: false Ge: 1.5 Add: false ErrThr: 0.5 }
Noise: { On: false GeHz: 100 Ge: 0.001 GiHz: 200 Gi: 0.001 }
VmRange: { Min: 0.1 Max: 1 }
- Mahp: { Gbar: 0.02 Voff: -30 Vslope: 9 TauMax: 1000 Tadj: 3.2093637 DtMax: 0.001 }
+ Mahp: { Gbar: 0.02 Voff: -30 Vslope: 9 TauMax: 1000 Tadj: 3.209364 DtMax: 0.001 }
Sahp: { Gbar: 0.05 CaTau: 5 Off: 0.8 Slope: 0.02 TauMax: 1 CaDt: 0.2 DtMax: 1 }
KNa: { On: true TrialSlow: false Med: { On: true Rise: 0.02 Max: 0.2 Tau: 200 Dt: 0.005 } Slow: { On: true Rise: 0.001 Max: 0.2 Tau: 1000 Dt: 0.001
} }
NMDA: { Gbar: 0.006 Tau: 100 ITau: 1 MgC: 1.4 Voff: 0 }
MaintNMDA: { Gbar: 0.007 Tau: 200 ITau: 1 MgC: 1.4 Voff: 0 }
- GabaB: { Gbar: 0.015 RiseTau: 45 DecayTau: 50 Gbase: 0.2 GiSpike: 10 MaxTime: 47.41225 TauFact: 2.5811758 RiseDt: 0.022222223 DecayDt: 0.02 }
+ GabaB: { Gbar: 0.015 RiseTau: 45 DecayTau: 50 Gbase: 0.2 GiSpike: 10 MaxTime: 47.41225 TauFact: 2.5811756 RiseDt: 0.022222223 DecayDt: 0.02 }
VGCC: { Gbar: 0.02 Ca: 25 }
AK: { Gbar: 0.1 Hf: 0.076 Mf: 0.075 Voff: 2 Vmax: -37 }
SKCa: { Gbar: 0 C50: 0.5 ActTau: 15 DeTau: 30 KCaR: 0.8 CaRDecayTau: 150 CaInThr: 0.01 CaInTau: 50 }
@@ -6678,7 +6678,7 @@ Learn: {
TrgAvgAct: { On: true GiBaseInit: 0 ErrLRate: 0.02 SynScaleRate: 0.005 SubMean: 1 Permute: true Pool: true TrgRange: { Min: 0.5 Max: 2
} }
RLRate: { On: true SigmoidMin: 0.05 Diff: true SpkThr: 0.1 DiffThr: 0.02 Min: 0.001 }
- NeuroMod: { DAMod: 0 Valence: 0 DAModGain: 0.5 DALRateSign: false DALRateMod: 0 AChLRateMod: 0 AChDisInhib: 1 BurstGain: 1 DipGain: 1 }
+ NeuroMod: { DAMod: NoDAMod Valence: Positive DAModGain: 0.5 DALRateSign: false DALRateMod: 0 AChLRateMod: 0 AChDisInhib: 1 BurstGain: 1 DipGain: 1 }
}
///////////////////////////////////////////////////
Prjn: ACCutilPTToACCutilMD
@@ -6753,13 +6753,13 @@ Act: {
Clamp: { IsInput: false IsTarget: false Ge: 1.5 Add: false ErrThr: 0.5 }
Noise: { On: false GeHz: 100 Ge: 0.001 GiHz: 200 Gi: 0.001 }
VmRange: { Min: 0.1 Max: 1 }
- Mahp: { Gbar: 0.02 Voff: -30 Vslope: 9 TauMax: 1000 Tadj: 3.2093637 DtMax: 0.001 }
+ Mahp: { Gbar: 0.02 Voff: -30 Vslope: 9 TauMax: 1000 Tadj: 3.209364 DtMax: 0.001 }
Sahp: { Gbar: 0.1 CaTau: 5 Off: 0.8 Slope: 0.02 TauMax: 1 CaDt: 0.2 DtMax: 1 }
KNa: { On: true TrialSlow: false Med: { On: true Rise: 0.02 Max: 0.2 Tau: 200 Dt: 0.005 } Slow: { On: true Rise: 0.001 Max: 0.2 Tau: 1000 Dt: 0.001
} }
NMDA: { Gbar: 0.006 Tau: 100 ITau: 1 MgC: 1.4 Voff: 0 }
MaintNMDA: { Gbar: 0.007 Tau: 200 ITau: 1 MgC: 1.4 Voff: 0 }
- GabaB: { Gbar: 0.015 RiseTau: 45 DecayTau: 50 Gbase: 0.2 GiSpike: 10 MaxTime: 47.41225 TauFact: 2.5811758 RiseDt: 0.022222223 DecayDt: 0.02 }
+ GabaB: { Gbar: 0.015 RiseTau: 45 DecayTau: 50 Gbase: 0.2 GiSpike: 10 MaxTime: 47.41225 TauFact: 2.5811756 RiseDt: 0.022222223 DecayDt: 0.02 }
VGCC: { Gbar: 0.02 Ca: 25 }
AK: { Gbar: 0.1 Hf: 0.076 Mf: 0.075 Voff: 2 Vmax: -37 }
SKCa: { Gbar: 0 C50: 0.5 ActTau: 15 DeTau: 30 KCaR: 0.8 CaRDecayTau: 150 CaInThr: 0.01 CaInTau: 50 }
@@ -6779,7 +6779,7 @@ Learn: {
TrgAvgAct: { On: true GiBaseInit: 0 ErrLRate: 0.02 SynScaleRate: 0.0002 SubMean: 1 Permute: true Pool: true TrgRange: { Min: 0.5 Max: 2
} }
RLRate: { On: true SigmoidMin: 0.05 Diff: true SpkThr: 0.1 DiffThr: 0.02 Min: 0.001 }
- NeuroMod: { DAMod: 0 Valence: 0 DAModGain: 0.5 DALRateSign: false DALRateMod: 0 AChLRateMod: 0 AChDisInhib: 0 BurstGain: 1 DipGain: 1 }
+ NeuroMod: { DAMod: NoDAMod Valence: Positive DAModGain: 0.5 DALRateSign: false DALRateMod: 0 AChLRateMod: 0 AChDisInhib: 0 BurstGain: 1 DipGain: 1 }
}
CT: {
GeGain: 0.05 DecayTau: 50
@@ -6917,13 +6917,13 @@ Act: {
Clamp: { IsInput: true IsTarget: false Ge: 1.5 Add: false ErrThr: 0.5 }
Noise: { On: false GeHz: 100 Ge: 0.001 GiHz: 200 Gi: 0.001 }
VmRange: { Min: 0.1 Max: 1 }
- Mahp: { Gbar: 0.02 Voff: -30 Vslope: 9 TauMax: 1000 Tadj: 3.2093637 DtMax: 0.001 }
+ Mahp: { Gbar: 0.02 Voff: -30 Vslope: 9 TauMax: 1000 Tadj: 3.209364 DtMax: 0.001 }
Sahp: { Gbar: 0.05 CaTau: 5 Off: 0.8 Slope: 0.02 TauMax: 1 CaDt: 0.2 DtMax: 1 }
KNa: { On: true TrialSlow: false Med: { On: true Rise: 0.02 Max: 0.2 Tau: 200 Dt: 0.005 } Slow: { On: true Rise: 0.001 Max: 0.2 Tau: 1000 Dt: 0.001
} }
NMDA: { Gbar: 0.006 Tau: 100 ITau: 1 MgC: 1.4 Voff: 0 }
MaintNMDA: { Gbar: 0.007 Tau: 200 ITau: 1 MgC: 1.4 Voff: 0 }
- GabaB: { Gbar: 0.015 RiseTau: 45 DecayTau: 50 Gbase: 0.2 GiSpike: 10 MaxTime: 47.41225 TauFact: 2.5811758 RiseDt: 0.022222223 DecayDt: 0.02 }
+ GabaB: { Gbar: 0.015 RiseTau: 45 DecayTau: 50 Gbase: 0.2 GiSpike: 10 MaxTime: 47.41225 TauFact: 2.5811756 RiseDt: 0.022222223 DecayDt: 0.02 }
VGCC: { Gbar: 0.02 Ca: 25 }
AK: { Gbar: 0.1 Hf: 0.076 Mf: 0.075 Voff: 2 Vmax: -37 }
SKCa: { Gbar: 0 C50: 0.5 ActTau: 15 DeTau: 30 KCaR: 0.8 CaRDecayTau: 150 CaInThr: 0.01 CaInTau: 50 }
@@ -6931,7 +6931,7 @@ Act: {
PopCode: { On: false Ge: 0.1 Min: -0.1 Max: 1.1 MinAct: 1 MinSigma: 0.1 MaxSigma: 0.1 Clip: true }
}
Inhib: {
- ActAvg: { Nominal: 0.08 AdaptGi: false Offset: 0 HiTol: 0 LoTol: 0.8 AdaptRate: 0.1 }
+ ActAvg: { Nominal: 0.04 AdaptGi: false Offset: 0 HiTol: 0 LoTol: 0.8 AdaptRate: 0.1 }
Layer: { On: true Gi: 0.9 FB: 1 FSTau: 6 SS: 30 SSfTau: 20 SSiTau: 50 FS0: 0.1 FFAvgTau: 50 FFPrv: 0 ClampExtMin: 0.05 }
Pool: { On: false Gi: 0.9 FB: 1 FSTau: 6 SS: 30 SSfTau: 20 SSiTau: 50 FS0: 0.1 FFAvgTau: 50 FFPrv: 0 ClampExtMin: 0.05 }
}
@@ -6943,7 +6943,7 @@ Learn: {
TrgAvgAct: { On: true GiBaseInit: 0 ErrLRate: 0.02 SynScaleRate: 0.005 SubMean: 0 Permute: true Pool: true TrgRange: { Min: 0.5 Max: 2
} }
RLRate: { On: true SigmoidMin: 0.05 Diff: true SpkThr: 0.1 DiffThr: 0.02 Min: 0.001 }
- NeuroMod: { DAMod: 0 Valence: 0 DAModGain: 0.5 DALRateSign: false DALRateMod: 0 AChLRateMod: 0 AChDisInhib: 0 BurstGain: 1 DipGain: 1 }
+ NeuroMod: { DAMod: NoDAMod Valence: Positive DAModGain: 0.5 DALRateSign: false DALRateMod: 0 AChLRateMod: 0 AChDisInhib: 0 BurstGain: 1 DipGain: 1 }
}
/////////////////////////////////////////////////
Layer: CSP
@@ -6958,13 +6958,13 @@ Act: {
Clamp: { IsInput: false IsTarget: true Ge: 1.5 Add: false ErrThr: 0.5 }
Noise: { On: false GeHz: 100 Ge: 0.001 GiHz: 200 Gi: 0.001 }
VmRange: { Min: 0.1 Max: 1 }
- Mahp: { Gbar: 0.02 Voff: -30 Vslope: 9 TauMax: 1000 Tadj: 3.2093637 DtMax: 0.001 }
+ Mahp: { Gbar: 0.02 Voff: -30 Vslope: 9 TauMax: 1000 Tadj: 3.209364 DtMax: 0.001 }
Sahp: { Gbar: 0.05 CaTau: 5 Off: 0.8 Slope: 0.02 TauMax: 1 CaDt: 0.2 DtMax: 1 }
KNa: { On: true TrialSlow: false Med: { On: true Rise: 0.02 Max: 0.2 Tau: 200 Dt: 0.005 } Slow: { On: true Rise: 0.001 Max: 0.2 Tau: 1000 Dt: 0.001
} }
NMDA: { Gbar: 0.006 Tau: 100 ITau: 1 MgC: 1.4 Voff: 0 }
MaintNMDA: { Gbar: 0.007 Tau: 200 ITau: 1 MgC: 1.4 Voff: 0 }
- GabaB: { Gbar: 0.015 RiseTau: 45 DecayTau: 50 Gbase: 0.2 GiSpike: 10 MaxTime: 47.41225 TauFact: 2.5811758 RiseDt: 0.022222223 DecayDt: 0.02 }
+ GabaB: { Gbar: 0.015 RiseTau: 45 DecayTau: 50 Gbase: 0.2 GiSpike: 10 MaxTime: 47.41225 TauFact: 2.5811756 RiseDt: 0.022222223 DecayDt: 0.02 }
VGCC: { Gbar: 0.02 Ca: 25 }
AK: { Gbar: 0.1 Hf: 0.076 Mf: 0.075 Voff: 2 Vmax: -37 }
SKCa: { Gbar: 0 C50: 0.5 ActTau: 15 DeTau: 30 KCaR: 0.8 CaRDecayTau: 150 CaInThr: 0.01 CaInTau: 50 }
@@ -6972,7 +6972,7 @@ Act: {
PopCode: { On: false Ge: 0.1 Min: -0.1 Max: 1.1 MinAct: 1 MinSigma: 0.1 MaxSigma: 0.1 Clip: true }
}
Inhib: {
- ActAvg: { Nominal: 0.08 AdaptGi: false Offset: 0 HiTol: 0 LoTol: 0.8 AdaptRate: 0.1 }
+ ActAvg: { Nominal: 0.04 AdaptGi: false Offset: 0 HiTol: 0 LoTol: 0.8 AdaptRate: 0.1 }
Layer: { On: true Gi: 1 FB: 1 FSTau: 6 SS: 30 SSfTau: 20 SSiTau: 50 FS0: 0.1 FFAvgTau: 50 FFPrv: 0 ClampExtMin: 0.05 }
Pool: { On: false Gi: 1 FB: 1 FSTau: 6 SS: 30 SSfTau: 20 SSiTau: 50 FS0: 0.1 FFAvgTau: 50 FFPrv: 0 ClampExtMin: 0.05 }
}
@@ -6984,7 +6984,7 @@ Learn: {
TrgAvgAct: { On: true GiBaseInit: 0 ErrLRate: 0.02 SynScaleRate: 0.005 SubMean: 1 Permute: true Pool: true TrgRange: { Min: 0.5 Max: 2
} }
RLRate: { On: true SigmoidMin: 1 Diff: true SpkThr: 0.1 DiffThr: 0.02 Min: 0.001 }
- NeuroMod: { DAMod: 0 Valence: 0 DAModGain: 0.5 DALRateSign: false DALRateMod: 0 AChLRateMod: 0 AChDisInhib: 0 BurstGain: 1 DipGain: 1 }
+ NeuroMod: { DAMod: NoDAMod Valence: Positive DAModGain: 0.5 DALRateSign: false DALRateMod: 0 AChLRateMod: 0 AChDisInhib: 0 BurstGain: 1 DipGain: 1 }
}
Pulv: {
DriveScale: 0.1 FullDriveAct: 0.6 DriveLayIdx: 60
@@ -7030,7 +7030,7 @@ Learn: {
} }
}
/////////////////////////////////////////////////
-Layer: Dist
+Layer: Pos
Act: {
Spikes: { Thr: 0.5 VmR: 0.3 Tr: 3 RTau: 1.6667 Exp: true ExpSlope: 0.02 ExpThr: 0.9 MaxHz: 180 ISITau: 5 ISIDt: 0.2 RDt: 0.599988 }
Dend: { GbarExp: 0.2 GbarR: 3 SSGi: 2 HasMod: false ModGain: 1 ModBase: 0 }
@@ -7042,13 +7042,13 @@ Act: {
Clamp: { IsInput: true IsTarget: false Ge: 1.5 Add: false ErrThr: 0.5 }
Noise: { On: false GeHz: 100 Ge: 0.001 GiHz: 200 Gi: 0.001 }
VmRange: { Min: 0.1 Max: 1 }
- Mahp: { Gbar: 0.02 Voff: -30 Vslope: 9 TauMax: 1000 Tadj: 3.2093637 DtMax: 0.001 }
+ Mahp: { Gbar: 0.02 Voff: -30 Vslope: 9 TauMax: 1000 Tadj: 3.209364 DtMax: 0.001 }
Sahp: { Gbar: 0.05 CaTau: 5 Off: 0.8 Slope: 0.02 TauMax: 1 CaDt: 0.2 DtMax: 1 }
KNa: { On: true TrialSlow: false Med: { On: true Rise: 0.02 Max: 0.2 Tau: 200 Dt: 0.005 } Slow: { On: true Rise: 0.001 Max: 0.2 Tau: 1000 Dt: 0.001
} }
NMDA: { Gbar: 0.006 Tau: 100 ITau: 1 MgC: 1.4 Voff: 0 }
MaintNMDA: { Gbar: 0.007 Tau: 200 ITau: 1 MgC: 1.4 Voff: 0 }
- GabaB: { Gbar: 0.015 RiseTau: 45 DecayTau: 50 Gbase: 0.2 GiSpike: 10 MaxTime: 47.41225 TauFact: 2.5811758 RiseDt: 0.022222223 DecayDt: 0.02 }
+ GabaB: { Gbar: 0.015 RiseTau: 45 DecayTau: 50 Gbase: 0.2 GiSpike: 10 MaxTime: 47.41225 TauFact: 2.5811756 RiseDt: 0.022222223 DecayDt: 0.02 }
VGCC: { Gbar: 0.02 Ca: 25 }
AK: { Gbar: 0.1 Hf: 0.076 Mf: 0.075 Voff: 2 Vmax: -37 }
SKCa: { Gbar: 0 C50: 0.5 ActTau: 15 DeTau: 30 KCaR: 0.8 CaRDecayTau: 150 CaInThr: 0.01 CaInTau: 50 }
@@ -7068,10 +7068,10 @@ Learn: {
TrgAvgAct: { On: true GiBaseInit: 0 ErrLRate: 0.02 SynScaleRate: 0.005 SubMean: 0 Permute: true Pool: true TrgRange: { Min: 0.5 Max: 2
} }
RLRate: { On: true SigmoidMin: 0.05 Diff: true SpkThr: 0.1 DiffThr: 0.02 Min: 0.001 }
- NeuroMod: { DAMod: 0 Valence: 0 DAModGain: 0.5 DALRateSign: false DALRateMod: 0 AChLRateMod: 0 AChDisInhib: 0 BurstGain: 1 DipGain: 1 }
+ NeuroMod: { DAMod: NoDAMod Valence: Positive DAModGain: 0.5 DALRateSign: false DALRateMod: 0 AChLRateMod: 0 AChDisInhib: 0 BurstGain: 1 DipGain: 1 }
}
/////////////////////////////////////////////////
-Layer: DistP
+Layer: PosP
Act: {
Spikes: { Thr: 0.5 VmR: 0.3 Tr: 3 RTau: 1.6667 Exp: true ExpSlope: 0.02 ExpThr: 0.9 MaxHz: 180 ISITau: 5 ISIDt: 0.2 RDt: 0.599988 }
Dend: { GbarExp: 0.2 GbarR: 3 SSGi: 2 HasMod: false ModGain: 1 ModBase: 0 }
@@ -7083,13 +7083,13 @@ Act: {
Clamp: { IsInput: false IsTarget: true Ge: 1.5 Add: false ErrThr: 0.5 }
Noise: { On: false GeHz: 100 Ge: 0.001 GiHz: 200 Gi: 0.001 }
VmRange: { Min: 0.1 Max: 1 }
- Mahp: { Gbar: 0.02 Voff: -30 Vslope: 9 TauMax: 1000 Tadj: 3.2093637 DtMax: 0.001 }
+ Mahp: { Gbar: 0.02 Voff: -30 Vslope: 9 TauMax: 1000 Tadj: 3.209364 DtMax: 0.001 }
Sahp: { Gbar: 0.05 CaTau: 5 Off: 0.8 Slope: 0.02 TauMax: 1 CaDt: 0.2 DtMax: 1 }
KNa: { On: true TrialSlow: false Med: { On: true Rise: 0.02 Max: 0.2 Tau: 200 Dt: 0.005 } Slow: { On: true Rise: 0.001 Max: 0.2 Tau: 1000 Dt: 0.001
} }
NMDA: { Gbar: 0.006 Tau: 100 ITau: 1 MgC: 1.4 Voff: 0 }
MaintNMDA: { Gbar: 0.007 Tau: 200 ITau: 1 MgC: 1.4 Voff: 0 }
- GabaB: { Gbar: 0.015 RiseTau: 45 DecayTau: 50 Gbase: 0.2 GiSpike: 10 MaxTime: 47.41225 TauFact: 2.5811758 RiseDt: 0.022222223 DecayDt: 0.02 }
+ GabaB: { Gbar: 0.015 RiseTau: 45 DecayTau: 50 Gbase: 0.2 GiSpike: 10 MaxTime: 47.41225 TauFact: 2.5811756 RiseDt: 0.022222223 DecayDt: 0.02 }
VGCC: { Gbar: 0.02 Ca: 25 }
AK: { Gbar: 0.1 Hf: 0.076 Mf: 0.075 Voff: 2 Vmax: -37 }
SKCa: { Gbar: 0 C50: 0.5 ActTau: 15 DeTau: 30 KCaR: 0.8 CaRDecayTau: 150 CaInThr: 0.01 CaInTau: 50 }
@@ -7109,13 +7109,13 @@ Learn: {
TrgAvgAct: { On: true GiBaseInit: 0 ErrLRate: 0.02 SynScaleRate: 0.005 SubMean: 1 Permute: true Pool: true TrgRange: { Min: 0.5 Max: 2
} }
RLRate: { On: true SigmoidMin: 1 Diff: true SpkThr: 0.1 DiffThr: 0.02 Min: 0.001 }
- NeuroMod: { DAMod: 0 Valence: 0 DAModGain: 0.5 DALRateSign: false DALRateMod: 0 AChLRateMod: 0 AChDisInhib: 0 BurstGain: 1 DipGain: 1 }
+ NeuroMod: { DAMod: NoDAMod Valence: Positive DAModGain: 0.5 DALRateSign: false DALRateMod: 0 AChLRateMod: 0 AChDisInhib: 0 BurstGain: 1 DipGain: 1 }
}
Pulv: {
DriveScale: 0.1 FullDriveAct: 0.6 DriveLayIdx: 62
}
///////////////////////////////////////////////////
-Prjn: OFCposUSCTToDistP
+Prjn: OFCposUSCTToPosP
Com: {
GType: ExcitatoryG Delay: 2 MaxDelay: 2 PFail: 0 PFailSWt: false DelLen: 3
}
@@ -7135,7 +7135,7 @@ Learn: {
} }
}
///////////////////////////////////////////////////
-Prjn: OFCposUSPTpToDistP
+Prjn: OFCposUSPTpToPosP
Com: {
GType: ExcitatoryG Delay: 2 MaxDelay: 2 PFail: 0 PFailSWt: false DelLen: 3
}
@@ -7155,7 +7155,7 @@ Learn: {
} }
}
///////////////////////////////////////////////////
-Prjn: OFCposValCTToDistP
+Prjn: OFCposValCTToPosP
Com: {
GType: ExcitatoryG Delay: 2 MaxDelay: 2 PFail: 0 PFailSWt: false DelLen: 3
}
@@ -7175,7 +7175,7 @@ Learn: {
} }
}
///////////////////////////////////////////////////
-Prjn: OFCposValPTpToDistP
+Prjn: OFCposValPTpToPosP
Com: {
GType: ExcitatoryG Delay: 2 MaxDelay: 2 PFail: 0 PFailSWt: false DelLen: 3
}
@@ -7195,7 +7195,7 @@ Learn: {
} }
}
///////////////////////////////////////////////////
-Prjn: OFCnegUSCTToDistP
+Prjn: OFCnegUSCTToPosP
Com: {
GType: ExcitatoryG Delay: 2 MaxDelay: 2 PFail: 0 PFailSWt: false DelLen: 3
}
@@ -7215,7 +7215,7 @@ Learn: {
} }
}
///////////////////////////////////////////////////
-Prjn: OFCnegUSPTpToDistP
+Prjn: OFCnegUSPTpToPosP
Com: {
GType: ExcitatoryG Delay: 2 MaxDelay: 2 PFail: 0 PFailSWt: false DelLen: 3
}
@@ -7235,7 +7235,7 @@ Learn: {
} }
}
///////////////////////////////////////////////////
-Prjn: ACCnegValCTToDistP
+Prjn: ACCnegValCTToPosP
Com: {
GType: ExcitatoryG Delay: 2 MaxDelay: 2 PFail: 0 PFailSWt: false DelLen: 3
}
@@ -7255,7 +7255,7 @@ Learn: {
} }
}
///////////////////////////////////////////////////
-Prjn: ACCnegValPTpToDistP
+Prjn: ACCnegValPTpToPosP
Com: {
GType: ExcitatoryG Delay: 2 MaxDelay: 2 PFail: 0 PFailSWt: false DelLen: 3
}
@@ -7275,7 +7275,7 @@ Learn: {
} }
}
///////////////////////////////////////////////////
-Prjn: ALMCTToDistP
+Prjn: ALMCTToPosP
Com: {
GType: ExcitatoryG Delay: 2 MaxDelay: 2 PFail: 0 PFailSWt: false DelLen: 3
}
@@ -7295,7 +7295,7 @@ Learn: {
} }
}
///////////////////////////////////////////////////
-Prjn: ALMPTpToDistP
+Prjn: ALMPTpToPosP
Com: {
GType: ExcitatoryG Delay: 2 MaxDelay: 2 PFail: 0 PFailSWt: false DelLen: 3
}
@@ -7315,7 +7315,7 @@ Learn: {
} }
}
/////////////////////////////////////////////////
-Layer: Pos
+Layer: Arm
Act: {
Spikes: { Thr: 0.5 VmR: 0.3 Tr: 3 RTau: 1.6667 Exp: true ExpSlope: 0.02 ExpThr: 0.9 MaxHz: 180 ISITau: 5 ISIDt: 0.2 RDt: 0.599988 }
Dend: { GbarExp: 0.2 GbarR: 3 SSGi: 2 HasMod: false ModGain: 1 ModBase: 0 }
@@ -7327,13 +7327,13 @@ Act: {
Clamp: { IsInput: true IsTarget: false Ge: 1.5 Add: false ErrThr: 0.5 }
Noise: { On: false GeHz: 100 Ge: 0.001 GiHz: 200 Gi: 0.001 }
VmRange: { Min: 0.1 Max: 1 }
- Mahp: { Gbar: 0.02 Voff: -30 Vslope: 9 TauMax: 1000 Tadj: 3.2093637 DtMax: 0.001 }
+ Mahp: { Gbar: 0.02 Voff: -30 Vslope: 9 TauMax: 1000 Tadj: 3.209364 DtMax: 0.001 }
Sahp: { Gbar: 0.05 CaTau: 5 Off: 0.8 Slope: 0.02 TauMax: 1 CaDt: 0.2 DtMax: 1 }
KNa: { On: true TrialSlow: false Med: { On: true Rise: 0.02 Max: 0.2 Tau: 200 Dt: 0.005 } Slow: { On: true Rise: 0.001 Max: 0.2 Tau: 1000 Dt: 0.001
} }
NMDA: { Gbar: 0.006 Tau: 100 ITau: 1 MgC: 1.4 Voff: 0 }
MaintNMDA: { Gbar: 0.007 Tau: 200 ITau: 1 MgC: 1.4 Voff: 0 }
- GabaB: { Gbar: 0.015 RiseTau: 45 DecayTau: 50 Gbase: 0.2 GiSpike: 10 MaxTime: 47.41225 TauFact: 2.5811758 RiseDt: 0.022222223 DecayDt: 0.02 }
+ GabaB: { Gbar: 0.015 RiseTau: 45 DecayTau: 50 Gbase: 0.2 GiSpike: 10 MaxTime: 47.41225 TauFact: 2.5811756 RiseDt: 0.022222223 DecayDt: 0.02 }
VGCC: { Gbar: 0.02 Ca: 25 }
AK: { Gbar: 0.1 Hf: 0.076 Mf: 0.075 Voff: 2 Vmax: -37 }
SKCa: { Gbar: 0 C50: 0.5 ActTau: 15 DeTau: 30 KCaR: 0.8 CaRDecayTau: 150 CaInThr: 0.01 CaInTau: 50 }
@@ -7353,7 +7353,7 @@ Learn: {
TrgAvgAct: { On: true GiBaseInit: 0 ErrLRate: 0.02 SynScaleRate: 0.005 SubMean: 0 Permute: true Pool: true TrgRange: { Min: 0.5 Max: 2
} }
RLRate: { On: true SigmoidMin: 0.05 Diff: true SpkThr: 0.1 DiffThr: 0.02 Min: 0.001 }
- NeuroMod: { DAMod: 0 Valence: 0 DAModGain: 0.5 DALRateSign: false DALRateMod: 0 AChLRateMod: 0 AChDisInhib: 0 BurstGain: 1 DipGain: 1 }
+ NeuroMod: { DAMod: NoDAMod Valence: Positive DAModGain: 0.5 DALRateSign: false DALRateMod: 0 AChLRateMod: 0 AChDisInhib: 0 BurstGain: 1 DipGain: 1 }
}
/////////////////////////////////////////////////
Layer: Act
@@ -7368,13 +7368,13 @@ Act: {
Clamp: { IsInput: true IsTarget: false Ge: 1.5 Add: false ErrThr: 0.5 }
Noise: { On: false GeHz: 100 Ge: 0.001 GiHz: 200 Gi: 0.001 }
VmRange: { Min: 0.1 Max: 1 }
- Mahp: { Gbar: 0.02 Voff: -30 Vslope: 9 TauMax: 1000 Tadj: 3.2093637 DtMax: 0.001 }
+ Mahp: { Gbar: 0.02 Voff: -30 Vslope: 9 TauMax: 1000 Tadj: 3.209364 DtMax: 0.001 }
Sahp: { Gbar: 0.05 CaTau: 5 Off: 0.8 Slope: 0.02 TauMax: 1 CaDt: 0.2 DtMax: 1 }
KNa: { On: true TrialSlow: false Med: { On: true Rise: 0.02 Max: 0.2 Tau: 200 Dt: 0.005 } Slow: { On: true Rise: 0.001 Max: 0.2 Tau: 1000 Dt: 0.001
} }
NMDA: { Gbar: 0.006 Tau: 100 ITau: 1 MgC: 1.4 Voff: 0 }
MaintNMDA: { Gbar: 0.007 Tau: 200 ITau: 1 MgC: 1.4 Voff: 0 }
- GabaB: { Gbar: 0.015 RiseTau: 45 DecayTau: 50 Gbase: 0.2 GiSpike: 10 MaxTime: 47.41225 TauFact: 2.5811758 RiseDt: 0.022222223 DecayDt: 0.02 }
+ GabaB: { Gbar: 0.015 RiseTau: 45 DecayTau: 50 Gbase: 0.2 GiSpike: 10 MaxTime: 47.41225 TauFact: 2.5811756 RiseDt: 0.022222223 DecayDt: 0.02 }
VGCC: { Gbar: 0.02 Ca: 25 }
AK: { Gbar: 0.1 Hf: 0.076 Mf: 0.075 Voff: 2 Vmax: -37 }
SKCa: { Gbar: 0 C50: 0.5 ActTau: 15 DeTau: 30 KCaR: 0.8 CaRDecayTau: 150 CaInThr: 0.01 CaInTau: 50 }
@@ -7394,7 +7394,7 @@ Learn: {
TrgAvgAct: { On: true GiBaseInit: 0 ErrLRate: 0.02 SynScaleRate: 0.005 SubMean: 0 Permute: true Pool: true TrgRange: { Min: 0.5 Max: 2
} }
RLRate: { On: true SigmoidMin: 0.05 Diff: true SpkThr: 0.1 DiffThr: 0.02 Min: 0.001 }
- NeuroMod: { DAMod: 0 Valence: 0 DAModGain: 0.5 DALRateSign: false DALRateMod: 0 AChLRateMod: 0 AChDisInhib: 0 BurstGain: 1 DipGain: 1 }
+ NeuroMod: { DAMod: NoDAMod Valence: Positive DAModGain: 0.5 DALRateSign: false DALRateMod: 0 AChLRateMod: 0 AChDisInhib: 0 BurstGain: 1 DipGain: 1 }
}
/////////////////////////////////////////////////
Layer: VL
@@ -7409,13 +7409,13 @@ Act: {
Clamp: { IsInput: false IsTarget: true Ge: 1.5 Add: false ErrThr: 0.5 }
Noise: { On: false GeHz: 100 Ge: 0.001 GiHz: 200 Gi: 0.001 }
VmRange: { Min: 0.1 Max: 1 }
- Mahp: { Gbar: 0.02 Voff: -30 Vslope: 9 TauMax: 1000 Tadj: 3.2093637 DtMax: 0.001 }
+ Mahp: { Gbar: 0.02 Voff: -30 Vslope: 9 TauMax: 1000 Tadj: 3.209364 DtMax: 0.001 }
Sahp: { Gbar: 0.05 CaTau: 5 Off: 0.8 Slope: 0.02 TauMax: 1 CaDt: 0.2 DtMax: 1 }
KNa: { On: true TrialSlow: false Med: { On: true Rise: 0.02 Max: 0.2 Tau: 200 Dt: 0.005 } Slow: { On: true Rise: 0.001 Max: 0.2 Tau: 1000 Dt: 0.001
} }
NMDA: { Gbar: 0.006 Tau: 100 ITau: 1 MgC: 1.4 Voff: 0 }
MaintNMDA: { Gbar: 0.007 Tau: 200 ITau: 1 MgC: 1.4 Voff: 0 }
- GabaB: { Gbar: 0.015 RiseTau: 45 DecayTau: 50 Gbase: 0.2 GiSpike: 10 MaxTime: 47.41225 TauFact: 2.5811758 RiseDt: 0.022222223 DecayDt: 0.02 }
+ GabaB: { Gbar: 0.015 RiseTau: 45 DecayTau: 50 Gbase: 0.2 GiSpike: 10 MaxTime: 47.41225 TauFact: 2.5811756 RiseDt: 0.022222223 DecayDt: 0.02 }
VGCC: { Gbar: 0.02 Ca: 25 }
AK: { Gbar: 0.1 Hf: 0.076 Mf: 0.075 Voff: 2 Vmax: -37 }
SKCa: { Gbar: 0 C50: 0.5 ActTau: 15 DeTau: 30 KCaR: 0.8 CaRDecayTau: 150 CaInThr: 0.01 CaInTau: 50 }
@@ -7435,7 +7435,7 @@ Learn: {
TrgAvgAct: { On: true GiBaseInit: 0 ErrLRate: 0.02 SynScaleRate: 0.005 SubMean: 1 Permute: true Pool: true TrgRange: { Min: 0.5 Max: 2
} }
RLRate: { On: true SigmoidMin: 1 Diff: true SpkThr: 0.1 DiffThr: 0.02 Min: 0.001 }
- NeuroMod: { DAMod: 0 Valence: 0 DAModGain: 0.5 DALRateSign: false DALRateMod: 0 AChLRateMod: 0 AChDisInhib: 0 BurstGain: 1 DipGain: 1 }
+ NeuroMod: { DAMod: NoDAMod Valence: Positive DAModGain: 0.5 DALRateSign: false DALRateMod: 0 AChLRateMod: 0 AChDisInhib: 0 BurstGain: 1 DipGain: 1 }
}
Pulv: {
DriveScale: 0.1 FullDriveAct: 0.6 DriveLayIdx: 65
@@ -7553,13 +7553,13 @@ Act: {
Clamp: { IsInput: false IsTarget: false Ge: 1.5 Add: false ErrThr: 0.5 }
Noise: { On: false GeHz: 100 Ge: 0.001 GiHz: 200 Gi: 0.001 }
VmRange: { Min: 0.1 Max: 1 }
- Mahp: { Gbar: 0.02 Voff: -30 Vslope: 9 TauMax: 1000 Tadj: 3.2093637 DtMax: 0.001 }
+ Mahp: { Gbar: 0.02 Voff: -30 Vslope: 9 TauMax: 1000 Tadj: 3.209364 DtMax: 0.001 }
Sahp: { Gbar: 0.05 CaTau: 5 Off: 0.8 Slope: 0.02 TauMax: 1 CaDt: 0.2 DtMax: 1 }
KNa: { On: true TrialSlow: false Med: { On: true Rise: 0.02 Max: 0.2 Tau: 200 Dt: 0.005 } Slow: { On: true Rise: 0.001 Max: 0.2 Tau: 1000 Dt: 0.001
} }
NMDA: { Gbar: 0.006 Tau: 100 ITau: 1 MgC: 1.4 Voff: 0 }
MaintNMDA: { Gbar: 0.007 Tau: 200 ITau: 1 MgC: 1.4 Voff: 0 }
- GabaB: { Gbar: 0.015 RiseTau: 45 DecayTau: 50 Gbase: 0.2 GiSpike: 10 MaxTime: 47.41225 TauFact: 2.5811758 RiseDt: 0.022222223 DecayDt: 0.02 }
+ GabaB: { Gbar: 0.015 RiseTau: 45 DecayTau: 50 Gbase: 0.2 GiSpike: 10 MaxTime: 47.41225 TauFact: 2.5811756 RiseDt: 0.022222223 DecayDt: 0.02 }
VGCC: { Gbar: 0.02 Ca: 25 }
AK: { Gbar: 0.1 Hf: 0.076 Mf: 0.075 Voff: 2 Vmax: -37 }
SKCa: { Gbar: 0 C50: 0.5 ActTau: 15 DeTau: 30 KCaR: 0.8 CaRDecayTau: 150 CaInThr: 0.01 CaInTau: 50 }
@@ -7579,7 +7579,7 @@ Learn: {
TrgAvgAct: { On: true GiBaseInit: 0 ErrLRate: 0.02 SynScaleRate: 0.005 SubMean: 1 Permute: true Pool: true TrgRange: { Min: 0.5 Max: 2
} }
RLRate: { On: true SigmoidMin: 0.05 Diff: true SpkThr: 0.1 DiffThr: 0.02 Min: 0.001 }
- NeuroMod: { DAMod: 0 Valence: 0 DAModGain: 0.5 DALRateSign: false DALRateMod: 0 AChLRateMod: 0 AChDisInhib: 0 BurstGain: 1 DipGain: 1 }
+ NeuroMod: { DAMod: NoDAMod Valence: Positive DAModGain: 0.5 DALRateSign: false DALRateMod: 0 AChLRateMod: 0 AChDisInhib: 0 BurstGain: 1 DipGain: 1 }
}
Burst: {
ThrRel: 0.1 ThrAbs: 0.1
@@ -7625,7 +7625,7 @@ Learn: {
} }
}
///////////////////////////////////////////////////
-Prjn: DistToM1
+Prjn: PosToM1
Com: {
GType: ExcitatoryG Delay: 2 MaxDelay: 2 PFail: 0 PFailSWt: false DelLen: 3
}
@@ -7697,13 +7697,13 @@ Act: {
Clamp: { IsInput: false IsTarget: false Ge: 1.5 Add: false ErrThr: 0.5 }
Noise: { On: false GeHz: 100 Ge: 0.001 GiHz: 200 Gi: 0.001 }
VmRange: { Min: 0.1 Max: 1 }
- Mahp: { Gbar: 0.02 Voff: -30 Vslope: 9 TauMax: 1000 Tadj: 3.2093637 DtMax: 0.001 }
+ Mahp: { Gbar: 0.02 Voff: -30 Vslope: 9 TauMax: 1000 Tadj: 3.209364 DtMax: 0.001 }
Sahp: { Gbar: 0.05 CaTau: 5 Off: 0.8 Slope: 0.02 TauMax: 1 CaDt: 0.2 DtMax: 1 }
KNa: { On: true TrialSlow: false Med: { On: true Rise: 0.02 Max: 0.2 Tau: 200 Dt: 0.005 } Slow: { On: true Rise: 0.001 Max: 0.2 Tau: 1000 Dt: 0.001
} }
NMDA: { Gbar: 0.006 Tau: 100 ITau: 1 MgC: 1.4 Voff: 0 }
MaintNMDA: { Gbar: 0.007 Tau: 200 ITau: 1 MgC: 1.4 Voff: 0 }
- GabaB: { Gbar: 0.015 RiseTau: 45 DecayTau: 50 Gbase: 0.2 GiSpike: 10 MaxTime: 47.41225 TauFact: 2.5811758 RiseDt: 0.022222223 DecayDt: 0.02 }
+ GabaB: { Gbar: 0.015 RiseTau: 45 DecayTau: 50 Gbase: 0.2 GiSpike: 10 MaxTime: 47.41225 TauFact: 2.5811756 RiseDt: 0.022222223 DecayDt: 0.02 }
VGCC: { Gbar: 0.02 Ca: 25 }
AK: { Gbar: 0.1 Hf: 0.076 Mf: 0.075 Voff: 2 Vmax: -37 }
SKCa: { Gbar: 0 C50: 0.5 ActTau: 15 DeTau: 30 KCaR: 0.8 CaRDecayTau: 150 CaInThr: 0.01 CaInTau: 50 }
@@ -7723,7 +7723,7 @@ Learn: {
TrgAvgAct: { On: true GiBaseInit: 0 ErrLRate: 0.02 SynScaleRate: 0.005 SubMean: 1 Permute: true Pool: true TrgRange: { Min: 0.5 Max: 2
} }
RLRate: { On: true SigmoidMin: 0.05 Diff: true SpkThr: 0.1 DiffThr: 0.02 Min: 0.001 }
- NeuroMod: { DAMod: 0 Valence: 0 DAModGain: 0.5 DALRateSign: false DALRateMod: 0 AChLRateMod: 0 AChDisInhib: 0 BurstGain: 1 DipGain: 1 }
+ NeuroMod: { DAMod: NoDAMod Valence: Positive DAModGain: 0.5 DALRateSign: false DALRateMod: 0 AChLRateMod: 0 AChDisInhib: 0 BurstGain: 1 DipGain: 1 }
}
CT: {
GeGain: 1 DecayTau: 50
@@ -7781,13 +7781,13 @@ Act: {
Clamp: { IsInput: false IsTarget: true Ge: 1.5 Add: false ErrThr: 0.5 }
Noise: { On: false GeHz: 100 Ge: 0.001 GiHz: 200 Gi: 0.001 }
VmRange: { Min: 0.1 Max: 1 }
- Mahp: { Gbar: 0.02 Voff: -30 Vslope: 9 TauMax: 1000 Tadj: 3.2093637 DtMax: 0.001 }
+ Mahp: { Gbar: 0.02 Voff: -30 Vslope: 9 TauMax: 1000 Tadj: 3.209364 DtMax: 0.001 }
Sahp: { Gbar: 0.05 CaTau: 5 Off: 0.8 Slope: 0.02 TauMax: 1 CaDt: 0.2 DtMax: 1 }
KNa: { On: true TrialSlow: false Med: { On: true Rise: 0.02 Max: 0.2 Tau: 200 Dt: 0.005 } Slow: { On: true Rise: 0.001 Max: 0.2 Tau: 1000 Dt: 0.001
} }
NMDA: { Gbar: 0.006 Tau: 100 ITau: 1 MgC: 1.4 Voff: 0 }
MaintNMDA: { Gbar: 0.007 Tau: 200 ITau: 1 MgC: 1.4 Voff: 0 }
- GabaB: { Gbar: 0.015 RiseTau: 45 DecayTau: 50 Gbase: 0.2 GiSpike: 10 MaxTime: 47.41225 TauFact: 2.5811758 RiseDt: 0.022222223 DecayDt: 0.02 }
+ GabaB: { Gbar: 0.015 RiseTau: 45 DecayTau: 50 Gbase: 0.2 GiSpike: 10 MaxTime: 47.41225 TauFact: 2.5811756 RiseDt: 0.022222223 DecayDt: 0.02 }
VGCC: { Gbar: 0.02 Ca: 25 }
AK: { Gbar: 0.1 Hf: 0.076 Mf: 0.075 Voff: 2 Vmax: -37 }
SKCa: { Gbar: 0 C50: 0.5 ActTau: 15 DeTau: 30 KCaR: 0.8 CaRDecayTau: 150 CaInThr: 0.01 CaInTau: 50 }
@@ -7807,7 +7807,7 @@ Learn: {
TrgAvgAct: { On: true GiBaseInit: 0 ErrLRate: 0.02 SynScaleRate: 0.005 SubMean: 1 Permute: true Pool: true TrgRange: { Min: 0.5 Max: 2
} }
RLRate: { On: true SigmoidMin: 1 Diff: true SpkThr: 0.1 DiffThr: 0.02 Min: 0.001 }
- NeuroMod: { DAMod: 0 Valence: 0 DAModGain: 0.5 DALRateSign: false DALRateMod: 0 AChLRateMod: 0 AChDisInhib: 0 BurstGain: 1 DipGain: 1 }
+ NeuroMod: { DAMod: NoDAMod Valence: Positive DAModGain: 0.5 DALRateSign: false DALRateMod: 0 AChLRateMod: 0 AChDisInhib: 0 BurstGain: 1 DipGain: 1 }
}
Pulv: {
DriveScale: 0.1 FullDriveAct: 0.6 DriveLayIdx: 67
@@ -7865,13 +7865,13 @@ Act: {
Clamp: { IsInput: false IsTarget: false Ge: 1.5 Add: false ErrThr: 0.5 }
Noise: { On: false GeHz: 100 Ge: 0.001 GiHz: 200 Gi: 0.001 }
VmRange: { Min: 0.1 Max: 1 }
- Mahp: { Gbar: 0.02 Voff: -30 Vslope: 9 TauMax: 1000 Tadj: 3.2093637 DtMax: 0.001 }
+ Mahp: { Gbar: 0.02 Voff: -30 Vslope: 9 TauMax: 1000 Tadj: 3.209364 DtMax: 0.001 }
Sahp: { Gbar: 0.05 CaTau: 5 Off: 0.8 Slope: 0.02 TauMax: 1 CaDt: 0.2 DtMax: 1 }
KNa: { On: true TrialSlow: false Med: { On: true Rise: 0.02 Max: 0.2 Tau: 200 Dt: 0.005 } Slow: { On: true Rise: 0.001 Max: 0.2 Tau: 1000 Dt: 0.001
} }
NMDA: { Gbar: 0.006 Tau: 100 ITau: 1 MgC: 1.4 Voff: 0 }
MaintNMDA: { Gbar: 0.007 Tau: 200 ITau: 1 MgC: 1.4 Voff: 0 }
- GabaB: { Gbar: 0.015 RiseTau: 45 DecayTau: 50 Gbase: 0.2 GiSpike: 10 MaxTime: 47.41225 TauFact: 2.5811758 RiseDt: 0.022222223 DecayDt: 0.02 }
+ GabaB: { Gbar: 0.015 RiseTau: 45 DecayTau: 50 Gbase: 0.2 GiSpike: 10 MaxTime: 47.41225 TauFact: 2.5811756 RiseDt: 0.022222223 DecayDt: 0.02 }
VGCC: { Gbar: 0.02 Ca: 25 }
AK: { Gbar: 0.1 Hf: 0.076 Mf: 0.075 Voff: 2 Vmax: -37 }
SKCa: { Gbar: 0 C50: 0.5 ActTau: 15 DeTau: 30 KCaR: 0.8 CaRDecayTau: 150 CaInThr: 0.01 CaInTau: 50 }
@@ -7891,7 +7891,7 @@ Learn: {
TrgAvgAct: { On: true GiBaseInit: 0 ErrLRate: 0.02 SynScaleRate: 0.0002 SubMean: 1 Permute: true Pool: true TrgRange: { Min: 0.5 Max: 2
} }
RLRate: { On: true SigmoidMin: 0.05 Diff: true SpkThr: 0.1 DiffThr: 0.02 Min: 0.001 }
- NeuroMod: { DAMod: 0 Valence: 0 DAModGain: 0.5 DALRateSign: false DALRateMod: 0 AChLRateMod: 0 AChDisInhib: 0 BurstGain: 1 DipGain: 1 }
+ NeuroMod: { DAMod: NoDAMod Valence: Positive DAModGain: 0.5 DALRateSign: false DALRateMod: 0 AChLRateMod: 0 AChDisInhib: 0 BurstGain: 1 DipGain: 1 }
}
Burst: {
ThrRel: 0.1 ThrAbs: 0.1
@@ -7957,7 +7957,7 @@ Learn: {
} }
}
///////////////////////////////////////////////////
-Prjn: DistToALM
+Prjn: PosToALM
Com: {
GType: ExcitatoryG Delay: 2 MaxDelay: 2 PFail: 0 PFailSWt: false DelLen: 3
}
@@ -7977,7 +7977,7 @@ Learn: {
} }
}
///////////////////////////////////////////////////
-Prjn: DistPToALM
+Prjn: PosPToALM
Com: {
GType: ExcitatoryG Delay: 2 MaxDelay: 2 PFail: 0 PFailSWt: false DelLen: 3
}
@@ -8029,13 +8029,13 @@ Act: {
Clamp: { IsInput: false IsTarget: false Ge: 1.5 Add: false ErrThr: 0.5 }
Noise: { On: false GeHz: 100 Ge: 0.001 GiHz: 200 Gi: 0.001 }
VmRange: { Min: 0.1 Max: 1 }
- Mahp: { Gbar: 0.02 Voff: -30 Vslope: 9 TauMax: 1000 Tadj: 3.2093637 DtMax: 0.001 }
+ Mahp: { Gbar: 0.02 Voff: -30 Vslope: 9 TauMax: 1000 Tadj: 3.209364 DtMax: 0.001 }
Sahp: { Gbar: 1 CaTau: 5 Off: 0.8 Slope: 0.02 TauMax: 1 CaDt: 0.2 DtMax: 1 }
KNa: { On: true TrialSlow: false Med: { On: true Rise: 0.02 Max: 0.2 Tau: 200 Dt: 0.005 } Slow: { On: true Rise: 0.001 Max: 0.2 Tau: 1000 Dt: 0.001
} }
NMDA: { Gbar: 0.008 Tau: 200 ITau: 1 MgC: 1.4 Voff: 0 }
MaintNMDA: { Gbar: 0.007 Tau: 200 ITau: 1 MgC: 1.4 Voff: 0 }
- GabaB: { Gbar: 0.009 RiseTau: 45 DecayTau: 50 Gbase: 0.2 GiSpike: 10 MaxTime: 47.41225 TauFact: 2.5811758 RiseDt: 0.022222223 DecayDt: 0.02 }
+ GabaB: { Gbar: 0.009 RiseTau: 45 DecayTau: 50 Gbase: 0.2 GiSpike: 10 MaxTime: 47.41225 TauFact: 2.5811756 RiseDt: 0.022222223 DecayDt: 0.02 }
VGCC: { Gbar: 0.02 Ca: 25 }
AK: { Gbar: 0.1 Hf: 0.076 Mf: 0.075 Voff: 2 Vmax: -37 }
SKCa: { Gbar: 0 C50: 0.5 ActTau: 15 DeTau: 30 KCaR: 0.8 CaRDecayTau: 150 CaInThr: 0.01 CaInTau: 50 }
@@ -8055,7 +8055,7 @@ Learn: {
TrgAvgAct: { On: true GiBaseInit: 0 ErrLRate: 0.02 SynScaleRate: 0.0002 SubMean: 1 Permute: true Pool: true TrgRange: { Min: 0.5 Max: 2
} }
RLRate: { On: true SigmoidMin: 0.05 Diff: true SpkThr: 0.1 DiffThr: 0.02 Min: 0.001 }
- NeuroMod: { DAMod: 0 Valence: 0 DAModGain: 0.5 DALRateSign: false DALRateMod: 0 AChLRateMod: 0 AChDisInhib: 0 BurstGain: 1 DipGain: 1 }
+ NeuroMod: { DAMod: NoDAMod Valence: Positive DAModGain: 0.5 DALRateSign: false DALRateMod: 0 AChLRateMod: 0 AChDisInhib: 0 BurstGain: 1 DipGain: 1 }
}
CT: {
GeGain: 2 DecayTau: 50
@@ -8141,7 +8141,7 @@ Learn: {
} }
}
///////////////////////////////////////////////////
-Prjn: DistPToALMCT
+Prjn: PosPToALMCT
Com: {
GType: ExcitatoryG Delay: 2 MaxDelay: 2 PFail: 0 PFailSWt: false DelLen: 3
}
@@ -8173,13 +8173,13 @@ Act: {
Clamp: { IsInput: false IsTarget: false Ge: 1.5 Add: false ErrThr: 0.5 }
Noise: { On: false GeHz: 100 Ge: 0.001 GiHz: 200 Gi: 0.001 }
VmRange: { Min: 0.1 Max: 1 }
- Mahp: { Gbar: 0.02 Voff: -30 Vslope: 9 TauMax: 1000 Tadj: 3.2093637 DtMax: 0.001 }
+ Mahp: { Gbar: 0.02 Voff: -30 Vslope: 9 TauMax: 1000 Tadj: 3.209364 DtMax: 0.001 }
Sahp: { Gbar: 0.01 CaTau: 5 Off: 0.8 Slope: 0.02 TauMax: 1 CaDt: 0.2 DtMax: 1 }
KNa: { On: true TrialSlow: false Med: { On: true Rise: 0.02 Max: 0.2 Tau: 200 Dt: 0.005 } Slow: { On: true Rise: 0.001 Max: 0.2 Tau: 1000 Dt: 0.001
} }
NMDA: { Gbar: 0.006 Tau: 100 ITau: 1 MgC: 1.4 Voff: 0 }
MaintNMDA: { Gbar: 0.007 Tau: 200 ITau: 1 MgC: 1.4 Voff: 0 }
- GabaB: { Gbar: 0.01 RiseTau: 45 DecayTau: 50 Gbase: 0.2 GiSpike: 10 MaxTime: 47.41225 TauFact: 2.5811758 RiseDt: 0.022222223 DecayDt: 0.02 }
+ GabaB: { Gbar: 0.01 RiseTau: 45 DecayTau: 50 Gbase: 0.2 GiSpike: 10 MaxTime: 47.41225 TauFact: 2.5811756 RiseDt: 0.022222223 DecayDt: 0.02 }
VGCC: { Gbar: 0.02 Ca: 25 }
AK: { Gbar: 0.1 Hf: 0.076 Mf: 0.075 Voff: 2 Vmax: -37 }
SKCa: { Gbar: 0 C50: 0.5 ActTau: 15 DeTau: 30 KCaR: 0.8 CaRDecayTau: 150 CaInThr: 0.01 CaInTau: 50 }
@@ -8199,7 +8199,7 @@ Learn: {
TrgAvgAct: { On: false GiBaseInit: 0 ErrLRate: 0.02 SynScaleRate: 0.0002 SubMean: 1 Permute: true Pool: true TrgRange: { Min: 0.5 Max: 2
} }
RLRate: { On: true SigmoidMin: 0.05 Diff: true SpkThr: 0.1 DiffThr: 0.02 Min: 0.001 }
- NeuroMod: { DAMod: 0 Valence: 0 DAModGain: 0.5 DALRateSign: false DALRateMod: 0 AChLRateMod: 0 AChDisInhib: 0 BurstGain: 1 DipGain: 1 }
+ NeuroMod: { DAMod: NoDAMod Valence: Positive DAModGain: 0.5 DALRateSign: false DALRateMod: 0 AChLRateMod: 0 AChDisInhib: 0 BurstGain: 1 DipGain: 1 }
}
///////////////////////////////////////////////////
Prjn: ALMMDToALMPT
@@ -8274,13 +8274,13 @@ Act: {
Clamp: { IsInput: false IsTarget: false Ge: 1.5 Add: false ErrThr: 0.5 }
Noise: { On: false GeHz: 100 Ge: 0.001 GiHz: 200 Gi: 0.001 }
VmRange: { Min: 0.1 Max: 1 }
- Mahp: { Gbar: 0.02 Voff: -30 Vslope: 9 TauMax: 1000 Tadj: 3.2093637 DtMax: 0.001 }
+ Mahp: { Gbar: 0.02 Voff: -30 Vslope: 9 TauMax: 1000 Tadj: 3.209364 DtMax: 0.001 }
Sahp: { Gbar: 0.05 CaTau: 5 Off: 0.8 Slope: 0.02 TauMax: 1 CaDt: 0.2 DtMax: 1 }
KNa: { On: true TrialSlow: false Med: { On: true Rise: 0.02 Max: 0.2 Tau: 200 Dt: 0.005 } Slow: { On: true Rise: 0.001 Max: 0.2 Tau: 1000 Dt: 0.001
} }
NMDA: { Gbar: 0.006 Tau: 100 ITau: 1 MgC: 1.4 Voff: 0 }
MaintNMDA: { Gbar: 0.007 Tau: 200 ITau: 1 MgC: 1.4 Voff: 0 }
- GabaB: { Gbar: 0.015 RiseTau: 45 DecayTau: 50 Gbase: 0.2 GiSpike: 10 MaxTime: 47.41225 TauFact: 2.5811758 RiseDt: 0.022222223 DecayDt: 0.02 }
+ GabaB: { Gbar: 0.015 RiseTau: 45 DecayTau: 50 Gbase: 0.2 GiSpike: 10 MaxTime: 47.41225 TauFact: 2.5811756 RiseDt: 0.022222223 DecayDt: 0.02 }
VGCC: { Gbar: 0.02 Ca: 25 }
AK: { Gbar: 0.1 Hf: 0.076 Mf: 0.075 Voff: 2 Vmax: -37 }
SKCa: { Gbar: 0 C50: 0.5 ActTau: 15 DeTau: 30 KCaR: 0.8 CaRDecayTau: 150 CaInThr: 0.01 CaInTau: 50 }
@@ -8300,7 +8300,7 @@ Learn: {
TrgAvgAct: { On: true GiBaseInit: 0 ErrLRate: 0.02 SynScaleRate: 0.005 SubMean: 1 Permute: true Pool: true TrgRange: { Min: 0.5 Max: 2
} }
RLRate: { On: true SigmoidMin: 0.05 Diff: true SpkThr: 0.1 DiffThr: 0.02 Min: 0.001 }
- NeuroMod: { DAMod: 0 Valence: 0 DAModGain: 0.5 DALRateSign: false DALRateMod: 0 AChLRateMod: 0 AChDisInhib: 1 BurstGain: 1 DipGain: 1 }
+ NeuroMod: { DAMod: NoDAMod Valence: Positive DAModGain: 0.5 DALRateSign: false DALRateMod: 0 AChLRateMod: 0 AChDisInhib: 1 BurstGain: 1 DipGain: 1 }
}
///////////////////////////////////////////////////
Prjn: ALMPTToALMMD
@@ -8375,13 +8375,13 @@ Act: {
Clamp: { IsInput: false IsTarget: false Ge: 1.5 Add: false ErrThr: 0.5 }
Noise: { On: false GeHz: 100 Ge: 0.001 GiHz: 200 Gi: 0.001 }
VmRange: { Min: 0.1 Max: 1 }
- Mahp: { Gbar: 0.02 Voff: -30 Vslope: 9 TauMax: 1000 Tadj: 3.2093637 DtMax: 0.001 }
+ Mahp: { Gbar: 0.02 Voff: -30 Vslope: 9 TauMax: 1000 Tadj: 3.209364 DtMax: 0.001 }
Sahp: { Gbar: 0.1 CaTau: 5 Off: 0.8 Slope: 0.02 TauMax: 1 CaDt: 0.2 DtMax: 1 }
KNa: { On: true TrialSlow: false Med: { On: true Rise: 0.02 Max: 0.2 Tau: 200 Dt: 0.005 } Slow: { On: true Rise: 0.001 Max: 0.2 Tau: 1000 Dt: 0.001
} }
NMDA: { Gbar: 0.006 Tau: 100 ITau: 1 MgC: 1.4 Voff: 0 }
MaintNMDA: { Gbar: 0.007 Tau: 200 ITau: 1 MgC: 1.4 Voff: 0 }
- GabaB: { Gbar: 0.015 RiseTau: 45 DecayTau: 50 Gbase: 0.2 GiSpike: 10 MaxTime: 47.41225 TauFact: 2.5811758 RiseDt: 0.022222223 DecayDt: 0.02 }
+ GabaB: { Gbar: 0.015 RiseTau: 45 DecayTau: 50 Gbase: 0.2 GiSpike: 10 MaxTime: 47.41225 TauFact: 2.5811756 RiseDt: 0.022222223 DecayDt: 0.02 }
VGCC: { Gbar: 0.02 Ca: 25 }
AK: { Gbar: 0.1 Hf: 0.076 Mf: 0.075 Voff: 2 Vmax: -37 }
SKCa: { Gbar: 0 C50: 0.5 ActTau: 15 DeTau: 30 KCaR: 0.8 CaRDecayTau: 150 CaInThr: 0.01 CaInTau: 50 }
@@ -8401,7 +8401,7 @@ Learn: {
TrgAvgAct: { On: true GiBaseInit: 0 ErrLRate: 0.02 SynScaleRate: 0.0002 SubMean: 1 Permute: true Pool: true TrgRange: { Min: 0.5 Max: 2
} }
RLRate: { On: true SigmoidMin: 0.05 Diff: true SpkThr: 0.1 DiffThr: 0.02 Min: 0.001 }
- NeuroMod: { DAMod: 0 Valence: 0 DAModGain: 0.5 DALRateSign: false DALRateMod: 0 AChLRateMod: 0 AChDisInhib: 0 BurstGain: 1 DipGain: 1 }
+ NeuroMod: { DAMod: NoDAMod Valence: Positive DAModGain: 0.5 DALRateSign: false DALRateMod: 0 AChLRateMod: 0 AChDisInhib: 0 BurstGain: 1 DipGain: 1 }
}
CT: {
GeGain: 0.05 DecayTau: 50
@@ -8507,7 +8507,7 @@ Learn: {
} }
}
///////////////////////////////////////////////////
-Prjn: DistToALMPTp
+Prjn: PosToALMPTp
Com: {
GType: ExcitatoryG Delay: 2 MaxDelay: 2 PFail: 0 PFailSWt: false DelLen: 3
}
@@ -8527,7 +8527,7 @@ Learn: {
} }
}
///////////////////////////////////////////////////
-Prjn: DistPToALMPTp
+Prjn: PosPToALMPTp
Com: {
GType: ExcitatoryG Delay: 2 MaxDelay: 2 PFail: 0 PFailSWt: false DelLen: 3
}
diff --git a/examples/boa/params_good/params_layers.txt b/examples/boa/params_good/params_layers.txt
index d5b81dda1..c1c180222 100644
--- a/examples/boa/params_good/params_layers.txt
+++ b/examples/boa/params_good/params_layers.txt
@@ -191,19 +191,19 @@
ACCutilPTp Nominal: 0.10 Params: .PTPredLayer: 0.1
Layer.Gi: 0.80
- CS Nominal: 0.08 Params: .CS: 0.1
+ CS Nominal: 0.04 Params: .CS: 0.1
Layer.Gi: 0.90
- CSP Nominal: 0.08 Params: .CS: 0.1
+ CSP Nominal: 0.04 Params: .CS: 0.1
Layer.Gi: 1.00
- Dist Nominal: 0.10
+ Pos Nominal: 0.10
Layer.Gi: 0.90
- DistP Nominal: 0.10
+ PosP Nominal: 0.10
Layer.Gi: 1.00
- Pos Nominal: 0.10
+ Arm Nominal: 0.10
Layer.Gi: 0.90
Act Nominal: 0.10
diff --git a/examples/boa/params_good/params_nondef.txt b/examples/boa/params_good/params_nondef.txt
index 42e84ca7f..3f3291b2d 100644
--- a/examples/boa/params_good/params_nondef.txt
+++ b/examples/boa/params_good/params_nondef.txt
@@ -131,7 +131,7 @@ VsMtxGo.USposToVsMtxGo.PrjnScale.Abs: 2 // [Def: 1]
VsMtxGo.USposToVsMtxGo.SWts.Init.Var: 0.4 // [Def: 0.25]
VsMtxGo.USposToVsMtxGo.SWts.Init.Sym: false // [Def: true]
VsMtxGo.USposToVsMtxGo.Learn.LRate.Base: 0.02 // [Def: 0.04,0.1,0.2]
-VsMtxGo.BLAPosAcqD1ToVsMtxGo.PrjnScale.Abs: 3 // [Def: 1]
+VsMtxGo.BLAPosAcqD1ToVsMtxGo.PrjnScale.Abs: 2 // [Def: 1]
VsMtxGo.BLAPosAcqD1ToVsMtxGo.SWts.Init.Var: 0.4 // [Def: 0.25]
VsMtxGo.BLAPosAcqD1ToVsMtxGo.SWts.Init.Sym: false // [Def: true]
VsMtxGo.BLAPosAcqD1ToVsMtxGo.Learn.LRate.Base: 0.02 // [Def: 0.04,0.1,0.2]
@@ -169,7 +169,7 @@ VsMtxNo.VsGPeInToVsMtxNo.SWts.Adapt.SigGain: 1 // [Def: 6]
VsMtxNo.ACCnegValToVsMtxNo.SWts.Init.Var: 0.4 // [Def: 0.25]
VsMtxNo.ACCnegValToVsMtxNo.SWts.Init.Sym: false // [Def: true]
VsMtxNo.ACCnegValToVsMtxNo.Learn.LRate.Base: 0.02 // [Def: 0.04,0.1,0.2]
-VsMtxNo.BLANegAcqD2ToVsMtxNo.PrjnScale.Abs: 3 // [Def: 1]
+VsMtxNo.BLANegAcqD2ToVsMtxNo.PrjnScale.Abs: 2 // [Def: 1]
VsMtxNo.BLANegAcqD2ToVsMtxNo.SWts.Init.Var: 0.4 // [Def: 0.25]
VsMtxNo.BLANegAcqD2ToVsMtxNo.SWts.Init.Sym: false // [Def: true]
VsMtxNo.BLANegAcqD2ToVsMtxNo.Learn.LRate.Base: 0.02 // [Def: 0.04,0.1,0.2]
@@ -204,6 +204,7 @@ VsPatch.Inhib.Layer.FB: 0 // [Def: 0.5,1,4]
VsPatch.Inhib.Pool.Gi: 0.5 // [Def: 1,1.1,0.75,0.9]
VsPatch.Inhib.Pool.FB: 0 // [Def: 0.5,1,4]
VsPatch.Learn.RLRate.SigmoidMin: 0.01 // [Def: 0.05,1]
+VsPatch.VSPatch.ThrLRate: 0.001 // [Def: 0,0.002]
VsPatch.ACCnegValPTpToVsPatch.PrjnScale.Abs: 2 // [Def: 1]
VsPatch.ACCnegValPTpToVsPatch.SWts.Init.Mean: 0.1 // [Def: 0.5,0.4]
VsPatch.ACCnegValPTpToVsPatch.SWts.Init.Var: 0.05 // [Def: 0.25]
@@ -260,7 +261,7 @@ BLAPosAcqD1.BLAPosExtD2ToBLAPosAcqD1.SWts.Init.Mean: 0.8 // [Def: 0.5,0.4]
BLAPosAcqD1.BLAPosExtD2ToBLAPosAcqD1.SWts.Init.Var: 0 // [Def: 0.25]
BLAPosAcqD1.BLAPosExtD2ToBLAPosAcqD1.SWts.Init.Sym: false // [Def: true]
BLAPosAcqD1.BLAPosExtD2ToBLAPosAcqD1.SWts.Adapt.SigGain: 1 // [Def: 6]
-BLAPosAcqD1.BLANovelCSToBLAPosAcqD1.PrjnScale.Abs: 2.5 // [Def: 1]
+BLAPosAcqD1.BLANovelCSToBLAPosAcqD1.PrjnScale.Abs: 3 // [Def: 1]
BLAPosAcqD1.BLANovelCSToBLAPosAcqD1.SWts.Init.Var: 0.4 // [Def: 0.25]
BLAPosAcqD1.USposToBLAPosAcqD1.PrjnScale.Abs: 6 // [Def: 1]
BLAPosAcqD1.USposToBLAPosAcqD1.SWts.Init.Mean: 0.75 // [Def: 0.5,0.4]
@@ -369,8 +370,8 @@ OFCposUS.PVposPToOFCposUS.Learn.LRate.Base: 0.01 // [Def: 0.04,0.1,0.2]
OFCposUS.OFCposValToOFCposUS.Learn.LRate.Base: 0.01 // [Def: 0.04,0.1,0.2]
OFCposUS.CSToOFCposUS.Learn.LRate.Base: 0.01 // [Def: 0.04,0.1,0.2]
OFCposUS.CSPToOFCposUS.Learn.LRate.Base: 0.01 // [Def: 0.04,0.1,0.2]
-OFCposUS.DistToOFCposUS.Learn.LRate.Base: 0.01 // [Def: 0.04,0.1,0.2]
-OFCposUS.DistPToOFCposUS.Learn.LRate.Base: 0.01 // [Def: 0.04,0.1,0.2]
+OFCposUS.PosToOFCposUS.Learn.LRate.Base: 0.01 // [Def: 0.04,0.1,0.2]
+OFCposUS.PosPToOFCposUS.Learn.LRate.Base: 0.01 // [Def: 0.04,0.1,0.2]
OFCposUSCT.Acts.Sahp.Gbar: 1 // [Def: 0.05,0.1]
OFCposUSCT.Acts.NMDA.Gbar: 0.008 // [Def: 0,0.006,0.007]
OFCposUSCT.Acts.GabaB.Gbar: 0.009 // [Def: 0,0.012,0.015]
@@ -382,7 +383,7 @@ OFCposUSCT.DrivesPToOFCposUSCT.Learn.LRate.Base: 0.01 // [Def: 0.04,0.1,0.2]
OFCposUSCT.PVposPToOFCposUSCT.Learn.LRate.Base: 0.01 // [Def: 0.04,0.1,0.2]
OFCposUSCT.CSPToOFCposUSCT.Learn.LRate.Base: 0.01 // [Def: 0.04,0.1,0.2]
-OFCposUSCT.DistPToOFCposUSCT.Learn.LRate.Base: 0.01 // [Def: 0.04,0.1,0.2]
+OFCposUSCT.PosPToOFCposUSCT.Learn.LRate.Base: 0.01 // [Def: 0.04,0.1,0.2]
OFCposUSPT.Acts.Sahp.Gbar: 0.01 // [Def: 0.05,0.1]
OFCposUSPT.Acts.GabaB.Gbar: 0.01 // [Def: 0,0.012,0.015]
OFCposUSPT.Inhib.Layer.Gi: 2.4 // [Def: 1,1.1,0.75,0.9]
@@ -427,9 +428,9 @@ OFCposUSPTp.PVposPToOFCposUSPTp.Learn.LRate.Base: 0.01 // [Def: 0.04,0.1,0.2]
OFCposUSPTp.CSToOFCposUSPTp.PrjnScale.Abs: 4 // [Def: 1]
OFCposUSPTp.CSToOFCposUSPTp.Learn.LRate.Base: 0.01 // [Def: 0.04,0.1,0.2]
-OFCposUSPTp.DistPToOFCposUSPTp.Learn.LRate.Base: 0.01 // [Def: 0.04,0.1,0.2]
-OFCposUSPTp.DistToOFCposUSPTp.PrjnScale.Abs: 4 // [Def: 1]
-OFCposUSPTp.DistToOFCposUSPTp.Learn.LRate.Base: 0.01 // [Def: 0.04,0.1,0.2]
+OFCposUSPTp.PosPToOFCposUSPTp.Learn.LRate.Base: 0.01 // [Def: 0.04,0.1,0.2]
+OFCposUSPTp.PosToOFCposUSPTp.PrjnScale.Abs: 4 // [Def: 1]
+OFCposUSPTp.PosToOFCposUSPTp.Learn.LRate.Base: 0.01 // [Def: 0.04,0.1,0.2]
NotMaint.Acts.Init.GeBase: 1.2 // [Def: 0]
NotMaint.Acts.Decay.Glong: 1 // [Def: 0,0.6]
NotMaint.Inhib.Layer.Gi: 0.5 // [Def: 1,1.1,0.75,0.9]
@@ -463,8 +464,8 @@ OFCnegUS.USnegToOFCnegUS.Learn.LRate.Base: 0.01 // [Def: 0.04,0.1,0.2]
OFCnegUS.USnegPToOFCnegUS.Learn.LRate.Base: 0.01 // [Def: 0.04,0.1,0.2]
OFCnegUS.PVnegPToOFCnegUS.Learn.LRate.Base: 0.01 // [Def: 0.04,0.1,0.2]
OFCnegUS.ACCnegValToOFCnegUS.Learn.LRate.Base: 0.01 // [Def: 0.04,0.1,0.2]
-OFCnegUS.DistToOFCnegUS.Learn.LRate.Base: 0.01 // [Def: 0.04,0.1,0.2]
-OFCnegUS.DistPToOFCnegUS.Learn.LRate.Base: 0.01 // [Def: 0.04,0.1,0.2]
+OFCnegUS.PosToOFCnegUS.Learn.LRate.Base: 0.01 // [Def: 0.04,0.1,0.2]
+OFCnegUS.PosPToOFCnegUS.Learn.LRate.Base: 0.01 // [Def: 0.04,0.1,0.2]
OFCnegUSCT.Acts.Sahp.Gbar: 1 // [Def: 0.05,0.1]
OFCnegUSCT.Acts.NMDA.Gbar: 0.008 // [Def: 0,0.006,0.007]
OFCnegUSCT.Acts.GabaB.Gbar: 0.009 // [Def: 0,0.012,0.015]
@@ -474,7 +475,7 @@ OFCnegUSCT.OFCnegUSToOFCnegUSCT.Learn.LRate.Base: 0.01 // [Def: 0.04,0.1,0.2]
OFCnegUSCT.USnegPToOFCnegUSCT.Learn.LRate.Base: 0.01 // [Def: 0.04,0.1,0.2]
OFCnegUSCT.PVnegPToOFCnegUSCT.Learn.LRate.Base: 0.01 // [Def: 0.04,0.1,0.2]
-OFCnegUSCT.DistPToOFCnegUSCT.Learn.LRate.Base: 0.01 // [Def: 0.04,0.1,0.2]
+OFCnegUSCT.PosPToOFCnegUSCT.Learn.LRate.Base: 0.01 // [Def: 0.04,0.1,0.2]
OFCnegUSPT.Acts.Sahp.Gbar: 0.01 // [Def: 0.05,0.1]
OFCnegUSPT.Acts.GabaB.Gbar: 0.01 // [Def: 0,0.012,0.015]
OFCnegUSPT.Inhib.Layer.Gi: 2.4 // [Def: 1,1.1,0.75,0.9]
@@ -515,16 +516,16 @@ OFCnegUSPTp.OFCnegUSPTToOFCnegUSPTp.Learn.LRate.Base: 0.01 // [Def: 0.04,0.1,0.2
OFCnegUSPTp.OFCnegUSCTToOFCnegUSPTp.Learn.LRate.Base: 0.01 // [Def: 0.04,0.1,0.2]
OFCnegUSPTp.USnegPToOFCnegUSPTp.Learn.LRate.Base: 0.01 // [Def: 0.04,0.1,0.2]
OFCnegUSPTp.PVnegPToOFCnegUSPTp.Learn.LRate.Base: 0.01 // [Def: 0.04,0.1,0.2]
-OFCnegUSPTp.DistToOFCnegUSPTp.PrjnScale.Abs: 4 // [Def: 1]
-OFCnegUSPTp.DistToOFCnegUSPTp.Learn.LRate.Base: 0.01 // [Def: 0.04,0.1,0.2]
-OFCnegUSPTp.DistPToOFCnegUSPTp.Learn.LRate.Base: 0.01 // [Def: 0.04,0.1,0.2]
+OFCnegUSPTp.PosToOFCnegUSPTp.PrjnScale.Abs: 4 // [Def: 1]
+OFCnegUSPTp.PosToOFCnegUSPTp.Learn.LRate.Base: 0.01 // [Def: 0.04,0.1,0.2]
+OFCnegUSPTp.PosPToOFCnegUSPTp.Learn.LRate.Base: 0.01 // [Def: 0.04,0.1,0.2]
OFCposVal.OFCposUSToOFCposVal.PrjnScale.Abs: 3 // [Def: 1]
OFCposVal.OFCposUSToOFCposVal.Learn.LRate.Base: 0.01 // [Def: 0.04,0.1,0.2]
OFCposVal.PVposToOFCposVal.Learn.LRate.Base: 0.01 // [Def: 0.04,0.1,0.2]
OFCposVal.PVposPToOFCposVal.Learn.LRate.Base: 0.01 // [Def: 0.04,0.1,0.2]
OFCposVal.ACCutilToOFCposVal.Learn.LRate.Base: 0.01 // [Def: 0.04,0.1,0.2]
-OFCposVal.DistToOFCposVal.Learn.LRate.Base: 0.01 // [Def: 0.04,0.1,0.2]
-OFCposVal.DistPToOFCposVal.Learn.LRate.Base: 0.01 // [Def: 0.04,0.1,0.2]
+OFCposVal.PosToOFCposVal.Learn.LRate.Base: 0.01 // [Def: 0.04,0.1,0.2]
+OFCposVal.PosPToOFCposVal.Learn.LRate.Base: 0.01 // [Def: 0.04,0.1,0.2]
OFCposValCT.Acts.Sahp.Gbar: 1 // [Def: 0.05,0.1]
OFCposValCT.Acts.NMDA.Gbar: 0.008 // [Def: 0,0.006,0.007]
OFCposValCT.Acts.GabaB.Gbar: 0.009 // [Def: 0,0.012,0.015]
@@ -533,7 +534,7 @@ OFCposValCT.Inhib.Pool.Gi: 2.2 // [Def: 1,1.1,0.75,0.9]
OFCposValCT.OFCposValToOFCposValCT.Learn.LRate.Base: 0.01 // [Def: 0.04,0.1,0.2]
OFCposValCT.OFCposValPTToOFCposValCT.Learn.LRate.Base: 0.01 // [Def: 0.04,0.1,0.2]
OFCposValCT.PVposPToOFCposValCT.Learn.LRate.Base: 0.01 // [Def: 0.04,0.1,0.2]
-OFCposValCT.DistPToOFCposValCT.Learn.LRate.Base: 0.01 // [Def: 0.04,0.1,0.2]
+OFCposValCT.PosPToOFCposValCT.Learn.LRate.Base: 0.01 // [Def: 0.04,0.1,0.2]
OFCposValPT.Acts.Sahp.Gbar: 0.01 // [Def: 0.05,0.1]
OFCposValPT.Acts.GabaB.Gbar: 0.01 // [Def: 0,0.012,0.015]
OFCposValPT.Inhib.Layer.Gi: 2.4 // [Def: 1,1.1,0.75,0.9]
@@ -576,16 +577,16 @@ OFCposValPTp.PVposToOFCposValPTp.PrjnScale.Abs: 4 // [Def: 1]
OFCposValPTp.PVposToOFCposValPTp.Learn.LRate.Base: 0.01 // [Def: 0.04,0.1,0.2]
OFCposValPTp.PVposPToOFCposValPTp.Learn.LRate.Base: 0.01 // [Def: 0.04,0.1,0.2]
OFCposValPTp.PVposPToOFCposValPTp.Learn.LRate.Base: 0.01 // [Def: 0.04,0.1,0.2]
-OFCposValPTp.DistPToOFCposValPTp.Learn.LRate.Base: 0.01 // [Def: 0.04,0.1,0.2]
-OFCposValPTp.DistToOFCposValPTp.PrjnScale.Abs: 4 // [Def: 1]
-OFCposValPTp.DistToOFCposValPTp.Learn.LRate.Base: 0.01 // [Def: 0.04,0.1,0.2]
+OFCposValPTp.PosPToOFCposValPTp.Learn.LRate.Base: 0.01 // [Def: 0.04,0.1,0.2]
+OFCposValPTp.PosToOFCposValPTp.PrjnScale.Abs: 4 // [Def: 1]
+OFCposValPTp.PosToOFCposValPTp.Learn.LRate.Base: 0.01 // [Def: 0.04,0.1,0.2]
ACCnegVal.OFCnegUSToACCnegVal.PrjnScale.Abs: 3 // [Def: 1]
ACCnegVal.OFCnegUSToACCnegVal.Learn.LRate.Base: 0.01 // [Def: 0.04,0.1,0.2]
ACCnegVal.PVnegToACCnegVal.Learn.LRate.Base: 0.01 // [Def: 0.04,0.1,0.2]
ACCnegVal.PVnegPToACCnegVal.Learn.LRate.Base: 0.01 // [Def: 0.04,0.1,0.2]
ACCnegVal.ACCutilToACCnegVal.Learn.LRate.Base: 0.01 // [Def: 0.04,0.1,0.2]
-ACCnegVal.DistToACCnegVal.Learn.LRate.Base: 0.01 // [Def: 0.04,0.1,0.2]
-ACCnegVal.DistPToACCnegVal.Learn.LRate.Base: 0.01 // [Def: 0.04,0.1,0.2]
+ACCnegVal.PosToACCnegVal.Learn.LRate.Base: 0.01 // [Def: 0.04,0.1,0.2]
+ACCnegVal.PosPToACCnegVal.Learn.LRate.Base: 0.01 // [Def: 0.04,0.1,0.2]
ACCnegValCT.Acts.Sahp.Gbar: 1 // [Def: 0.05,0.1]
ACCnegValCT.Acts.NMDA.Gbar: 0.008 // [Def: 0,0.006,0.007]
ACCnegValCT.Acts.GabaB.Gbar: 0.009 // [Def: 0,0.012,0.015]
@@ -594,7 +595,7 @@ ACCnegValCT.Inhib.Pool.Gi: 2.2 // [Def: 1,1.1,0.75,0.9]
ACCnegValCT.ACCnegValToACCnegValCT.Learn.LRate.Base: 0.01 // [Def: 0.04,0.1,0.2]
ACCnegValCT.ACCnegValPTToACCnegValCT.Learn.LRate.Base: 0.01 // [Def: 0.04,0.1,0.2]
ACCnegValCT.PVnegPToACCnegValCT.Learn.LRate.Base: 0.01 // [Def: 0.04,0.1,0.2]
-ACCnegValCT.DistPToACCnegValCT.Learn.LRate.Base: 0.01 // [Def: 0.04,0.1,0.2]
+ACCnegValCT.PosPToACCnegValCT.Learn.LRate.Base: 0.01 // [Def: 0.04,0.1,0.2]
ACCnegValPT.Acts.Sahp.Gbar: 0.01 // [Def: 0.05,0.1]
ACCnegValPT.Acts.GabaB.Gbar: 0.01 // [Def: 0,0.012,0.015]
ACCnegValPT.Inhib.Layer.Gi: 2.4 // [Def: 1,1.1,0.75,0.9]
@@ -637,9 +638,9 @@ ACCnegValPTp.PVnegToACCnegValPTp.PrjnScale.Abs: 4 // [Def: 1]
ACCnegValPTp.PVnegToACCnegValPTp.Learn.LRate.Base: 0.01 // [Def: 0.04,0.1,0.2]
ACCnegValPTp.PVnegPToACCnegValPTp.Learn.LRate.Base: 0.01 // [Def: 0.04,0.1,0.2]
ACCnegValPTp.PVnegPToACCnegValPTp.Learn.LRate.Base: 0.01 // [Def: 0.04,0.1,0.2]
-ACCnegValPTp.DistToACCnegValPTp.PrjnScale.Abs: 4 // [Def: 1]
-ACCnegValPTp.DistToACCnegValPTp.Learn.LRate.Base: 0.01 // [Def: 0.04,0.1,0.2]
-ACCnegValPTp.DistPToACCnegValPTp.Learn.LRate.Base: 0.01 // [Def: 0.04,0.1,0.2]
+ACCnegValPTp.PosToACCnegValPTp.PrjnScale.Abs: 4 // [Def: 1]
+ACCnegValPTp.PosToACCnegValPTp.Learn.LRate.Base: 0.01 // [Def: 0.04,0.1,0.2]
+ACCnegValPTp.PosPToACCnegValPTp.Learn.LRate.Base: 0.01 // [Def: 0.04,0.1,0.2]
OFCposValP.ACCutilCTToOFCposValP.Learn.LRate.Base: 0.01 // [Def: 0.04,0.1,0.2]
OFCposValP.ACCutilPTpToOFCposValP.Learn.LRate.Base: 0.01 // [Def: 0.04,0.1,0.2]
ACCnegValP.ACCutilCTToACCnegValP.Learn.LRate.Base: 0.01 // [Def: 0.04,0.1,0.2]
@@ -704,16 +705,16 @@ ACCutilPTp.ACCnegValToACCutilPTp.PrjnScale.Abs: 4 // [Def: 1]
ACCutilPTp.ACCnegValToACCutilPTp.Learn.LRate.Base: 0.01 // [Def: 0.04,0.1,0.2]
CSP.OFCposUSCTToCSP.Learn.LRate.Base: 0.01 // [Def: 0.04,0.1,0.2]
CSP.OFCposUSPTpToCSP.Learn.LRate.Base: 0.01 // [Def: 0.04,0.1,0.2]
-DistP.OFCposUSCTToDistP.Learn.LRate.Base: 0.01 // [Def: 0.04,0.1,0.2]
-DistP.OFCposUSPTpToDistP.Learn.LRate.Base: 0.01 // [Def: 0.04,0.1,0.2]
-DistP.OFCposValCTToDistP.Learn.LRate.Base: 0.01 // [Def: 0.04,0.1,0.2]
-DistP.OFCposValPTpToDistP.Learn.LRate.Base: 0.01 // [Def: 0.04,0.1,0.2]
-DistP.OFCnegUSCTToDistP.Learn.LRate.Base: 0.01 // [Def: 0.04,0.1,0.2]
-DistP.OFCnegUSPTpToDistP.Learn.LRate.Base: 0.01 // [Def: 0.04,0.1,0.2]
-DistP.ACCnegValCTToDistP.Learn.LRate.Base: 0.01 // [Def: 0.04,0.1,0.2]
-DistP.ACCnegValPTpToDistP.Learn.LRate.Base: 0.01 // [Def: 0.04,0.1,0.2]
-DistP.ALMCTToDistP.Learn.LRate.Base: 0.01 // [Def: 0.04,0.1,0.2]
-DistP.ALMPTpToDistP.Learn.LRate.Base: 0.01 // [Def: 0.04,0.1,0.2]
+PosP.OFCposUSCTToPosP.Learn.LRate.Base: 0.01 // [Def: 0.04,0.1,0.2]
+PosP.OFCposUSPTpToPosP.Learn.LRate.Base: 0.01 // [Def: 0.04,0.1,0.2]
+PosP.OFCposValCTToPosP.Learn.LRate.Base: 0.01 // [Def: 0.04,0.1,0.2]
+PosP.OFCposValPTpToPosP.Learn.LRate.Base: 0.01 // [Def: 0.04,0.1,0.2]
+PosP.OFCnegUSCTToPosP.Learn.LRate.Base: 0.01 // [Def: 0.04,0.1,0.2]
+PosP.OFCnegUSPTpToPosP.Learn.LRate.Base: 0.01 // [Def: 0.04,0.1,0.2]
+PosP.ACCnegValCTToPosP.Learn.LRate.Base: 0.01 // [Def: 0.04,0.1,0.2]
+PosP.ACCnegValPTpToPosP.Learn.LRate.Base: 0.01 // [Def: 0.04,0.1,0.2]
+PosP.ALMCTToPosP.Learn.LRate.Base: 0.01 // [Def: 0.04,0.1,0.2]
+PosP.ALMPTpToPosP.Learn.LRate.Base: 0.01 // [Def: 0.04,0.1,0.2]
VL.M1CTToVL.Learn.LRate.Base: 0.01 // [Def: 0.04,0.1,0.2]
VL.ALMCTToVL.Learn.LRate.Base: 0.01 // [Def: 0.04,0.1,0.2]
VL.ALMPTpToVL.Learn.LRate.Base: 0.01 // [Def: 0.04,0.1,0.2]
@@ -728,8 +729,8 @@ M1P.ALMPTpToM1P.Learn.LRate.Base: 0.01 // [Def: 0.04,0.1,0.2]
ALM.M1ToALM.Learn.LRate.Base: 0.01 // [Def: 0.04,0.1,0.2]
ALM.M1PToALM.Learn.LRate.Base: 0.01 // [Def: 0.04,0.1,0.2]
ALM.VLToALM.Learn.LRate.Base: 0.01 // [Def: 0.04,0.1,0.2]
-ALM.DistToALM.Learn.LRate.Base: 0.01 // [Def: 0.04,0.1,0.2]
-ALM.DistPToALM.Learn.LRate.Base: 0.01 // [Def: 0.04,0.1,0.2]
+ALM.PosToALM.Learn.LRate.Base: 0.01 // [Def: 0.04,0.1,0.2]
+ALM.PosPToALM.Learn.LRate.Base: 0.01 // [Def: 0.04,0.1,0.2]
ALMCT.Acts.Sahp.Gbar: 1 // [Def: 0.05,0.1]
ALMCT.Acts.NMDA.Gbar: 0.008 // [Def: 0,0.006,0.007]
ALMCT.Acts.GabaB.Gbar: 0.009 // [Def: 0,0.012,0.015]
@@ -739,7 +740,7 @@ ALMCT.ALMToALMCT.Learn.LRate.Base: 0.01 // [Def: 0.04,0.1,0.2]
ALMCT.ALMPTToALMCT.Learn.LRate.Base: 0.01 // [Def: 0.04,0.1,0.2]
ALMCT.M1PToALMCT.Learn.LRate.Base: 0.01 // [Def: 0.04,0.1,0.2]
ALMCT.VLToALMCT.Learn.LRate.Base: 0.01 // [Def: 0.04,0.1,0.2]
-ALMCT.DistPToALMCT.Learn.LRate.Base: 0.01 // [Def: 0.04,0.1,0.2]
+ALMCT.PosPToALMCT.Learn.LRate.Base: 0.01 // [Def: 0.04,0.1,0.2]
ALMPT.Acts.Sahp.Gbar: 0.01 // [Def: 0.05,0.1]
ALMPT.Acts.GabaB.Gbar: 0.01 // [Def: 0,0.012,0.015]
ALMPT.Inhib.Layer.Gi: 2.4 // [Def: 1,1.1,0.75,0.9]
@@ -782,6 +783,6 @@ ALMPTp.M1PToALMPTp.Learn.LRate.Base: 0.01 // [Def: 0.04,0.1,0.2]
ALMPTp.M1ToALMPTp.PrjnScale.Abs: 4 // [Def: 1]
ALMPTp.M1ToALMPTp.Learn.LRate.Base: 0.01 // [Def: 0.04,0.1,0.2]
ALMPTp.VLToALMPTp.Learn.LRate.Base: 0.01 // [Def: 0.04,0.1,0.2]
-ALMPTp.DistToALMPTp.PrjnScale.Abs: 4 // [Def: 1]
-ALMPTp.DistToALMPTp.Learn.LRate.Base: 0.01 // [Def: 0.04,0.1,0.2]
-ALMPTp.DistPToALMPTp.Learn.LRate.Base: 0.01 // [Def: 0.04,0.1,0.2]
+ALMPTp.PosToALMPTp.PrjnScale.Abs: 4 // [Def: 1]
+ALMPTp.PosToALMPTp.Learn.LRate.Base: 0.01 // [Def: 0.04,0.1,0.2]
+ALMPTp.PosPToALMPTp.Learn.LRate.Base: 0.01 // [Def: 0.04,0.1,0.2]
diff --git a/examples/boa/params_good/params_prjns.txt b/examples/boa/params_good/params_prjns.txt
index 61961ae94..ab69cd87f 100644
--- a/examples/boa/params_good/params_prjns.txt
+++ b/examples/boa/params_good/params_prjns.txt
@@ -75,8 +75,8 @@ Layer: VsMtxGo
VsGPeTA InhibPrjn Abs: 2.00 Rel: 1.00 GScale: 0.04 Rel: 0.50
VsGPeIn InhibPrjn Abs: 0.50 Rel: 1.00 GScale: 0.01 Rel: 0.50
USpos MatrixPrjn Abs: 2.00 Rel: 0.20 GScale: 0.08 Rel: 0.12
- BLAPosAcqD1 MatrixPrjn Abs: 3.00 Rel: 1.00 GScale: 0.62 Rel: 0.62
- Abs Params: .BLAAcqToGo: 3
+ BLAPosAcqD1 MatrixPrjn Abs: 2.00 Rel: 1.00 GScale: 0.42 Rel: 0.62
+ Abs Params: .BLAAcqToGo: 2
Rel Params: .BLAAcqToGo: 1
Drives MatrixPrjn Abs: 1.00 Rel: 1.00 GScale: 1.00 Rel: 1.00
Abs Params: .DrivesToMtx: 1
@@ -94,8 +94,8 @@ Layer: VsMtxNo
VsGPeIn InhibPrjn Abs: 0.50 Rel: 1.00 GScale: 0.01 Rel: 0.50
ACCnegVal MatrixPrjn Abs: 1.00 Rel: 0.10 GScale: 0.01 Rel: 0.04
Rel Params: .PFCToVSMtx: 0.1
- BLANegAcqD2 MatrixPrjn Abs: 3.00 Rel: 1.00 GScale: 0.38 Rel: 0.38
- Abs Params: .BLAAcqToGo: 3
+ BLANegAcqD2 MatrixPrjn Abs: 2.00 Rel: 1.00 GScale: 0.26 Rel: 0.38
+ Abs Params: .BLAAcqToGo: 2
Rel Params: .BLAAcqToGo: 1
BLAPosExtD2 MatrixPrjn Abs: 0.10 Rel: 1.00 GScale: 0.01 Rel: 0.38
Drives MatrixPrjn Abs: 1.00 Rel: 1.00 GScale: 1.00 Rel: 1.00
@@ -117,7 +117,7 @@ Layer: VsPatch
ACCnegValPTp VSPatchPrjn Abs: 2.00 Rel: 1.00 GScale: 0.08 Rel: 0.17
Abs Params: Base:.VSPatchPrjn: 2
Drives VSPatchPrjn Abs: 2.00 Rel: 1.00 GScale: 2.00 Rel: 1.00
- Abs Params: Base:.VSPatchPrjn: 2
+ Abs Params: .VSPatchPrjn: 2
OFCposUSPTp VSPatchPrjn Abs: 2.00 Rel: 1.00 GScale: 0.06 Rel: 0.17
Abs Params: Base:.VSPatchPrjn: 2
OFCposValPTp VSPatchPrjn Abs: 2.00 Rel: 1.00 GScale: 0.08 Rel: 0.17
@@ -136,7 +136,7 @@ Layer: SC
Layer: BLAPosAcqD1
BLAPosExtD2 InhibPrjn Abs: 0.50 Rel: 1.00 GScale: 0.17 Rel: 1.00
Abs Params: Base:.BLAExtToAcq: 0.5
- BLANovelCS ForwardPrjn Abs: 2.50 Rel: 0.10 GScale: 0.16 Rel: 0.06
+ BLANovelCS ForwardPrjn Abs: 3.00 Rel: 0.10 GScale: 0.19 Rel: 0.06
Abs Params: Base:.BLAFromNovel: 3
USpos BLAPrjn Abs: 6.00 Rel: 0.50 GScale: 0.62 Rel: 0.31
CS BLAPrjn Abs: 1.50 Rel: 1.00 GScale: 0.94 Rel: 0.62
@@ -181,8 +181,8 @@ Layer: OFCposUS
OFCposVal BackPrjn Abs: 1.00 Rel: 0.10 GScale: 0.01 Rel: 0.05
CS BackPrjn Abs: 1.00 Rel: 0.10 GScale: 0.05 Rel: 0.05
CSP BackPrjn Abs: 1.00 Rel: 0.10 GScale: 0.05 Rel: 0.05
- Dist BackPrjn Abs: 1.00 Rel: 0.10 GScale: 0.02 Rel: 0.05
- DistP BackPrjn Abs: 1.00 Rel: 0.10 GScale: 0.02 Rel: 0.05
+ Pos BackPrjn Abs: 1.00 Rel: 0.10 GScale: 0.02 Rel: 0.05
+ PosP BackPrjn Abs: 1.00 Rel: 0.10 GScale: 0.02 Rel: 0.05
Layer: OFCposUSCT
OFCposUS CTCtxtPrjn Abs: 1.00 Rel: 1.00 GScale: 0.40 Rel: 0.40
@@ -191,7 +191,7 @@ Layer: OFCposUSCT
USposP BackPrjn Abs: 1.00 Rel: 0.10 GScale: 0.01 Rel: 0.04
PVposP BackPrjn Abs: 1.00 Rel: 0.10 GScale: 0.01 Rel: 0.04
CSP BackPrjn Abs: 1.00 Rel: 0.10 GScale: 0.04 Rel: 0.04
- DistP BackPrjn Abs: 1.00 Rel: 0.10 GScale: 0.02 Rel: 0.04
+ PosP BackPrjn Abs: 1.00 Rel: 0.10 GScale: 0.02 Rel: 0.04
Layer: OFCposUSPT
OFCposUSMD BackPrjn Abs: 1.00 Rel: 1.00 GScale: 1.00 Rel: 1.00
@@ -217,8 +217,8 @@ Layer: OFCposUSPTp
CSP BackPrjn Abs: 1.00 Rel: 0.10 GScale: 0.02 Rel: 0.02
CS ForwardPrjn Abs: 4.00 Rel: 1.00 GScale: 0.89 Rel: 0.22
Abs Params: .ToPTp: 4
- DistP BackPrjn Abs: 1.00 Rel: 0.10 GScale: 0.01 Rel: 0.02
- Dist ForwardPrjn Abs: 4.00 Rel: 1.00 GScale: 0.44 Rel: 0.22
+ PosP BackPrjn Abs: 1.00 Rel: 0.10 GScale: 0.01 Rel: 0.02
+ Pos ForwardPrjn Abs: 4.00 Rel: 1.00 GScale: 0.44 Rel: 0.22
Abs Params: .ToPTp: 4
Layer: NotMaint
@@ -235,15 +235,15 @@ Layer: OFCnegUS
USnegP BackPrjn Abs: 1.00 Rel: 0.10 GScale: 0.02 Rel: 0.04
PVnegP BackPrjn Abs: 1.00 Rel: 0.10 GScale: 0.01 Rel: 0.04
ACCnegVal BackPrjn Abs: 1.00 Rel: 0.10 GScale: 0.01 Rel: 0.04
- Dist ForwardPrjn Abs: 1.00 Rel: 1.00 GScale: 0.19 Rel: 0.38
- DistP BackPrjn Abs: 1.00 Rel: 0.10 GScale: 0.02 Rel: 0.04
+ Pos ForwardPrjn Abs: 1.00 Rel: 1.00 GScale: 0.19 Rel: 0.38
+ PosP BackPrjn Abs: 1.00 Rel: 0.10 GScale: 0.02 Rel: 0.04
Layer: OFCnegUSCT
OFCnegUS CTCtxtPrjn Abs: 1.00 Rel: 1.00 GScale: 0.43 Rel: 0.43
OFCnegUSPT ForwardPrjn Abs: 1.00 Rel: 1.00 GScale: 0.05 Rel: 0.43
USnegP BackPrjn Abs: 1.00 Rel: 0.10 GScale: 0.02 Rel: 0.04
PVnegP BackPrjn Abs: 1.00 Rel: 0.10 GScale: 0.01 Rel: 0.04
- DistP BackPrjn Abs: 1.00 Rel: 0.10 GScale: 0.02 Rel: 0.04
+ PosP BackPrjn Abs: 1.00 Rel: 0.10 GScale: 0.02 Rel: 0.04
Layer: OFCnegUSPT
OFCnegUSMD BackPrjn Abs: 1.00 Rel: 1.00 GScale: 1.00 Rel: 1.00
@@ -265,9 +265,9 @@ Layer: OFCnegUSPTp
OFCnegUSCT ForwardPrjn Abs: 1.00 Rel: 1.00 GScale: 0.10 Rel: 0.30
USnegP BackPrjn Abs: 1.00 Rel: 0.10 GScale: 0.02 Rel: 0.03
PVnegP BackPrjn Abs: 1.00 Rel: 0.10 GScale: 0.01 Rel: 0.03
- Dist ForwardPrjn Abs: 4.00 Rel: 1.00 GScale: 0.61 Rel: 0.30
+ Pos ForwardPrjn Abs: 4.00 Rel: 1.00 GScale: 0.61 Rel: 0.30
Abs Params: .ToPTp: 4
- DistP BackPrjn Abs: 1.00 Rel: 0.10 GScale: 0.02 Rel: 0.03
+ PosP BackPrjn Abs: 1.00 Rel: 0.10 GScale: 0.02 Rel: 0.03
Layer: OFCposVal
OFCposUS ForwardPrjn Abs: 3.00 Rel: 1.00 GScale: 0.25 Rel: 0.42
@@ -275,14 +275,14 @@ Layer: OFCposVal
PVpos ForwardPrjn Abs: 1.00 Rel: 1.00 GScale: 0.14 Rel: 0.42
PVposP BackPrjn Abs: 1.00 Rel: 0.10 GScale: 0.01 Rel: 0.04
ACCutil BackPrjn Abs: 1.00 Rel: 0.10 GScale: 0.01 Rel: 0.04
- Dist BackPrjn Abs: 1.00 Rel: 0.10 GScale: 0.02 Rel: 0.04
- DistP BackPrjn Abs: 1.00 Rel: 0.10 GScale: 0.02 Rel: 0.04
+ Pos BackPrjn Abs: 1.00 Rel: 0.10 GScale: 0.02 Rel: 0.04
+ PosP BackPrjn Abs: 1.00 Rel: 0.10 GScale: 0.02 Rel: 0.04
Layer: OFCposValCT
OFCposVal CTCtxtPrjn Abs: 1.00 Rel: 1.00 GScale: 0.45 Rel: 0.45
OFCposValPT ForwardPrjn Abs: 1.00 Rel: 1.00 GScale: 0.04 Rel: 0.45
PVposP BackPrjn Abs: 1.00 Rel: 0.10 GScale: 0.02 Rel: 0.05
- DistP BackPrjn Abs: 1.00 Rel: 0.10 GScale: 0.02 Rel: 0.05
+ PosP BackPrjn Abs: 1.00 Rel: 0.10 GScale: 0.02 Rel: 0.05
Layer: OFCposValPT
OFCposValMD BackPrjn Abs: 1.00 Rel: 1.00 GScale: 1.00 Rel: 1.00
@@ -306,8 +306,8 @@ Layer: OFCposValPTp
Abs Params: .ToPTp: 4
PVposP BackPrjn Abs: 1.00 Rel: 0.10 GScale: 0.01 Rel: 0.02
PVposP BackPrjn Abs: 1.00 Rel: 0.10 GScale: 0.01 Rel: 0.02
- DistP BackPrjn Abs: 1.00 Rel: 0.10 GScale: 0.01 Rel: 0.02
- Dist ForwardPrjn Abs: 4.00 Rel: 1.00 GScale: 0.47 Rel: 0.23
+ PosP BackPrjn Abs: 1.00 Rel: 0.10 GScale: 0.01 Rel: 0.02
+ Pos ForwardPrjn Abs: 4.00 Rel: 1.00 GScale: 0.47 Rel: 0.23
Abs Params: .ToPTp: 4
Layer: ACCnegVal
@@ -315,14 +315,14 @@ Layer: ACCnegVal
PVneg ForwardPrjn Abs: 1.00 Rel: 1.00 GScale: 0.10 Rel: 0.30
PVnegP BackPrjn Abs: 1.00 Rel: 0.10 GScale: 0.01 Rel: 0.03
ACCutil BackPrjn Abs: 1.00 Rel: 0.10 GScale: 0.01 Rel: 0.03
- Dist ForwardPrjn Abs: 1.00 Rel: 1.00 GScale: 0.15 Rel: 0.30
- DistP BackPrjn Abs: 1.00 Rel: 0.10 GScale: 0.02 Rel: 0.03
+ Pos ForwardPrjn Abs: 1.00 Rel: 1.00 GScale: 0.15 Rel: 0.30
+ PosP BackPrjn Abs: 1.00 Rel: 0.10 GScale: 0.02 Rel: 0.03
Layer: ACCnegValCT
ACCnegVal CTCtxtPrjn Abs: 1.00 Rel: 1.00 GScale: 0.45 Rel: 0.45
ACCnegValPT ForwardPrjn Abs: 1.00 Rel: 1.00 GScale: 0.04 Rel: 0.45
PVnegP BackPrjn Abs: 1.00 Rel: 0.10 GScale: 0.02 Rel: 0.05
- DistP BackPrjn Abs: 1.00 Rel: 0.10 GScale: 0.02 Rel: 0.05
+ PosP BackPrjn Abs: 1.00 Rel: 0.10 GScale: 0.02 Rel: 0.05
Layer: ACCnegValPT
ACCnegValMD BackPrjn Abs: 1.00 Rel: 1.00 GScale: 1.00 Rel: 1.00
@@ -346,9 +346,9 @@ Layer: ACCnegValPTp
Abs Params: .ToPTp: 4
PVnegP BackPrjn Abs: 1.00 Rel: 0.10 GScale: 0.01 Rel: 0.02
PVnegP BackPrjn Abs: 1.00 Rel: 0.10 GScale: 0.01 Rel: 0.02
- Dist ForwardPrjn Abs: 4.00 Rel: 1.00 GScale: 0.47 Rel: 0.23
+ Pos ForwardPrjn Abs: 4.00 Rel: 1.00 GScale: 0.47 Rel: 0.23
Abs Params: .ToPTp: 4
- DistP BackPrjn Abs: 1.00 Rel: 0.10 GScale: 0.01 Rel: 0.02
+ PosP BackPrjn Abs: 1.00 Rel: 0.10 GScale: 0.01 Rel: 0.02
Layer: OFCposValP
ACCutilCT ForwardPrjn Abs: 1.00 Rel: 1.00 GScale: 0.12 Rel: 0.50
@@ -403,9 +403,9 @@ Layer: CSP
OFCposUSCT ForwardPrjn Abs: 1.00 Rel: 1.00 GScale: 0.10 Rel: 0.50
OFCposUSPTp ForwardPrjn Abs: 1.00 Rel: 1.00 GScale: 0.03 Rel: 0.50
-Layer: Dist
+Layer: Pos
-Layer: DistP
+Layer: PosP
OFCposUSCT ForwardPrjn Abs: 1.00 Rel: 1.00 GScale: 0.02 Rel: 0.10
OFCposUSPTp ForwardPrjn Abs: 1.00 Rel: 1.00 GScale: 0.01 Rel: 0.10
OFCposValCT ForwardPrjn Abs: 1.00 Rel: 1.00 GScale: 0.03 Rel: 0.10
@@ -417,7 +417,7 @@ Layer: DistP
ALMCT ForwardPrjn Abs: 1.00 Rel: 1.00 GScale: 0.03 Rel: 0.10
ALMPTp ForwardPrjn Abs: 1.00 Rel: 1.00 GScale: 0.03 Rel: 0.10
-Layer: Pos
+Layer: Arm
Layer: Act
@@ -431,7 +431,7 @@ Layer: VL
Layer: M1
ALM BackPrjn Abs: 1.00 Rel: 0.10 GScale: 0.01 Rel: 0.03
VL BackPrjn Abs: 1.00 Rel: 0.10 GScale: 0.02 Rel: 0.03
- Dist ForwardPrjn Abs: 1.00 Rel: 1.00 GScale: 0.16 Rel: 0.31
+ Pos ForwardPrjn Abs: 1.00 Rel: 1.00 GScale: 0.16 Rel: 0.31
OFCnegUS ForwardPrjn Abs: 1.00 Rel: 1.00 GScale: 0.03 Rel: 0.31
ACCutilPTp ForwardPrjn Abs: 1.00 Rel: 1.00 GScale: 0.08 Rel: 0.31
@@ -447,8 +447,8 @@ Layer: ALM
M1 ForwardPrjn Abs: 1.00 Rel: 1.00 GScale: 0.08 Rel: 0.30
M1P BackPrjn Abs: 1.00 Rel: 0.10 GScale: 0.01 Rel: 0.03
VL BackPrjn Abs: 1.00 Rel: 0.10 GScale: 0.02 Rel: 0.03
- Dist ForwardPrjn Abs: 1.00 Rel: 1.00 GScale: 0.15 Rel: 0.30
- DistP BackPrjn Abs: 1.00 Rel: 0.10 GScale: 0.02 Rel: 0.03
+ Pos ForwardPrjn Abs: 1.00 Rel: 1.00 GScale: 0.15 Rel: 0.30
+ PosP BackPrjn Abs: 1.00 Rel: 0.10 GScale: 0.02 Rel: 0.03
ACCutilPTp ForwardPrjn Abs: 1.00 Rel: 1.00 GScale: 0.08 Rel: 0.30
Layer: ALMCT
@@ -456,7 +456,7 @@ Layer: ALMCT
ALMPT ForwardPrjn Abs: 1.00 Rel: 1.00 GScale: 0.04 Rel: 0.43
M1P BackPrjn Abs: 1.00 Rel: 0.10 GScale: 0.01 Rel: 0.04
VL BackPrjn Abs: 1.00 Rel: 0.10 GScale: 0.02 Rel: 0.04
- DistP BackPrjn Abs: 1.00 Rel: 0.10 GScale: 0.02 Rel: 0.04
+ PosP BackPrjn Abs: 1.00 Rel: 0.10 GScale: 0.02 Rel: 0.04
Layer: ALMPT
ALMMD BackPrjn Abs: 1.00 Rel: 1.00 GScale: 1.00 Rel: 1.00
@@ -480,6 +480,6 @@ Layer: ALMPTp
M1 ForwardPrjn Abs: 4.00 Rel: 1.00 GScale: 0.23 Rel: 0.23
Abs Params: .ToPTp: 4
VL BackPrjn Abs: 1.00 Rel: 0.10 GScale: 0.01 Rel: 0.02
- Dist ForwardPrjn Abs: 4.00 Rel: 1.00 GScale: 0.47 Rel: 0.23
+ Pos ForwardPrjn Abs: 4.00 Rel: 1.00 GScale: 0.47 Rel: 0.23
Abs Params: .ToPTp: 4
- DistP BackPrjn Abs: 1.00 Rel: 0.10 GScale: 0.01 Rel: 0.02
+ PosP BackPrjn Abs: 1.00 Rel: 0.10 GScale: 0.01 Rel: 0.02
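
The second Rel: column in the listing above is each projection's normalized share among all projections its layer receives, i.e. Rel / sum(Rel) over the layer's inputs -- e.g. ALM's six inputs have raw Rel values summing to 3.3, so Rel 1.00 prints as 0.30 and Rel 0.10 as 0.03. A minimal Go sketch of that normalization (prjnScale and relFractions are illustrative helpers, not axon's actual GScale computation, which additionally folds in Abs and activity-dependent factors):

    package main

    import "fmt"

    // prjnScale mirrors one row of the Abs/Rel listing above.
    type prjnScale struct {
        Name string
        Abs  float32 // absolute multiplier, applied directly
        Rel  float32 // relative weight, normalized across a layer's inputs
    }

    // relFractions returns Rel / sum(Rel) for each projection -- a sketch
    // of the normalized "Rel:" column only, not the full GScale value.
    func relFractions(prjns []prjnScale) []float32 {
        var sum float32
        for _, p := range prjns {
            sum += p.Rel
        }
        fracs := make([]float32, len(prjns))
        for i, p := range prjns {
            if sum > 0 {
                fracs[i] = p.Rel / sum
            }
        }
        return fracs
    }

    func main() {
        // ALM's six inputs from the listing: Rel 1.0 x3 and 0.1 x3, sum 3.3
        alm := []prjnScale{
            {"M1", 1, 1}, {"M1P", 1, 0.1}, {"VL", 1, 0.1},
            {"Pos", 1, 1}, {"PosP", 1, 0.1}, {"ACCutilPTp", 1, 1},
        }
        fmt.Println(relFractions(alm)) // ~[0.30 0.03 0.03 0.30 0.03 0.30]
    }
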
diff --git a/examples/deep_fsa/config.go b/examples/deep_fsa/config.go
index 08bee966b..45e045f67 100644
--- a/examples/deep_fsa/config.go
+++ b/examples/deep_fsa/config.go
@@ -10,16 +10,16 @@ package main
type EnvConfig struct {
// env parameters -- can set any field/subfield on Env struct, using standard TOML formatting
- Env map[string]any `desc:"env parameters -- can set any field/subfield on Env struct, using standard TOML formatting"`
+ Env map[string]any
- // [def: 1] number of units per localist output unit -- 1 works better than 5 here
- UnitsPer int `def:"1" desc:"number of units per localist output unit -- 1 works better than 5 here"`
+ // number of units per localist output unit -- 1 works better than 5 here
+ UnitsPer int `def:"1"`
- // [def: ['B','T','S','X','V','P','E']] names of input letters
- InputNames []string `def:"['B','T','S','X','V','P','E']" desc:"names of input letters"`
+ // names of input letters
+ InputNames []string `def:"['B','T','S','X','V','P','E']"`
// map of input names -- initialized during ConfigEnv
- InputNameMap map[string]int `desc:"map of input names -- initialized during Configenv"`
+ InputNameMap map[string]int
}
// InitNameMap is called during ConfigEnv
@@ -37,106 +37,106 @@ func (cfg *EnvConfig) InitNameMap() {
type ParamConfig struct {
// network parameters
- Network map[string]any `desc:"network parameters"`
+ Network map[string]any
// Extra Param Sheet name(s) to use (space separated if multiple) -- must be valid name as listed in compiled-in params or loaded params
- Sheet string `desc:"Extra Param Sheet name(s) to use (space separated if multiple) -- must be valid name as listed in compiled-in params or loaded params"`
+ Sheet string
// extra tag to add to file names and logs saved from this run
- Tag string `desc:"extra tag to add to file names and logs saved from this run"`
+ Tag string
// user note -- describe the run params etc -- like a git commit message for the run
- Note string `desc:"user note -- describe the run params etc -- like a git commit message for the run"`
+ Note string
// Name of the JSON file to input saved parameters from.
- File string `nest:"+" desc:"Name of the JSON file to input saved parameters from."`
+ File string `nest:"+"`
// Save a snapshot of all current param and config settings in a directory named params_ (or _good if Good is true), then quit -- useful for comparing to later changes and seeing multiple views of current params
- SaveAll bool `nest:"+" desc:"Save a snapshot of all current param and config settings in a directory named params_ (or _good if Good is true), then quit -- useful for comparing to later changes and seeing multiple views of current params"`
+ SaveAll bool `nest:"+"`
// for SaveAll, save to params_good for a known good params state. This can be done prior to making a new release after all tests are passing -- add results to git to provide a full diff record of all params over time.
- Good bool `nest:"+" desc:"for SaveAll, save to params_good for a known good params state. This can be done prior to making a new release after all tests are passing -- add results to git to provide a full diff record of all params over time."`
+ Good bool `nest:"+"`
}
// RunConfig has config parameters related to running the sim
type RunConfig struct {
- // [def: true] use the GPU for computation -- generally faster even for small models if NData ~16
- GPU bool `def:"true" desc:"use the GPU for computation -- generally faster even for small models if NData ~16"`
+ // use the GPU for computation -- generally faster even for small models if NData ~16
+ GPU bool `def:"true"`
- // [def: 16] [min: 1] number of data-parallel items to process in parallel per trial -- works (and is significantly faster) for both CPU and GPU. Results in an effective mini-batch of learning. 16 learns just as well as 1 -- no diffs.
- NData int `def:"16" min:"1" desc:"number of data-parallel items to process in parallel per trial -- works (and is significantly faster) for both CPU and GPU. Results in an effective mini-batch of learning. 16 learns just as well as 1 -- no diffs."`
+ // number of data-parallel items to process in parallel per trial -- works (and is significantly faster) for both CPU and GPU. Results in an effective mini-batch of learning. 16 learns just as well as 1 -- no diffs.
+ NData int `def:"16" min:"1"`
- // [def: 0] number of parallel threads for CPU computation -- 0 = use default
- NThreads int `def:"0" desc:"number of parallel threads for CPU computation -- 0 = use default"`
+ // number of parallel threads for CPU computation -- 0 = use default
+ NThreads int `def:"0"`
- // [def: 0] starting run number -- determines the random seed -- runs counts from there -- can do all runs in parallel by launching separate jobs with each run, runs = 1
- Run int `def:"0" desc:"starting run number -- determines the random seed -- runs counts from there -- can do all runs in parallel by launching separate jobs with each run, runs = 1"`
+ // starting run number -- determines the random seed -- runs counts from there -- can do all runs in parallel by launching separate jobs with each run, runs = 1
+ Run int `def:"0"`
- // [def: 5] [min: 1] total number of runs to do when running Train
- NRuns int `def:"5" min:"1" desc:"total number of runs to do when running Train"`
+ // total number of runs to do when running Train
+ NRuns int `def:"5" min:"1"`
- // [def: 100] total number of epochs per run
- NEpochs int `def:"100" desc:"total number of epochs per run"`
+ // total number of epochs per run
+ NEpochs int `def:"100"`
- // [def: 196] total number of trials per epoch. Should be an even multiple of NData.
- NTrials int `def:"196" desc:"total number of trials per epoch. Should be an even multiple of NData."`
+ // total number of trials per epoch. Should be an even multiple of NData.
+ NTrials int `def:"196"`
- // [def: 5] how frequently (in epochs) to compute PCA on hidden representations to measure variance?
- PCAInterval int `def:"5" desc:"how frequently (in epochs) to compute PCA on hidden representations to measure variance?"`
+ // how frequently (in epochs) to compute PCA on hidden representations to measure variance?
+ PCAInterval int `def:"5"`
- // [def: -1] how often to run through all the test patterns, in terms of training epochs -- can use 0 or -1 for no testing
- TestInterval int `def:"-1" desc:"how often to run through all the test patterns, in terms of training epochs -- can use 0 or -1 for no testing"`
+ // how often to run through all the test patterns, in terms of training epochs -- can use 0 or -1 for no testing
+ TestInterval int `def:"-1"`
}
// LogConfig has config parameters related to logging data
-type LogConfig struct {
+type LogConfig struct { //gti:add
// if true, save final weights after each run
- SaveWts bool `desc:"if true, save final weights after each run"`
+ SaveWts bool
- // [def: true] if true, save train epoch log to file, as .epc.tsv typically
- Epoch bool `def:"true" nest:"+" desc:"if true, save train epoch log to file, as .epc.tsv typically"`
+ // if true, save train epoch log to file, as .epc.tsv typically
+ Epoch bool `def:"true" nest:"+"`
- // [def: true] if true, save run log to file, as .run.tsv typically
- Run bool `def:"true" nest:"+" desc:"if true, save run log to file, as .run.tsv typically"`
+ // if true, save run log to file, as .run.tsv typically
+ Run bool `def:"true" nest:"+"`
- // [def: false] if true, save train trial log to file, as .trl.tsv typically. May be large.
- Trial bool `def:"false" nest:"+" desc:"if true, save train trial log to file, as .trl.tsv typically. May be large."`
+ // if true, save train trial log to file, as .trl.tsv typically. May be large.
+ Trial bool `def:"false" nest:"+"`
- // [def: false] if true, save testing epoch log to file, as .tst_epc.tsv typically. In general it is better to copy testing items over to the training epoch log and record there.
- TestEpoch bool `def:"false" nest:"+" desc:"if true, save testing epoch log to file, as .tst_epc.tsv typically. In general it is better to copy testing items over to the training epoch log and record there."`
+ // if true, save testing epoch log to file, as .tst_epc.tsv typically. In general it is better to copy testing items over to the training epoch log and record there.
+ TestEpoch bool `def:"false" nest:"+"`
- // [def: false] if true, save testing trial log to file, as .tst_trl.tsv typically. May be large.
- TestTrial bool `def:"false" nest:"+" desc:"if true, save testing trial log to file, as .tst_trl.tsv typically. May be large."`
+ // if true, save testing trial log to file, as .tst_trl.tsv typically. May be large.
+ TestTrial bool `def:"false" nest:"+"`
// if true, save network activation etc data from testing trials, for later viewing in netview
- NetData bool `desc:"if true, save network activation etc data from testing trials, for later viewing in netview"`
+ NetData bool
}
// Config is a standard Sim config -- use as a starting point.
-type Config struct {
+type Config struct { //gti:add
// specify include files here, and after configuration, it contains list of include files added
- Includes []string `desc:"specify include files here, and after configuration, it contains list of include files added"`
+ Includes []string
- // [def: true] open the GUI -- does not automatically run -- if false, then runs automatically and quits
- GUI bool `def:"true" desc:"open the GUI -- does not automatically run -- if false, then runs automatically and quits"`
+ // open the GUI -- does not automatically run -- if false, then runs automatically and quits
+ GUI bool `def:"true"`
// log debugging information
- Debug bool `desc:"log debugging information"`
+ Debug bool
- // [view: add-fields] environment configuration options
- Env EnvConfig `view:"add-fields" desc:"environment configuration options"`
+ // environment configuration options
+ Env EnvConfig `view:"add-fields"`
- // [view: add-fields] parameter related configuration options
- Params ParamConfig `view:"add-fields" desc:"parameter related configuration options"`
+ // parameter related configuration options
+ Params ParamConfig `view:"add-fields"`
- // [view: add-fields] sim running related configuration options
- Run RunConfig `view:"add-fields" desc:"sim running related configuration options"`
+ // sim running related configuration options
+ Run RunConfig `view:"add-fields"`
- // [view: add-fields] data logging related configuration options
- Log LogConfig `view:"add-fields" desc:"data logging related configuration options"`
+ // data logging related configuration options
+ Log LogConfig `view:"add-fields"`
}
func (cfg *Config) IncludesPtr() *[]string { return &cfg.Includes }
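
The config.go changes above (and the parallel ones in deep_move below) follow one mechanical pattern: the desc:"..." struct tag is dropped and its text becomes a doc comment on the field, while machine-readable tags (def:, min:, nest:, view:) stay in place; structs that should be registered gain a //gti:add directive. A condensed before/after sketch of the convention (OldStyle/NewStyle are illustrative names):

    // Before: documentation lived in a desc tag.
    type OldStyle struct {
        NData int `def:"16" min:"1" desc:"number of data-parallel items to process in parallel per trial"`
    }

    // After: the doc comment is the single source of documentation; goki
    // generate extracts it into gtigen.go, and the value tags remain.
    type NewStyle struct { //gti:add
        // number of data-parallel items to process in parallel per trial
        NData int `def:"16" min:"1"`
    }
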
diff --git a/examples/deep_fsa/deep_fsa.go b/examples/deep_fsa/deep_fsa.go
index 94f849d4b..d7574b835 100644
--- a/examples/deep_fsa/deep_fsa.go
+++ b/examples/deep_fsa/deep_fsa.go
@@ -6,32 +6,34 @@
// finite state automaton problem.
package main
+//go:generate goki generate -add-types
+
import (
"log"
"os"
"github.com/emer/axon/axon"
- "github.com/emer/emergent/econfig"
- "github.com/emer/emergent/egui"
- "github.com/emer/emergent/elog"
- "github.com/emer/emergent/emer"
- "github.com/emer/emergent/env"
- "github.com/emer/emergent/erand"
- "github.com/emer/emergent/estats"
- "github.com/emer/emergent/etime"
- "github.com/emer/emergent/looper"
- "github.com/emer/emergent/netview"
- "github.com/emer/emergent/params"
- "github.com/emer/emergent/prjn"
- "github.com/emer/emergent/relpos"
- "github.com/emer/empi/mpi"
- "github.com/emer/etable/agg"
- "github.com/emer/etable/etensor"
- "github.com/emer/etable/minmax"
- "github.com/emer/etable/tsragg"
- "github.com/goki/gi/gi"
- "github.com/goki/gi/gimain"
- "github.com/goki/mat32"
+ "github.com/emer/emergent/v2/econfig"
+ "github.com/emer/emergent/v2/egui"
+ "github.com/emer/emergent/v2/elog"
+ "github.com/emer/emergent/v2/emer"
+ "github.com/emer/emergent/v2/env"
+ "github.com/emer/emergent/v2/erand"
+ "github.com/emer/emergent/v2/estats"
+ "github.com/emer/emergent/v2/etime"
+ "github.com/emer/emergent/v2/looper"
+ "github.com/emer/emergent/v2/netview"
+ "github.com/emer/emergent/v2/params"
+ "github.com/emer/emergent/v2/prjn"
+ "github.com/emer/emergent/v2/relpos"
+ "github.com/emer/empi/v2/mpi"
+ "goki.dev/etable/v2/agg"
+ "goki.dev/etable/v2/etensor"
+ "goki.dev/etable/v2/minmax"
+ "goki.dev/etable/v2/tsragg"
+ "goki.dev/gi/v2/gi"
+ "goki.dev/gi/v2/gimain"
+ "goki.dev/mat32/v2"
)
func main() {
@@ -39,7 +41,7 @@ func main() {
sim.New()
sim.ConfigAll()
if sim.Config.GUI {
- gimain.Main(sim.RunGUI)
+ gimain.Run(sim.RunGUI)
} else {
sim.RunNoGUI()
}
@@ -53,37 +55,37 @@ func main() {
type Sim struct {
// simulation configuration parameters -- set by .toml config file and / or args
- Config Config `desc:"simulation configuration parameters -- set by .toml config file and / or args"`
+ Config Config
- // [view: no-inline] the network -- click to view / edit parameters for layers, prjns, etc
- Net *axon.Network `view:"no-inline" desc:"the network -- click to view / edit parameters for layers, prjns, etc"`
+ // the network -- click to view / edit parameters for layers, prjns, etc
+ Net *axon.Network `view:"no-inline"`
- // [view: inline] all parameter management
- Params emer.NetParams `view:"inline" desc:"all parameter management"`
+ // all parameter management
+ Params emer.NetParams `view:"inline"`
- // [view: no-inline] contains looper control loops for running sim
- Loops *looper.Manager `view:"no-inline" desc:"contains looper control loops for running sim"`
+ // contains looper control loops for running sim
+ Loops *looper.Manager `view:"no-inline"`
// contains computed statistic values
- Stats estats.Stats `desc:"contains computed statistic values"`
+ Stats estats.Stats
// Contains all the logs and information about the logs.
- Logs elog.Logs `desc:"Contains all the logs and information about the logs.'"`
+ Logs elog.Logs
- // [view: no-inline] Environments
- Envs env.Envs `view:"no-inline" desc:"Environments"`
+ // Environments
+ Envs env.Envs `view:"no-inline"`
// axon timing parameters and state
- Context axon.Context `desc:"axon timing parameters and state"`
+ Context axon.Context
- // [view: inline] netview update parameters
- ViewUpdt netview.ViewUpdt `view:"inline" desc:"netview update parameters"`
+ // netview update parameters
+ ViewUpdt netview.ViewUpdt `view:"inline"`
- // [view: -] manages all the gui elements
- GUI egui.GUI `view:"-" desc:"manages all the gui elements"`
+ // manages all the gui elements
+ GUI egui.GUI `view:"-"`
- // [view: -] a list of random seeds to use for each run
- RndSeeds erand.Seeds `view:"-" desc:"a list of random seeds to use for each run"`
+ // a list of random seeds to use for each run
+ RndSeeds erand.Seeds `view:"-"`
}
// New creates new blank elements and initializes defaults
@@ -593,7 +595,7 @@ func (ss *Sim) Log(mode etime.Modes, time etime.Times) {
func (ss *Sim) ConfigNetView(nv *netview.NetView) {
nv.ViewDefaults()
// nv.Scene().Camera.Pose.Pos.Set(0, 1.5, 3.0) // more "head on" than default which is more "top down"
- // nv.Scene().Camera.LookAt(mat32.Vec3{0, 0, 0}, mat32.Vec3{0, 1, 0})
+ // nv.Scene().Camera.LookAt(mat32.V3(0, 0, 0), mat32.V3(0, 1, 0))
nv.ConfigLabels(ss.Config.Env.InputNames)
@@ -604,14 +606,14 @@ func (ss *Sim) ConfigNetView(nv *netview.NetView) {
lbl.Pose.Pos.Y += .2
lbl.Pose.Pos.Z += .02
lbl.Pose.Pos.X += 0.05 + float32(li)*.06
- lbl.Pose.Scale.SetMul(mat32.Vec3{0.6, 0.4, 0.5})
+ lbl.Pose.Scale.SetMul(mat32.V3(0.6, 0.4, 0.5))
}
}
-// ConfigGui configures the GoGi gui interface for this simulation,
-func (ss *Sim) ConfigGui() *gi.Window {
+// ConfigGUI configures the GoGi gui interface for this simulation.
+func (ss *Sim) ConfigGUI() {
title := "DeepAxon Finite State Automaton"
- ss.GUI.MakeWindow(ss, "DeepFSA", title, `This demonstrates a basic DeepAxon model on the Finite State Automaton problem (e.g., the Reber grammar). See emergent on GitHub.`)
+ ss.GUI.MakeBody(ss, "DeepFSA", title, `This demonstrates a basic DeepAxon model on the Finite State Automaton problem (e.g., the Reber grammar). See emergent on GitHub.`)
ss.GUI.CycleUpdateInterval = 10
nv := ss.GUI.AddNetView("NetView")
@@ -623,45 +625,47 @@ func (ss *Sim) ConfigGui() *gi.Window {
ss.GUI.AddPlots(title, &ss.Logs)
- ss.GUI.AddToolbarItem(egui.ToolbarItem{Label: "Init", Icon: "update",
- Tooltip: "Initialize everything including network weights, and start over. Also applies current params.",
- Active: egui.ActiveStopped,
- Func: func() {
- ss.Init()
- ss.GUI.UpdateWindow()
- },
- })
+ ss.GUI.Body.AddAppBar(func(tb *gi.Toolbar) {
+ ss.GUI.AddToolbarItem(tb, egui.ToolbarItem{Label: "Init", Icon: "update",
+ Tooltip: "Initialize everything including network weights, and start over. Also applies current params.",
+ Active: egui.ActiveStopped,
+ Func: func() {
+ ss.Init()
+ ss.GUI.UpdateWindow()
+ },
+ })
- ss.GUI.AddLooperCtrl(ss.Loops, []etime.Modes{etime.Train, etime.Test})
-
- ////////////////////////////////////////////////
- ss.GUI.ToolBar.AddSeparator("log")
- ss.GUI.AddToolbarItem(egui.ToolbarItem{Label: "Reset RunLog",
- Icon: "reset",
- Tooltip: "Reset the accumulated log of all Runs, which are tagged with the ParamSet used",
- Active: egui.ActiveAlways,
- Func: func() {
- ss.Logs.ResetLog(etime.Train, etime.Run)
- ss.GUI.UpdatePlot(etime.Train, etime.Run)
- },
- })
- ////////////////////////////////////////////////
- ss.GUI.ToolBar.AddSeparator("misc")
- ss.GUI.AddToolbarItem(egui.ToolbarItem{Label: "New Seed",
- Icon: "new",
- Tooltip: "Generate a new initial random seed to get different results. By default, Init re-establishes the same initial seed every time.",
- Active: egui.ActiveAlways,
- Func: func() {
- ss.RndSeeds.NewSeeds()
- },
- })
- ss.GUI.AddToolbarItem(egui.ToolbarItem{Label: "README",
- Icon: "file-markdown",
- Tooltip: "Opens your browser on the README file that contains instructions for how to run this model.",
- Active: egui.ActiveAlways,
- Func: func() {
- gi.OpenURL("https://github.com/emer/axon/blob/master/examples/deep_fsa/README.md")
- },
+ ss.GUI.AddLooperCtrl(tb, ss.Loops, []etime.Modes{etime.Train, etime.Test})
+
+ ////////////////////////////////////////////////
+ gi.NewSeparator(tb)
+ ss.GUI.AddToolbarItem(tb, egui.ToolbarItem{Label: "Reset RunLog",
+ Icon: "reset",
+ Tooltip: "Reset the accumulated log of all Runs, which are tagged with the ParamSet used",
+ Active: egui.ActiveAlways,
+ Func: func() {
+ ss.Logs.ResetLog(etime.Train, etime.Run)
+ ss.GUI.UpdatePlot(etime.Train, etime.Run)
+ },
+ })
+ ////////////////////////////////////////////////
+ gi.NewSeparator(tb)
+ ss.GUI.AddToolbarItem(tb, egui.ToolbarItem{Label: "New Seed",
+ Icon: "new",
+ Tooltip: "Generate a new initial random seed to get different results. By default, Init re-establishes the same initial seed every time.",
+ Active: egui.ActiveAlways,
+ Func: func() {
+ ss.RndSeeds.NewSeeds()
+ },
+ })
+ ss.GUI.AddToolbarItem(tb, egui.ToolbarItem{Label: "README",
+ Icon: "file-markdown",
+ Tooltip: "Opens your browser on the README file that contains instructions for how to run this model.",
+ Active: egui.ActiveAlways,
+ Func: func() {
+ gi.OpenURL("https://github.com/emer/axon/blob/master/examples/deep_fsa/README.md")
+ },
+ })
})
ss.GUI.FinalizeGUI(false)
if ss.Config.Run.GPU {
@@ -670,13 +674,12 @@ func (ss *Sim) ConfigGui() *gi.Window {
ss.Net.GPU.Destroy()
})
}
- return ss.GUI.Win
}
func (ss *Sim) RunGUI() {
ss.Init()
- win := ss.ConfigGui()
- win.StartEventLoop()
+ ss.ConfigGUI()
+ ss.GUI.Body.NewWindow().Run().Wait()
}
func (ss *Sim) RunNoGUI() {
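
The GUI migration in deep_fsa.go is uniform: gimain.Main becomes gimain.Run, MakeWindow becomes MakeBody, toolbar items move inside a Body.AddAppBar callback that receives the *gi.Toolbar (with gi.NewSeparator replacing ToolBar.AddSeparator), and the event loop starts via NewWindow().Run().Wait() rather than a returned *gi.Window. Condensed from the hunks above (an excerpt, not a standalone program; netview/plot setup and the other toolbar items are omitted):

    // v2 GUI skeleton, condensed from the changes above.
    func (ss *Sim) ConfigGUI() {
        title := "DeepAxon Finite State Automaton"
        ss.GUI.MakeBody(ss, "DeepFSA", title, `This demonstrates a basic DeepAxon model on the Finite State Automaton problem (e.g., the Reber grammar). See emergent on GitHub.`)
        ss.GUI.Body.AddAppBar(func(tb *gi.Toolbar) {
            ss.GUI.AddLooperCtrl(tb, ss.Loops, []etime.Modes{etime.Train, etime.Test})
            gi.NewSeparator(tb) // separators are now explicit widgets on the toolbar
        })
        ss.GUI.FinalizeGUI(false)
    }

    func (ss *Sim) RunGUI() {
        ss.Init()
        ss.ConfigGUI()
        ss.GUI.Body.NewWindow().Run().Wait() // replaces win.StartEventLoop()
    }
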
diff --git a/examples/deep_fsa/fsa_env.go b/examples/deep_fsa/fsa_env.go
index 079385931..123f22eaf 100644
--- a/examples/deep_fsa/fsa_env.go
+++ b/examples/deep_fsa/fsa_env.go
@@ -7,9 +7,9 @@ package main
import (
"fmt"
- "github.com/emer/emergent/env"
- "github.com/emer/emergent/erand"
- "github.com/emer/etable/etensor"
+ "github.com/emer/emergent/v2/env"
+ "github.com/emer/emergent/v2/erand"
+ "goki.dev/etable/v2/etensor"
)
// FSAEnv generates states in a finite state automaton (FSA) which is a
@@ -18,49 +18,49 @@ import (
type FSAEnv struct {
// name of this environment
- Nm string `desc:"name of this environment"`
+ Nm string
// description of this environment
- Dsc string `desc:"description of this environment"`
+ Dsc string
- // [view: no-inline] transition matrix, which is a square NxN tensor with outer dim being current state and inner dim having probability of transitioning to that state
- TMat etensor.Float64 `view:"no-inline" desc:"transition matrix, which is a square NxN tensor with outer dim being current state and inner dim having probability of transitioning to that state"`
+ // transition matrix, which is a square NxN tensor with outer dim being current state and inner dim having probability of transitioning to that state
+ TMat etensor.Float64 `view:"no-inline"`
// transition labels, one for each transition cell in TMat matrix
- Labels etensor.String `desc:"transition labels, one for each transition cell in TMat matrix"`
+ Labels etensor.String
// automaton state within FSA that we're in
- AState env.CurPrvInt `desc:"automaton state within FSA that we're in"`
+ AState env.CurPrvInt
// number of next states in current state output (scalar)
- NNext etensor.Int `desc:"number of next states in current state output (scalar)"`
+ NNext etensor.Int
// next states that have non-zero probability, with actual randomly chosen next state at start
- NextStates etensor.Int `desc:"next states that have non-zero probability, with actual randomly chosen next state at start"`
+ NextStates etensor.Int
// transition labels for next states that have non-zero probability, with actual randomly chosen one for next state at start
- NextLabels etensor.String `desc:"transition labels for next states that have non-zero probability, with actual randomly chosen one for next state at start"`
+ NextLabels etensor.String
- // [view: inline] current run of model as provided during Init
- Run env.Ctr `view:"inline" desc:"current run of model as provided during Init"`
+ // current run of model as provided during Init
+ Run env.Ctr `view:"inline"`
- // [view: inline] number of times through Seq.Max number of sequences
- Epoch env.Ctr `view:"inline" desc:"number of times through Seq.Max number of sequences"`
+ // number of times through Seq.Max number of sequences
+ Epoch env.Ctr `view:"inline"`
- // [view: inline] sequence counter within epoch
- Seq env.Ctr `view:"inline" desc:"sequence counter within epoch"`
+ // sequence counter within epoch
+ Seq env.Ctr `view:"inline"`
- // [view: inline] tick counter within sequence
- Tick env.Ctr `view:"inline" desc:"tick counter within sequence"`
+ // tick counter within sequence
+ Tick env.Ctr `view:"inline"`
- // [view: inline] trial is the step counter within sequence - how many steps taken within current sequence -- it resets to 0 at start of each sequence
- Trial env.Ctr `view:"inline" desc:"trial is the step counter within sequence - how many steps taken within current sequence -- it resets to 0 at start of each sequence"`
+ // trial is the step counter within sequence - how many steps taken within current sequence -- it resets to 0 at start of each sequence
+ Trial env.Ctr `view:"inline"`
- // [view: -] random number generator for the env -- all random calls must use this -- set seed here for weight initialization values
- Rand erand.SysRand `view:"-" desc:"random number generator for the env -- all random calls must use this -- set seed here for weight initialization values"`
+ // random number generator for the env -- all random calls must use this -- set seed here for weight initialization values
+ Rand erand.SysRand `view:"-"`
// random seed
- RndSeed int64 `inactive:"+" desc:"random seed"`
+ RndSeed int64 `inactive:"+"`
}
func (ev *FSAEnv) Name() string { return ev.Nm }
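
FSAEnv's TMat, per its doc comment above, is a square NxN matrix whose outer dim is the current state and whose entries give the probability of transitioning to each next state. A self-contained sketch of one sampling step under that scheme (nextState is a hypothetical helper, not the actual FSAEnv method, which draws via its own erand.SysRand):

    package main

    import (
        "fmt"
        "math/rand"
    )

    // nextState samples a successor from row cur of a transition matrix
    // whose rows hold probabilities over next states (each summing to 1).
    func nextState(tmat [][]float64, cur int, rnd *rand.Rand) int {
        p := rnd.Float64()
        cum := 0.0
        for ns, prob := range tmat[cur] {
            cum += prob
            if p < cum {
                return ns
            }
        }
        return len(tmat[cur]) - 1 // guard against rounding shortfall
    }

    func main() {
        // toy 3-state automaton: state 0 branches, states 1 and 2 are deterministic
        tmat := [][]float64{
            {0, 0.5, 0.5},
            {0, 0, 1},
            {1, 0, 0},
        }
        rnd := rand.New(rand.NewSource(1))
        st := 0
        for i := 0; i < 5; i++ {
            st = nextState(tmat, st, rnd)
            fmt.Println(st)
        }
    }
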
diff --git a/examples/deep_fsa/gtigen.go b/examples/deep_fsa/gtigen.go
new file mode 100644
index 000000000..954b91d4c
--- /dev/null
+++ b/examples/deep_fsa/gtigen.go
@@ -0,0 +1,156 @@
+// Code generated by "goki generate -add-types"; DO NOT EDIT.
+
+package main
+
+import (
+ "goki.dev/gti"
+ "goki.dev/ordmap"
+)
+
+var _ = gti.AddType(&gti.Type{
+ Name: "main.EnvConfig",
+ ShortName: "main.EnvConfig",
+ IDName: "env-config",
+ Doc: "EnvConfig has config params for environment\nnote: only adding fields for key Env params that matter for both Network and Env\nother params are set via the Env map data mechanism.",
+ Directives: gti.Directives{},
+ Fields: ordmap.Make([]ordmap.KeyVal[string, *gti.Field]{
+ {"Env", >i.Field{Name: "Env", Type: "map[string]any", LocalType: "map[string]any", Doc: "env parameters -- can set any field/subfield on Env struct, using standard TOML formatting", Directives: gti.Directives{}, Tag: ""}},
+ {"UnitsPer", >i.Field{Name: "UnitsPer", Type: "int", LocalType: "int", Doc: "number of units per localist output unit -- 1 works better than 5 here", Directives: gti.Directives{}, Tag: "def:\"1\""}},
+ {"InputNames", >i.Field{Name: "InputNames", Type: "[]string", LocalType: "[]string", Doc: "] names of input letters", Directives: gti.Directives{}, Tag: "def:\"['B','T','S','X','V','P','E']\""}},
+ {"InputNameMap", >i.Field{Name: "InputNameMap", Type: "map[string]int", LocalType: "map[string]int", Doc: "map of input names -- initialized during Configenv", Directives: gti.Directives{}, Tag: ""}},
+ }),
+ Embeds: ordmap.Make([]ordmap.KeyVal[string, *gti.Field]{}),
+ Methods: ordmap.Make([]ordmap.KeyVal[string, *gti.Method]{}),
+})
+
+var _ = gti.AddType(&gti.Type{
+ Name: "main.ParamConfig",
+ ShortName: "main.ParamConfig",
+ IDName: "param-config",
+ Doc: "ParamConfig has config parameters related to sim params",
+ Directives: gti.Directives{},
+ Fields: ordmap.Make([]ordmap.KeyVal[string, *gti.Field]{
+ {"Network", >i.Field{Name: "Network", Type: "map[string]any", LocalType: "map[string]any", Doc: "network parameters", Directives: gti.Directives{}, Tag: ""}},
+ {"Sheet", >i.Field{Name: "Sheet", Type: "string", LocalType: "string", Doc: "Extra Param Sheet name(s) to use (space separated if multiple) -- must be valid name as listed in compiled-in params or loaded params", Directives: gti.Directives{}, Tag: ""}},
+ {"Tag", >i.Field{Name: "Tag", Type: "string", LocalType: "string", Doc: "extra tag to add to file names and logs saved from this run", Directives: gti.Directives{}, Tag: ""}},
+ {"Note", >i.Field{Name: "Note", Type: "string", LocalType: "string", Doc: "user note -- describe the run params etc -- like a git commit message for the run", Directives: gti.Directives{}, Tag: ""}},
+ {"File", >i.Field{Name: "File", Type: "string", LocalType: "string", Doc: "Name of the JSON file to input saved parameters from.", Directives: gti.Directives{}, Tag: "nest:\"+\""}},
+ {"SaveAll", >i.Field{Name: "SaveAll", Type: "bool", LocalType: "bool", Doc: "Save a snapshot of all current param and config settings in a directory named params_ (or _good if Good is true), then quit -- useful for comparing to later changes and seeing multiple views of current params", Directives: gti.Directives{}, Tag: "nest:\"+\""}},
+ {"Good", >i.Field{Name: "Good", Type: "bool", LocalType: "bool", Doc: "for SaveAll, save to params_good for a known good params state. This can be done prior to making a new release after all tests are passing -- add results to git to provide a full diff record of all params over time.", Directives: gti.Directives{}, Tag: "nest:\"+\""}},
+ }),
+ Embeds: ordmap.Make([]ordmap.KeyVal[string, *gti.Field]{}),
+ Methods: ordmap.Make([]ordmap.KeyVal[string, *gti.Method]{}),
+})
+
+var _ = gti.AddType(&gti.Type{
+ Name: "main.RunConfig",
+ ShortName: "main.RunConfig",
+ IDName: "run-config",
+ Doc: "RunConfig has config parameters related to running the sim",
+ Directives: gti.Directives{},
+ Fields: ordmap.Make([]ordmap.KeyVal[string, *gti.Field]{
+ {"GPU", >i.Field{Name: "GPU", Type: "bool", LocalType: "bool", Doc: "use the GPU for computation -- generally faster even for small models if NData ~16", Directives: gti.Directives{}, Tag: "def:\"true\""}},
+ {"NData", >i.Field{Name: "NData", Type: "int", LocalType: "int", Doc: "number of data-parallel items to process in parallel per trial -- works (and is significantly faster) for both CPU and GPU. Results in an effective mini-batch of learning. 16 learns just as well as 1 -- no diffs.", Directives: gti.Directives{}, Tag: "def:\"16\" min:\"1\""}},
+ {"NThreads", >i.Field{Name: "NThreads", Type: "int", LocalType: "int", Doc: "number of parallel threads for CPU computation -- 0 = use default", Directives: gti.Directives{}, Tag: "def:\"0\""}},
+ {"Run", >i.Field{Name: "Run", Type: "int", LocalType: "int", Doc: "starting run number -- determines the random seed -- runs counts from there -- can do all runs in parallel by launching separate jobs with each run, runs = 1", Directives: gti.Directives{}, Tag: "def:\"0\""}},
+ {"NRuns", >i.Field{Name: "NRuns", Type: "int", LocalType: "int", Doc: "total number of runs to do when running Train", Directives: gti.Directives{}, Tag: "def:\"5\" min:\"1\""}},
+ {"NEpochs", >i.Field{Name: "NEpochs", Type: "int", LocalType: "int", Doc: "total number of epochs per run", Directives: gti.Directives{}, Tag: "def:\"100\""}},
+ {"NTrials", >i.Field{Name: "NTrials", Type: "int", LocalType: "int", Doc: "total number of trials per epoch. Should be an even multiple of NData.", Directives: gti.Directives{}, Tag: "def:\"196\""}},
+ {"PCAInterval", >i.Field{Name: "PCAInterval", Type: "int", LocalType: "int", Doc: "how frequently (in epochs) to compute PCA on hidden representations to measure variance?", Directives: gti.Directives{}, Tag: "def:\"5\""}},
+ {"TestInterval", >i.Field{Name: "TestInterval", Type: "int", LocalType: "int", Doc: "how often to run through all the test patterns, in terms of training epochs -- can use 0 or -1 for no testing", Directives: gti.Directives{}, Tag: "def:\"-1\""}},
+ }),
+ Embeds: ordmap.Make([]ordmap.KeyVal[string, *gti.Field]{}),
+ Methods: ordmap.Make([]ordmap.KeyVal[string, *gti.Method]{}),
+})
+
+var _ = gti.AddType(&gti.Type{
+ Name: "main.LogConfig",
+ ShortName: "main.LogConfig",
+ IDName: "log-config",
+ Doc: "LogConfig has config parameters related to logging data",
+ Directives: gti.Directives{
+ &gti.Directive{Tool: "gti", Directive: "add", Args: []string{}},
+ },
+ Fields: ordmap.Make([]ordmap.KeyVal[string, *gti.Field]{
+ {"SaveWts", >i.Field{Name: "SaveWts", Type: "bool", LocalType: "bool", Doc: "if true, save final weights after each run", Directives: gti.Directives{}, Tag: ""}},
+ {"Epoch", >i.Field{Name: "Epoch", Type: "bool", LocalType: "bool", Doc: "if true, save train epoch log to file, as .epc.tsv typically", Directives: gti.Directives{}, Tag: "def:\"true\" nest:\"+\""}},
+ {"Run", >i.Field{Name: "Run", Type: "bool", LocalType: "bool", Doc: "if true, save run log to file, as .run.tsv typically", Directives: gti.Directives{}, Tag: "def:\"true\" nest:\"+\""}},
+ {"Trial", >i.Field{Name: "Trial", Type: "bool", LocalType: "bool", Doc: "if true, save train trial log to file, as .trl.tsv typically. May be large.", Directives: gti.Directives{}, Tag: "def:\"false\" nest:\"+\""}},
+ {"TestEpoch", >i.Field{Name: "TestEpoch", Type: "bool", LocalType: "bool", Doc: "if true, save testing epoch log to file, as .tst_epc.tsv typically. In general it is better to copy testing items over to the training epoch log and record there.", Directives: gti.Directives{}, Tag: "def:\"false\" nest:\"+\""}},
+ {"TestTrial", >i.Field{Name: "TestTrial", Type: "bool", LocalType: "bool", Doc: "if true, save testing trial log to file, as .tst_trl.tsv typically. May be large.", Directives: gti.Directives{}, Tag: "def:\"false\" nest:\"+\""}},
+ {"NetData", >i.Field{Name: "NetData", Type: "bool", LocalType: "bool", Doc: "if true, save network activation etc data from testing trials, for later viewing in netview", Directives: gti.Directives{}, Tag: ""}},
+ }),
+ Embeds: ordmap.Make([]ordmap.KeyVal[string, *gti.Field]{}),
+ Methods: ordmap.Make([]ordmap.KeyVal[string, *gti.Method]{}),
+})
+
+var _ = gti.AddType(&gti.Type{
+ Name: "main.Config",
+ ShortName: "main.Config",
+ IDName: "config",
+ Doc: "Config is a standard Sim config -- use as a starting point.",
+ Directives: gti.Directives{
+ &gti.Directive{Tool: "gti", Directive: "add", Args: []string{}},
+ },
+ Fields: ordmap.Make([]ordmap.KeyVal[string, *gti.Field]{
+ {"Includes", >i.Field{Name: "Includes", Type: "[]string", LocalType: "[]string", Doc: "specify include files here, and after configuration, it contains list of include files added", Directives: gti.Directives{}, Tag: ""}},
+ {"GUI", >i.Field{Name: "GUI", Type: "bool", LocalType: "bool", Doc: "open the GUI -- does not automatically run -- if false, then runs automatically and quits", Directives: gti.Directives{}, Tag: "def:\"true\""}},
+ {"Debug", >i.Field{Name: "Debug", Type: "bool", LocalType: "bool", Doc: "log debugging information", Directives: gti.Directives{}, Tag: ""}},
+ {"Env", >i.Field{Name: "Env", Type: "github.com/emer/axon/examples/deep_fsa.EnvConfig", LocalType: "EnvConfig", Doc: "environment configuration options", Directives: gti.Directives{}, Tag: "view:\"add-fields\""}},
+ {"Params", >i.Field{Name: "Params", Type: "github.com/emer/axon/examples/deep_fsa.ParamConfig", LocalType: "ParamConfig", Doc: "parameter related configuration options", Directives: gti.Directives{}, Tag: "view:\"add-fields\""}},
+ {"Run", >i.Field{Name: "Run", Type: "github.com/emer/axon/examples/deep_fsa.RunConfig", LocalType: "RunConfig", Doc: "sim running related configuration options", Directives: gti.Directives{}, Tag: "view:\"add-fields\""}},
+ {"Log", >i.Field{Name: "Log", Type: "github.com/emer/axon/examples/deep_fsa.LogConfig", LocalType: "LogConfig", Doc: "data logging related configuration options", Directives: gti.Directives{}, Tag: "view:\"add-fields\""}},
+ }),
+ Embeds: ordmap.Make([]ordmap.KeyVal[string, *gti.Field]{}),
+ Methods: ordmap.Make([]ordmap.KeyVal[string, *gti.Method]{}),
+})
+
+var _ = gti.AddType(&gti.Type{
+ Name: "main.Sim",
+ ShortName: "main.Sim",
+ IDName: "sim",
+ Doc: "Sim encapsulates the entire simulation model, and we define all the\nfunctionality as methods on this struct. This structure keeps all relevant\nstate information organized and available without having to pass everything around\nas arguments to methods, and provides the core GUI interface (note the view tags\nfor the fields which provide hints to how things should be displayed).",
+ Directives: gti.Directives{},
+ Fields: ordmap.Make([]ordmap.KeyVal[string, *gti.Field]{
+ {"Config", >i.Field{Name: "Config", Type: "github.com/emer/axon/examples/deep_fsa.Config", LocalType: "Config", Doc: "simulation configuration parameters -- set by .toml config file and / or args", Directives: gti.Directives{}, Tag: ""}},
+ {"Net", >i.Field{Name: "Net", Type: "*github.com/emer/axon/axon.Network", LocalType: "*axon.Network", Doc: "the network -- click to view / edit parameters for layers, prjns, etc", Directives: gti.Directives{}, Tag: "view:\"no-inline\""}},
+ {"Params", >i.Field{Name: "Params", Type: "github.com/emer/emergent/v2/emer.NetParams", LocalType: "emer.NetParams", Doc: "all parameter management", Directives: gti.Directives{}, Tag: "view:\"inline\""}},
+ {"Loops", >i.Field{Name: "Loops", Type: "*github.com/emer/emergent/v2/looper.Manager", LocalType: "*looper.Manager", Doc: "contains looper control loops for running sim", Directives: gti.Directives{}, Tag: "view:\"no-inline\""}},
+ {"Stats", >i.Field{Name: "Stats", Type: "github.com/emer/emergent/v2/estats.Stats", LocalType: "estats.Stats", Doc: "contains computed statistic values", Directives: gti.Directives{}, Tag: ""}},
+ {"Logs", >i.Field{Name: "Logs", Type: "github.com/emer/emergent/v2/elog.Logs", LocalType: "elog.Logs", Doc: "Contains all the logs and information about the logs.'", Directives: gti.Directives{}, Tag: ""}},
+ {"Envs", >i.Field{Name: "Envs", Type: "github.com/emer/emergent/v2/env.Envs", LocalType: "env.Envs", Doc: "Environments", Directives: gti.Directives{}, Tag: "view:\"no-inline\""}},
+ {"Context", >i.Field{Name: "Context", Type: "github.com/emer/axon/axon.Context", LocalType: "axon.Context", Doc: "axon timing parameters and state", Directives: gti.Directives{}, Tag: ""}},
+ {"ViewUpdt", >i.Field{Name: "ViewUpdt", Type: "github.com/emer/emergent/v2/netview.ViewUpdt", LocalType: "netview.ViewUpdt", Doc: "netview update parameters", Directives: gti.Directives{}, Tag: "view:\"inline\""}},
+ {"GUI", >i.Field{Name: "GUI", Type: "github.com/emer/emergent/v2/egui.GUI", LocalType: "egui.GUI", Doc: "manages all the gui elements", Directives: gti.Directives{}, Tag: "view:\"-\""}},
+ {"RndSeeds", >i.Field{Name: "RndSeeds", Type: "github.com/emer/emergent/v2/erand.Seeds", LocalType: "erand.Seeds", Doc: "a list of random seeds to use for each run", Directives: gti.Directives{}, Tag: "view:\"-\""}},
+ }),
+ Embeds: ordmap.Make([]ordmap.KeyVal[string, *gti.Field]{}),
+ Methods: ordmap.Make([]ordmap.KeyVal[string, *gti.Method]{}),
+})
+
+var _ = gti.AddType(&gti.Type{
+ Name: "main.FSAEnv",
+ ShortName: "main.FSAEnv",
+ IDName: "fsa-env",
+ Doc: "FSAEnv generates states in a finite state automaton (FSA) which is a\nsimple form of grammar for creating non-deterministic but still\noverall structured sequences.",
+ Directives: gti.Directives{},
+ Fields: ordmap.Make([]ordmap.KeyVal[string, *gti.Field]{
+ {"Nm", >i.Field{Name: "Nm", Type: "string", LocalType: "string", Doc: "name of this environment", Directives: gti.Directives{}, Tag: ""}},
+ {"Dsc", >i.Field{Name: "Dsc", Type: "string", LocalType: "string", Doc: "description of this environment", Directives: gti.Directives{}, Tag: ""}},
+ {"TMat", >i.Field{Name: "TMat", Type: "goki.dev/etable/v2/etensor.Float64", LocalType: "etensor.Float64", Doc: "transition matrix, which is a square NxN tensor with outer dim being current state and inner dim having probability of transitioning to that state", Directives: gti.Directives{}, Tag: "view:\"no-inline\""}},
+ {"Labels", >i.Field{Name: "Labels", Type: "goki.dev/etable/v2/etensor.String", LocalType: "etensor.String", Doc: "transition labels, one for each transition cell in TMat matrix", Directives: gti.Directives{}, Tag: ""}},
+ {"AState", >i.Field{Name: "AState", Type: "github.com/emer/emergent/v2/env.CurPrvInt", LocalType: "env.CurPrvInt", Doc: "automaton state within FSA that we're in", Directives: gti.Directives{}, Tag: ""}},
+ {"NNext", >i.Field{Name: "NNext", Type: "goki.dev/etable/v2/etensor.Int", LocalType: "etensor.Int", Doc: "number of next states in current state output (scalar)", Directives: gti.Directives{}, Tag: ""}},
+ {"NextStates", >i.Field{Name: "NextStates", Type: "goki.dev/etable/v2/etensor.Int", LocalType: "etensor.Int", Doc: "next states that have non-zero probability, with actual randomly chosen next state at start", Directives: gti.Directives{}, Tag: ""}},
+ {"NextLabels", >i.Field{Name: "NextLabels", Type: "goki.dev/etable/v2/etensor.String", LocalType: "etensor.String", Doc: "transition labels for next states that have non-zero probability, with actual randomly chosen one for next state at start", Directives: gti.Directives{}, Tag: ""}},
+ {"Run", >i.Field{Name: "Run", Type: "github.com/emer/emergent/v2/env.Ctr", LocalType: "env.Ctr", Doc: "current run of model as provided during Init", Directives: gti.Directives{}, Tag: "view:\"inline\""}},
+ {"Epoch", >i.Field{Name: "Epoch", Type: "github.com/emer/emergent/v2/env.Ctr", LocalType: "env.Ctr", Doc: "number of times through Seq.Max number of sequences", Directives: gti.Directives{}, Tag: "view:\"inline\""}},
+ {"Seq", >i.Field{Name: "Seq", Type: "github.com/emer/emergent/v2/env.Ctr", LocalType: "env.Ctr", Doc: "sequence counter within epoch", Directives: gti.Directives{}, Tag: "view:\"inline\""}},
+ {"Tick", >i.Field{Name: "Tick", Type: "github.com/emer/emergent/v2/env.Ctr", LocalType: "env.Ctr", Doc: "tick counter within sequence", Directives: gti.Directives{}, Tag: "view:\"inline\""}},
+ {"Trial", >i.Field{Name: "Trial", Type: "github.com/emer/emergent/v2/env.Ctr", LocalType: "env.Ctr", Doc: "trial is the step counter within sequence - how many steps taken within current sequence -- it resets to 0 at start of each sequence", Directives: gti.Directives{}, Tag: "view:\"inline\""}},
+ {"Rand", >i.Field{Name: "Rand", Type: "github.com/emer/emergent/v2/erand.SysRand", LocalType: "erand.SysRand", Doc: "random number generator for the env -- all random calls must use this -- set seed here for weight initialization values", Directives: gti.Directives{}, Tag: "view:\"-\""}},
+ {"RndSeed", >i.Field{Name: "RndSeed", Type: "int64", LocalType: "int64", Doc: "random seed", Directives: gti.Directives{}, Tag: "inactive:\"+\""}},
+ }),
+ Embeds: ordmap.Make([]ordmap.KeyVal[string, *gti.Field]{}),
+ Methods: ordmap.Make([]ordmap.KeyVal[string, *gti.Method]{}),
+})
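
This generated file is the output of the //go:generate goki generate -add-types directive added to deep_fsa.go above: at package init it registers each struct's doc comments and tags as gti.Type metadata, which replaces the old runtime reflection over desc tags. The correspondence is direct -- a source declaration like this excerpt (repeated from config.go) yields the matching gti.AddType entry, with Doc taken from the comment and Tag from the struct tag:

    //go:generate goki generate -add-types

    // RunConfig has config parameters related to running the sim
    type RunConfig struct { //gti:add
        // total number of epochs per run
        NEpochs int `def:"100"`
    }

Running go generate ./... (or goki generate directly) regenerates gtigen.go; per its header, the file should never be edited by hand.
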
diff --git a/examples/deep_fsa/params.go b/examples/deep_fsa/params.go
index 690c2846b..5f67b61d5 100644
--- a/examples/deep_fsa/params.go
+++ b/examples/deep_fsa/params.go
@@ -5,8 +5,8 @@
package main
import (
- "github.com/emer/emergent/netparams"
- "github.com/emer/emergent/params"
+ "github.com/emer/emergent/v2/netparams"
+ "github.com/emer/emergent/v2/params"
)
// ParamSets is the default set of parameters -- Base is always applied, and others can be optionally
diff --git a/examples/deep_fsa/params_good/params_all.txt b/examples/deep_fsa/params_good/params_all.txt
index 072c1f0a3..6b6eceb8d 100644
--- a/examples/deep_fsa/params_good/params_all.txt
+++ b/examples/deep_fsa/params_good/params_all.txt
@@ -11,13 +11,13 @@ Act: {
Clamp: { IsInput: true IsTarget: false Ge: 1.5 Add: false ErrThr: 0.5 }
Noise: { On: false GeHz: 100 Ge: 0.001 GiHz: 200 Gi: 0.001 }
VmRange: { Min: 0.1 Max: 1 }
- Mahp: { Gbar: 0.02 Voff: -30 Vslope: 9 TauMax: 1000 Tadj: 3.2093637 DtMax: 0.001 }
+ Mahp: { Gbar: 0.02 Voff: -30 Vslope: 9 TauMax: 1000 Tadj: 3.209364 DtMax: 0.001 }
Sahp: { Gbar: 0.1 CaTau: 10 Off: 0.8 Slope: 0.02 TauMax: 1 CaDt: 0.1 DtMax: 1 }
KNa: { On: true TrialSlow: false Med: { On: true Rise: 0.02 Max: 0.2 Tau: 200 Dt: 0.005 } Slow: { On: true Rise: 0.001 Max: 0.2 Tau: 1000 Dt: 0.001
} }
NMDA: { Gbar: 0.006 Tau: 100 ITau: 1 MgC: 1.4 Voff: 0 }
MaintNMDA: { Gbar: 0.007 Tau: 200 ITau: 1 MgC: 1.4 Voff: 0 }
- GabaB: { Gbar: 0.015 RiseTau: 45 DecayTau: 50 Gbase: 0.2 GiSpike: 10 MaxTime: 47.41225 TauFact: 2.5811758 RiseDt: 0.022222223 DecayDt: 0.02 }
+ GabaB: { Gbar: 0.015 RiseTau: 45 DecayTau: 50 Gbase: 0.2 GiSpike: 10 MaxTime: 47.41225 TauFact: 2.5811756 RiseDt: 0.022222223 DecayDt: 0.02 }
VGCC: { Gbar: 0.02 Ca: 25 }
AK: { Gbar: 0.1 Hf: 0.076 Mf: 0.075 Voff: 2 Vmax: -37 }
SKCa: { Gbar: 0 C50: 0.5 ActTau: 15 DeTau: 30 KCaR: 0.8 CaRDecayTau: 150 CaInThr: 0.01 CaInTau: 50 }
@@ -37,7 +37,7 @@ Learn: {
TrgAvgAct: { On: true GiBaseInit: 0 ErrLRate: 0.02 SynScaleRate: 0.005 SubMean: 1 Permute: true Pool: true TrgRange: { Min: 0.5 Max: 2
} }
RLRate: { On: true SigmoidMin: 0.05 Diff: true SpkThr: 0.1 DiffThr: 0.02 Min: 0.001 }
- NeuroMod: { DAMod: 0 Valence: 0 DAModGain: 0.5 DALRateSign: false DALRateMod: 0 AChLRateMod: 0 AChDisInhib: 0 BurstGain: 1 DipGain: 1 }
+ NeuroMod: { DAMod: NoDAMod Valence: Positive DAModGain: 0.5 DALRateSign: false DALRateMod: 0 AChLRateMod: 0 AChDisInhib: 0 BurstGain: 1 DipGain: 1 }
}
/////////////////////////////////////////////////
Layer: InputP
@@ -52,13 +52,13 @@ Act: {
Clamp: { IsInput: false IsTarget: true Ge: 1.5 Add: false ErrThr: 0.5 }
Noise: { On: false GeHz: 100 Ge: 0.001 GiHz: 200 Gi: 0.001 }
VmRange: { Min: 0.1 Max: 1 }
- Mahp: { Gbar: 0.02 Voff: -30 Vslope: 9 TauMax: 1000 Tadj: 3.2093637 DtMax: 0.001 }
+ Mahp: { Gbar: 0.02 Voff: -30 Vslope: 9 TauMax: 1000 Tadj: 3.209364 DtMax: 0.001 }
Sahp: { Gbar: 0.1 CaTau: 10 Off: 0.8 Slope: 0.02 TauMax: 1 CaDt: 0.1 DtMax: 1 }
KNa: { On: true TrialSlow: false Med: { On: true Rise: 0.02 Max: 0.2 Tau: 200 Dt: 0.005 } Slow: { On: true Rise: 0.001 Max: 0.2 Tau: 1000 Dt: 0.001
} }
NMDA: { Gbar: 0.006 Tau: 100 ITau: 1 MgC: 1.4 Voff: 0 }
MaintNMDA: { Gbar: 0.007 Tau: 200 ITau: 1 MgC: 1.4 Voff: 0 }
- GabaB: { Gbar: 0.015 RiseTau: 45 DecayTau: 50 Gbase: 0.2 GiSpike: 10 MaxTime: 47.41225 TauFact: 2.5811758 RiseDt: 0.022222223 DecayDt: 0.02 }
+ GabaB: { Gbar: 0.015 RiseTau: 45 DecayTau: 50 Gbase: 0.2 GiSpike: 10 MaxTime: 47.41225 TauFact: 2.5811756 RiseDt: 0.022222223 DecayDt: 0.02 }
VGCC: { Gbar: 0.02 Ca: 25 }
AK: { Gbar: 0.1 Hf: 0.076 Mf: 0.075 Voff: 2 Vmax: -37 }
SKCa: { Gbar: 0 C50: 0.5 ActTau: 15 DeTau: 30 KCaR: 0.8 CaRDecayTau: 150 CaInThr: 0.01 CaInTau: 50 }
@@ -78,7 +78,7 @@ Learn: {
TrgAvgAct: { On: true GiBaseInit: 0 ErrLRate: 0.02 SynScaleRate: 0.005 SubMean: 1 Permute: true Pool: true TrgRange: { Min: 0.5 Max: 2
} }
RLRate: { On: true SigmoidMin: 1 Diff: true SpkThr: 0.1 DiffThr: 0.02 Min: 0.001 }
- NeuroMod: { DAMod: 0 Valence: 0 DAModGain: 0.5 DALRateSign: false DALRateMod: 0 AChLRateMod: 0 AChDisInhib: 0 BurstGain: 1 DipGain: 1 }
+ NeuroMod: { DAMod: NoDAMod Valence: Positive DAModGain: 0.5 DALRateSign: false DALRateMod: 0 AChLRateMod: 0 AChDisInhib: 0 BurstGain: 1 DipGain: 1 }
}
Pulv: {
DriveScale: 0.2 FullDriveAct: 0.6 DriveLayIdx: 0
@@ -116,13 +116,13 @@ Act: {
Clamp: { IsInput: true IsTarget: false Ge: 1.5 Add: false ErrThr: 0.5 }
Noise: { On: false GeHz: 100 Ge: 0.001 GiHz: 200 Gi: 0.001 }
VmRange: { Min: 0.1 Max: 1 }
- Mahp: { Gbar: 0.02 Voff: -30 Vslope: 9 TauMax: 1000 Tadj: 3.2093637 DtMax: 0.001 }
+ Mahp: { Gbar: 0.02 Voff: -30 Vslope: 9 TauMax: 1000 Tadj: 3.209364 DtMax: 0.001 }
Sahp: { Gbar: 0.1 CaTau: 10 Off: 0.8 Slope: 0.02 TauMax: 1 CaDt: 0.1 DtMax: 1 }
KNa: { On: true TrialSlow: false Med: { On: true Rise: 0.02 Max: 0.2 Tau: 200 Dt: 0.005 } Slow: { On: true Rise: 0.001 Max: 0.2 Tau: 1000 Dt: 0.001
} }
NMDA: { Gbar: 0.006 Tau: 100 ITau: 1 MgC: 1.4 Voff: 0 }
MaintNMDA: { Gbar: 0.007 Tau: 200 ITau: 1 MgC: 1.4 Voff: 0 }
- GabaB: { Gbar: 0.015 RiseTau: 45 DecayTau: 50 Gbase: 0.2 GiSpike: 10 MaxTime: 47.41225 TauFact: 2.5811758 RiseDt: 0.022222223 DecayDt: 0.02 }
+ GabaB: { Gbar: 0.015 RiseTau: 45 DecayTau: 50 Gbase: 0.2 GiSpike: 10 MaxTime: 47.41225 TauFact: 2.5811756 RiseDt: 0.022222223 DecayDt: 0.02 }
VGCC: { Gbar: 0.02 Ca: 25 }
AK: { Gbar: 0.1 Hf: 0.076 Mf: 0.075 Voff: 2 Vmax: -37 }
SKCa: { Gbar: 0 C50: 0.5 ActTau: 15 DeTau: 30 KCaR: 0.8 CaRDecayTau: 150 CaInThr: 0.01 CaInTau: 50 }
@@ -142,7 +142,7 @@ Learn: {
TrgAvgAct: { On: true GiBaseInit: 0 ErrLRate: 0.02 SynScaleRate: 0.005 SubMean: 1 Permute: true Pool: true TrgRange: { Min: 0.5 Max: 2
} }
RLRate: { On: true SigmoidMin: 0.05 Diff: true SpkThr: 0.1 DiffThr: 0.02 Min: 0.001 }
- NeuroMod: { DAMod: 0 Valence: 0 DAModGain: 0.5 DALRateSign: false DALRateMod: 0 AChLRateMod: 0 AChDisInhib: 0 BurstGain: 1 DipGain: 1 }
+ NeuroMod: { DAMod: NoDAMod Valence: Positive DAModGain: 0.5 DALRateSign: false DALRateMod: 0 AChLRateMod: 0 AChDisInhib: 0 BurstGain: 1 DipGain: 1 }
}
/////////////////////////////////////////////////
Layer: Hidden
@@ -157,13 +157,13 @@ Act: {
Clamp: { IsInput: false IsTarget: false Ge: 0.8 Add: false ErrThr: 0.5 }
Noise: { On: false GeHz: 100 Ge: 0.001 GiHz: 200 Gi: 0.001 }
VmRange: { Min: 0.1 Max: 1 }
- Mahp: { Gbar: 0.02 Voff: -30 Vslope: 9 TauMax: 1000 Tadj: 3.2093637 DtMax: 0.001 }
+ Mahp: { Gbar: 0.02 Voff: -30 Vslope: 9 TauMax: 1000 Tadj: 3.209364 DtMax: 0.001 }
Sahp: { Gbar: 0.1 CaTau: 10 Off: 0.8 Slope: 0.02 TauMax: 1 CaDt: 0.1 DtMax: 1 }
KNa: { On: true TrialSlow: false Med: { On: true Rise: 0.02 Max: 0.2 Tau: 200 Dt: 0.005 } Slow: { On: true Rise: 0.001 Max: 0.2 Tau: 1000 Dt: 0.001
} }
NMDA: { Gbar: 0.006 Tau: 100 ITau: 1 MgC: 1.4 Voff: 0 }
MaintNMDA: { Gbar: 0.007 Tau: 200 ITau: 1 MgC: 1.4 Voff: 0 }
- GabaB: { Gbar: 0.015 RiseTau: 45 DecayTau: 50 Gbase: 0.2 GiSpike: 10 MaxTime: 47.41225 TauFact: 2.5811758 RiseDt: 0.022222223 DecayDt: 0.02 }
+ GabaB: { Gbar: 0.015 RiseTau: 45 DecayTau: 50 Gbase: 0.2 GiSpike: 10 MaxTime: 47.41225 TauFact: 2.5811756 RiseDt: 0.022222223 DecayDt: 0.02 }
VGCC: { Gbar: 0.02 Ca: 25 }
AK: { Gbar: 0.1 Hf: 0.076 Mf: 0.075 Voff: 2 Vmax: -37 }
SKCa: { Gbar: 0 C50: 0.5 ActTau: 15 DeTau: 30 KCaR: 0.8 CaRDecayTau: 150 CaInThr: 0.01 CaInTau: 50 }
@@ -183,7 +183,7 @@ Learn: {
TrgAvgAct: { On: true GiBaseInit: 0 ErrLRate: 0.02 SynScaleRate: 0.005 SubMean: 1 Permute: true Pool: true TrgRange: { Min: 0.5 Max: 2
} }
RLRate: { On: true SigmoidMin: 0.05 Diff: true SpkThr: 0.1 DiffThr: 0.02 Min: 0.001 }
- NeuroMod: { DAMod: 0 Valence: 0 DAModGain: 0.5 DALRateSign: false DALRateMod: 0 AChLRateMod: 0 AChDisInhib: 0 BurstGain: 1 DipGain: 1 }
+ NeuroMod: { DAMod: NoDAMod Valence: Positive DAModGain: 0.5 DALRateSign: false DALRateMod: 0 AChLRateMod: 0 AChDisInhib: 0 BurstGain: 1 DipGain: 1 }
}
Burst: {
ThrRel: 0.1 ThrAbs: 0.1
@@ -241,13 +241,13 @@ Act: {
Clamp: { IsInput: false IsTarget: false Ge: 0.8 Add: false ErrThr: 0.5 }
Noise: { On: false GeHz: 100 Ge: 0.001 GiHz: 200 Gi: 0.001 }
VmRange: { Min: 0.1 Max: 1 }
- Mahp: { Gbar: 0.02 Voff: -30 Vslope: 9 TauMax: 1000 Tadj: 3.2093637 DtMax: 0.001 }
+ Mahp: { Gbar: 0.02 Voff: -30 Vslope: 9 TauMax: 1000 Tadj: 3.209364 DtMax: 0.001 }
Sahp: { Gbar: 0.1 CaTau: 10 Off: 0.8 Slope: 0.02 TauMax: 1 CaDt: 0.1 DtMax: 1 }
KNa: { On: true TrialSlow: false Med: { On: true Rise: 0.02 Max: 0.2 Tau: 200 Dt: 0.005 } Slow: { On: true Rise: 0.001 Max: 0.2 Tau: 1000 Dt: 0.001
} }
NMDA: { Gbar: 0.007 Tau: 200 ITau: 1 MgC: 1.4 Voff: 0 }
MaintNMDA: { Gbar: 0.007 Tau: 200 ITau: 1 MgC: 1.4 Voff: 0 }
- GabaB: { Gbar: 0.015 RiseTau: 45 DecayTau: 50 Gbase: 0.2 GiSpike: 10 MaxTime: 47.41225 TauFact: 2.5811758 RiseDt: 0.022222223 DecayDt: 0.02 }
+ GabaB: { Gbar: 0.015 RiseTau: 45 DecayTau: 50 Gbase: 0.2 GiSpike: 10 MaxTime: 47.41225 TauFact: 2.5811756 RiseDt: 0.022222223 DecayDt: 0.02 }
VGCC: { Gbar: 0.02 Ca: 25 }
AK: { Gbar: 0.1 Hf: 0.076 Mf: 0.075 Voff: 2 Vmax: -37 }
SKCa: { Gbar: 0 C50: 0.5 ActTau: 15 DeTau: 30 KCaR: 0.8 CaRDecayTau: 150 CaInThr: 0.01 CaInTau: 50 }
@@ -267,7 +267,7 @@ Learn: {
TrgAvgAct: { On: true GiBaseInit: 0 ErrLRate: 0.02 SynScaleRate: 0.005 SubMean: 1 Permute: true Pool: true TrgRange: { Min: 0.5 Max: 2
} }
RLRate: { On: true SigmoidMin: 0.05 Diff: true SpkThr: 0.1 DiffThr: 0.02 Min: 0.001 }
- NeuroMod: { DAMod: 0 Valence: 0 DAModGain: 0.5 DALRateSign: false DALRateMod: 0 AChLRateMod: 0 AChDisInhib: 0 BurstGain: 1 DipGain: 1 }
+ NeuroMod: { DAMod: NoDAMod Valence: Positive DAModGain: 0.5 DALRateSign: false DALRateMod: 0 AChLRateMod: 0 AChDisInhib: 0 BurstGain: 1 DipGain: 1 }
}
CT: {
GeGain: 2 DecayTau: 50
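
Two kinds of changes run through params_all.txt: apparently benign last-digit float32 differences from the library update (3.2093637 vs 3.209364, 2.5811758 vs 2.5811756), and enum-valued fields now printing by name (DAMod: NoDAMod, Valence: Positive) instead of by number. The latter presumably comes from enumgen-generated String methods on the enum types; a minimal hand-written equivalent of that behavior (DAModTypes and its values are sketched here for illustration, not copied from axon):

    package main

    import "fmt"

    // DAModTypes sketches an enum whose %v output is its name, which is
    // why the dump above now reads "DAMod: NoDAMod" rather than "DAMod: 0".
    type DAModTypes int32

    const (
        NoDAMod DAModTypes = iota
        D1Mod
        D2Mod
    )

    func (d DAModTypes) String() string {
        switch d {
        case NoDAMod:
            return "NoDAMod"
        case D1Mod:
            return "D1Mod"
        case D2Mod:
            return "D2Mod"
        }
        return fmt.Sprintf("DAModTypes(%d)", int32(d))
    }

    func main() {
        fmt.Printf("DAMod: %v\n", NoDAMod) // DAMod: NoDAMod
    }
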
diff --git a/examples/deep_move/config.go b/examples/deep_move/config.go
index c7fc8bcdb..a557e8eee 100644
--- a/examples/deep_move/config.go
+++ b/examples/deep_move/config.go
@@ -7,122 +7,122 @@ package main
// EnvConfig has config params for environment
// note: only adding fields for key Env params that matter for both Network and Env
// other params are set via the Env map data mechanism.
-type EnvConfig struct {
+type EnvConfig struct { //gti:add
// env parameters -- can set any field/subfield on Env struct, using standard TOML formatting
- Env map[string]any `desc:"env parameters -- can set any field/subfield on Env struct, using standard TOML formatting"`
+ Env map[string]any
- // [def: 4] number of units per localist output unit
- UnitsPer int `def:"4" desc:"number of units per localist output unit"`
+ // number of units per localist output unit
+ UnitsPer int `def:"4"`
}
// ParamConfig has config parameters related to sim params
-type ParamConfig struct {
+type ParamConfig struct { //gti:add
// network parameters
- Network map[string]any `desc:"network parameters"`
+ Network map[string]any
// use a second hidden layer that predicts the first -- is not beneficial for this simple markovian task
- Hid2 bool `desc:"use a second hidden layer that predicts the first -- is not beneficial for this simple markovian task"`
+ Hid2 bool
// Extra Param Sheet name(s) to use (space separated if multiple) -- must be valid name as listed in compiled-in params or loaded params
- Sheet string `desc:"Extra Param Sheet name(s) to use (space separated if multiple) -- must be valid name as listed in compiled-in params or loaded params"`
+ Sheet string
// extra tag to add to file names and logs saved from this run
- Tag string `desc:"extra tag to add to file names and logs saved from this run"`
+ Tag string
// user note -- describe the run params etc -- like a git commit message for the run
- Note string `desc:"user note -- describe the run params etc -- like a git commit message for the run"`
+ Note string
// Name of the JSON file to input saved parameters from.
- File string `nest:"+" desc:"Name of the JSON file to input saved parameters from."`
+ File string `nest:"+"`
// Save a snapshot of all current param and config settings in a directory named params_ (or _good if Good is true), then quit -- useful for comparing to later changes and seeing multiple views of current params
- SaveAll bool `nest:"+" desc:"Save a snapshot of all current param and config settings in a directory named params_ (or _good if Good is true), then quit -- useful for comparing to later changes and seeing multiple views of current params"`
+ SaveAll bool `nest:"+"`
// for SaveAll, save to params_good for a known good params state. This can be done prior to making a new release after all tests are passing -- add results to git to provide a full diff record of all params over time.
- Good bool `nest:"+" desc:"for SaveAll, save to params_good for a known good params state. This can be done prior to making a new release after all tests are passing -- add results to git to provide a full diff record of all params over time."`
+ Good bool `nest:"+"`
}
// RunConfig has config parameters related to running the sim
-type RunConfig struct {
+type RunConfig struct { //gti:add
- // [def: true] use the GPU for computation -- generally faster even for small models if NData ~16
- GPU bool `def:"true" desc:"use the GPU for computation -- generally faster even for small models if NData ~16"`
+ // use the GPU for computation -- generally faster even for small models if NData ~16
+ GPU bool `def:"true"`
- // [def: 16] [min: 1] number of data-parallel items to process in parallel per trial -- works (and is significantly faster) for both CPU and GPU. Results in an effective mini-batch of learning.
- NData int `def:"16" min:"1" desc:"number of data-parallel items to process in parallel per trial -- works (and is significantly faster) for both CPU and GPU. Results in an effective mini-batch of learning."`
+ // number of data-parallel items to process in parallel per trial -- works (and is significantly faster) for both CPU and GPU. Results in an effective mini-batch of learning.
+ NData int `def:"16" min:"1"`
- // [def: 0] number of parallel threads for CPU computation -- 0 = use default
- NThreads int `def:"0" desc:"number of parallel threads for CPU computation -- 0 = use default"`
+ // number of parallel threads for CPU computation -- 0 = use default
+ NThreads int `def:"0"`
- // [def: 0] starting run number -- determines the random seed -- runs counts from there -- can do all runs in parallel by launching separate jobs with each run, runs = 1
- Run int `def:"0" desc:"starting run number -- determines the random seed -- runs counts from there -- can do all runs in parallel by launching separate jobs with each run, runs = 1"`
+ // starting run number -- determines the random seed -- runs counts from there -- can do all runs in parallel by launching separate jobs with each run, runs = 1
+ Run int `def:"0"`
- // [def: 5] [min: 1] total number of runs to do when running Train
- NRuns int `def:"5" min:"1" desc:"total number of runs to do when running Train"`
+ // total number of runs to do when running Train
+ NRuns int `def:"5" min:"1"`
- // [def: 100] total number of epochs per run
- NEpochs int `def:"100" desc:"total number of epochs per run"`
+ // total number of epochs per run
+ NEpochs int `def:"100"`
- // [def: 196] total number of trials per epoch. Should be an even multiple of NData.
- NTrials int `def:"196" desc:"total number of trials per epoch. Should be an even multiple of NData."`
+ // total number of trials per epoch. Should be an even multiple of NData.
+ NTrials int `def:"196"`
- // [def: 5] how frequently (in epochs) to compute PCA on hidden representations to measure variance?
- PCAInterval int `def:"5" desc:"how frequently (in epochs) to compute PCA on hidden representations to measure variance?"`
+ // how frequently (in epochs) to compute PCA on hidden representations to measure variance?
+ PCAInterval int `def:"5"`
- // [def: -1] how often to run through all the test patterns, in terms of training epochs -- can use 0 or -1 for no testing
- TestInterval int `def:"-1" desc:"how often to run through all the test patterns, in terms of training epochs -- can use 0 or -1 for no testing"`
+ // how often to run through all the test patterns, in terms of training epochs -- can use 0 or -1 for no testing
+ TestInterval int `def:"-1"`
}
// LogConfig has config parameters related to logging data
-type LogConfig struct {
+type LogConfig struct { //gti:add
// if true, save final weights after each run
- SaveWts bool `desc:"if true, save final weights after each run"`
+ SaveWts bool
- // [def: true] if true, save train epoch log to file, as .epc.tsv typically
- Epoch bool `def:"true" nest:"+" desc:"if true, save train epoch log to file, as .epc.tsv typically"`
+ // if true, save train epoch log to file, as .epc.tsv typically
+ Epoch bool `def:"true" nest:"+"`
- // [def: true] if true, save run log to file, as .run.tsv typically
- Run bool `def:"true" nest:"+" desc:"if true, save run log to file, as .run.tsv typically"`
+ // if true, save run log to file, as .run.tsv typically
+ Run bool `def:"true" nest:"+"`
- // [def: false] if true, save train trial log to file, as .trl.tsv typically. May be large.
- Trial bool `def:"false" nest:"+" desc:"if true, save train trial log to file, as .trl.tsv typically. May be large."`
+ // if true, save train trial log to file, as .trl.tsv typically. May be large.
+ Trial bool `def:"false" nest:"+"`
- // [def: false] if true, save testing epoch log to file, as .tst_epc.tsv typically. In general it is better to copy testing items over to the training epoch log and record there.
- TestEpoch bool `def:"false" nest:"+" desc:"if true, save testing epoch log to file, as .tst_epc.tsv typically. In general it is better to copy testing items over to the training epoch log and record there."`
+ // if true, save testing epoch log to file, as .tst_epc.tsv typically. In general it is better to copy testing items over to the training epoch log and record there.
+ TestEpoch bool `def:"false" nest:"+"`
- // [def: false] if true, save testing trial log to file, as .tst_trl.tsv typically. May be large.
- TestTrial bool `def:"false" nest:"+" desc:"if true, save testing trial log to file, as .tst_trl.tsv typically. May be large."`
+ // if true, save testing trial log to file, as .tst_trl.tsv typically. May be large.
+ TestTrial bool `def:"false" nest:"+"`
// if true, save network activation etc data from testing trials, for later viewing in netview
- NetData bool `desc:"if true, save network activation etc data from testing trials, for later viewing in netview"`
+ NetData bool
}
// Config is a standard Sim config -- use as a starting point.
-type Config struct {
+type Config struct { //gti:add
// specify include files here, and after configuration, it contains list of include files added
- Includes []string `desc:"specify include files here, and after configuration, it contains list of include files added"`
+ Includes []string
- // [def: true] open the GUI -- does not automatically run -- if false, then runs automatically and quits
- GUI bool `def:"true" desc:"open the GUI -- does not automatically run -- if false, then runs automatically and quits"`
+ // open the GUI -- does not automatically run -- if false, then runs automatically and quits
+ GUI bool `def:"true"`
// log debugging information
- Debug bool `desc:"log debugging information"`
+ Debug bool
- // [view: add-fields] environment configuration options
- Env EnvConfig `view:"add-fields" desc:"environment configuration options"`
+ // environment configuration options
+ Env EnvConfig `view:"add-fields"`
- // [view: add-fields] parameter related configuration options
- Params ParamConfig `view:"add-fields" desc:"parameter related configuration options"`
+ // parameter related configuration options
+ Params ParamConfig `view:"add-fields"`
- // [view: add-fields] sim running related configuration options
- Run RunConfig `view:"add-fields" desc:"sim running related configuration options"`
+ // sim running related configuration options
+ Run RunConfig `view:"add-fields"`
- // [view: add-fields] data logging related configuration options
- Log LogConfig `view:"add-fields" desc:"data logging related configuration options"`
+ // data logging related configuration options
+ Log LogConfig `view:"add-fields"`
}
func (cfg *Config) IncludesPtr() *[]string { return &cfg.Includes }
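
The config.go changes above are one mechanical pattern, repeated per field: the `desc:"..."` struct tag is deleted and its text becomes the field's ordinary Go doc comment, while value-carrying tags (`def:`, `min:`, `nest:`, `view:`) remain. The `//gti:add` directive on each struct marks it for the gti generator, which extracts those comments into type metadata (see the generated gtigen.go below). A minimal sketch of the resulting convention, using a hypothetical DemoConfig that is not part of this diff:

// DemoConfig illustrates the post-migration style: documentation lives in
// doc comments, and only machine-read values stay in struct tags.
type DemoConfig struct { //gti:add

	// number of samples to draw per trial
	NSamples int `def:"10" min:"1"`

	// optional output file name
	OutFile string `nest:"+"`
}
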
diff --git a/examples/deep_move/deep_move.go b/examples/deep_move/deep_move.go
index 780537b1e..6429cc32f 100644
--- a/examples/deep_move/deep_move.go
+++ b/examples/deep_move/deep_move.go
@@ -6,31 +6,33 @@
// on visual inputs.
package main
+//go:generate goki generate -add-types
+
import (
"os"
"github.com/emer/axon/axon"
- "github.com/emer/emergent/econfig"
- "github.com/emer/emergent/egui"
- "github.com/emer/emergent/elog"
- "github.com/emer/emergent/emer"
- "github.com/emer/emergent/env"
- "github.com/emer/emergent/erand"
- "github.com/emer/emergent/estats"
- "github.com/emer/emergent/etime"
- "github.com/emer/emergent/evec"
- "github.com/emer/emergent/looper"
- "github.com/emer/emergent/netview"
- "github.com/emer/emergent/params"
- "github.com/emer/emergent/prjn"
- "github.com/emer/emergent/relpos"
- "github.com/emer/empi/mpi"
- "github.com/emer/etable/etable"
- _ "github.com/emer/etable/etview" // _ = include to get gui views
- "github.com/emer/etable/metric"
- "github.com/goki/gi/gi"
- "github.com/goki/gi/gimain"
- "github.com/goki/mat32"
+ "github.com/emer/emergent/v2/econfig"
+ "github.com/emer/emergent/v2/egui"
+ "github.com/emer/emergent/v2/elog"
+ "github.com/emer/emergent/v2/emer"
+ "github.com/emer/emergent/v2/env"
+ "github.com/emer/emergent/v2/erand"
+ "github.com/emer/emergent/v2/estats"
+ "github.com/emer/emergent/v2/etime"
+ "github.com/emer/emergent/v2/evec"
+ "github.com/emer/emergent/v2/looper"
+ "github.com/emer/emergent/v2/netview"
+ "github.com/emer/emergent/v2/params"
+ "github.com/emer/emergent/v2/prjn"
+ "github.com/emer/emergent/v2/relpos"
+ "github.com/emer/empi/v2/mpi"
+ "goki.dev/etable/v2/etable"
+ _ "goki.dev/etable/v2/etview" // _ = include to get gui views
+ "goki.dev/etable/v2/metric"
+ "goki.dev/gi/v2/gi"
+ "goki.dev/gi/v2/gimain"
+ "goki.dev/mat32/v2"
)
func main() {
@@ -38,7 +40,7 @@ func main() {
sim.New()
sim.ConfigAll()
if sim.Config.GUI {
- gimain.Main(sim.RunGUI)
+ gimain.Run(sim.RunGUI)
} else {
sim.RunNoGUI()
}
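
`gimain.Main` is renamed to `gimain.Run` in gi v2; both take the app's entry function and perform per-platform main-thread setup before invoking it. A minimal standalone sketch of the v2 entry idiom -- the widget constructors `gi.NewBody` and `gi.NewLabel` are assumptions about the gi v2 API and are not shown in this diff:

package main

import (
	"goki.dev/gi/v2/gi"
	"goki.dev/gi/v2/gimain"
)

func main() {
	// Run handles main-thread setup, then calls the app function.
	gimain.Run(app)
}

func app() {
	b := gi.NewBody("demo")
	gi.NewLabel(b).SetText("hello")
	// same window lifecycle the sim uses in RunGUI below
	b.NewWindow().Run().Wait()
}
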
@@ -52,37 +54,37 @@ func main() {
type Sim struct {
// simulation configuration parameters -- set by .toml config file and / or args
- Config Config `desc:"simulation configuration parameters -- set by .toml config file and / or args"`
+ Config Config
- // [view: no-inline] the network -- click to view / edit parameters for layers, prjns, etc
- Net *axon.Network `view:"no-inline" desc:"the network -- click to view / edit parameters for layers, prjns, etc"`
+ // the network -- click to view / edit parameters for layers, prjns, etc
+ Net *axon.Network `view:"no-inline"`
- // [view: inline] all parameter management
- Params emer.NetParams `view:"inline" desc:"all parameter management"`
+ // all parameter management
+ Params emer.NetParams `view:"inline"`
- // [view: no-inline] contains looper control loops for running sim
- Loops *looper.Manager `view:"no-inline" desc:"contains looper control loops for running sim"`
+ // contains looper control loops for running sim
+ Loops *looper.Manager `view:"no-inline"`
// contains computed statistic values
- Stats estats.Stats `desc:"contains computed statistic values"`
+ Stats estats.Stats
// Contains all the logs and information about the logs.'
- Logs elog.Logs `desc:"Contains all the logs and information about the logs.'"`
+ Logs elog.Logs
- // [view: no-inline] Environments
- Envs env.Envs `view:"no-inline" desc:"Environments"`
+ // Environments
+ Envs env.Envs `view:"no-inline"`
// axon timing parameters and state
- Context axon.Context `desc:"axon timing parameters and state"`
+ Context axon.Context
- // [view: inline] netview update parameters
- ViewUpdt netview.ViewUpdt `view:"inline" desc:"netview update parameters"`
+ // netview update parameters
+ ViewUpdt netview.ViewUpdt `view:"inline"`
- // [view: -] manages all the gui elements
- GUI egui.GUI `view:"-" desc:"manages all the gui elements"`
+ // manages all the gui elements
+ GUI egui.GUI `view:"-"`
- // [view: -] a list of random seeds to use for each run
- RndSeeds erand.Seeds `view:"-" desc:"a list of random seeds to use for each run"`
+ // a list of random seeds to use for each run
+ RndSeeds erand.Seeds `view:"-"`
}
// New creates new blank elements and initializes defaults
@@ -599,14 +601,14 @@ func (ss *Sim) Log(mode etime.Modes, time etime.Times) {
func (ss *Sim) ConfigNetView(nv *netview.NetView) {
nv.ViewDefaults()
- nv.Scene().Camera.Pose.Pos.Set(0, 2.1, 2.0)
- nv.Scene().Camera.LookAt(mat32.Vec3{0, 0, 0}, mat32.Vec3{0, 1, 0})
+ nv.SceneXYZ().Camera.Pose.Pos.Set(0, 2.1, 2.0)
+ nv.SceneXYZ().Camera.LookAt(mat32.V3(0, 0, 0), mat32.V3(0, 1, 0))
}
-// ConfigGui configures the GoGi gui interface for this simulation,
-func (ss *Sim) ConfigGui() *gi.Window {
+// ConfigGUI configures the GoGi gui interface for this simulation.
+func (ss *Sim) ConfigGUI() {
title := "DeepAxon Move Prediction"
- ss.GUI.MakeWindow(ss, "DeepMove", title, `This demonstrates a basic DeepAxon model on move prediction. See emergent on GitHub.`)
+ ss.GUI.MakeBody(ss, "DeepMove", title, `This demonstrates a basic DeepAxon model on move prediction. See emergent on GitHub.`)
ss.GUI.CycleUpdateInterval = 10
nv := ss.GUI.AddNetView("NetView")
@@ -618,53 +620,55 @@ func (ss *Sim) ConfigGui() *gi.Window {
ss.GUI.AddPlots(title, &ss.Logs)
- ss.GUI.AddToolbarItem(egui.ToolbarItem{Label: "Init", Icon: "update",
- Tooltip: "Initialize everything including network weights, and start over. Also applies current params.",
- Active: egui.ActiveStopped,
- Func: func() {
- ss.Init()
- ss.GUI.UpdateWindow()
- },
- })
+ ss.GUI.Body.AddAppBar(func(tb *gi.Toolbar) {
+ ss.GUI.AddToolbarItem(tb, egui.ToolbarItem{Label: "Init", Icon: "update",
+ Tooltip: "Initialize everything including network weights, and start over. Also applies current params.",
+ Active: egui.ActiveStopped,
+ Func: func() {
+ ss.Init()
+ ss.GUI.UpdateWindow()
+ },
+ })
- ss.GUI.AddLooperCtrl(ss.Loops, []etime.Modes{etime.Train, etime.Test})
- ss.GUI.AddToolbarItem(egui.ToolbarItem{Label: "Test Init",
- Icon: "reset",
- Tooltip: "restart testing",
- Active: egui.ActiveAlways,
- Func: func() {
- ss.Loops.ResetCountersByMode(etime.Test)
- },
- })
+ ss.GUI.AddLooperCtrl(tb, ss.Loops, []etime.Modes{etime.Train, etime.Test})
+ ss.GUI.AddToolbarItem(tb, egui.ToolbarItem{Label: "Test Init",
+ Icon: "reset",
+ Tooltip: "restart testing",
+ Active: egui.ActiveAlways,
+ Func: func() {
+ ss.Loops.ResetCountersByMode(etime.Test)
+ },
+ })
- ////////////////////////////////////////////////
- ss.GUI.ToolBar.AddSeparator("log")
- ss.GUI.AddToolbarItem(egui.ToolbarItem{Label: "Reset RunLog",
- Icon: "reset",
- Tooltip: "Reset the accumulated log of all Runs, which are tagged with the ParamSet used",
- Active: egui.ActiveAlways,
- Func: func() {
- ss.Logs.ResetLog(etime.Train, etime.Run)
- ss.GUI.UpdatePlot(etime.Train, etime.Run)
- },
- })
- ////////////////////////////////////////////////
- ss.GUI.ToolBar.AddSeparator("misc")
- ss.GUI.AddToolbarItem(egui.ToolbarItem{Label: "New Seed",
- Icon: "new",
- Tooltip: "Generate a new initial random seed to get different results. By default, Init re-establishes the same initial seed every time.",
- Active: egui.ActiveAlways,
- Func: func() {
- ss.RndSeeds.NewSeeds()
- },
- })
- ss.GUI.AddToolbarItem(egui.ToolbarItem{Label: "README",
- Icon: "file-markdown",
- Tooltip: "Opens your browser on the README file that contains instructions for how to run this model.",
- Active: egui.ActiveAlways,
- Func: func() {
- gi.OpenURL("https://github.com/emer/axon/blob/master/examples/deep_move/README.md")
- },
+ ////////////////////////////////////////////////
+ gi.NewSeparator(tb)
+ ss.GUI.AddToolbarItem(tb, egui.ToolbarItem{Label: "Reset RunLog",
+ Icon: "reset",
+ Tooltip: "Reset the accumulated log of all Runs, which are tagged with the ParamSet used",
+ Active: egui.ActiveAlways,
+ Func: func() {
+ ss.Logs.ResetLog(etime.Train, etime.Run)
+ ss.GUI.UpdatePlot(etime.Train, etime.Run)
+ },
+ })
+ ////////////////////////////////////////////////
+ gi.NewSeparator(tb)
+ ss.GUI.AddToolbarItem(tb, egui.ToolbarItem{Label: "New Seed",
+ Icon: "new",
+ Tooltip: "Generate a new initial random seed to get different results. By default, Init re-establishes the same initial seed every time.",
+ Active: egui.ActiveAlways,
+ Func: func() {
+ ss.RndSeeds.NewSeeds()
+ },
+ })
+ ss.GUI.AddToolbarItem(tb, egui.ToolbarItem{Label: "README",
+ Icon: "file-markdown",
+ Tooltip: "Opens your browser on the README file that contains instructions for how to run this model.",
+ Active: egui.ActiveAlways,
+ Func: func() {
+ gi.OpenURL("https://github.com/emer/axon/blob/master/examples/deep_move/README.md")
+ },
+ })
})
ss.GUI.FinalizeGUI(false)
if ss.Config.Run.GPU {
@@ -673,13 +677,12 @@ func (ss *Sim) ConfigGui() *gi.Window {
ss.Net.GPU.Destroy()
})
}
- return ss.GUI.Win
}
func (ss *Sim) RunGUI() {
ss.Init()
- win := ss.ConfigGui()
- win.StartEventLoop()
+ ss.ConfigGUI()
+ ss.GUI.Body.NewWindow().Run().Wait()
}
func (ss *Sim) RunNoGUI() {
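
Taken together, the GUI changes in this file follow the gi v2 window model: `MakeWindow` becomes `MakeBody`, toolbar construction moves inside a `Body.AddAppBar` closure that passes the `*gi.Toolbar` explicitly to each helper, `ToolBar.AddSeparator(name)` becomes `gi.NewSeparator(tb)`, and the `*gi.Window` return value plus `StartEventLoop` are replaced by `Body.NewWindow().Run().Wait()`. The general shape, condensed from the diff above:

ss.GUI.MakeBody(ss, "AppName", title, aboutText)
ss.GUI.Body.AddAppBar(func(tb *gi.Toolbar) {
	ss.GUI.AddToolbarItem(tb, egui.ToolbarItem{ /* Label, Icon, Tooltip, Active, Func */ })
	gi.NewSeparator(tb) // replaces ss.GUI.ToolBar.AddSeparator("...")
})
ss.GUI.FinalizeGUI(false)
ss.GUI.Body.NewWindow().Run().Wait() // replaces win.StartEventLoop()
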
diff --git a/examples/deep_move/gtigen.go b/examples/deep_move/gtigen.go
new file mode 100644
index 000000000..432b2adf4
--- /dev/null
+++ b/examples/deep_move/gtigen.go
@@ -0,0 +1,173 @@
+// Code generated by "goki generate -add-types"; DO NOT EDIT.
+
+package main
+
+import (
+ "goki.dev/gti"
+ "goki.dev/ordmap"
+)
+
+var _ = gti.AddType(&gti.Type{
+ Name: "main.EnvConfig",
+ ShortName: "main.EnvConfig",
+ IDName: "env-config",
+ Doc: "EnvConfig has config params for environment\nnote: only adding fields for key Env params that matter for both Network and Env\nother params are set via the Env map data mechanism.",
+ Directives: gti.Directives{
+ &gti.Directive{Tool: "gti", Directive: "add", Args: []string{}},
+ },
+ Fields: ordmap.Make([]ordmap.KeyVal[string, *gti.Field]{
+ {"Env", &gti.Field{Name: "Env", Type: "map[string]any", LocalType: "map[string]any", Doc: "env parameters -- can set any field/subfield on Env struct, using standard TOML formatting", Directives: gti.Directives{}, Tag: ""}},
+ {"UnitsPer", &gti.Field{Name: "UnitsPer", Type: "int", LocalType: "int", Doc: "number of units per localist output unit", Directives: gti.Directives{}, Tag: "def:\"4\""}},
+ }),
+ Embeds: ordmap.Make([]ordmap.KeyVal[string, *gti.Field]{}),
+ Methods: ordmap.Make([]ordmap.KeyVal[string, *gti.Method]{}),
+})
+
+var _ = gti.AddType(&gti.Type{
+ Name: "main.ParamConfig",
+ ShortName: "main.ParamConfig",
+ IDName: "param-config",
+ Doc: "ParamConfig has config parameters related to sim params",
+ Directives: gti.Directives{
+ &gti.Directive{Tool: "gti", Directive: "add", Args: []string{}},
+ },
+ Fields: ordmap.Make([]ordmap.KeyVal[string, *gti.Field]{
+ {"Network", &gti.Field{Name: "Network", Type: "map[string]any", LocalType: "map[string]any", Doc: "network parameters", Directives: gti.Directives{}, Tag: ""}},
+ {"Hid2", &gti.Field{Name: "Hid2", Type: "bool", LocalType: "bool", Doc: "use a second hidden layer that predicts the first -- is not beneficial for this simple markovian task", Directives: gti.Directives{}, Tag: ""}},
+ {"Sheet", &gti.Field{Name: "Sheet", Type: "string", LocalType: "string", Doc: "Extra Param Sheet name(s) to use (space separated if multiple) -- must be valid name as listed in compiled-in params or loaded params", Directives: gti.Directives{}, Tag: ""}},
+ {"Tag", &gti.Field{Name: "Tag", Type: "string", LocalType: "string", Doc: "extra tag to add to file names and logs saved from this run", Directives: gti.Directives{}, Tag: ""}},
+ {"Note", &gti.Field{Name: "Note", Type: "string", LocalType: "string", Doc: "user note -- describe the run params etc -- like a git commit message for the run", Directives: gti.Directives{}, Tag: ""}},
+ {"File", &gti.Field{Name: "File", Type: "string", LocalType: "string", Doc: "Name of the JSON file to input saved parameters from.", Directives: gti.Directives{}, Tag: "nest:\"+\""}},
+ {"SaveAll", &gti.Field{Name: "SaveAll", Type: "bool", LocalType: "bool", Doc: "Save a snapshot of all current param and config settings in a directory named params_ (or _good if Good is true), then quit -- useful for comparing to later changes and seeing multiple views of current params", Directives: gti.Directives{}, Tag: "nest:\"+\""}},
+ {"Good", &gti.Field{Name: "Good", Type: "bool", LocalType: "bool", Doc: "for SaveAll, save to params_good for a known good params state. This can be done prior to making a new release after all tests are passing -- add results to git to provide a full diff record of all params over time.", Directives: gti.Directives{}, Tag: "nest:\"+\""}},
+ }),
+ Embeds: ordmap.Make([]ordmap.KeyVal[string, *gti.Field]{}),
+ Methods: ordmap.Make([]ordmap.KeyVal[string, *gti.Method]{}),
+})
+
+var _ = gti.AddType(&gti.Type{
+ Name: "main.RunConfig",
+ ShortName: "main.RunConfig",
+ IDName: "run-config",
+ Doc: "RunConfig has config parameters related to running the sim",
+ Directives: gti.Directives{
+ &gti.Directive{Tool: "gti", Directive: "add", Args: []string{}},
+ },
+ Fields: ordmap.Make([]ordmap.KeyVal[string, *gti.Field]{
+ {"GPU", &gti.Field{Name: "GPU", Type: "bool", LocalType: "bool", Doc: "use the GPU for computation -- generally faster even for small models if NData ~16", Directives: gti.Directives{}, Tag: "def:\"true\""}},
+ {"NData", &gti.Field{Name: "NData", Type: "int", LocalType: "int", Doc: "number of data-parallel items to process in parallel per trial -- works (and is significantly faster) for both CPU and GPU. Results in an effective mini-batch of learning.", Directives: gti.Directives{}, Tag: "def:\"16\" min:\"1\""}},
+ {"NThreads", &gti.Field{Name: "NThreads", Type: "int", LocalType: "int", Doc: "number of parallel threads for CPU computation -- 0 = use default", Directives: gti.Directives{}, Tag: "def:\"0\""}},
+ {"Run", &gti.Field{Name: "Run", Type: "int", LocalType: "int", Doc: "starting run number -- determines the random seed -- runs counts from there -- can do all runs in parallel by launching separate jobs with each run, runs = 1", Directives: gti.Directives{}, Tag: "def:\"0\""}},
+ {"NRuns", &gti.Field{Name: "NRuns", Type: "int", LocalType: "int", Doc: "total number of runs to do when running Train", Directives: gti.Directives{}, Tag: "def:\"5\" min:\"1\""}},
+ {"NEpochs", &gti.Field{Name: "NEpochs", Type: "int", LocalType: "int", Doc: "total number of epochs per run", Directives: gti.Directives{}, Tag: "def:\"100\""}},
+ {"NTrials", &gti.Field{Name: "NTrials", Type: "int", LocalType: "int", Doc: "total number of trials per epoch. Should be an even multiple of NData.", Directives: gti.Directives{}, Tag: "def:\"196\""}},
+ {"PCAInterval", &gti.Field{Name: "PCAInterval", Type: "int", LocalType: "int", Doc: "how frequently (in epochs) to compute PCA on hidden representations to measure variance?", Directives: gti.Directives{}, Tag: "def:\"5\""}},
+ {"TestInterval", &gti.Field{Name: "TestInterval", Type: "int", LocalType: "int", Doc: "how often to run through all the test patterns, in terms of training epochs -- can use 0 or -1 for no testing", Directives: gti.Directives{}, Tag: "def:\"-1\""}},
+ }),
+ Embeds: ordmap.Make([]ordmap.KeyVal[string, *gti.Field]{}),
+ Methods: ordmap.Make([]ordmap.KeyVal[string, *gti.Method]{}),
+})
+
+var _ = gti.AddType(&gti.Type{
+ Name: "main.LogConfig",
+ ShortName: "main.LogConfig",
+ IDName: "log-config",
+ Doc: "LogConfig has config parameters related to logging data",
+ Directives: gti.Directives{
+ &gti.Directive{Tool: "gti", Directive: "add", Args: []string{}},
+ },
+ Fields: ordmap.Make([]ordmap.KeyVal[string, *gti.Field]{
+ {"SaveWts", &gti.Field{Name: "SaveWts", Type: "bool", LocalType: "bool", Doc: "if true, save final weights after each run", Directives: gti.Directives{}, Tag: ""}},
+ {"Epoch", &gti.Field{Name: "Epoch", Type: "bool", LocalType: "bool", Doc: "if true, save train epoch log to file, as .epc.tsv typically", Directives: gti.Directives{}, Tag: "def:\"true\" nest:\"+\""}},
+ {"Run", &gti.Field{Name: "Run", Type: "bool", LocalType: "bool", Doc: "if true, save run log to file, as .run.tsv typically", Directives: gti.Directives{}, Tag: "def:\"true\" nest:\"+\""}},
+ {"Trial", &gti.Field{Name: "Trial", Type: "bool", LocalType: "bool", Doc: "if true, save train trial log to file, as .trl.tsv typically. May be large.", Directives: gti.Directives{}, Tag: "def:\"false\" nest:\"+\""}},
+ {"TestEpoch", &gti.Field{Name: "TestEpoch", Type: "bool", LocalType: "bool", Doc: "if true, save testing epoch log to file, as .tst_epc.tsv typically. In general it is better to copy testing items over to the training epoch log and record there.", Directives: gti.Directives{}, Tag: "def:\"false\" nest:\"+\""}},
+ {"TestTrial", &gti.Field{Name: "TestTrial", Type: "bool", LocalType: "bool", Doc: "if true, save testing trial log to file, as .tst_trl.tsv typically. May be large.", Directives: gti.Directives{}, Tag: "def:\"false\" nest:\"+\""}},
+ {"NetData", &gti.Field{Name: "NetData", Type: "bool", LocalType: "bool", Doc: "if true, save network activation etc data from testing trials, for later viewing in netview", Directives: gti.Directives{}, Tag: ""}},
+ }),
+ Embeds: ordmap.Make([]ordmap.KeyVal[string, *gti.Field]{}),
+ Methods: ordmap.Make([]ordmap.KeyVal[string, *gti.Method]{}),
+})
+
+var _ = gti.AddType(&gti.Type{
+ Name: "main.Config",
+ ShortName: "main.Config",
+ IDName: "config",
+ Doc: "Config is a standard Sim config -- use as a starting point.",
+ Directives: gti.Directives{
+ &gti.Directive{Tool: "gti", Directive: "add", Args: []string{}},
+ },
+ Fields: ordmap.Make([]ordmap.KeyVal[string, *gti.Field]{
+ {"Includes", &gti.Field{Name: "Includes", Type: "[]string", LocalType: "[]string", Doc: "specify include files here, and after configuration, it contains list of include files added", Directives: gti.Directives{}, Tag: ""}},
+ {"GUI", &gti.Field{Name: "GUI", Type: "bool", LocalType: "bool", Doc: "open the GUI -- does not automatically run -- if false, then runs automatically and quits", Directives: gti.Directives{}, Tag: "def:\"true\""}},
+ {"Debug", &gti.Field{Name: "Debug", Type: "bool", LocalType: "bool", Doc: "log debugging information", Directives: gti.Directives{}, Tag: ""}},
+ {"Env", &gti.Field{Name: "Env", Type: "github.com/emer/axon/examples/deep_move.EnvConfig", LocalType: "EnvConfig", Doc: "environment configuration options", Directives: gti.Directives{}, Tag: "view:\"add-fields\""}},
+ {"Params", &gti.Field{Name: "Params", Type: "github.com/emer/axon/examples/deep_move.ParamConfig", LocalType: "ParamConfig", Doc: "parameter related configuration options", Directives: gti.Directives{}, Tag: "view:\"add-fields\""}},
+ {"Run", &gti.Field{Name: "Run", Type: "github.com/emer/axon/examples/deep_move.RunConfig", LocalType: "RunConfig", Doc: "sim running related configuration options", Directives: gti.Directives{}, Tag: "view:\"add-fields\""}},
+ {"Log", &gti.Field{Name: "Log", Type: "github.com/emer/axon/examples/deep_move.LogConfig", LocalType: "LogConfig", Doc: "data logging related configuration options", Directives: gti.Directives{}, Tag: "view:\"add-fields\""}},
+ }),
+ Embeds: ordmap.Make([]ordmap.KeyVal[string, *gti.Field]{}),
+ Methods: ordmap.Make([]ordmap.KeyVal[string, *gti.Method]{}),
+})
+
+var _ = gti.AddType(&gti.Type{
+ Name: "main.Sim",
+ ShortName: "main.Sim",
+ IDName: "sim",
+ Doc: "Sim encapsulates the entire simulation model, and we define all the\nfunctionality as methods on this struct. This structure keeps all relevant\nstate information organized and available without having to pass everything around\nas arguments to methods, and provides the core GUI interface (note the view tags\nfor the fields which provide hints to how things should be displayed).",
+ Directives: gti.Directives{},
+ Fields: ordmap.Make([]ordmap.KeyVal[string, *gti.Field]{
+ {"Config", &gti.Field{Name: "Config", Type: "github.com/emer/axon/examples/deep_move.Config", LocalType: "Config", Doc: "simulation configuration parameters -- set by .toml config file and / or args", Directives: gti.Directives{}, Tag: ""}},
+ {"Net", &gti.Field{Name: "Net", Type: "*github.com/emer/axon/axon.Network", LocalType: "*axon.Network", Doc: "the network -- click to view / edit parameters for layers, prjns, etc", Directives: gti.Directives{}, Tag: "view:\"no-inline\""}},
+ {"Params", &gti.Field{Name: "Params", Type: "github.com/emer/emergent/v2/emer.NetParams", LocalType: "emer.NetParams", Doc: "all parameter management", Directives: gti.Directives{}, Tag: "view:\"inline\""}},
+ {"Loops", &gti.Field{Name: "Loops", Type: "*github.com/emer/emergent/v2/looper.Manager", LocalType: "*looper.Manager", Doc: "contains looper control loops for running sim", Directives: gti.Directives{}, Tag: "view:\"no-inline\""}},
+ {"Stats", &gti.Field{Name: "Stats", Type: "github.com/emer/emergent/v2/estats.Stats", LocalType: "estats.Stats", Doc: "contains computed statistic values", Directives: gti.Directives{}, Tag: ""}},
+ {"Logs", &gti.Field{Name: "Logs", Type: "github.com/emer/emergent/v2/elog.Logs", LocalType: "elog.Logs", Doc: "Contains all the logs and information about the logs.'", Directives: gti.Directives{}, Tag: ""}},
+ {"Envs", &gti.Field{Name: "Envs", Type: "github.com/emer/emergent/v2/env.Envs", LocalType: "env.Envs", Doc: "Environments", Directives: gti.Directives{}, Tag: "view:\"no-inline\""}},
+ {"Context", &gti.Field{Name: "Context", Type: "github.com/emer/axon/axon.Context", LocalType: "axon.Context", Doc: "axon timing parameters and state", Directives: gti.Directives{}, Tag: ""}},
+ {"ViewUpdt", &gti.Field{Name: "ViewUpdt", Type: "github.com/emer/emergent/v2/netview.ViewUpdt", LocalType: "netview.ViewUpdt", Doc: "netview update parameters", Directives: gti.Directives{}, Tag: "view:\"inline\""}},
+ {"GUI", &gti.Field{Name: "GUI", Type: "github.com/emer/emergent/v2/egui.GUI", LocalType: "egui.GUI", Doc: "manages all the gui elements", Directives: gti.Directives{}, Tag: "view:\"-\""}},
+ {"RndSeeds", &gti.Field{Name: "RndSeeds", Type: "github.com/emer/emergent/v2/erand.Seeds", LocalType: "erand.Seeds", Doc: "a list of random seeds to use for each run", Directives: gti.Directives{}, Tag: "view:\"-\""}},
+ }),
+ Embeds: ordmap.Make([]ordmap.KeyVal[string, *gti.Field]{}),
+ Methods: ordmap.Make([]ordmap.KeyVal[string, *gti.Method]{}),
+})
+
+var _ = gti.AddType(&gti.Type{
+ Name: "main.MoveEnv",
+ ShortName: "main.MoveEnv",
+ IDName: "move-env",
+ Doc: "MoveEnv is a flat-world grid-based environment",
+ Directives: gti.Directives{},
+ Fields: ordmap.Make([]ordmap.KeyVal[string, *gti.Field]{
+ {"Nm", &gti.Field{Name: "Nm", Type: "string", LocalType: "string", Doc: "name of this environment", Directives: gti.Directives{}, Tag: ""}},
+ {"Disp", &gti.Field{Name: "Disp", Type: "bool", LocalType: "bool", Doc: "update display -- turn off to make it faster", Directives: gti.Directives{}, Tag: ""}},
+ {"Size", &gti.Field{Name: "Size", Type: "github.com/emer/emergent/v2/evec.Vec2i", LocalType: "evec.Vec2i", Doc: "size of 2D world", Directives: gti.Directives{}, Tag: ""}},
+ {"World", &gti.Field{Name: "World", Type: "*goki.dev/etable/v2/etensor.Int", LocalType: "*etensor.Int", Doc: "2D grid world, each cell is a material (mat)", Directives: gti.Directives{}, Tag: "view:\"no-inline\""}},
+ {"Acts", &gti.Field{Name: "Acts", Type: "[]string", LocalType: "[]string", Doc: "list of actions: starts with: Stay, Left, Right, Forward, Back, then extensible", Directives: gti.Directives{}, Tag: ""}},
+ {"ActMap", &gti.Field{Name: "ActMap", Type: "map[string]int", LocalType: "map[string]int", Doc: "action map of action names to indexes", Directives: gti.Directives{}, Tag: ""}},
+ {"FOV", &gti.Field{Name: "FOV", Type: "int", LocalType: "int", Doc: "field of view in degrees, e.g., 180, must be even multiple of AngInc", Directives: gti.Directives{}, Tag: ""}},
+ {"AngInc", &gti.Field{Name: "AngInc", Type: "int", LocalType: "int", Doc: "angle increment for rotation, in degrees -- defaults to 15", Directives: gti.Directives{}, Tag: ""}},
+ {"NRotAngles", &gti.Field{Name: "NRotAngles", Type: "int", LocalType: "int", Doc: "total number of rotation angles in a circle", Directives: gti.Directives{}, Tag: "inactive:\"+\""}},
+ {"NFOVRays", &gti.Field{Name: "NFOVRays", Type: "int", LocalType: "int", Doc: "total number of FOV rays that are traced", Directives: gti.Directives{}, Tag: "inactive:\"+\""}},
+ {"DepthSize", &gti.Field{Name: "DepthSize", Type: "int", LocalType: "int", Doc: "number of units in depth population codes", Directives: gti.Directives{}, Tag: "inactive:\"+\""}},
+ {"DepthCode", &gti.Field{Name: "DepthCode", Type: "github.com/emer/emergent/v2/popcode.OneD", LocalType: "popcode.OneD", Doc: "population code for depth, in normalized units", Directives: gti.Directives{}, Tag: ""}},
+ {"AngCode", &gti.Field{Name: "AngCode", Type: "github.com/emer/emergent/v2/popcode.Ring", LocalType: "popcode.Ring", Doc: "angle population code values, in normalized units", Directives: gti.Directives{}, Tag: ""}},
+ {"UnitsPer", &gti.Field{Name: "UnitsPer", Type: "int", LocalType: "int", Doc: "number of units per localist value", Directives: gti.Directives{}, Tag: ""}},
+ {"Debug", &gti.Field{Name: "Debug", Type: "bool", LocalType: "bool", Doc: "print debug messages", Directives: gti.Directives{}, Tag: ""}},
+ {"PctBlank", &gti.Field{Name: "PctBlank", Type: "float32", LocalType: "float32", Doc: "proportion of times that a blank input is generated -- for testing pulvinar behavior with blank inputs", Directives: gti.Directives{}, Tag: ""}},
+ {"PosF", &gti.Field{Name: "PosF", Type: "goki.dev/mat32/v2.Vec2", LocalType: "mat32.Vec2", Doc: "current location of agent, floating point", Directives: gti.Directives{}, Tag: "inactive:\"+\""}},
+ {"PosI", &gti.Field{Name: "PosI", Type: "github.com/emer/emergent/v2/evec.Vec2i", LocalType: "evec.Vec2i", Doc: "current location of agent, integer", Directives: gti.Directives{}, Tag: "inactive:\"+\""}},
+ {"Angle", &gti.Field{Name: "Angle", Type: "int", LocalType: "int", Doc: "current angle, in degrees", Directives: gti.Directives{}, Tag: "inactive:\"+\""}},
+ {"RotAng", &gti.Field{Name: "RotAng", Type: "int", LocalType: "int", Doc: "angle that we just rotated -- drives vestibular", Directives: gti.Directives{}, Tag: "inactive:\"+\""}},
+ {"Act", &gti.Field{Name: "Act", Type: "int", LocalType: "int", Doc: "last action taken", Directives: gti.Directives{}, Tag: "inactive:\"+\""}},
+ {"Depths", &gti.Field{Name: "Depths", Type: "[]float32", LocalType: "[]float32", Doc: "depth for each angle (NFOVRays), raw", Directives: gti.Directives{}, Tag: ""}},
+ {"DepthLogs", &gti.Field{Name: "DepthLogs", Type: "[]float32", LocalType: "[]float32", Doc: "depth for each angle (NFOVRays), normalized log", Directives: gti.Directives{}, Tag: ""}},
+ {"CurStates", &gti.Field{Name: "CurStates", Type: "map[string]*goki.dev/etable/v2/etensor.Float32", LocalType: "map[string]*etensor.Float32", Doc: "current rendered state tensors -- extensible map", Directives: gti.Directives{}, Tag: ""}},
+ {"NextStates", &gti.Field{Name: "NextStates", Type: "map[string]*goki.dev/etable/v2/etensor.Float32", LocalType: "map[string]*etensor.Float32", Doc: "next rendered state tensors -- updated from actions", Directives: gti.Directives{}, Tag: ""}},
+ {"Rand", &gti.Field{Name: "Rand", Type: "github.com/emer/emergent/v2/erand.SysRand", LocalType: "erand.SysRand", Doc: "random number generator for the env -- all random calls must use this -- set seed here for weight initialization values", Directives: gti.Directives{}, Tag: "view:\"-\""}},
+ {"RndSeed", &gti.Field{Name: "RndSeed", Type: "int64", LocalType: "int64", Doc: "random seed", Directives: gti.Directives{}, Tag: "inactive:\"+\""}},
+ }),
+ Embeds: ordmap.Make([]ordmap.KeyVal[string, *gti.Field]{}),
+ Methods: ordmap.Make([]ordmap.KeyVal[string, *gti.Method]{}),
+})
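
Each `gti.AddType(&gti.Type{...})` call in this generated file runs at package init and registers the type's doc strings, field tags, and directives in gti's global type registry, which the gi GUI and econfig tooling can query instead of reflecting over `desc:` tags; the file is regenerated by the `//go:generate goki generate -add-types` directive added in deep_move.go above. A sketch of reading that metadata back at runtime -- `gti.TypeByName` and ordmap's `ValByKey` accessor are assumptions about the goki.dev/gti and goki.dev/ordmap APIs of this vintage, not shown in this diff:

// hypothetical lookup of generated metadata (API names assumed, see above)
if typ := gti.TypeByName("main.RunConfig"); typ != nil {
	fld := typ.Fields.ValByKey("NData")
	fmt.Println(fld.Doc) // "number of data-parallel items to process ..."
	fmt.Println(fld.Tag) // def:"16" min:"1"
}
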
diff --git a/examples/deep_move/move_env.go b/examples/deep_move/move_env.go
index 1e3162a65..941cb57ab 100644
--- a/examples/deep_move/move_env.go
+++ b/examples/deep_move/move_env.go
@@ -8,99 +8,99 @@ import (
"fmt"
"math/rand"
- "github.com/emer/emergent/env"
- "github.com/emer/emergent/erand"
- "github.com/emer/emergent/evec"
- "github.com/emer/emergent/popcode"
- "github.com/emer/etable/etensor"
+ "github.com/emer/emergent/v2/env"
+ "github.com/emer/emergent/v2/erand"
+ "github.com/emer/emergent/v2/evec"
+ "github.com/emer/emergent/v2/popcode"
"github.com/goki/ki/ints"
"github.com/goki/ki/kit"
- "github.com/goki/mat32"
+ "goki.dev/etable/v2/etensor"
+ "goki.dev/mat32/v2"
)
// MoveEnv is a flat-world grid-based environment
type MoveEnv struct {
// name of this environment
- Nm string `desc:"name of this environment"`
+ Nm string
// update display -- turn off to make it faster
- Disp bool `desc:"update display -- turn off to make it faster"`
+ Disp bool
// size of 2D world
- Size evec.Vec2i `desc:"size of 2D world"`
+ Size evec.Vec2i
- // [view: no-inline] 2D grid world, each cell is a material (mat)
- World *etensor.Int `view:"no-inline" desc:"2D grid world, each cell is a material (mat)"`
+ // 2D grid world, each cell is a material (mat)
+ World *etensor.Int `view:"no-inline"`
// list of actions: starts with: Stay, Left, Right, Forward, Back, then extensible
- Acts []string `desc:"list of actions: starts with: Stay, Left, Right, Forward, Back, then extensible"`
+ Acts []string
// action map of action names to indexes
- ActMap map[string]int `desc:"action map of action names to indexes"`
+ ActMap map[string]int
// field of view in degrees, e.g., 180, must be even multiple of AngInc
- FOV int `desc:"field of view in degrees, e.g., 180, must be even multiple of AngInc"`
+ FOV int
// angle increment for rotation, in degrees -- defaults to 15
- AngInc int `desc:"angle increment for rotation, in degrees -- defaults to 15"`
+ AngInc int
// total number of rotation angles in a circle
- NRotAngles int `inactive:"+" desc:"total number of rotation angles in a circle"`
+ NRotAngles int `inactive:"+"`
// total number of FOV rays that are traced
- NFOVRays int `inactive:"+" desc:"total number of FOV rays that are traced"`
+ NFOVRays int `inactive:"+"`
// number of units in depth population codes
- DepthSize int `inactive:"+" desc:"number of units in depth population codes"`
+ DepthSize int `inactive:"+"`
// population code for depth, in normalized units
- DepthCode popcode.OneD `desc:"population code for depth, in normalized units"`
+ DepthCode popcode.OneD
// angle population code values, in normalized units
- AngCode popcode.Ring `desc:"angle population code values, in normalized units"`
+ AngCode popcode.Ring
// number of units per localist value
- UnitsPer int `desc:"number of units per localist value"`
+ UnitsPer int
// print debug messages
- Debug bool `desc:"print debug messages"`
+ Debug bool
// proportion of times that a blank input is generated -- for testing pulvinar behavior with blank inputs
- PctBlank float32 `desc:"proportion of times that a blank input is generated -- for testing pulvinar behavior with blank inputs"`
+ PctBlank float32
// current location of agent, floating point
- PosF mat32.Vec2 `inactive:"+" desc:"current location of agent, floating point"`
+ PosF mat32.Vec2 `inactive:"+"`
// current location of agent, integer
- PosI evec.Vec2i `inactive:"+" desc:"current location of agent, integer"`
+ PosI evec.Vec2i `inactive:"+"`
// current angle, in degrees
- Angle int `inactive:"+" desc:"current angle, in degrees"`
+ Angle int `inactive:"+"`
// angle that we just rotated -- drives vestibular
- RotAng int `inactive:"+" desc:"angle that we just rotated -- drives vestibular"`
+ RotAng int `inactive:"+"`
// last action taken
- Act int `inactive:"+" desc:"last action taken"`
+ Act int `inactive:"+"`
// depth for each angle (NFOVRays), raw
- Depths []float32 `desc:"depth for each angle (NFOVRays), raw"`
+ Depths []float32
// depth for each angle (NFOVRays), normalized log
- DepthLogs []float32 `desc:"depth for each angle (NFOVRays), normalized log"`
+ DepthLogs []float32
// current rendered state tensors -- extensible map
- CurStates map[string]*etensor.Float32 `desc:"current rendered state tensors -- extensible map"`
+ CurStates map[string]*etensor.Float32
// next rendered state tensors -- updated from actions
- NextStates map[string]*etensor.Float32 `desc:"next rendered state tensors -- updated from actions"`
+ NextStates map[string]*etensor.Float32
- // [view: -] random number generator for the env -- all random calls must use this -- set seed here for weight initialization values
- Rand erand.SysRand `view:"-" desc:"random number generator for the env -- all random calls must use this -- set seed here for weight initialization values"`
+ // random number generator for the env -- all random calls must use this -- set seed here for weight initialization values
+ Rand erand.SysRand `view:"-"`
// random seed
- RndSeed int64 `inactive:"+" desc:"random seed"`
+ RndSeed int64 `inactive:"+"`
}
var KiT_MoveEnv = kit.Types.AddType(&MoveEnv{}, nil)
@@ -208,7 +208,7 @@ func AngMod(ang int) int {
// such that the largest value is 1.
func AngVec(ang int) mat32.Vec2 {
a := mat32.DegToRad(float32(AngMod(ang)))
- v := mat32.Vec2{mat32.Cos(a), mat32.Sin(a)}
+ v := mat32.V2(mat32.Cos(a), mat32.Sin(a))
return NormVecLine(v)
}
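
mat32 v2 favors the `V2`/`V3` constructor functions over positional struct literals like `mat32.Vec2{x, y}`; the same change appears in ConfigNetView above with `mat32.V3`. For example, the unit direction vector for a 30-degree heading:

a := mat32.DegToRad(float32(30))
v := mat32.V2(mat32.Cos(a), mat32.Sin(a)) // constructor replaces Vec2{...} literal
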
diff --git a/examples/deep_move/params.go b/examples/deep_move/params.go
index 8777dc6be..0086e0903 100644
--- a/examples/deep_move/params.go
+++ b/examples/deep_move/params.go
@@ -5,8 +5,8 @@
package main
import (
- "github.com/emer/emergent/netparams"
- "github.com/emer/emergent/params"
+ "github.com/emer/emergent/v2/netparams"
+ "github.com/emer/emergent/v2/params"
)
// ParamSets is the default set of parameters -- Base is always applied, and others can be optionally
diff --git a/examples/deep_move/params_good/params_all.txt b/examples/deep_move/params_good/params_all.txt
index 8bdcdd52a..fb2894120 100644
--- a/examples/deep_move/params_good/params_all.txt
+++ b/examples/deep_move/params_good/params_all.txt
@@ -11,13 +11,13 @@ Act: {
Clamp: { IsInput: true IsTarget: false Ge: 1.5 Add: false ErrThr: 0.5 }
Noise: { On: false GeHz: 100 Ge: 0.001 GiHz: 200 Gi: 0.001 }
VmRange: { Min: 0.1 Max: 1 }
- Mahp: { Gbar: 0.04 Voff: -30 Vslope: 9 TauMax: 1000 Tadj: 3.2093637 DtMax: 0.001 }
+ Mahp: { Gbar: 0.04 Voff: -30 Vslope: 9 TauMax: 1000 Tadj: 3.209364 DtMax: 0.001 }
Sahp: { Gbar: 0.1 CaTau: 5 Off: 0.8 Slope: 0.02 TauMax: 1 CaDt: 0.2 DtMax: 1 }
KNa: { On: true TrialSlow: false Med: { On: true Rise: 0.02 Max: 0.2 Tau: 200 Dt: 0.005 } Slow: { On: true Rise: 0.001 Max: 0.2 Tau: 1000 Dt: 0.001
} }
NMDA: { Gbar: 0.006 Tau: 100 ITau: 1 MgC: 1.4 Voff: 0 }
MaintNMDA: { Gbar: 0.007 Tau: 200 ITau: 1 MgC: 1.4 Voff: 0 }
- GabaB: { Gbar: 0.015 RiseTau: 45 DecayTau: 50 Gbase: 0.2 GiSpike: 10 MaxTime: 47.41225 TauFact: 2.5811758 RiseDt: 0.022222223 DecayDt: 0.02 }
+ GabaB: { Gbar: 0.015 RiseTau: 45 DecayTau: 50 Gbase: 0.2 GiSpike: 10 MaxTime: 47.41225 TauFact: 2.5811756 RiseDt: 0.022222223 DecayDt: 0.02 }
VGCC: { Gbar: 0.02 Ca: 25 }
AK: { Gbar: 0.1 Hf: 0.076 Mf: 0.075 Voff: 2 Vmax: -37 }
SKCa: { Gbar: 0 C50: 0.5 ActTau: 15 DeTau: 30 KCaR: 0.8 CaRDecayTau: 150 CaInThr: 0.01 CaInTau: 50 }
@@ -37,7 +37,7 @@ Learn: {
TrgAvgAct: { On: true GiBaseInit: 0 ErrLRate: 0.02 SynScaleRate: 0.005 SubMean: 1 Permute: true Pool: true TrgRange: { Min: 0.5 Max: 2
} }
RLRate: { On: true SigmoidMin: 0.05 Diff: true SpkThr: 0.1 DiffThr: 0.02 Min: 0.001 }
- NeuroMod: { DAMod: 0 Valence: 0 DAModGain: 0.5 DALRateSign: false DALRateMod: 0 AChLRateMod: 0 AChDisInhib: 0 BurstGain: 1 DipGain: 1 }
+ NeuroMod: { DAMod: NoDAMod Valence: Positive DAModGain: 0.5 DALRateSign: false DALRateMod: 0 AChLRateMod: 0 AChDisInhib: 0 BurstGain: 1 DipGain: 1 }
}
/////////////////////////////////////////////////
Layer: DepthP
@@ -52,13 +52,13 @@ Act: {
Clamp: { IsInput: false IsTarget: true Ge: 0.8 Add: false ErrThr: 0.5 }
Noise: { On: false GeHz: 100 Ge: 0.001 GiHz: 200 Gi: 0.001 }
VmRange: { Min: 0.1 Max: 1 }
- Mahp: { Gbar: 0.04 Voff: -30 Vslope: 9 TauMax: 1000 Tadj: 3.2093637 DtMax: 0.001 }
+ Mahp: { Gbar: 0.04 Voff: -30 Vslope: 9 TauMax: 1000 Tadj: 3.209364 DtMax: 0.001 }
Sahp: { Gbar: 0.1 CaTau: 5 Off: 0.8 Slope: 0.02 TauMax: 1 CaDt: 0.2 DtMax: 1 }
KNa: { On: true TrialSlow: false Med: { On: true Rise: 0.02 Max: 0.2 Tau: 200 Dt: 0.005 } Slow: { On: true Rise: 0.001 Max: 0.2 Tau: 1000 Dt: 0.001
} }
NMDA: { Gbar: 0.006 Tau: 100 ITau: 1 MgC: 1.4 Voff: 0 }
MaintNMDA: { Gbar: 0.007 Tau: 200 ITau: 1 MgC: 1.4 Voff: 0 }
- GabaB: { Gbar: 0.015 RiseTau: 45 DecayTau: 50 Gbase: 0.2 GiSpike: 10 MaxTime: 47.41225 TauFact: 2.5811758 RiseDt: 0.022222223 DecayDt: 0.02 }
+ GabaB: { Gbar: 0.015 RiseTau: 45 DecayTau: 50 Gbase: 0.2 GiSpike: 10 MaxTime: 47.41225 TauFact: 2.5811756 RiseDt: 0.022222223 DecayDt: 0.02 }
VGCC: { Gbar: 0.02 Ca: 25 }
AK: { Gbar: 0.1 Hf: 0.076 Mf: 0.075 Voff: 2 Vmax: -37 }
SKCa: { Gbar: 0 C50: 0.5 ActTau: 15 DeTau: 30 KCaR: 0.8 CaRDecayTau: 150 CaInThr: 0.01 CaInTau: 50 }
@@ -78,7 +78,7 @@ Learn: {
TrgAvgAct: { On: true GiBaseInit: 0 ErrLRate: 0.02 SynScaleRate: 0.005 SubMean: 1 Permute: true Pool: true TrgRange: { Min: 0.5 Max: 2
} }
RLRate: { On: true SigmoidMin: 1 Diff: true SpkThr: 0.1 DiffThr: 0.02 Min: 0.001 }
- NeuroMod: { DAMod: 0 Valence: 0 DAModGain: 0.5 DALRateSign: false DALRateMod: 0 AChLRateMod: 0 AChDisInhib: 0 BurstGain: 1 DipGain: 1 }
+ NeuroMod: { DAMod: NoDAMod Valence: Positive DAModGain: 0.5 DALRateSign: false DALRateMod: 0 AChLRateMod: 0 AChDisInhib: 0 BurstGain: 1 DipGain: 1 }
}
Pulv: {
DriveScale: 0.1 FullDriveAct: 0.6 DriveLayIdx: 0
@@ -116,13 +116,13 @@ Act: {
Clamp: { IsInput: true IsTarget: false Ge: 1.5 Add: false ErrThr: 0.5 }
Noise: { On: false GeHz: 100 Ge: 0.001 GiHz: 200 Gi: 0.001 }
VmRange: { Min: 0.1 Max: 1 }
- Mahp: { Gbar: 0.04 Voff: -30 Vslope: 9 TauMax: 1000 Tadj: 3.2093637 DtMax: 0.001 }
+ Mahp: { Gbar: 0.04 Voff: -30 Vslope: 9 TauMax: 1000 Tadj: 3.209364 DtMax: 0.001 }
Sahp: { Gbar: 0.1 CaTau: 5 Off: 0.8 Slope: 0.02 TauMax: 1 CaDt: 0.2 DtMax: 1 }
KNa: { On: true TrialSlow: false Med: { On: true Rise: 0.02 Max: 0.2 Tau: 200 Dt: 0.005 } Slow: { On: true Rise: 0.001 Max: 0.2 Tau: 1000 Dt: 0.001
} }
NMDA: { Gbar: 0.006 Tau: 100 ITau: 1 MgC: 1.4 Voff: 0 }
MaintNMDA: { Gbar: 0.007 Tau: 200 ITau: 1 MgC: 1.4 Voff: 0 }
- GabaB: { Gbar: 0.015 RiseTau: 45 DecayTau: 50 Gbase: 0.2 GiSpike: 10 MaxTime: 47.41225 TauFact: 2.5811758 RiseDt: 0.022222223 DecayDt: 0.02 }
+ GabaB: { Gbar: 0.015 RiseTau: 45 DecayTau: 50 Gbase: 0.2 GiSpike: 10 MaxTime: 47.41225 TauFact: 2.5811756 RiseDt: 0.022222223 DecayDt: 0.02 }
VGCC: { Gbar: 0.02 Ca: 25 }
AK: { Gbar: 0.1 Hf: 0.076 Mf: 0.075 Voff: 2 Vmax: -37 }
SKCa: { Gbar: 0 C50: 0.5 ActTau: 15 DeTau: 30 KCaR: 0.8 CaRDecayTau: 150 CaInThr: 0.01 CaInTau: 50 }
@@ -142,7 +142,7 @@ Learn: {
TrgAvgAct: { On: true GiBaseInit: 0 ErrLRate: 0.02 SynScaleRate: 0.005 SubMean: 1 Permute: true Pool: true TrgRange: { Min: 0.5 Max: 2
} }
RLRate: { On: true SigmoidMin: 0.05 Diff: true SpkThr: 0.1 DiffThr: 0.02 Min: 0.001 }
- NeuroMod: { DAMod: 0 Valence: 0 DAModGain: 0.5 DALRateSign: false DALRateMod: 0 AChLRateMod: 0 AChDisInhib: 0 BurstGain: 1 DipGain: 1 }
+ NeuroMod: { DAMod: NoDAMod Valence: Positive DAModGain: 0.5 DALRateSign: false DALRateMod: 0 AChLRateMod: 0 AChDisInhib: 0 BurstGain: 1 DipGain: 1 }
}
/////////////////////////////////////////////////
Layer: HeadDirP
@@ -157,13 +157,13 @@ Act: {
Clamp: { IsInput: false IsTarget: true Ge: 0.8 Add: false ErrThr: 0.5 }
Noise: { On: false GeHz: 100 Ge: 0.001 GiHz: 200 Gi: 0.001 }
VmRange: { Min: 0.1 Max: 1 }
- Mahp: { Gbar: 0.04 Voff: -30 Vslope: 9 TauMax: 1000 Tadj: 3.2093637 DtMax: 0.001 }
+ Mahp: { Gbar: 0.04 Voff: -30 Vslope: 9 TauMax: 1000 Tadj: 3.209364 DtMax: 0.001 }
Sahp: { Gbar: 0.1 CaTau: 5 Off: 0.8 Slope: 0.02 TauMax: 1 CaDt: 0.2 DtMax: 1 }
KNa: { On: true TrialSlow: false Med: { On: true Rise: 0.02 Max: 0.2 Tau: 200 Dt: 0.005 } Slow: { On: true Rise: 0.001 Max: 0.2 Tau: 1000 Dt: 0.001
} }
NMDA: { Gbar: 0.006 Tau: 100 ITau: 1 MgC: 1.4 Voff: 0 }
MaintNMDA: { Gbar: 0.007 Tau: 200 ITau: 1 MgC: 1.4 Voff: 0 }
- GabaB: { Gbar: 0.015 RiseTau: 45 DecayTau: 50 Gbase: 0.2 GiSpike: 10 MaxTime: 47.41225 TauFact: 2.5811758 RiseDt: 0.022222223 DecayDt: 0.02 }
+ GabaB: { Gbar: 0.015 RiseTau: 45 DecayTau: 50 Gbase: 0.2 GiSpike: 10 MaxTime: 47.41225 TauFact: 2.5811756 RiseDt: 0.022222223 DecayDt: 0.02 }
VGCC: { Gbar: 0.02 Ca: 25 }
AK: { Gbar: 0.1 Hf: 0.076 Mf: 0.075 Voff: 2 Vmax: -37 }
SKCa: { Gbar: 0 C50: 0.5 ActTau: 15 DeTau: 30 KCaR: 0.8 CaRDecayTau: 150 CaInThr: 0.01 CaInTau: 50 }
@@ -183,7 +183,7 @@ Learn: {
TrgAvgAct: { On: true GiBaseInit: 0 ErrLRate: 0.02 SynScaleRate: 0.005 SubMean: 1 Permute: true Pool: true TrgRange: { Min: 0.5 Max: 2
} }
RLRate: { On: true SigmoidMin: 1 Diff: true SpkThr: 0.1 DiffThr: 0.02 Min: 0.001 }
- NeuroMod: { DAMod: 0 Valence: 0 DAModGain: 0.5 DALRateSign: false DALRateMod: 0 AChLRateMod: 0 AChDisInhib: 0 BurstGain: 1 DipGain: 1 }
+ NeuroMod: { DAMod: NoDAMod Valence: Positive DAModGain: 0.5 DALRateSign: false DALRateMod: 0 AChLRateMod: 0 AChDisInhib: 0 BurstGain: 1 DipGain: 1 }
}
Pulv: {
DriveScale: 0.1 FullDriveAct: 0.6 DriveLayIdx: 2
@@ -221,13 +221,13 @@ Act: {
Clamp: { IsInput: true IsTarget: false Ge: 1.5 Add: false ErrThr: 0.5 }
Noise: { On: false GeHz: 100 Ge: 0.001 GiHz: 200 Gi: 0.001 }
VmRange: { Min: 0.1 Max: 1 }
- Mahp: { Gbar: 0.04 Voff: -30 Vslope: 9 TauMax: 1000 Tadj: 3.2093637 DtMax: 0.001 }
+ Mahp: { Gbar: 0.04 Voff: -30 Vslope: 9 TauMax: 1000 Tadj: 3.209364 DtMax: 0.001 }
Sahp: { Gbar: 0.1 CaTau: 5 Off: 0.8 Slope: 0.02 TauMax: 1 CaDt: 0.2 DtMax: 1 }
KNa: { On: true TrialSlow: false Med: { On: true Rise: 0.02 Max: 0.2 Tau: 200 Dt: 0.005 } Slow: { On: true Rise: 0.001 Max: 0.2 Tau: 1000 Dt: 0.001
} }
NMDA: { Gbar: 0.006 Tau: 100 ITau: 1 MgC: 1.4 Voff: 0 }
MaintNMDA: { Gbar: 0.007 Tau: 200 ITau: 1 MgC: 1.4 Voff: 0 }
- GabaB: { Gbar: 0.015 RiseTau: 45 DecayTau: 50 Gbase: 0.2 GiSpike: 10 MaxTime: 47.41225 TauFact: 2.5811758 RiseDt: 0.022222223 DecayDt: 0.02 }
+ GabaB: { Gbar: 0.015 RiseTau: 45 DecayTau: 50 Gbase: 0.2 GiSpike: 10 MaxTime: 47.41225 TauFact: 2.5811756 RiseDt: 0.022222223 DecayDt: 0.02 }
VGCC: { Gbar: 0.02 Ca: 25 }
AK: { Gbar: 0.1 Hf: 0.076 Mf: 0.075 Voff: 2 Vmax: -37 }
SKCa: { Gbar: 0 C50: 0.5 ActTau: 15 DeTau: 30 KCaR: 0.8 CaRDecayTau: 150 CaInThr: 0.01 CaInTau: 50 }
@@ -247,7 +247,7 @@ Learn: {
TrgAvgAct: { On: true GiBaseInit: 0 ErrLRate: 0.02 SynScaleRate: 0.005 SubMean: 1 Permute: true Pool: true TrgRange: { Min: 0.5 Max: 2
} }
RLRate: { On: true SigmoidMin: 0.05 Diff: true SpkThr: 0.1 DiffThr: 0.02 Min: 0.001 }
- NeuroMod: { DAMod: 0 Valence: 0 DAModGain: 0.5 DALRateSign: false DALRateMod: 0 AChLRateMod: 0 AChDisInhib: 0 BurstGain: 1 DipGain: 1 }
+ NeuroMod: { DAMod: NoDAMod Valence: Positive DAModGain: 0.5 DALRateSign: false DALRateMod: 0 AChLRateMod: 0 AChDisInhib: 0 BurstGain: 1 DipGain: 1 }
}
/////////////////////////////////////////////////
Layer: DepthHid
@@ -262,13 +262,13 @@ Act: {
Clamp: { IsInput: false IsTarget: false Ge: 0.8 Add: false ErrThr: 0.5 }
Noise: { On: false GeHz: 100 Ge: 0.001 GiHz: 200 Gi: 0.001 }
VmRange: { Min: 0.1 Max: 1 }
- Mahp: { Gbar: 0.04 Voff: -30 Vslope: 9 TauMax: 1000 Tadj: 3.2093637 DtMax: 0.001 }
+ Mahp: { Gbar: 0.04 Voff: -30 Vslope: 9 TauMax: 1000 Tadj: 3.209364 DtMax: 0.001 }
Sahp: { Gbar: 0.1 CaTau: 5 Off: 0.8 Slope: 0.02 TauMax: 1 CaDt: 0.2 DtMax: 1 }
KNa: { On: true TrialSlow: false Med: { On: true Rise: 0.02 Max: 0.2 Tau: 200 Dt: 0.005 } Slow: { On: true Rise: 0.001 Max: 0.2 Tau: 1000 Dt: 0.001
} }
NMDA: { Gbar: 0.006 Tau: 100 ITau: 1 MgC: 1.4 Voff: 0 }
MaintNMDA: { Gbar: 0.007 Tau: 200 ITau: 1 MgC: 1.4 Voff: 0 }
- GabaB: { Gbar: 0.015 RiseTau: 45 DecayTau: 50 Gbase: 0.2 GiSpike: 10 MaxTime: 47.41225 TauFact: 2.5811758 RiseDt: 0.022222223 DecayDt: 0.02 }
+ GabaB: { Gbar: 0.015 RiseTau: 45 DecayTau: 50 Gbase: 0.2 GiSpike: 10 MaxTime: 47.41225 TauFact: 2.5811756 RiseDt: 0.022222223 DecayDt: 0.02 }
VGCC: { Gbar: 0.02 Ca: 25 }
AK: { Gbar: 0.1 Hf: 0.076 Mf: 0.075 Voff: 2 Vmax: -37 }
SKCa: { Gbar: 0 C50: 0.5 ActTau: 15 DeTau: 30 KCaR: 0.8 CaRDecayTau: 150 CaInThr: 0.01 CaInTau: 50 }
@@ -288,7 +288,7 @@ Learn: {
TrgAvgAct: { On: true GiBaseInit: 0 ErrLRate: 0.02 SynScaleRate: 0.005 SubMean: 1 Permute: true Pool: true TrgRange: { Min: 0.5 Max: 2
} }
RLRate: { On: true SigmoidMin: 0.05 Diff: true SpkThr: 0.1 DiffThr: 0.02 Min: 0.001 }
- NeuroMod: { DAMod: 0 Valence: 0 DAModGain: 0.5 DALRateSign: false DALRateMod: 0 AChLRateMod: 0 AChDisInhib: 0 BurstGain: 1 DipGain: 1 }
+ NeuroMod: { DAMod: NoDAMod Valence: Positive DAModGain: 0.5 DALRateSign: false DALRateMod: 0 AChLRateMod: 0 AChDisInhib: 0 BurstGain: 1 DipGain: 1 }
}
Burst: {
ThrRel: 0.1 ThrAbs: 0.1
@@ -366,13 +366,13 @@ Act: {
Clamp: { IsInput: false IsTarget: false Ge: 0.8 Add: false ErrThr: 0.5 }
Noise: { On: false GeHz: 100 Ge: 0.001 GiHz: 200 Gi: 0.001 }
VmRange: { Min: 0.1 Max: 1 }
- Mahp: { Gbar: 0.04 Voff: -30 Vslope: 9 TauMax: 1000 Tadj: 3.2093637 DtMax: 0.001 }
+ Mahp: { Gbar: 0.04 Voff: -30 Vslope: 9 TauMax: 1000 Tadj: 3.209364 DtMax: 0.001 }
Sahp: { Gbar: 0.1 CaTau: 5 Off: 0.8 Slope: 0.02 TauMax: 1 CaDt: 0.2 DtMax: 1 }
KNa: { On: true TrialSlow: false Med: { On: true Rise: 0.02 Max: 0.2 Tau: 200 Dt: 0.005 } Slow: { On: true Rise: 0.001 Max: 0.2 Tau: 1000 Dt: 0.001
} }
NMDA: { Gbar: 0.006 Tau: 100 ITau: 1 MgC: 1.4 Voff: 0 }
MaintNMDA: { Gbar: 0.006 Tau: 100 ITau: 1 MgC: 1.4 Voff: 0 }
- GabaB: { Gbar: 0.015 RiseTau: 45 DecayTau: 50 Gbase: 0.2 GiSpike: 10 MaxTime: 47.41225 TauFact: 2.5811758 RiseDt: 0.022222223 DecayDt: 0.02 }
+ GabaB: { Gbar: 0.015 RiseTau: 45 DecayTau: 50 Gbase: 0.2 GiSpike: 10 MaxTime: 47.41225 TauFact: 2.5811756 RiseDt: 0.022222223 DecayDt: 0.02 }
VGCC: { Gbar: 0.02 Ca: 25 }
AK: { Gbar: 0.1 Hf: 0.076 Mf: 0.075 Voff: 2 Vmax: -37 }
SKCa: { Gbar: 0 C50: 0.5 ActTau: 15 DeTau: 30 KCaR: 0.8 CaRDecayTau: 150 CaInThr: 0.01 CaInTau: 50 }
@@ -392,7 +392,7 @@ Learn: {
TrgAvgAct: { On: true GiBaseInit: 0 ErrLRate: 0.02 SynScaleRate: 0.005 SubMean: 1 Permute: true Pool: true TrgRange: { Min: 0.5 Max: 2
} }
RLRate: { On: true SigmoidMin: 0.05 Diff: true SpkThr: 0.1 DiffThr: 0.02 Min: 0.001 }
- NeuroMod: { DAMod: 0 Valence: 0 DAModGain: 0.5 DALRateSign: false DALRateMod: 0 AChLRateMod: 0 AChDisInhib: 0 BurstGain: 1 DipGain: 1 }
+ NeuroMod: { DAMod: NoDAMod Valence: Positive DAModGain: 0.5 DALRateSign: false DALRateMod: 0 AChLRateMod: 0 AChDisInhib: 0 BurstGain: 1 DipGain: 1 }
}
CT: {
GeGain: 0.5 DecayTau: 0
@@ -450,13 +450,13 @@ Act: {
Clamp: { IsInput: false IsTarget: false Ge: 0.8 Add: false ErrThr: 0.5 }
Noise: { On: false GeHz: 100 Ge: 0.001 GiHz: 200 Gi: 0.001 }
VmRange: { Min: 0.1 Max: 1 }
- Mahp: { Gbar: 0.04 Voff: -30 Vslope: 9 TauMax: 1000 Tadj: 3.2093637 DtMax: 0.001 }
+ Mahp: { Gbar: 0.04 Voff: -30 Vslope: 9 TauMax: 1000 Tadj: 3.209364 DtMax: 0.001 }
Sahp: { Gbar: 0.1 CaTau: 5 Off: 0.8 Slope: 0.02 TauMax: 1 CaDt: 0.2 DtMax: 1 }
KNa: { On: true TrialSlow: false Med: { On: true Rise: 0.02 Max: 0.2 Tau: 200 Dt: 0.005 } Slow: { On: true Rise: 0.001 Max: 0.2 Tau: 1000 Dt: 0.001
} }
NMDA: { Gbar: 0.006 Tau: 100 ITau: 1 MgC: 1.4 Voff: 0 }
MaintNMDA: { Gbar: 0.007 Tau: 200 ITau: 1 MgC: 1.4 Voff: 0 }
- GabaB: { Gbar: 0.015 RiseTau: 45 DecayTau: 50 Gbase: 0.2 GiSpike: 10 MaxTime: 47.41225 TauFact: 2.5811758 RiseDt: 0.022222223 DecayDt: 0.02 }
+ GabaB: { Gbar: 0.015 RiseTau: 45 DecayTau: 50 Gbase: 0.2 GiSpike: 10 MaxTime: 47.41225 TauFact: 2.5811756 RiseDt: 0.022222223 DecayDt: 0.02 }
VGCC: { Gbar: 0.02 Ca: 25 }
AK: { Gbar: 0.1 Hf: 0.076 Mf: 0.075 Voff: 2 Vmax: -37 }
SKCa: { Gbar: 0 C50: 0.5 ActTau: 15 DeTau: 30 KCaR: 0.8 CaRDecayTau: 150 CaInThr: 0.01 CaInTau: 50 }
@@ -476,7 +476,7 @@ Learn: {
TrgAvgAct: { On: true GiBaseInit: 0 ErrLRate: 0.02 SynScaleRate: 0.005 SubMean: 1 Permute: true Pool: true TrgRange: { Min: 0.5 Max: 2
} }
RLRate: { On: true SigmoidMin: 0.05 Diff: true SpkThr: 0.1 DiffThr: 0.02 Min: 0.001 }
- NeuroMod: { DAMod: 0 Valence: 0 DAModGain: 0.5 DALRateSign: false DALRateMod: 0 AChLRateMod: 0 AChDisInhib: 0 BurstGain: 1 DipGain: 1 }
+ NeuroMod: { DAMod: NoDAMod Valence: Positive DAModGain: 0.5 DALRateSign: false DALRateMod: 0 AChLRateMod: 0 AChDisInhib: 0 BurstGain: 1 DipGain: 1 }
}
Burst: {
ThrRel: 0.1 ThrAbs: 0.1
@@ -554,13 +554,13 @@ Act: {
Clamp: { IsInput: false IsTarget: false Ge: 0.8 Add: false ErrThr: 0.5 }
Noise: { On: false GeHz: 100 Ge: 0.001 GiHz: 200 Gi: 0.001 }
VmRange: { Min: 0.1 Max: 1 }
- Mahp: { Gbar: 0.04 Voff: -30 Vslope: 9 TauMax: 1000 Tadj: 3.2093637 DtMax: 0.001 }
+ Mahp: { Gbar: 0.04 Voff: -30 Vslope: 9 TauMax: 1000 Tadj: 3.209364 DtMax: 0.001 }
Sahp: { Gbar: 0.1 CaTau: 5 Off: 0.8 Slope: 0.02 TauMax: 1 CaDt: 0.2 DtMax: 1 }
KNa: { On: true TrialSlow: false Med: { On: true Rise: 0.02 Max: 0.2 Tau: 200 Dt: 0.005 } Slow: { On: true Rise: 0.001 Max: 0.2 Tau: 1000 Dt: 0.001
} }
NMDA: { Gbar: 0.006 Tau: 100 ITau: 1 MgC: 1.4 Voff: 0 }
MaintNMDA: { Gbar: 0.006 Tau: 100 ITau: 1 MgC: 1.4 Voff: 0 }
- GabaB: { Gbar: 0.015 RiseTau: 45 DecayTau: 50 Gbase: 0.2 GiSpike: 10 MaxTime: 47.41225 TauFact: 2.5811758 RiseDt: 0.022222223 DecayDt: 0.02 }
+ GabaB: { Gbar: 0.015 RiseTau: 45 DecayTau: 50 Gbase: 0.2 GiSpike: 10 MaxTime: 47.41225 TauFact: 2.5811756 RiseDt: 0.022222223 DecayDt: 0.02 }
VGCC: { Gbar: 0.02 Ca: 25 }
AK: { Gbar: 0.1 Hf: 0.076 Mf: 0.075 Voff: 2 Vmax: -37 }
SKCa: { Gbar: 0 C50: 0.5 ActTau: 15 DeTau: 30 KCaR: 0.8 CaRDecayTau: 150 CaInThr: 0.01 CaInTau: 50 }
@@ -580,7 +580,7 @@ Learn: {
TrgAvgAct: { On: true GiBaseInit: 0 ErrLRate: 0.02 SynScaleRate: 0.005 SubMean: 1 Permute: true Pool: true TrgRange: { Min: 0.5 Max: 2
} }
RLRate: { On: true SigmoidMin: 0.05 Diff: true SpkThr: 0.1 DiffThr: 0.02 Min: 0.001 }
- NeuroMod: { DAMod: 0 Valence: 0 DAModGain: 0.5 DALRateSign: false DALRateMod: 0 AChLRateMod: 0 AChDisInhib: 0 BurstGain: 1 DipGain: 1 }
+ NeuroMod: { DAMod: NoDAMod Valence: Positive DAModGain: 0.5 DALRateSign: false DALRateMod: 0 AChLRateMod: 0 AChDisInhib: 0 BurstGain: 1 DipGain: 1 }
}
CT: {
GeGain: 1 DecayTau: 0
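Note on the pure-precision churn above: Tadj (3.2093637 -> 3.209364) and TauFact (2.5811758 -> 2.5811756) move by one unit in the last place, which is consistent with the reimplemented math in mat32/v2 landing on an adjacent float32 that then prints differently under Go's shortest round-trip formatting. A minimal sketch of that formatting effect, assuming nothing about mat32 itself:

package main

import (
	"fmt"
	"math"
)

func main() {
	a := float32(3.2093637)
	b := math.Nextafter32(a, 4) // the adjacent float32, one ULP above a
	// fmt prints each float32 with the shortest decimal string that
	// round-trips, so neighboring values can have different digit counts.
	fmt.Println(a, b)
}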
diff --git a/examples/deep_move/params_good/params_layers.txt b/examples/deep_move/params_good/params_layers.txt
index 3c2cd7d30..26dd9d2cc 100644
--- a/examples/deep_move/params_good/params_layers.txt
+++ b/examples/deep_move/params_good/params_layers.txt
@@ -10,7 +10,7 @@
HeadDirP Nominal: 0.13 Params: .HeadDirIn: 0.13 | Layer: 0.1
Layer.Gi: 0.80 Params: .PulvinarLayer: 0.8 | .HeadDirIn: 0.9 | Layer: 1.0
- Action Nominal: 0.25 Params: #Action: 0.25 | Base:Layer: 0.1
+ Action Nominal: 0.25 Params: #Action: 0.25 | Layer: 0.1
Layer.Gi: 0.90 Params: #Action: 0.9 | Layer: 1.0
DepthHid Nominal: 0.07 Params: #DepthHid: 0.07 | .SuperLayer: 0.1 | Layer: 0.1
diff --git a/examples/deep_music/config.go b/examples/deep_music/config.go
index 111078295..306f5cc06 100644
--- a/examples/deep_music/config.go
+++ b/examples/deep_music/config.go
@@ -10,128 +10,128 @@ package main
type EnvConfig struct {
// env parameters -- can set any field/subfield on Env struct, using standard TOML formatting
- Env map[string]any `desc:"env parameters -- can set any field/subfield on Env struct, using standard TOML formatting"`
+ Env map[string]any
- // [def: 4] number of units per localist output unit
- UnitsPer int `def:"4" desc:"number of units per localist output unit"`
+ // number of units per localist output unit
+ UnitsPer int `def:"4"`
// train the full song -- else 30 notes
- FullSong bool `desc:"train the full song -- else 30 notes"`
+ FullSong bool
// during testing, play the target note instead of the actual network output
- PlayTarg bool `desc:"during testing, play the target note instead of the actual network output"`
+ PlayTarg bool
- // [def: true] drive inputs from the training sequence during testing -- otherwise use network's own output
- TestClamp bool `def:"true" desc:"drive inputs from the training sequence during testing -- otherwise use network's own output"`
+ // drive inputs from the training sequence during testing -- otherwise use network's own output
+ TestClamp bool `def:"true"`
}
// ParamConfig has config parameters related to sim params
type ParamConfig struct {
// network parameters
- Network map[string]any `desc:"network parameters"`
+ Network map[string]any
// use a second hidden layer that predicts the first -- is not beneficial
- Hid2 bool `desc:"use a second hidden layer that predicts the first -- is not beneficia"`
+ Hid2 bool
// Extra Param Sheet name(s) to use (space separated if multiple) -- must be valid name as listed in compiled-in params or loaded params
- Sheet string `desc:"Extra Param Sheet name(s) to use (space separated if multiple) -- must be valid name as listed in compiled-in params or loaded params"`
+ Sheet string
// extra tag to add to file names and logs saved from this run
- Tag string `desc:"extra tag to add to file names and logs saved from this run"`
+ Tag string
// user note -- describe the run params etc -- like a git commit message for the run
- Note string `desc:"user note -- describe the run params etc -- like a git commit message for the run"`
+ Note string
// Name of the JSON file to input saved parameters from.
- File string `nest:"+" desc:"Name of the JSON file to input saved parameters from."`
+ File string `nest:"+"`
// Save a snapshot of all current param and config settings in a directory named params_ (or _good if Good is true), then quit -- useful for comparing to later changes and seeing multiple views of current params
- SaveAll bool `nest:"+" desc:"Save a snapshot of all current param and config settings in a directory named params_ (or _good if Good is true), then quit -- useful for comparing to later changes and seeing multiple views of current params"`
+ SaveAll bool `nest:"+"`
// for SaveAll, save to params_good for a known good params state. This can be done prior to making a new release after all tests are passing -- add results to git to provide a full diff record of all params over time.
- Good bool `nest:"+" desc:"for SaveAll, save to params_good for a known good params state. This can be done prior to making a new release after all tests are passing -- add results to git to provide a full diff record of all params over time."`
+ Good bool `nest:"+"`
}
// RunConfig has config parameters related to running the sim
type RunConfig struct {
- // [def: true] use the GPU for computation -- generally faster even for small models if NData ~16
- GPU bool `def:"true" desc:"use the GPU for computation -- generally faster even for small models if NData ~16"`
+ // use the GPU for computation -- generally faster even for small models if NData ~16
+ GPU bool `def:"true"`
- // [def: 4] [min: 1] number of data-parallel items to process in parallel per trial -- works (and is significantly faster) for both CPU and GPU. Results in an effective mini-batch of learning. is noisy above 4 for 30 note case.
- NData int `def:"4" min:"1" desc:"number of data-parallel items to process in parallel per trial -- works (and is significantly faster) for both CPU and GPU. Results in an effective mini-batch of learning. is noisy above 4 for 30 note case."`
+ // number of data-parallel items to process in parallel per trial -- works (and is significantly faster) for both CPU and GPU. Results in an effective mini-batch of learning. It is noisy above 4 for the 30 note case.
+ NData int `def:"4" min:"1"`
- // [def: 0] number of parallel threads for CPU computation -- 0 = use default
- NThreads int `def:"0" desc:"number of parallel threads for CPU computation -- 0 = use default"`
+ // number of parallel threads for CPU computation -- 0 = use default
+ NThreads int `def:"0"`
- // [def: 0] starting run number -- determines the random seed -- runs counts from there -- can do all runs in parallel by launching separate jobs with each run, runs = 1
- Run int `def:"0" desc:"starting run number -- determines the random seed -- runs counts from there -- can do all runs in parallel by launching separate jobs with each run, runs = 1"`
+ // starting run number -- determines the random seed -- runs counts from there -- can do all runs in parallel by launching separate jobs with each run, runs = 1
+ Run int `def:"0"`
- // [def: 5] [min: 1] total number of runs to do when running Train
- NRuns int `def:"5" min:"1" desc:"total number of runs to do when running Train"`
+ // total number of runs to do when running Train
+ NRuns int `def:"5" min:"1"`
- // [def: 100] total number of epochs per run
- NEpochs int `def:"100" desc:"total number of epochs per run"`
+ // total number of epochs per run
+ NEpochs int `def:"100"`
- // [def: 128] total number of trials per epoch. Should be an even multiple of NData.
- NTrials int `def:"128" desc:"total number of trials per epoch. Should be an even multiple of NData."`
+ // total number of trials per epoch. Should be an even multiple of NData.
+ NTrials int `def:"128"`
- // [def: 5] how frequently (in epochs) to compute PCA on hidden representations to measure variance?
- PCAInterval int `def:"5" desc:"how frequently (in epochs) to compute PCA on hidden representations to measure variance?"`
+ // how frequently (in epochs) to compute PCA on hidden representations to measure variance?
+ PCAInterval int `def:"5"`
- // [def: -1] how often to run through all the test patterns, in terms of training epochs -- can use 0 or -1 for no testing
- TestInterval int `def:"-1" desc:"how often to run through all the test patterns, in terms of training epochs -- can use 0 or -1 for no testing"`
+ // how often to run through all the test patterns, in terms of training epochs -- can use 0 or -1 for no testing
+ TestInterval int `def:"-1"`
}
// LogConfig has config parameters related to logging data
type LogConfig struct {
// if true, save final weights after each run
- SaveWts bool `desc:"if true, save final weights after each run"`
+ SaveWts bool
- // [def: true] if true, save train epoch log to file, as .epc.tsv typically
- Epoch bool `def:"true" nest:"+" desc:"if true, save train epoch log to file, as .epc.tsv typically"`
+ // if true, save train epoch log to file, as .epc.tsv typically
+ Epoch bool `def:"true" nest:"+"`
- // [def: true] if true, save run log to file, as .run.tsv typically
- Run bool `def:"true" nest:"+" desc:"if true, save run log to file, as .run.tsv typically"`
+ // if true, save run log to file, as .run.tsv typically
+ Run bool `def:"true" nest:"+"`
- // [def: false] if true, save train trial log to file, as .trl.tsv typically. May be large.
- Trial bool `def:"false" nest:"+" desc:"if true, save train trial log to file, as .trl.tsv typically. May be large."`
+ // if true, save train trial log to file, as .trl.tsv typically. May be large.
+ Trial bool `def:"false" nest:"+"`
- // [def: false] if true, save testing epoch log to file, as .tst_epc.tsv typically. In general it is better to copy testing items over to the training epoch log and record there.
- TestEpoch bool `def:"false" nest:"+" desc:"if true, save testing epoch log to file, as .tst_epc.tsv typically. In general it is better to copy testing items over to the training epoch log and record there."`
+ // if true, save testing epoch log to file, as .tst_epc.tsv typically. In general it is better to copy testing items over to the training epoch log and record there.
+ TestEpoch bool `def:"false" nest:"+"`
- // [def: false] if true, save testing trial log to file, as .tst_trl.tsv typically. May be large.
- TestTrial bool `def:"false" nest:"+" desc:"if true, save testing trial log to file, as .tst_trl.tsv typically. May be large."`
+ // if true, save testing trial log to file, as .tst_trl.tsv typically. May be large.
+ TestTrial bool `def:"false" nest:"+"`
// if true, save network activation etc data from testing trials, for later viewing in netview
- NetData bool `desc:"if true, save network activation etc data from testing trials, for later viewing in netview"`
+ NetData bool
}
// Config is a standard Sim config -- use as a starting point.
type Config struct {
// specify include files here, and after configuration, it contains list of include files added
- Includes []string `desc:"specify include files here, and after configuration, it contains list of include files added"`
+ Includes []string
- // [def: true] open the GUI -- does not automatically run -- if false, then runs automatically and quits
- GUI bool `def:"true" desc:"open the GUI -- does not automatically run -- if false, then runs automatically and quits"`
+ // open the GUI -- does not automatically run -- if false, then runs automatically and quits
+ GUI bool `def:"true"`
// log debugging information
- Debug bool `desc:"log debugging information"`
+ Debug bool
- // [view: add-fields] environment configuration options
- Env EnvConfig `view:"add-fields" desc:"environment configuration options"`
+ // environment configuration options
+ Env EnvConfig `view:"add-fields"`
- // [view: add-fields] parameter related configuration options
- Params ParamConfig `view:"add-fields" desc:"parameter related configuration options"`
+ // parameter related configuration options
+ Params ParamConfig `view:"add-fields"`
- // [view: add-fields] sim running related configuration options
- Run RunConfig `view:"add-fields" desc:"sim running related configuration options"`
+ // sim running related configuration options
+ Run RunConfig `view:"add-fields"`
- // [view: add-fields] data logging related configuration options
- Log LogConfig `view:"add-fields" desc:"data logging related configuration options"`
+ // data logging related configuration options
+ Log LogConfig `view:"add-fields"`
}
func (cfg *Config) IncludesPtr() *[]string { return &cfg.Includes }
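With the [def: ...] prose stripped from the comments, the default values now live only in the def struct tags, which the config loader applies by reflection. A hedged sketch of that mechanism -- this is not the econfig implementation, just the general reflect-based pattern it relies on, and only int fields are handled here:

package main

import (
	"fmt"
	"reflect"
	"strconv"
)

type RunConfig struct {
	NData   int `def:"4"`
	NEpochs int `def:"100"`
}

// applyDefs walks exported fields and parses any `def` tag into the field.
// A real loader would cover all field kinds, not just int.
func applyDefs(cfg any) error {
	v := reflect.ValueOf(cfg).Elem()
	t := v.Type()
	for i := 0; i < t.NumField(); i++ {
		def, ok := t.Field(i).Tag.Lookup("def")
		if !ok {
			continue
		}
		if v.Field(i).Kind() == reflect.Int {
			n, err := strconv.Atoi(def)
			if err != nil {
				return err
			}
			v.Field(i).SetInt(int64(n))
		}
	}
	return nil
}

func main() {
	var rc RunConfig
	if err := applyDefs(&rc); err != nil {
		panic(err)
	}
	fmt.Printf("%+v\n", rc) // {NData:4 NEpochs:100}
}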
diff --git a/examples/deep_music/deep_music.go b/examples/deep_music/deep_music.go
index 4c33e4c4c..944c43005 100644
--- a/examples/deep_music/deep_music.go
+++ b/examples/deep_music/deep_music.go
@@ -6,31 +6,33 @@
// in a musical sequence of notes.
package main
+//go:generate goki generate -add-types
+
import (
"fmt"
"os"
"github.com/emer/axon/axon"
- "github.com/emer/emergent/econfig"
- "github.com/emer/emergent/egui"
- "github.com/emer/emergent/elog"
- "github.com/emer/emergent/emer"
- "github.com/emer/emergent/env"
- "github.com/emer/emergent/erand"
- "github.com/emer/emergent/estats"
- "github.com/emer/emergent/etime"
- "github.com/emer/emergent/looper"
- "github.com/emer/emergent/netview"
- "github.com/emer/emergent/params"
- "github.com/emer/emergent/prjn"
- "github.com/emer/emergent/relpos"
- "github.com/emer/empi/mpi"
- "github.com/emer/etable/etable"
- _ "github.com/emer/etable/etview" // _ = include to get gui views
- "github.com/emer/etable/metric"
- "github.com/goki/gi/gi"
- "github.com/goki/gi/gimain"
- "github.com/goki/mat32"
+ "github.com/emer/emergent/v2/econfig"
+ "github.com/emer/emergent/v2/egui"
+ "github.com/emer/emergent/v2/elog"
+ "github.com/emer/emergent/v2/emer"
+ "github.com/emer/emergent/v2/env"
+ "github.com/emer/emergent/v2/erand"
+ "github.com/emer/emergent/v2/estats"
+ "github.com/emer/emergent/v2/etime"
+ "github.com/emer/emergent/v2/looper"
+ "github.com/emer/emergent/v2/netview"
+ "github.com/emer/emergent/v2/params"
+ "github.com/emer/emergent/v2/prjn"
+ "github.com/emer/emergent/v2/relpos"
+ "github.com/emer/empi/v2/mpi"
+ "goki.dev/etable/v2/etable"
+ _ "goki.dev/etable/v2/etview" // _ = include to get gui views
+ "goki.dev/etable/v2/metric"
+ "goki.dev/gi/v2/gi"
+ "goki.dev/gi/v2/gimain"
+ "goki.dev/mat32/v2"
)
func main() {
@@ -38,7 +40,7 @@ func main() {
sim.New()
sim.ConfigAll()
if sim.Config.GUI {
- gimain.Main(sim.RunGUI)
+ gimain.Run(sim.RunGUI)
} else {
sim.RunNoGUI()
}
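The new //go:generate directive above hooks this package into the standard toolchain: running go generate ./... executes goki generate -add-types in each package directory, producing the gtigen.go file added later in this diff. (gimain.Main is renamed to gimain.Run with the same callback shape.) A minimal sketch of how such a directive sits in a file, assuming only standard go generate behavior, nothing goki-specific:

// sketch.go -- illustrative only: the go:generate line is a comment
// directive, not compiled code; `go generate ./...` scans for it and
// runs the named command in this file's directory.
package main

//go:generate goki generate -add-types

func main() {}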
@@ -52,37 +54,37 @@ func main() {
type Sim struct {
// simulation configuration parameters -- set by .toml config file and / or args
- Config Config `desc:"simulation configuration parameters -- set by .toml config file and / or args"`
+ Config Config
- // [view: no-inline] the network -- click to view / edit parameters for layers, prjns, etc
- Net *axon.Network `view:"no-inline" desc:"the network -- click to view / edit parameters for layers, prjns, etc"`
+ // the network -- click to view / edit parameters for layers, prjns, etc
+ Net *axon.Network `view:"no-inline"`
- // [view: inline] all parameter management
- Params emer.NetParams `view:"inline" desc:"all parameter management"`
+ // all parameter management
+ Params emer.NetParams `view:"inline"`
- // [view: no-inline] contains looper control loops for running sim
- Loops *looper.Manager `view:"no-inline" desc:"contains looper control loops for running sim"`
+ // contains looper control loops for running sim
+ Loops *looper.Manager `view:"no-inline"`
// contains computed statistic values
- Stats estats.Stats `desc:"contains computed statistic values"`
+ Stats estats.Stats
// Contains all the logs and information about the logs.
- Logs elog.Logs `desc:"Contains all the logs and information about the logs.'"`
+ Logs elog.Logs
- // [view: no-inline] Environments
- Envs env.Envs `view:"no-inline" desc:"Environments"`
+ // Environments
+ Envs env.Envs `view:"no-inline"`
// axon timing parameters and state
- Context axon.Context `desc:"axon timing parameters and state"`
+ Context axon.Context
- // [view: inline] netview update parameters
- ViewUpdt netview.ViewUpdt `view:"inline" desc:"netview update parameters"`
+ // netview update parameters
+ ViewUpdt netview.ViewUpdt `view:"inline"`
- // [view: -] manages all the gui elements
- GUI egui.GUI `view:"-" desc:"manages all the gui elements"`
+ // manages all the gui elements
+ GUI egui.GUI `view:"-"`
- // [view: -] a list of random seeds to use for each run
- RndSeeds erand.Seeds `view:"-" desc:"a list of random seeds to use for each run"`
+ // a list of random seeds to use for each run
+ RndSeeds erand.Seeds `view:"-"`
}
// New creates new blank elements and initializes defaults
@@ -591,14 +593,14 @@ func (ss *Sim) Log(mode etime.Modes, time etime.Times) {
func (ss *Sim) ConfigNetView(nv *netview.NetView) {
nv.ViewDefaults()
- nv.Scene().Camera.Pose.Pos.Set(0, 2.1, 2.0)
- nv.Scene().Camera.LookAt(mat32.Vec3{0, 0, 0}, mat32.Vec3{0, 1, 0})
+ nv.SceneXYZ().Camera.Pose.Pos.Set(0, 2.1, 2.0)
+ nv.SceneXYZ().Camera.LookAt(mat32.V3(0, 0, 0), mat32.V3(0, 1, 0))
}
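The camera setup switches from struct literals (mat32.Vec3{0, 0, 0}) to the V3 constructor, and from Scene() to SceneXYZ(). A minimal sketch of the constructor-function pattern; the types here are illustrative stand-ins, not the actual mat32/v2 declarations:

package main

import "fmt"

// Vec3 stands in for the real mat32.Vec3; V3 mirrors the new constructor
// style, which keeps call sites positional and stable if the type evolves.
type Vec3 struct{ X, Y, Z float32 }

func V3(x, y, z float32) Vec3 { return Vec3{X: x, Y: y, Z: z} }

func main() {
	fmt.Println(V3(0, 1, 0)) // {0 1 0}
}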
-// ConfigGui configures the GoGi gui interface for this simulation,
-func (ss *Sim) ConfigGui() *gi.Window {
+// ConfigGUI configures the GoGi gui interface for this simulation.
+func (ss *Sim) ConfigGUI() {
title := "DeepAxon Music Prediction"
- ss.GUI.MakeWindow(ss, "DeepMusic", title, `This demonstrates a basic DeepAxon model on music prediction. See emergent on GitHub.`)
+ ss.GUI.MakeBody(ss, "DeepMusic", title, `This demonstrates a basic DeepAxon model on music prediction. See emergent on GitHub.`)
ss.GUI.CycleUpdateInterval = 10
nv := ss.GUI.AddNetView("NetView")
@@ -610,54 +612,57 @@ func (ss *Sim) ConfigGui() *gi.Window {
ss.GUI.AddPlots(title, &ss.Logs)
- ss.GUI.AddToolbarItem(egui.ToolbarItem{Label: "Init", Icon: "update",
- Tooltip: "Initialize everything including network weights, and start over. Also applies current params.",
- Active: egui.ActiveStopped,
- Func: func() {
- ss.Init()
- ss.GUI.UpdateWindow()
- },
- })
+ ss.GUI.Body.AddAppBar(func(tb *gi.Toolbar) {
+ ss.GUI.AddToolbarItem(tb, egui.ToolbarItem{Label: "Init", Icon: "update",
+ Tooltip: "Initialize everything including network weights, and start over. Also applies current params.",
+ Active: egui.ActiveStopped,
+ Func: func() {
+ ss.Init()
+ ss.GUI.UpdateWindow()
+ },
+ })
- ss.GUI.AddLooperCtrl(ss.Loops, []etime.Modes{etime.Train, etime.Test})
- ss.GUI.AddToolbarItem(egui.ToolbarItem{Label: "Test Init",
- Icon: "reset",
- Tooltip: "restart testing",
- Active: egui.ActiveAlways,
- Func: func() {
- ss.Loops.ResetCountersByMode(etime.Test)
- },
- })
+ ss.GUI.AddLooperCtrl(tb, ss.Loops, []etime.Modes{etime.Train, etime.Test})
+ ss.GUI.AddToolbarItem(tb, egui.ToolbarItem{Label: "Test Init",
+ Icon: "reset",
+ Tooltip: "restart testing",
+ Active: egui.ActiveAlways,
+ Func: func() {
+ ss.Loops.ResetCountersByMode(etime.Test)
+ },
+ })
- ////////////////////////////////////////////////
- ss.GUI.ToolBar.AddSeparator("log")
- ss.GUI.AddToolbarItem(egui.ToolbarItem{Label: "Reset RunLog",
- Icon: "reset",
- Tooltip: "Reset the accumulated log of all Runs, which are tagged with the ParamSet used",
- Active: egui.ActiveAlways,
- Func: func() {
- ss.Logs.ResetLog(etime.Train, etime.Run)
- ss.GUI.UpdatePlot(etime.Train, etime.Run)
- },
- })
- ////////////////////////////////////////////////
- ss.GUI.ToolBar.AddSeparator("misc")
- ss.GUI.AddToolbarItem(egui.ToolbarItem{Label: "New Seed",
- Icon: "new",
- Tooltip: "Generate a new initial random seed to get different results. By default, Init re-establishes the same initial seed every time.",
- Active: egui.ActiveAlways,
- Func: func() {
- ss.RndSeeds.NewSeeds()
- },
- })
- ss.GUI.AddToolbarItem(egui.ToolbarItem{Label: "README",
- Icon: "file-markdown",
- Tooltip: "Opens your browser on the README file that contains instructions for how to run this model.",
- Active: egui.ActiveAlways,
- Func: func() {
- gi.OpenURL("https://github.com/emer/axon/blob/master/examples/deep_music/README.md")
- },
+ ////////////////////////////////////////////////
+ gi.NewSeparator(tb)
+ ss.GUI.AddToolbarItem(tb, egui.ToolbarItem{Label: "Reset RunLog",
+ Icon: "reset",
+ Tooltip: "Reset the accumulated log of all Runs, which are tagged with the ParamSet used",
+ Active: egui.ActiveAlways,
+ Func: func() {
+ ss.Logs.ResetLog(etime.Train, etime.Run)
+ ss.GUI.UpdatePlot(etime.Train, etime.Run)
+ },
+ })
+ ////////////////////////////////////////////////
+ gi.NewSeparator(tb)
+ ss.GUI.AddToolbarItem(tb, egui.ToolbarItem{Label: "New Seed",
+ Icon: "new",
+ Tooltip: "Generate a new initial random seed to get different results. By default, Init re-establishes the same initial seed every time.",
+ Active: egui.ActiveAlways,
+ Func: func() {
+ ss.RndSeeds.NewSeeds()
+ },
+ })
+ ss.GUI.AddToolbarItem(tb, egui.ToolbarItem{Label: "README",
+ Icon: "file-markdown",
+ Tooltip: "Opens your browser on the README file that contains instructions for how to run this model.",
+ Active: egui.ActiveAlways,
+ Func: func() {
+ gi.OpenURL("https://github.com/emer/axon/blob/master/examples/deep_music/README.md")
+ },
+ })
})
+
ss.GUI.FinalizeGUI(false)
if ss.Config.Run.GPU {
ss.Net.ConfigGPUwithGUI(&ss.Context)
@@ -665,13 +670,12 @@ func (ss *Sim) ConfigGui() *gi.Window {
ss.Net.GPU.Destroy()
})
}
- return ss.GUI.Win
}
func (ss *Sim) RunGUI() {
ss.Init()
- win := ss.ConfigGui()
- win.StartEventLoop()
+ ss.ConfigGUI()
+ ss.GUI.Body.NewWindow().Run().Wait()
}
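The GUI wiring changes shape here: instead of adding items to a ss.GUI.ToolBar field up front, items are registered in a callback passed to Body.AddAppBar, and the window starts via the chained NewWindow().Run().Wait(). A hedged, self-contained sketch of that callback-registration pattern; the names are illustrative, not the gi/v2 API:

package main

import "fmt"

// Toolbar and Body stand in for the real gi/v2 types. The point: the app
// bar is built lazily, so items registered via AddAppBar appear whenever
// (and however often) the toolbar is actually constructed.
type Toolbar struct{ items []string }

func (tb *Toolbar) Add(label string) { tb.items = append(tb.items, label) }

type Body struct{ appBarFuncs []func(tb *Toolbar) }

func (b *Body) AddAppBar(fn func(tb *Toolbar)) {
	b.appBarFuncs = append(b.appBarFuncs, fn)
}

func (b *Body) buildAppBar() *Toolbar {
	tb := &Toolbar{}
	for _, fn := range b.appBarFuncs {
		fn(tb)
	}
	return tb
}

func main() {
	b := &Body{}
	b.AddAppBar(func(tb *Toolbar) { tb.Add("Init"); tb.Add("README") })
	fmt.Println(b.buildAppBar().items) // [Init README]
}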
func (ss *Sim) RunNoGUI() {
diff --git a/examples/deep_music/gtigen.go b/examples/deep_music/gtigen.go
new file mode 100644
index 000000000..cbb136520
--- /dev/null
+++ b/examples/deep_music/gtigen.go
@@ -0,0 +1,156 @@
+// Code generated by "goki generate -add-types"; DO NOT EDIT.
+
+package main
+
+import (
+ "goki.dev/gti"
+ "goki.dev/ordmap"
+)
+
+var _ = gti.AddType(&gti.Type{
+ Name: "main.EnvConfig",
+ ShortName: "main.EnvConfig",
+ IDName: "env-config",
+ Doc: "EnvConfig has config params for environment\nnote: only adding fields for key Env params that matter for both Network and Env\nother params are set via the Env map data mechanism.",
+ Directives: gti.Directives{},
+ Fields: ordmap.Make([]ordmap.KeyVal[string, *gti.Field]{
+ {"Env", >i.Field{Name: "Env", Type: "map[string]any", LocalType: "map[string]any", Doc: "env parameters -- can set any field/subfield on Env struct, using standard TOML formatting", Directives: gti.Directives{}, Tag: ""}},
+ {"UnitsPer", >i.Field{Name: "UnitsPer", Type: "int", LocalType: "int", Doc: "number of units per localist output unit", Directives: gti.Directives{}, Tag: "def:\"4\""}},
+ {"FullSong", >i.Field{Name: "FullSong", Type: "bool", LocalType: "bool", Doc: "train the full song -- else 30 notes", Directives: gti.Directives{}, Tag: ""}},
+ {"PlayTarg", >i.Field{Name: "PlayTarg", Type: "bool", LocalType: "bool", Doc: "during testing, play the target note instead of the actual network output", Directives: gti.Directives{}, Tag: ""}},
+ {"TestClamp", >i.Field{Name: "TestClamp", Type: "bool", LocalType: "bool", Doc: "drive inputs from the training sequence during testing -- otherwise use network's own output", Directives: gti.Directives{}, Tag: "def:\"true\""}},
+ }),
+ Embeds: ordmap.Make([]ordmap.KeyVal[string, *gti.Field]{}),
+ Methods: ordmap.Make([]ordmap.KeyVal[string, *gti.Method]{}),
+})
+
+var _ = gti.AddType(&gti.Type{
+ Name: "main.ParamConfig",
+ ShortName: "main.ParamConfig",
+ IDName: "param-config",
+ Doc: "ParamConfig has config parameters related to sim params",
+ Directives: gti.Directives{},
+ Fields: ordmap.Make([]ordmap.KeyVal[string, *gti.Field]{
+ {"Network", >i.Field{Name: "Network", Type: "map[string]any", LocalType: "map[string]any", Doc: "network parameters", Directives: gti.Directives{}, Tag: ""}},
+ {"Hid2", >i.Field{Name: "Hid2", Type: "bool", LocalType: "bool", Doc: "use a second hidden layer that predicts the first -- is not beneficia", Directives: gti.Directives{}, Tag: ""}},
+ {"Sheet", >i.Field{Name: "Sheet", Type: "string", LocalType: "string", Doc: "Extra Param Sheet name(s) to use (space separated if multiple) -- must be valid name as listed in compiled-in params or loaded params", Directives: gti.Directives{}, Tag: ""}},
+ {"Tag", >i.Field{Name: "Tag", Type: "string", LocalType: "string", Doc: "extra tag to add to file names and logs saved from this run", Directives: gti.Directives{}, Tag: ""}},
+ {"Note", >i.Field{Name: "Note", Type: "string", LocalType: "string", Doc: "user note -- describe the run params etc -- like a git commit message for the run", Directives: gti.Directives{}, Tag: ""}},
+ {"File", >i.Field{Name: "File", Type: "string", LocalType: "string", Doc: "Name of the JSON file to input saved parameters from.", Directives: gti.Directives{}, Tag: "nest:\"+\""}},
+ {"SaveAll", >i.Field{Name: "SaveAll", Type: "bool", LocalType: "bool", Doc: "Save a snapshot of all current param and config settings in a directory named params_ (or _good if Good is true), then quit -- useful for comparing to later changes and seeing multiple views of current params", Directives: gti.Directives{}, Tag: "nest:\"+\""}},
+ {"Good", >i.Field{Name: "Good", Type: "bool", LocalType: "bool", Doc: "for SaveAll, save to params_good for a known good params state. This can be done prior to making a new release after all tests are passing -- add results to git to provide a full diff record of all params over time.", Directives: gti.Directives{}, Tag: "nest:\"+\""}},
+ }),
+ Embeds: ordmap.Make([]ordmap.KeyVal[string, *gti.Field]{}),
+ Methods: ordmap.Make([]ordmap.KeyVal[string, *gti.Method]{}),
+})
+
+var _ = gti.AddType(&gti.Type{
+ Name: "main.RunConfig",
+ ShortName: "main.RunConfig",
+ IDName: "run-config",
+ Doc: "RunConfig has config parameters related to running the sim",
+ Directives: gti.Directives{},
+ Fields: ordmap.Make([]ordmap.KeyVal[string, *gti.Field]{
+ {"GPU", >i.Field{Name: "GPU", Type: "bool", LocalType: "bool", Doc: "use the GPU for computation -- generally faster even for small models if NData ~16", Directives: gti.Directives{}, Tag: "def:\"true\""}},
+ {"NData", >i.Field{Name: "NData", Type: "int", LocalType: "int", Doc: "number of data-parallel items to process in parallel per trial -- works (and is significantly faster) for both CPU and GPU. Results in an effective mini-batch of learning. is noisy above 4 for 30 note case.", Directives: gti.Directives{}, Tag: "def:\"4\" min:\"1\""}},
+ {"NThreads", >i.Field{Name: "NThreads", Type: "int", LocalType: "int", Doc: "number of parallel threads for CPU computation -- 0 = use default", Directives: gti.Directives{}, Tag: "def:\"0\""}},
+ {"Run", >i.Field{Name: "Run", Type: "int", LocalType: "int", Doc: "starting run number -- determines the random seed -- runs counts from there -- can do all runs in parallel by launching separate jobs with each run, runs = 1", Directives: gti.Directives{}, Tag: "def:\"0\""}},
+ {"NRuns", >i.Field{Name: "NRuns", Type: "int", LocalType: "int", Doc: "total number of runs to do when running Train", Directives: gti.Directives{}, Tag: "def:\"5\" min:\"1\""}},
+ {"NEpochs", >i.Field{Name: "NEpochs", Type: "int", LocalType: "int", Doc: "total number of epochs per run", Directives: gti.Directives{}, Tag: "def:\"100\""}},
+ {"NTrials", >i.Field{Name: "NTrials", Type: "int", LocalType: "int", Doc: "total number of trials per epoch. Should be an even multiple of NData.", Directives: gti.Directives{}, Tag: "def:\"128\""}},
+ {"PCAInterval", >i.Field{Name: "PCAInterval", Type: "int", LocalType: "int", Doc: "how frequently (in epochs) to compute PCA on hidden representations to measure variance?", Directives: gti.Directives{}, Tag: "def:\"5\""}},
+ {"TestInterval", >i.Field{Name: "TestInterval", Type: "int", LocalType: "int", Doc: "how often to run through all the test patterns, in terms of training epochs -- can use 0 or -1 for no testing", Directives: gti.Directives{}, Tag: "def:\"-1\""}},
+ }),
+ Embeds: ordmap.Make([]ordmap.KeyVal[string, *gti.Field]{}),
+ Methods: ordmap.Make([]ordmap.KeyVal[string, *gti.Method]{}),
+})
+
+var _ = gti.AddType(&gti.Type{
+ Name: "main.LogConfig",
+ ShortName: "main.LogConfig",
+ IDName: "log-config",
+ Doc: "LogConfig has config parameters related to logging data",
+ Directives: gti.Directives{},
+ Fields: ordmap.Make([]ordmap.KeyVal[string, *gti.Field]{
+ {"SaveWts", >i.Field{Name: "SaveWts", Type: "bool", LocalType: "bool", Doc: "if true, save final weights after each run", Directives: gti.Directives{}, Tag: ""}},
+ {"Epoch", >i.Field{Name: "Epoch", Type: "bool", LocalType: "bool", Doc: "if true, save train epoch log to file, as .epc.tsv typically", Directives: gti.Directives{}, Tag: "def:\"true\" nest:\"+\""}},
+ {"Run", >i.Field{Name: "Run", Type: "bool", LocalType: "bool", Doc: "if true, save run log to file, as .run.tsv typically", Directives: gti.Directives{}, Tag: "def:\"true\" nest:\"+\""}},
+ {"Trial", >i.Field{Name: "Trial", Type: "bool", LocalType: "bool", Doc: "if true, save train trial log to file, as .trl.tsv typically. May be large.", Directives: gti.Directives{}, Tag: "def:\"false\" nest:\"+\""}},
+ {"TestEpoch", >i.Field{Name: "TestEpoch", Type: "bool", LocalType: "bool", Doc: "if true, save testing epoch log to file, as .tst_epc.tsv typically. In general it is better to copy testing items over to the training epoch log and record there.", Directives: gti.Directives{}, Tag: "def:\"false\" nest:\"+\""}},
+ {"TestTrial", >i.Field{Name: "TestTrial", Type: "bool", LocalType: "bool", Doc: "if true, save testing trial log to file, as .tst_trl.tsv typically. May be large.", Directives: gti.Directives{}, Tag: "def:\"false\" nest:\"+\""}},
+ {"NetData", >i.Field{Name: "NetData", Type: "bool", LocalType: "bool", Doc: "if true, save network activation etc data from testing trials, for later viewing in netview", Directives: gti.Directives{}, Tag: ""}},
+ }),
+ Embeds: ordmap.Make([]ordmap.KeyVal[string, *gti.Field]{}),
+ Methods: ordmap.Make([]ordmap.KeyVal[string, *gti.Method]{}),
+})
+
+var _ = gti.AddType(&gti.Type{
+ Name: "main.Config",
+ ShortName: "main.Config",
+ IDName: "config",
+ Doc: "Config is a standard Sim config -- use as a starting point.",
+ Directives: gti.Directives{},
+ Fields: ordmap.Make([]ordmap.KeyVal[string, *gti.Field]{
+ {"Includes", >i.Field{Name: "Includes", Type: "[]string", LocalType: "[]string", Doc: "specify include files here, and after configuration, it contains list of include files added", Directives: gti.Directives{}, Tag: ""}},
+ {"GUI", >i.Field{Name: "GUI", Type: "bool", LocalType: "bool", Doc: "open the GUI -- does not automatically run -- if false, then runs automatically and quits", Directives: gti.Directives{}, Tag: "def:\"true\""}},
+ {"Debug", >i.Field{Name: "Debug", Type: "bool", LocalType: "bool", Doc: "log debugging information", Directives: gti.Directives{}, Tag: ""}},
+ {"Env", >i.Field{Name: "Env", Type: "github.com/emer/axon/examples/deep_music.EnvConfig", LocalType: "EnvConfig", Doc: "environment configuration options", Directives: gti.Directives{}, Tag: "view:\"add-fields\""}},
+ {"Params", >i.Field{Name: "Params", Type: "github.com/emer/axon/examples/deep_music.ParamConfig", LocalType: "ParamConfig", Doc: "parameter related configuration options", Directives: gti.Directives{}, Tag: "view:\"add-fields\""}},
+ {"Run", >i.Field{Name: "Run", Type: "github.com/emer/axon/examples/deep_music.RunConfig", LocalType: "RunConfig", Doc: "sim running related configuration options", Directives: gti.Directives{}, Tag: "view:\"add-fields\""}},
+ {"Log", >i.Field{Name: "Log", Type: "github.com/emer/axon/examples/deep_music.LogConfig", LocalType: "LogConfig", Doc: "data logging related configuration options", Directives: gti.Directives{}, Tag: "view:\"add-fields\""}},
+ }),
+ Embeds: ordmap.Make([]ordmap.KeyVal[string, *gti.Field]{}),
+ Methods: ordmap.Make([]ordmap.KeyVal[string, *gti.Method]{}),
+})
+
+var _ = gti.AddType(&gti.Type{
+ Name: "main.Sim",
+ ShortName: "main.Sim",
+ IDName: "sim",
+ Doc: "Sim encapsulates the entire simulation model, and we define all the\nfunctionality as methods on this struct. This structure keeps all relevant\nstate information organized and available without having to pass everything around\nas arguments to methods, and provides the core GUI interface (note the view tags\nfor the fields which provide hints to how things should be displayed).",
+ Directives: gti.Directives{},
+ Fields: ordmap.Make([]ordmap.KeyVal[string, *gti.Field]{
+ {"Config", >i.Field{Name: "Config", Type: "github.com/emer/axon/examples/deep_music.Config", LocalType: "Config", Doc: "simulation configuration parameters -- set by .toml config file and / or args", Directives: gti.Directives{}, Tag: ""}},
+ {"Net", >i.Field{Name: "Net", Type: "*github.com/emer/axon/axon.Network", LocalType: "*axon.Network", Doc: "the network -- click to view / edit parameters for layers, prjns, etc", Directives: gti.Directives{}, Tag: "view:\"no-inline\""}},
+ {"Params", >i.Field{Name: "Params", Type: "github.com/emer/emergent/v2/emer.NetParams", LocalType: "emer.NetParams", Doc: "all parameter management", Directives: gti.Directives{}, Tag: "view:\"inline\""}},
+ {"Loops", >i.Field{Name: "Loops", Type: "*github.com/emer/emergent/v2/looper.Manager", LocalType: "*looper.Manager", Doc: "contains looper control loops for running sim", Directives: gti.Directives{}, Tag: "view:\"no-inline\""}},
+ {"Stats", >i.Field{Name: "Stats", Type: "github.com/emer/emergent/v2/estats.Stats", LocalType: "estats.Stats", Doc: "contains computed statistic values", Directives: gti.Directives{}, Tag: ""}},
+ {"Logs", >i.Field{Name: "Logs", Type: "github.com/emer/emergent/v2/elog.Logs", LocalType: "elog.Logs", Doc: "Contains all the logs and information about the logs.'", Directives: gti.Directives{}, Tag: ""}},
+ {"Envs", >i.Field{Name: "Envs", Type: "github.com/emer/emergent/v2/env.Envs", LocalType: "env.Envs", Doc: "Environments", Directives: gti.Directives{}, Tag: "view:\"no-inline\""}},
+ {"Context", >i.Field{Name: "Context", Type: "github.com/emer/axon/axon.Context", LocalType: "axon.Context", Doc: "axon timing parameters and state", Directives: gti.Directives{}, Tag: ""}},
+ {"ViewUpdt", >i.Field{Name: "ViewUpdt", Type: "github.com/emer/emergent/v2/netview.ViewUpdt", LocalType: "netview.ViewUpdt", Doc: "netview update parameters", Directives: gti.Directives{}, Tag: "view:\"inline\""}},
+ {"GUI", >i.Field{Name: "GUI", Type: "github.com/emer/emergent/v2/egui.GUI", LocalType: "egui.GUI", Doc: "manages all the gui elements", Directives: gti.Directives{}, Tag: "view:\"-\""}},
+ {"RndSeeds", >i.Field{Name: "RndSeeds", Type: "github.com/emer/emergent/v2/erand.Seeds", LocalType: "erand.Seeds", Doc: "a list of random seeds to use for each run", Directives: gti.Directives{}, Tag: "view:\"-\""}},
+ }),
+ Embeds: ordmap.Make([]ordmap.KeyVal[string, *gti.Field]{}),
+ Methods: ordmap.Make([]ordmap.KeyVal[string, *gti.Method]{}),
+})
+
+var _ = gti.AddType(&gti.Type{
+ Name: "main.MusicEnv",
+ ShortName: "main.MusicEnv",
+ IDName: "music-env",
+ Doc: "MusicEnv reads in a midi SMF file and presents it as a sequence of notes.\nSongs with one note at a time per track are currently supported.\nRenders note to a tensor with localist note coding with duplicate units for spiking.",
+ Directives: gti.Directives{},
+ Fields: ordmap.Make([]ordmap.KeyVal[string, *gti.Field]{
+ {"Nm", >i.Field{Name: "Nm", Type: "string", LocalType: "string", Doc: "name of this environment", Directives: gti.Directives{}, Tag: ""}},
+ {"Debug", >i.Field{Name: "Debug", Type: "bool", LocalType: "bool", Doc: "emit debugging messages about the music file", Directives: gti.Directives{}, Tag: ""}},
+ {"WrapNotes", >i.Field{Name: "WrapNotes", Type: "bool", LocalType: "bool", Doc: "use only 1 octave of 12 notes for everything -- keeps it consistent", Directives: gti.Directives{}, Tag: ""}},
+ {"TicksPer", >i.Field{Name: "TicksPer", Type: "int", LocalType: "int", Doc: "number of time ticks per row in table -- note transitions that are faster than this will be lost", Directives: gti.Directives{}, Tag: "def:\"120\""}},
+ {"Track", >i.Field{Name: "Track", Type: "int", LocalType: "int", Doc: "which track to process", Directives: gti.Directives{}, Tag: ""}},
+ {"Play", >i.Field{Name: "Play", Type: "bool", LocalType: "bool", Doc: "play output as it steps", Directives: gti.Directives{}, Tag: ""}},
+ {"MaxSteps", >i.Field{Name: "MaxSteps", Type: "int", LocalType: "int", Doc: "limit song length to given number of steps, if > 0", Directives: gti.Directives{}, Tag: ""}},
+ {"DiOffset", >i.Field{Name: "DiOffset", Type: "int", LocalType: "int", Doc: "time offset for data parallel = Song.Rows / (NData+1)", Directives: gti.Directives{}, Tag: "inactive:\"+\""}},
+ {"UnitsPer", >i.Field{Name: "UnitsPer", Type: "int", LocalType: "int", Doc: "number of units per localist note value", Directives: gti.Directives{}, Tag: ""}},
+ {"NoteRange", >i.Field{Name: "NoteRange", Type: "goki.dev/etable/v2/minmax.Int", LocalType: "minmax.Int", Doc: "range of notes in given track", Directives: gti.Directives{}, Tag: ""}},
+ {"NNotes", >i.Field{Name: "NNotes", Type: "int", LocalType: "int", Doc: "number of notes", Directives: gti.Directives{}, Tag: ""}},
+ {"Song", >i.Field{Name: "Song", Type: "goki.dev/etable/v2/etable.Table", LocalType: "etable.Table", Doc: "the song encoded into 200 msec increments, with columns as tracks", Directives: gti.Directives{}, Tag: ""}},
+ {"Time", >i.Field{Name: "Time", Type: "github.com/emer/emergent/v2/env.Ctr", LocalType: "env.Ctr", Doc: "current time step", Directives: gti.Directives{}, Tag: "view:\"inline\""}},
+ {"Note", >i.Field{Name: "Note", Type: "goki.dev/etable/v2/etensor.Float32", LocalType: "etensor.Float32", Doc: "current note, rendered as a 4D tensor with shape:", Directives: gti.Directives{}, Tag: ""}},
+ {"NoteIdx", >i.Field{Name: "NoteIdx", Type: "int", LocalType: "int", Doc: "current note index", Directives: gti.Directives{}, Tag: ""}},
+ {"Player", >i.Field{Name: "Player", Type: "func(msg gitlab.com/gomidi/midi/v2.Message) error", LocalType: "func(msg midi.Message) error", Doc: "the function for playing midi", Directives: gti.Directives{}, Tag: "view:\"-\""}},
+ {"LastNotePlayed", >i.Field{Name: "LastNotePlayed", Type: "int", LocalType: "int", Doc: "for playing notes", Directives: gti.Directives{}, Tag: "view:\"-\""}},
+ }),
+ Embeds: ordmap.Make([]ordmap.KeyVal[string, *gti.Field]{}),
+ Methods: ordmap.Make([]ordmap.KeyVal[string, *gti.Method]{}),
+})
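This generated file carries the information that used to live in desc tags: each struct registers its field docs and remaining tags with the gti type registry at package init, so reflective UIs can still look them up. A minimal registry sketch under that assumption -- not the actual gti/ordmap API:

package main

import "fmt"

type Field struct{ Name, Doc, Tag string }

type Type struct {
	Name   string
	Fields []Field
}

// registry mimics what gti.AddType maintains: a lookup from type name to
// field metadata, populated by package-level var initializers at init time.
var registry = map[string]*Type{}

func AddType(t *Type) *Type { registry[t.Name] = t; return t }

var _ = AddType(&Type{
	Name: "main.EnvConfig",
	Fields: []Field{
		{Name: "UnitsPer", Doc: "number of units per localist output unit", Tag: `def:"4"`},
	},
})

func main() {
	f := registry["main.EnvConfig"].Fields[0]
	fmt.Println(f.Name, f.Tag) // UnitsPer def:"4"
}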
diff --git a/examples/deep_music/music_env.go b/examples/deep_music/music_env.go
index eacdf3d2d..3f060adb3 100644
--- a/examples/deep_music/music_env.go
+++ b/examples/deep_music/music_env.go
@@ -10,14 +10,14 @@ import (
"os"
"time"
- "github.com/emer/emergent/env"
- "github.com/emer/etable/etable"
- "github.com/emer/etable/etensor"
- "github.com/emer/etable/minmax"
+ "github.com/emer/emergent/v2/env"
"github.com/goki/ki/ints"
"gitlab.com/gomidi/midi/v2"
"gitlab.com/gomidi/midi/v2/gm"
"gitlab.com/gomidi/midi/v2/smf"
+ "goki.dev/etable/v2/etable"
+ "goki.dev/etable/v2/etensor"
+ "goki.dev/etable/v2/minmax"
)
// MusicEnv reads in a midi SMF file and presents it as a sequence of notes.
@@ -26,55 +26,55 @@ import (
type MusicEnv struct {
// name of this environment
- Nm string `desc:"name of this environment"`
+ Nm string
// emit debugging messages about the music file
- Debug bool `desc:"emit debugging messages about the music file"`
+ Debug bool
// use only 1 octave of 12 notes for everything -- keeps it consistent
- WrapNotes bool `desc:"use only 1 octave of 12 notes for everything -- keeps it consistent"`
+ WrapNotes bool
- // [def: 120] number of time ticks per row in table -- note transitions that are faster than this will be lost
- TicksPer int `def:"120" desc:"number of time ticks per row in table -- note transitions that are faster than this will be lost"`
+ // number of time ticks per row in table -- note transitions that are faster than this will be lost
+ TicksPer int `def:"120"`
// which track to process
- Track int `desc:"which track to process"`
+ Track int
// play output as it steps
- Play bool `desc:"play output as it steps"`
+ Play bool
// limit song length to given number of steps, if > 0
- MaxSteps int `desc:"limit song length to given number of steps, if > 0"`
+ MaxSteps int
// time offset for data parallel = Song.Rows / (NData+1)
- DiOffset int `inactive:"+" desc:"time offset for data parallel = Song.Rows / (NData+1)"`
+ DiOffset int `inactive:"+"`
// number of units per localist note value
- UnitsPer int `desc:"number of units per localist note value"`
+ UnitsPer int
// range of notes in given track
- NoteRange minmax.Int `desc:"range of notes in given track"`
+ NoteRange minmax.Int
// number of notes
- NNotes int `desc:"number of notes"`
+ NNotes int
// the song encoded into 200 msec increments, with columns as tracks
- Song etable.Table `desc:"the song encoded into 200 msec increments, with columns as tracks"`
+ Song etable.Table
- // [view: inline] current time step
- Time env.Ctr `view:"inline" desc:"current time step"`
+ // current time step
+ Time env.Ctr `view:"inline"`
- // current note, rendered as a 4D tensor with shape: [1, NNotes, UnitsPer, 1]
- Note etensor.Float32 `desc:"current note, rendered as a 4D tensor with shape: [1, NNotes, UnitsPer, 1]"`
+ // current note, rendered as a 4D tensor with shape: [1, NNotes, UnitsPer, 1]
+ Note etensor.Float32
// current note index
- NoteIdx int `desc:"current note index"`
+ NoteIdx int
- // [view: -] the function for playing midi
- Player func(msg midi.Message) error `view:"-" desc:"the function for playing midi"`
+ // the function for playing midi
+ Player func(msg midi.Message) error `view:"-"`
- // [view: -] for playing notes
- LastNotePlayed int `view:"-" desc:"for playing notes"`
+ // for playing notes
+ LastNotePlayed int `view:"-"`
}
func (ev *MusicEnv) Name() string { return ev.Nm }
diff --git a/examples/deep_music/params.go b/examples/deep_music/params.go
index 1d50f1500..4af393112 100644
--- a/examples/deep_music/params.go
+++ b/examples/deep_music/params.go
@@ -5,8 +5,8 @@
package main
import (
- "github.com/emer/emergent/netparams"
- "github.com/emer/emergent/params"
+ "github.com/emer/emergent/v2/netparams"
+ "github.com/emer/emergent/v2/params"
)
// ParamSets is the default set of parameters -- Base is always applied, and others can be optionally
diff --git a/examples/deep_music/params_good/params_all.txt b/examples/deep_music/params_good/params_all.txt
index 97aebb529..4acf61eb5 100644
--- a/examples/deep_music/params_good/params_all.txt
+++ b/examples/deep_music/params_good/params_all.txt
@@ -11,13 +11,13 @@ Act: {
Clamp: { IsInput: true IsTarget: false Ge: 1.5 Add: false ErrThr: 0.5 }
Noise: { On: false GeHz: 100 Ge: 0.001 GiHz: 200 Gi: 0.001 }
VmRange: { Min: 0.1 Max: 1 }
- Mahp: { Gbar: 0.04 Voff: -30 Vslope: 9 TauMax: 1000 Tadj: 3.2093637 DtMax: 0.001 }
+ Mahp: { Gbar: 0.04 Voff: -30 Vslope: 9 TauMax: 1000 Tadj: 3.209364 DtMax: 0.001 }
Sahp: { Gbar: 0.1 CaTau: 5 Off: 0.8 Slope: 0.02 TauMax: 1 CaDt: 0.2 DtMax: 1 }
KNa: { On: true TrialSlow: false Med: { On: true Rise: 0.02 Max: 0.2 Tau: 200 Dt: 0.005 } Slow: { On: true Rise: 0.001 Max: 0.2 Tau: 1000 Dt: 0.001
} }
NMDA: { Gbar: 0.006 Tau: 100 ITau: 1 MgC: 1.4 Voff: 0 }
MaintNMDA: { Gbar: 0.007 Tau: 200 ITau: 1 MgC: 1.4 Voff: 0 }
- GabaB: { Gbar: 0.015 RiseTau: 45 DecayTau: 50 Gbase: 0.2 GiSpike: 10 MaxTime: 47.41225 TauFact: 2.5811758 RiseDt: 0.022222223 DecayDt: 0.02 }
+ GabaB: { Gbar: 0.015 RiseTau: 45 DecayTau: 50 Gbase: 0.2 GiSpike: 10 MaxTime: 47.41225 TauFact: 2.5811756 RiseDt: 0.022222223 DecayDt: 0.02 }
VGCC: { Gbar: 0.02 Ca: 25 }
AK: { Gbar: 0.1 Hf: 0.076 Mf: 0.075 Voff: 2 Vmax: -37 }
SKCa: { Gbar: 0 C50: 0.5 ActTau: 15 DeTau: 30 KCaR: 0.8 CaRDecayTau: 150 CaInThr: 0.01 CaInTau: 50 }
@@ -37,7 +37,7 @@ Learn: {
TrgAvgAct: { On: true GiBaseInit: 0 ErrLRate: 0.02 SynScaleRate: 0.005 SubMean: 1 Permute: true Pool: true TrgRange: { Min: 0.5 Max: 2
} }
RLRate: { On: true SigmoidMin: 0.05 Diff: true SpkThr: 0.1 DiffThr: 0.02 Min: 0.001 }
- NeuroMod: { DAMod: 0 Valence: 0 DAModGain: 0.5 DALRateSign: false DALRateMod: 0 AChLRateMod: 0 AChDisInhib: 0 BurstGain: 1 DipGain: 1 }
+ NeuroMod: { DAMod: NoDAMod Valence: Positive DAModGain: 0.5 DALRateSign: false DALRateMod: 0 AChLRateMod: 0 AChDisInhib: 0 BurstGain: 1 DipGain: 1 }
}
/////////////////////////////////////////////////
Layer: InputP
@@ -52,13 +52,13 @@ Act: {
Clamp: { IsInput: false IsTarget: true Ge: 0.8 Add: false ErrThr: 0.5 }
Noise: { On: false GeHz: 100 Ge: 0.001 GiHz: 200 Gi: 0.001 }
VmRange: { Min: 0.1 Max: 1 }
- Mahp: { Gbar: 0.04 Voff: -30 Vslope: 9 TauMax: 1000 Tadj: 3.2093637 DtMax: 0.001 }
+ Mahp: { Gbar: 0.04 Voff: -30 Vslope: 9 TauMax: 1000 Tadj: 3.209364 DtMax: 0.001 }
Sahp: { Gbar: 0.1 CaTau: 5 Off: 0.8 Slope: 0.02 TauMax: 1 CaDt: 0.2 DtMax: 1 }
KNa: { On: true TrialSlow: false Med: { On: true Rise: 0.02 Max: 0.2 Tau: 200 Dt: 0.005 } Slow: { On: true Rise: 0.001 Max: 0.2 Tau: 1000 Dt: 0.001
} }
NMDA: { Gbar: 0.006 Tau: 100 ITau: 1 MgC: 1.4 Voff: 0 }
MaintNMDA: { Gbar: 0.007 Tau: 200 ITau: 1 MgC: 1.4 Voff: 0 }
- GabaB: { Gbar: 0.015 RiseTau: 45 DecayTau: 50 Gbase: 0.2 GiSpike: 10 MaxTime: 47.41225 TauFact: 2.5811758 RiseDt: 0.022222223 DecayDt: 0.02 }
+ GabaB: { Gbar: 0.015 RiseTau: 45 DecayTau: 50 Gbase: 0.2 GiSpike: 10 MaxTime: 47.41225 TauFact: 2.5811756 RiseDt: 0.022222223 DecayDt: 0.02 }
VGCC: { Gbar: 0.02 Ca: 25 }
AK: { Gbar: 0.1 Hf: 0.076 Mf: 0.075 Voff: 2 Vmax: -37 }
SKCa: { Gbar: 0 C50: 0.5 ActTau: 15 DeTau: 30 KCaR: 0.8 CaRDecayTau: 150 CaInThr: 0.01 CaInTau: 50 }
@@ -78,7 +78,7 @@ Learn: {
TrgAvgAct: { On: true GiBaseInit: 0 ErrLRate: 0.02 SynScaleRate: 0.005 SubMean: 1 Permute: true Pool: true TrgRange: { Min: 0.5 Max: 2
} }
RLRate: { On: true SigmoidMin: 1 Diff: true SpkThr: 0.1 DiffThr: 0.02 Min: 0.001 }
- NeuroMod: { DAMod: 0 Valence: 0 DAModGain: 0.5 DALRateSign: false DALRateMod: 0 AChLRateMod: 0 AChDisInhib: 0 BurstGain: 1 DipGain: 1 }
+ NeuroMod: { DAMod: NoDAMod Valence: Positive DAModGain: 0.5 DALRateSign: false DALRateMod: 0 AChLRateMod: 0 AChDisInhib: 0 BurstGain: 1 DipGain: 1 }
}
Pulv: {
DriveScale: 0.1 FullDriveAct: 0.6 DriveLayIdx: 0
@@ -116,13 +116,13 @@ Act: {
Clamp: { IsInput: false IsTarget: false Ge: 0.8 Add: false ErrThr: 0.5 }
Noise: { On: false GeHz: 100 Ge: 0.001 GiHz: 200 Gi: 0.001 }
VmRange: { Min: 0.1 Max: 1 }
- Mahp: { Gbar: 0.04 Voff: -30 Vslope: 9 TauMax: 1000 Tadj: 3.2093637 DtMax: 0.001 }
+ Mahp: { Gbar: 0.04 Voff: -30 Vslope: 9 TauMax: 1000 Tadj: 3.209364 DtMax: 0.001 }
Sahp: { Gbar: 0.1 CaTau: 5 Off: 0.8 Slope: 0.02 TauMax: 1 CaDt: 0.2 DtMax: 1 }
KNa: { On: true TrialSlow: false Med: { On: true Rise: 0.02 Max: 0.2 Tau: 200 Dt: 0.005 } Slow: { On: true Rise: 0.001 Max: 0.2 Tau: 1000 Dt: 0.001
} }
NMDA: { Gbar: 0.006 Tau: 100 ITau: 1 MgC: 1.4 Voff: 0 }
MaintNMDA: { Gbar: 0.007 Tau: 200 ITau: 1 MgC: 1.4 Voff: 0 }
- GabaB: { Gbar: 0.015 RiseTau: 45 DecayTau: 50 Gbase: 0.2 GiSpike: 10 MaxTime: 47.41225 TauFact: 2.5811758 RiseDt: 0.022222223 DecayDt: 0.02 }
+ GabaB: { Gbar: 0.015 RiseTau: 45 DecayTau: 50 Gbase: 0.2 GiSpike: 10 MaxTime: 47.41225 TauFact: 2.5811756 RiseDt: 0.022222223 DecayDt: 0.02 }
VGCC: { Gbar: 0.02 Ca: 25 }
AK: { Gbar: 0.1 Hf: 0.076 Mf: 0.075 Voff: 2 Vmax: -37 }
SKCa: { Gbar: 0 C50: 0.5 ActTau: 15 DeTau: 30 KCaR: 0.8 CaRDecayTau: 150 CaInThr: 0.01 CaInTau: 50 }
@@ -142,7 +142,7 @@ Learn: {
TrgAvgAct: { On: true GiBaseInit: 0 ErrLRate: 0.02 SynScaleRate: 0.005 SubMean: 1 Permute: true Pool: true TrgRange: { Min: 0.5 Max: 2
} }
RLRate: { On: true SigmoidMin: 0.05 Diff: true SpkThr: 0.1 DiffThr: 0.02 Min: 0.001 }
- NeuroMod: { DAMod: 0 Valence: 0 DAModGain: 0.5 DALRateSign: false DALRateMod: 0 AChLRateMod: 0 AChDisInhib: 0 BurstGain: 1 DipGain: 1 }
+ NeuroMod: { DAMod: NoDAMod Valence: Positive DAModGain: 0.5 DALRateSign: false DALRateMod: 0 AChLRateMod: 0 AChDisInhib: 0 BurstGain: 1 DipGain: 1 }
}
Burst: {
ThrRel: 0.1 ThrAbs: 0.1
@@ -200,13 +200,13 @@ Act: {
Clamp: { IsInput: false IsTarget: false Ge: 0.8 Add: false ErrThr: 0.5 }
Noise: { On: false GeHz: 100 Ge: 0.005 GiHz: 200 Gi: 0.005 }
VmRange: { Min: 0.1 Max: 1 }
- Mahp: { Gbar: 0.04 Voff: -30 Vslope: 9 TauMax: 1000 Tadj: 3.2093637 DtMax: 0.001 }
+ Mahp: { Gbar: 0.04 Voff: -30 Vslope: 9 TauMax: 1000 Tadj: 3.209364 DtMax: 0.001 }
Sahp: { Gbar: 0.1 CaTau: 5 Off: 0.8 Slope: 0.02 TauMax: 1 CaDt: 0.2 DtMax: 1 }
KNa: { On: true TrialSlow: false Med: { On: true Rise: 0.02 Max: 0.2 Tau: 200 Dt: 0.005 } Slow: { On: true Rise: 0.001 Max: 0.2 Tau: 1000 Dt: 0.001
} }
NMDA: { Gbar: 0.007 Tau: 300 ITau: 1 MgC: 1.4 Voff: 0 }
MaintNMDA: { Gbar: 0.007 Tau: 300 ITau: 1 MgC: 1.4 Voff: 0 }
- GabaB: { Gbar: 0.015 RiseTau: 45 DecayTau: 50 Gbase: 0.2 GiSpike: 10 MaxTime: 47.41225 TauFact: 2.5811758 RiseDt: 0.022222223 DecayDt: 0.02 }
+ GabaB: { Gbar: 0.015 RiseTau: 45 DecayTau: 50 Gbase: 0.2 GiSpike: 10 MaxTime: 47.41225 TauFact: 2.5811756 RiseDt: 0.022222223 DecayDt: 0.02 }
VGCC: { Gbar: 0.02 Ca: 25 }
AK: { Gbar: 0.1 Hf: 0.076 Mf: 0.075 Voff: 2 Vmax: -37 }
SKCa: { Gbar: 0 C50: 0.5 ActTau: 15 DeTau: 30 KCaR: 0.8 CaRDecayTau: 150 CaInThr: 0.01 CaInTau: 50 }
@@ -226,7 +226,7 @@ Learn: {
TrgAvgAct: { On: true GiBaseInit: 0 ErrLRate: 0.02 SynScaleRate: 0.005 SubMean: 1 Permute: true Pool: true TrgRange: { Min: 0.5 Max: 2
} }
RLRate: { On: true SigmoidMin: 0.05 Diff: true SpkThr: 0.1 DiffThr: 0.02 Min: 0.001 }
- NeuroMod: { DAMod: 0 Valence: 0 DAModGain: 0.5 DALRateSign: false DALRateMod: 0 AChLRateMod: 0 AChDisInhib: 0 BurstGain: 1 DipGain: 1 }
+ NeuroMod: { DAMod: NoDAMod Valence: Positive DAModGain: 0.5 DALRateSign: false DALRateMod: 0 AChLRateMod: 0 AChDisInhib: 0 BurstGain: 1 DipGain: 1 }
}
CT: {
GeGain: 1 DecayTau: 50
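The DAMod and Valence fields in these dumps change from raw integers to names (0 -> NoDAMod, 0 -> Positive) because the enum types now carry generated String methods (see the Enumgen options in .goki/config.toml). A hedged sketch of what such a generated stringer does; the NoDAMod constant matches the printed output above, the rest is illustrative:

package main

import "fmt"

type DAModTypes int32

const (
	NoDAMod DAModTypes = iota // no dopamine modulation, as printed above
	D1Mod                     // further values are illustrative only
)

// String is the kind of method enumgen emits, so %v prints names, not ints.
func (d DAModTypes) String() string {
	switch d {
	case NoDAMod:
		return "NoDAMod"
	case D1Mod:
		return "D1Mod"
	}
	return fmt.Sprintf("DAModTypes(%d)", int32(d))
}

func main() {
	fmt.Printf("DAMod: %v\n", NoDAMod) // DAMod: NoDAMod
}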
diff --git a/examples/deep_music/params_good/params_layers.txt b/examples/deep_music/params_good/params_layers.txt
index 369ede77b..8f424c77b 100644
--- a/examples/deep_music/params_good/params_layers.txt
+++ b/examples/deep_music/params_good/params_layers.txt
@@ -1,10 +1,10 @@
Input Nominal: 0.05 Params: 30Notes:.InLay: 0.05 | .InLay: 0.025 | Layer: 0.1
Layer.Gi: 0.90 Params: Layer: 0.9
- InputP Nominal: 0.05 Params: 30Notes:.InLay: 0.05 | .InLay: 0.025 | Base:Layer: 0.1
- Layer.Gi: 1.00 Params: .PulvinarLayer: 1.0 | Layer: 0.9
+ InputP Nominal: 0.05 Params: 30Notes:.InLay: 0.05 | .InLay: 0.025 | Layer: 0.1
+ Layer.Gi: 1.00 Params: .PulvinarLayer: 1.0 | Base:Layer: 0.9
- Hidden Nominal: 0.10 Params: .SuperLayer: 0.1 | Layer: 0.1
+ Hidden Nominal: 0.10 Params: .SuperLayer: 0.1 | Base:Layer: 0.1
Layer.Gi: 0.90 Params: Layer: 0.9
HiddenCT Nominal: 0.12 Params: .CTLayer: 0.12 | Layer: 0.1
diff --git a/examples/hip/config.go b/examples/hip/config.go
index 4b95a17eb..6cd0ab55d 100644
--- a/examples/hip/config.go
+++ b/examples/hip/config.go
@@ -10,97 +10,97 @@ import "github.com/emer/axon/axon"
type ParamConfig struct {
// network parameters
- Network map[string]any `desc:"network parameters"`
+ Network map[string]any
// Extra Param Sheet name(s) to use (space separated if multiple) -- must be valid name as listed in compiled-in params or loaded params
- Sheet string `desc:"Extra Param Sheet name(s) to use (space separated if multiple) -- must be valid name as listed in compiled-in params or loaded params"`
+ Sheet string
// extra tag to add to file names and logs saved from this run
- Tag string `desc:"extra tag to add to file names and logs saved from this run"`
+ Tag string
// user note -- describe the run params etc -- like a git commit message for the run
- Note string `desc:"user note -- describe the run params etc -- like a git commit message for the run"`
+ Note string
// Name of the JSON file to input saved parameters from.
- File string `nest:"+" desc:"Name of the JSON file to input saved parameters from."`
+ File string `nest:"+"`
// Save a snapshot of all current param and config settings in a directory named params_ (or _good if Good is true), then quit -- useful for comparing to later changes and seeing multiple views of current params
- SaveAll bool `nest:"+" desc:"Save a snapshot of all current param and config settings in a directory named params_ (or _good if Good is true), then quit -- useful for comparing to later changes and seeing multiple views of current params"`
+ SaveAll bool `nest:"+"`
// for SaveAll, save to params_good for a known good params state. This can be done prior to making a new release after all tests are passing -- add results to git to provide a full diff record of all params over time.
- Good bool `nest:"+" desc:"for SaveAll, save to params_good for a known good params state. This can be done prior to making a new release after all tests are passing -- add results to git to provide a full diff record of all params over time."`
+ Good bool `nest:"+"`
}
// RunConfig has config parameters related to running the sim
type RunConfig struct {
- // [def: 0.9] mem % correct level (proportion) above which training on current list stops (switch from AB to AC or stop on AC)
- StopMem float32 `def:"0.9" desc:"mem % correct level (proportion) above which training on current list stops (switch from AB to AC or stop on AC)"`
+ // mem % correct level (proportion) above which training on current list stops (switch from AB to AC or stop on AC)
+ StopMem float32 `def:"0.9"`
- // [def: true] use the GPU for computation -- generally faster even for small models if NData ~16
- GPU bool `def:"true" desc:"use the GPU for computation -- generally faster even for small models if NData ~16"`
+ // use the GPU for computation -- generally faster even for small models if NData ~16
+ GPU bool `def:"true"`
- // [def: 0] number of parallel threads for CPU computation -- 0 = use default
- NThreads int `def:"0" desc:"number of parallel threads for CPU computation -- 0 = use default"`
+ // number of parallel threads for CPU computation -- 0 = use default
+ NThreads int `def:"0"`
- // [def: 0] starting run number -- determines the random seed -- runs counts from there -- can do all runs in parallel by launching separate jobs with each run, runs = 1
- Run int `def:"0" desc:"starting run number -- determines the random seed -- runs counts from there -- can do all runs in parallel by launching separate jobs with each run, runs = 1"`
+ // starting run number -- determines the random seed -- runs counts from there -- can do all runs in parallel by launching separate jobs with each run, runs = 1
+ Run int `def:"0"`
- // [def: 5] [min: 1] total number of runs to do when running Train
- Runs int `def:"5" min:"1" desc:"total number of runs to do when running Train"`
+ // total number of runs to do when running Train
+ Runs int `def:"5" min:"1"`
- // [def: 100] total number of epochs per run
- Epochs int `def:"100" desc:"total number of epochs per run"`
+ // total number of epochs per run
+ Epochs int `def:"100"`
- // [def: 20] total number of trials per epoch. Should be an even multiple of NData.
- NTrials int `def:"20" desc:"total number of trials per epoch. Should be an even multiple of NData."`
+ // total number of trials per epoch. Should be an even multiple of NData.
+ NTrials int `def:"20"`
- // [def: 10] [min: 1] number of data-parallel items to process in parallel per trial -- works (and is significantly faster) for both CPU and GPU. Results in an effective mini-batch of learning.
- NData int `def:"10" min:"1" desc:"number of data-parallel items to process in parallel per trial -- works (and is significantly faster) for both CPU and GPU. Results in an effective mini-batch of learning."`
+ // number of data-parallel items to process in parallel per trial -- works (and is significantly faster) for both CPU and GPU. Results in an effective mini-batch of learning.
+ NData int `def:"10" min:"1"`
- // [def: 1] how often to run through all the test patterns, in terms of training epochs -- can use 0 or -1 for no testing
- TestInterval int `def:"1" desc:"how often to run through all the test patterns, in terms of training epochs -- can use 0 or -1 for no testing"`
+ // how often to run through all the test patterns, in terms of training epochs -- can use 0 or -1 for no testing
+ TestInterval int `def:"1"`
}
// LogConfig has config parameters related to logging data
type LogConfig struct {
// if true, save final weights after each run
- SaveWts bool `desc:"if true, save final weights after each run"`
+ SaveWts bool
- // [def: true] if true, save train epoch log to file, as .epc.tsv typically
- Epoch bool `def:"true" nest:"+" desc:"if true, save train epoch log to file, as .epc.tsv typically"`
+ // if true, save train epoch log to file, as .epc.tsv typically
+ Epoch bool `def:"true" nest:"+"`
- // [def: true] if true, save run log to file, as .run.tsv typically
- Run bool `def:"true" nest:"+" desc:"if true, save run log to file, as .run.tsv typically"`
+ // if true, save run log to file, as .run.tsv typically
+ Run bool `def:"true" nest:"+"`
- // [def: false] if true, save train trial log to file, as .trl.tsv typically. May be large.
- Trial bool `def:"false" nest:"+" desc:"if true, save train trial log to file, as .trl.tsv typically. May be large."`
+ // if true, save train trial log to file, as .trl.tsv typically. May be large.
+ Trial bool `def:"false" nest:"+"`
- // [def: false] if true, save testing epoch log to file, as .tst_epc.tsv typically. In general it is better to copy testing items over to the training epoch log and record there.
- TestEpoch bool `def:"false" nest:"+" desc:"if true, save testing epoch log to file, as .tst_epc.tsv typically. In general it is better to copy testing items over to the training epoch log and record there."`
+ // if true, save testing epoch log to file, as .tst_epc.tsv typically. In general it is better to copy testing items over to the training epoch log and record there.
+ TestEpoch bool `def:"false" nest:"+"`
- // [def: false] if true, save testing trial log to file, as .tst_trl.tsv typically. May be large.
- TestTrial bool `def:"false" nest:"+" desc:"if true, save testing trial log to file, as .tst_trl.tsv typically. May be large."`
+ // if true, save testing trial log to file, as .tst_trl.tsv typically. May be large.
+ TestTrial bool `def:"false" nest:"+"`
// if true, save network activation etc data from testing trials, for later viewing in netview
- NetData bool `desc:"if true, save network activation etc data from testing trials, for later viewing in netview"`
+ NetData bool
}
// PatConfig has the pattern parameters
type PatConfig struct {
// minimum difference between item random patterns, as a proportion (0-1) of total active
- MinDiffPct float32 `desc:"minimum difference between item random patterns, as a proportion (0-1) of total active"`
+ MinDiffPct float32
// use drifting context representations -- otherwise does bit flips from prototype
- DriftCtxt bool `desc:"use drifting context representations -- otherwise does bit flips from prototype"`
+ DriftCtxt bool
// proportion (0-1) of active bits to flip for each context pattern, relative to a prototype, for non-drifting
- CtxtFlipPct float32 `desc:"proportion (0-1) of active bits to flip for each context pattern, relative to a prototype, for non-drifting"`
+ CtxtFlipPct float32
// percentage of active bits that drift, per step, for drifting context
- DriftPct float32 `desc:"percentage of active bits that drift, per step, for drifting context"`
+ DriftPct float32
}
func (pp *PatConfig) Defaults() {
@@ -111,14 +111,14 @@ func (pp *PatConfig) Defaults() {
type ModConfig struct {
// percent connectivity from Input to EC2
- InToEc2PCon float32 `desc:"percent connectivity from Input to EC2"`
+ InToEc2PCon float32
// percent activation in EC pool, used in patgen for input generation
- ECPctAct float32 `desc:"percent activation in EC pool, used in patgen for input generation"`
+ ECPctAct float32
// memory threshold
- MemThr float64 `desc:"memory threshold"`
+ MemThr float64
}
func (mod *ModConfig) Defaults() {
@@ -144,31 +144,31 @@ func (mod *ModConfig) Defaults() {
type Config struct {
// specify include files here, and after configuration, it contains list of include files added
- Includes []string `desc:"specify include files here, and after configuration, it contains list of include files added"`
+ Includes []string
- // [def: true] open the GUI -- does not automatically run -- if false, then runs automatically and quits
- GUI bool `def:"true" desc:"open the GUI -- does not automatically run -- if false, then runs automatically and quits"`
+ // open the GUI -- does not automatically run -- if false, then runs automatically and quits
+ GUI bool `def:"true"`
// log debugging information
- Debug bool `desc:"log debugging information"`
+ Debug bool
- // [view: inline] misc model parameters
- Mod ModConfig `view:"inline" desc:"misc model parameters"`
+ // misc model parameters
+ Mod ModConfig `view:"inline"`
// Hippocampus sizing parameters
- Hip axon.HipConfig `desc:"Hippocampus sizing parameters"`
+ Hip axon.HipConfig
// parameters for the input patterns
- Pat PatConfig `desc:"parameters for the input patterns"`
+ Pat PatConfig
- // [view: add-fields] parameter related configuration options
- Params ParamConfig `view:"add-fields" desc:"parameter related configuration options"`
+ // parameter related configuration options
+ Params ParamConfig `view:"add-fields"`
- // [view: add-fields] sim running related configuration options
- Run RunConfig `view:"add-fields" desc:"sim running related configuration options"`
+ // sim running related configuration options
+ Run RunConfig `view:"add-fields"`
- // [view: add-fields] data logging related configuration options
- Log LogConfig `view:"add-fields" desc:"data logging related configuration options"`
+ // data logging related configuration options
+ Log LogConfig `view:"add-fields"`
}
func (cfg *Config) Defaults() {
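
Throughout this file the change is mechanical: each field's desc:"..." struct tag is deleted and its text becomes the preceding doc comment, while the machine-read tags (def:, min:, view:, nest:) are kept, so generators and godoc share one source of truth instead of duplicating it in a tag string. A minimal sketch of the resulting convention, using an illustrative DemoConfig rather than the sim's real Config:

package main

import "fmt"

// DemoConfig is illustrative only, not the sim's real Config: the doc
// string lives in an ordinary Go comment that tools can read from the
// AST, and only machine-read tags stay in the tag string.
type DemoConfig struct {

	// total number of epochs per run
	Epochs int `def:"100"`

	// total number of runs to do when running Train
	Runs int `def:"5" min:"1"`
}

func main() {
	cfg := DemoConfig{Epochs: 100, Runs: 5}
	fmt.Printf("%+v\n", cfg)
}
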
diff --git a/examples/hip/def_params.go b/examples/hip/def_params.go
index 623bbc335..887699005 100644
--- a/examples/hip/def_params.go
+++ b/examples/hip/def_params.go
@@ -5,8 +5,8 @@
package main
import (
- "github.com/emer/emergent/netparams"
- "github.com/emer/emergent/params"
+ "github.com/emer/emergent/v2/netparams"
+ "github.com/emer/emergent/v2/params"
)
// ParamSets is the default set of parameters -- Base is always applied, and others can be optionally
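
This one-line import change recurs across the diff: Go's semantic import versioning requires a /vN suffix on the module path once a module is tagged v2.0.0 or later, and several packages moved to the goki.dev vanity domain in the same migration. A minimal sketch of the new path shape, assuming go.mod lists the v2 modules (blank imports, just to show the paths):

package main

// Blank imports only: for module major versions >= 2, every import of
// the module must carry the /vN suffix; some goki packages also moved
// from github.com/goki to the goki.dev vanity domain.
import (
	_ "github.com/emer/emergent/v2/params"
	_ "goki.dev/etable/v2/etable"
)

func main() {}
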
diff --git a/examples/hip/gtigen.go b/examples/hip/gtigen.go
new file mode 100644
index 000000000..c427f50af
--- /dev/null
+++ b/examples/hip/gtigen.go
@@ -0,0 +1,152 @@
+// Code generated by "goki generate -add-types"; DO NOT EDIT.
+
+package main
+
+import (
+ "goki.dev/gti"
+ "goki.dev/ordmap"
+)
+
+var _ = gti.AddType(&gti.Type{
+ Name: "main.ParamConfig",
+ ShortName: "main.ParamConfig",
+ IDName: "param-config",
+ Doc: "ParamConfig has config parameters related to sim params",
+ Directives: gti.Directives{},
+ Fields: ordmap.Make([]ordmap.KeyVal[string, *gti.Field]{
+ {"Network", >i.Field{Name: "Network", Type: "map[string]any", LocalType: "map[string]any", Doc: "network parameters", Directives: gti.Directives{}, Tag: ""}},
+ {"Sheet", >i.Field{Name: "Sheet", Type: "string", LocalType: "string", Doc: "Extra Param Sheet name(s) to use (space separated if multiple) -- must be valid name as listed in compiled-in params or loaded params", Directives: gti.Directives{}, Tag: ""}},
+ {"Tag", >i.Field{Name: "Tag", Type: "string", LocalType: "string", Doc: "extra tag to add to file names and logs saved from this run", Directives: gti.Directives{}, Tag: ""}},
+ {"Note", >i.Field{Name: "Note", Type: "string", LocalType: "string", Doc: "user note -- describe the run params etc -- like a git commit message for the run", Directives: gti.Directives{}, Tag: ""}},
+ {"File", >i.Field{Name: "File", Type: "string", LocalType: "string", Doc: "Name of the JSON file to input saved parameters from.", Directives: gti.Directives{}, Tag: "nest:\"+\""}},
+ {"SaveAll", >i.Field{Name: "SaveAll", Type: "bool", LocalType: "bool", Doc: "Save a snapshot of all current param and config settings in a directory named params_ (or _good if Good is true), then quit -- useful for comparing to later changes and seeing multiple views of current params", Directives: gti.Directives{}, Tag: "nest:\"+\""}},
+ {"Good", >i.Field{Name: "Good", Type: "bool", LocalType: "bool", Doc: "for SaveAll, save to params_good for a known good params state. This can be done prior to making a new release after all tests are passing -- add results to git to provide a full diff record of all params over time.", Directives: gti.Directives{}, Tag: "nest:\"+\""}},
+ }),
+ Embeds: ordmap.Make([]ordmap.KeyVal[string, *gti.Field]{}),
+ Methods: ordmap.Make([]ordmap.KeyVal[string, *gti.Method]{}),
+})
+
+var _ = gti.AddType(&gti.Type{
+ Name: "main.RunConfig",
+ ShortName: "main.RunConfig",
+ IDName: "run-config",
+ Doc: "RunConfig has config parameters related to running the sim",
+ Directives: gti.Directives{},
+ Fields: ordmap.Make([]ordmap.KeyVal[string, *gti.Field]{
+ {"StopMem", >i.Field{Name: "StopMem", Type: "float32", LocalType: "float32", Doc: "mem % correct level (proportion) above which training on current list stops (switch from AB to AC or stop on AC)", Directives: gti.Directives{}, Tag: "def:\"0.9\""}},
+ {"GPU", >i.Field{Name: "GPU", Type: "bool", LocalType: "bool", Doc: "use the GPU for computation -- generally faster even for small models if NData ~16", Directives: gti.Directives{}, Tag: "def:\"true\""}},
+ {"NThreads", >i.Field{Name: "NThreads", Type: "int", LocalType: "int", Doc: "number of parallel threads for CPU computation -- 0 = use default", Directives: gti.Directives{}, Tag: "def:\"0\""}},
+ {"Run", >i.Field{Name: "Run", Type: "int", LocalType: "int", Doc: "starting run number -- determines the random seed -- runs counts from there -- can do all runs in parallel by launching separate jobs with each run, runs = 1", Directives: gti.Directives{}, Tag: "def:\"0\""}},
+ {"Runs", >i.Field{Name: "Runs", Type: "int", LocalType: "int", Doc: "total number of runs to do when running Train", Directives: gti.Directives{}, Tag: "def:\"5\" min:\"1\""}},
+ {"Epochs", >i.Field{Name: "Epochs", Type: "int", LocalType: "int", Doc: "total number of epochs per run", Directives: gti.Directives{}, Tag: "def:\"100\""}},
+ {"NTrials", >i.Field{Name: "NTrials", Type: "int", LocalType: "int", Doc: "total number of trials per epoch. Should be an even multiple of NData.", Directives: gti.Directives{}, Tag: "def:\"20\""}},
+ {"NData", >i.Field{Name: "NData", Type: "int", LocalType: "int", Doc: "number of data-parallel items to process in parallel per trial -- works (and is significantly faster) for both CPU and GPU. Results in an effective mini-batch of learning.", Directives: gti.Directives{}, Tag: "def:\"10\" min:\"1\""}},
+ {"TestInterval", >i.Field{Name: "TestInterval", Type: "int", LocalType: "int", Doc: "how often to run through all the test patterns, in terms of training epochs -- can use 0 or -1 for no testing", Directives: gti.Directives{}, Tag: "def:\"1\""}},
+ }),
+ Embeds: ordmap.Make([]ordmap.KeyVal[string, *gti.Field]{}),
+ Methods: ordmap.Make([]ordmap.KeyVal[string, *gti.Method]{}),
+})
+
+var _ = gti.AddType(&gti.Type{
+ Name: "main.LogConfig",
+ ShortName: "main.LogConfig",
+ IDName: "log-config",
+ Doc: "LogConfig has config parameters related to logging data",
+ Directives: gti.Directives{},
+ Fields: ordmap.Make([]ordmap.KeyVal[string, *gti.Field]{
+ {"SaveWts", >i.Field{Name: "SaveWts", Type: "bool", LocalType: "bool", Doc: "if true, save final weights after each run", Directives: gti.Directives{}, Tag: ""}},
+ {"Epoch", >i.Field{Name: "Epoch", Type: "bool", LocalType: "bool", Doc: "if true, save train epoch log to file, as .epc.tsv typically", Directives: gti.Directives{}, Tag: "def:\"true\" nest:\"+\""}},
+ {"Run", >i.Field{Name: "Run", Type: "bool", LocalType: "bool", Doc: "if true, save run log to file, as .run.tsv typically", Directives: gti.Directives{}, Tag: "def:\"true\" nest:\"+\""}},
+ {"Trial", >i.Field{Name: "Trial", Type: "bool", LocalType: "bool", Doc: "if true, save train trial log to file, as .trl.tsv typically. May be large.", Directives: gti.Directives{}, Tag: "def:\"false\" nest:\"+\""}},
+ {"TestEpoch", >i.Field{Name: "TestEpoch", Type: "bool", LocalType: "bool", Doc: "if true, save testing epoch log to file, as .tst_epc.tsv typically. In general it is better to copy testing items over to the training epoch log and record there.", Directives: gti.Directives{}, Tag: "def:\"false\" nest:\"+\""}},
+ {"TestTrial", >i.Field{Name: "TestTrial", Type: "bool", LocalType: "bool", Doc: "if true, save testing trial log to file, as .tst_trl.tsv typically. May be large.", Directives: gti.Directives{}, Tag: "def:\"false\" nest:\"+\""}},
+ {"NetData", >i.Field{Name: "NetData", Type: "bool", LocalType: "bool", Doc: "if true, save network activation etc data from testing trials, for later viewing in netview", Directives: gti.Directives{}, Tag: ""}},
+ }),
+ Embeds: ordmap.Make([]ordmap.KeyVal[string, *gti.Field]{}),
+ Methods: ordmap.Make([]ordmap.KeyVal[string, *gti.Method]{}),
+})
+
+var _ = gti.AddType(&gti.Type{
+ Name: "main.PatConfig",
+ ShortName: "main.PatConfig",
+ IDName: "pat-config",
+ Doc: "PatConfig have the pattern parameters",
+ Directives: gti.Directives{},
+ Fields: ordmap.Make([]ordmap.KeyVal[string, *gti.Field]{
+ {"MinDiffPct", >i.Field{Name: "MinDiffPct", Type: "float32", LocalType: "float32", Doc: "minimum difference between item random patterns, as a proportion (0-1) of total active", Directives: gti.Directives{}, Tag: ""}},
+ {"DriftCtxt", >i.Field{Name: "DriftCtxt", Type: "bool", LocalType: "bool", Doc: "use drifting context representations -- otherwise does bit flips from prototype", Directives: gti.Directives{}, Tag: ""}},
+ {"CtxtFlipPct", >i.Field{Name: "CtxtFlipPct", Type: "float32", LocalType: "float32", Doc: "proportion (0-1) of active bits to flip for each context pattern, relative to a prototype, for non-drifting", Directives: gti.Directives{}, Tag: ""}},
+ {"DriftPct", >i.Field{Name: "DriftPct", Type: "float32", LocalType: "float32", Doc: "percentage of active bits that drift, per step, for drifting context", Directives: gti.Directives{}, Tag: ""}},
+ }),
+ Embeds: ordmap.Make([]ordmap.KeyVal[string, *gti.Field]{}),
+ Methods: ordmap.Make([]ordmap.KeyVal[string, *gti.Method]{}),
+})
+
+var _ = gti.AddType(&gti.Type{
+ Name: "main.ModConfig",
+ ShortName: "main.ModConfig",
+ IDName: "mod-config",
+ Doc: "",
+ Directives: gti.Directives{},
+ Fields: ordmap.Make([]ordmap.KeyVal[string, *gti.Field]{
+ {"InToEc2PCon", >i.Field{Name: "InToEc2PCon", Type: "float32", LocalType: "float32", Doc: "percent connectivity from Input to EC2", Directives: gti.Directives{}, Tag: ""}},
+ {"ECPctAct", >i.Field{Name: "ECPctAct", Type: "float32", LocalType: "float32", Doc: "percent activation in EC pool, used in patgen for input generation\npercent activation in EC pool, used in patgen for input generation", Directives: gti.Directives{}, Tag: ""}},
+ {"MemThr", >i.Field{Name: "MemThr", Type: "float64", LocalType: "float64", Doc: "memory threshold", Directives: gti.Directives{}, Tag: ""}},
+ }),
+ Embeds: ordmap.Make([]ordmap.KeyVal[string, *gti.Field]{}),
+ Methods: ordmap.Make([]ordmap.KeyVal[string, *gti.Method]{}),
+})
+
+var _ = gti.AddType(&gti.Type{
+ Name: "main.Config",
+ ShortName: "main.Config",
+ IDName: "config",
+ Doc: "Config is a standard Sim config -- use as a starting point.",
+ Directives: gti.Directives{},
+ Fields: ordmap.Make([]ordmap.KeyVal[string, *gti.Field]{
+ {"Includes", >i.Field{Name: "Includes", Type: "[]string", LocalType: "[]string", Doc: "specify include files here, and after configuration, it contains list of include files added", Directives: gti.Directives{}, Tag: ""}},
+ {"GUI", >i.Field{Name: "GUI", Type: "bool", LocalType: "bool", Doc: "open the GUI -- does not automatically run -- if false, then runs automatically and quits", Directives: gti.Directives{}, Tag: "def:\"true\""}},
+ {"Debug", >i.Field{Name: "Debug", Type: "bool", LocalType: "bool", Doc: "log debugging information", Directives: gti.Directives{}, Tag: ""}},
+ {"Mod", >i.Field{Name: "Mod", Type: "github.com/emer/axon/examples/hip.ModConfig", LocalType: "ModConfig", Doc: "misc model parameters", Directives: gti.Directives{}, Tag: "view:\"inline\""}},
+ {"Hip", >i.Field{Name: "Hip", Type: "github.com/emer/axon/axon.HipConfig", LocalType: "axon.HipConfig", Doc: "Hippocampus sizing parameters", Directives: gti.Directives{}, Tag: ""}},
+ {"Pat", >i.Field{Name: "Pat", Type: "github.com/emer/axon/examples/hip.PatConfig", LocalType: "PatConfig", Doc: "parameters for the input patterns", Directives: gti.Directives{}, Tag: ""}},
+ {"Params", >i.Field{Name: "Params", Type: "github.com/emer/axon/examples/hip.ParamConfig", LocalType: "ParamConfig", Doc: "parameter related configuration options", Directives: gti.Directives{}, Tag: "view:\"add-fields\""}},
+ {"Run", >i.Field{Name: "Run", Type: "github.com/emer/axon/examples/hip.RunConfig", LocalType: "RunConfig", Doc: "sim running related configuration options", Directives: gti.Directives{}, Tag: "view:\"add-fields\""}},
+ {"Log", >i.Field{Name: "Log", Type: "github.com/emer/axon/examples/hip.LogConfig", LocalType: "LogConfig", Doc: "data logging related configuration options", Directives: gti.Directives{}, Tag: "view:\"add-fields\""}},
+ }),
+ Embeds: ordmap.Make([]ordmap.KeyVal[string, *gti.Field]{}),
+ Methods: ordmap.Make([]ordmap.KeyVal[string, *gti.Method]{}),
+})
+
+var _ = gti.AddType(&gti.Type{
+ Name: "main.Sim",
+ ShortName: "main.Sim",
+ IDName: "sim",
+ Doc: "Sim encapsulates the entire simulation model, and we define all the\nfunctionality as methods on this struct. This structure keeps all relevant\nstate information organized and available without having to pass everything around\nas arguments to methods, and provides the core GUI interface (note the view tags\nfor the fields which provide hints to how things should be displayed).",
+ Directives: gti.Directives{},
+ Fields: ordmap.Make([]ordmap.KeyVal[string, *gti.Field]{
+ {"Config", >i.Field{Name: "Config", Type: "github.com/emer/axon/examples/hip.Config", LocalType: "Config", Doc: "simulation configuration parameters -- set by .toml config file and / or args", Directives: gti.Directives{}, Tag: ""}},
+ {"Net", >i.Field{Name: "Net", Type: "*github.com/emer/axon/axon.Network", LocalType: "*axon.Network", Doc: "the network -- click to view / edit parameters for layers, prjns, etc", Directives: gti.Directives{}, Tag: "view:\"no-inline\""}},
+ {"Params", >i.Field{Name: "Params", Type: "github.com/emer/emergent/v2/emer.NetParams", LocalType: "emer.NetParams", Doc: "all parameter management", Directives: gti.Directives{}, Tag: "view:\"inline\""}},
+ {"Loops", >i.Field{Name: "Loops", Type: "*github.com/emer/emergent/v2/looper.Manager", LocalType: "*looper.Manager", Doc: "contains looper control loops for running sim", Directives: gti.Directives{}, Tag: "view:\"no-inline\""}},
+ {"Stats", >i.Field{Name: "Stats", Type: "github.com/emer/emergent/v2/estats.Stats", LocalType: "estats.Stats", Doc: "contains computed statistic values", Directives: gti.Directives{}, Tag: ""}},
+ {"Logs", >i.Field{Name: "Logs", Type: "github.com/emer/emergent/v2/elog.Logs", LocalType: "elog.Logs", Doc: "Contains all the logs and information about the logs.'", Directives: gti.Directives{}, Tag: ""}},
+ {"PretrainMode", >i.Field{Name: "PretrainMode", Type: "bool", LocalType: "bool", Doc: "if true, run in pretrain mode", Directives: gti.Directives{}, Tag: ""}},
+ {"PoolVocab", >i.Field{Name: "PoolVocab", Type: "github.com/emer/emergent/v2/patgen.Vocab", LocalType: "patgen.Vocab", Doc: "pool patterns vocabulary", Directives: gti.Directives{}, Tag: "view:\"no-inline\""}},
+ {"TrainAB", >i.Field{Name: "TrainAB", Type: "*goki.dev/etable/v2/etable.Table", LocalType: "*etable.Table", Doc: "AB training patterns to use", Directives: gti.Directives{}, Tag: "view:\"no-inline\""}},
+ {"TrainAC", >i.Field{Name: "TrainAC", Type: "*goki.dev/etable/v2/etable.Table", LocalType: "*etable.Table", Doc: "AC training patterns to use", Directives: gti.Directives{}, Tag: "view:\"no-inline\""}},
+ {"TestAB", >i.Field{Name: "TestAB", Type: "*goki.dev/etable/v2/etable.Table", LocalType: "*etable.Table", Doc: "AB testing patterns to use", Directives: gti.Directives{}, Tag: "view:\"no-inline\""}},
+ {"TestAC", >i.Field{Name: "TestAC", Type: "*goki.dev/etable/v2/etable.Table", LocalType: "*etable.Table", Doc: "AC testing patterns to use", Directives: gti.Directives{}, Tag: "view:\"no-inline\""}},
+ {"PreTrainLure", >i.Field{Name: "PreTrainLure", Type: "*goki.dev/etable/v2/etable.Table", LocalType: "*etable.Table", Doc: "Lure pretrain patterns to use", Directives: gti.Directives{}, Tag: "view:\"no-inline\""}},
+ {"TestLure", >i.Field{Name: "TestLure", Type: "*goki.dev/etable/v2/etable.Table", LocalType: "*etable.Table", Doc: "Lure testing patterns to use", Directives: gti.Directives{}, Tag: "view:\"no-inline\""}},
+ {"TrainAll", >i.Field{Name: "TrainAll", Type: "*goki.dev/etable/v2/etable.Table", LocalType: "*etable.Table", Doc: "all training patterns -- for pretrain", Directives: gti.Directives{}, Tag: "view:\"no-inline\""}},
+ {"TestABAC", >i.Field{Name: "TestABAC", Type: "*goki.dev/etable/v2/etable.Table", LocalType: "*etable.Table", Doc: "TestAB + TestAC", Directives: gti.Directives{}, Tag: "view:\"no-inline\""}},
+ {"Envs", >i.Field{Name: "Envs", Type: "github.com/emer/emergent/v2/env.Envs", LocalType: "env.Envs", Doc: "Environments", Directives: gti.Directives{}, Tag: "view:\"no-inline\""}},
+ {"Context", >i.Field{Name: "Context", Type: "github.com/emer/axon/axon.Context", LocalType: "axon.Context", Doc: "axon timing parameters and state", Directives: gti.Directives{}, Tag: ""}},
+ {"ViewUpdt", >i.Field{Name: "ViewUpdt", Type: "github.com/emer/emergent/v2/netview.ViewUpdt", LocalType: "netview.ViewUpdt", Doc: "netview update parameters", Directives: gti.Directives{}, Tag: "view:\"inline\""}},
+ {"GUI", >i.Field{Name: "GUI", Type: "github.com/emer/emergent/v2/egui.GUI", LocalType: "egui.GUI", Doc: "manages all the gui elements", Directives: gti.Directives{}, Tag: "view:\"-\""}},
+ {"RndSeeds", >i.Field{Name: "RndSeeds", Type: "github.com/emer/emergent/v2/erand.Seeds", LocalType: "erand.Seeds", Doc: "a list of random seeds to use for each run", Directives: gti.Directives{}, Tag: "view:\"-\""}},
+ }),
+ Embeds: ordmap.Make([]ordmap.KeyVal[string, *gti.Field]{}),
+ Methods: ordmap.Make([]ordmap.KeyVal[string, *gti.Method]{}),
+})
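
The generated gtigen.go registers each type with gti at package init, keeping fields in an ordered map so declaration order survives for reflection-driven UIs. The sketch below shows that shape with local types; keyVal and field here are simplified stand-ins, not the real ordmap.KeyVal or gti.Field APIs:

package main

import "fmt"

// keyVal and field are simplified stand-ins for ordmap.KeyVal and
// gti.Field, showing the shape of the registry gtigen.go builds.
type keyVal[K comparable, V any] struct {
	Key K
	Val V
}

type field struct {
	Name, Type, Doc, Tag string
}

// fields preserves declaration order, which a plain map would lose;
// that ordering is why the generated code uses an ordered map.
var fields []keyVal[string, *field]

func addField(name string, f *field) {
	fields = append(fields, keyVal[string, *field]{Key: name, Val: f})
}

func main() {
	addField("Epochs", &field{Name: "Epochs", Type: "int", Doc: "total number of epochs per run", Tag: `def:"100"`})
	addField("Runs", &field{Name: "Runs", Type: "int", Doc: "total number of runs to do when running Train", Tag: `def:"5" min:"1"`})
	for _, kv := range fields {
		fmt.Println(kv.Key, "->", kv.Val.Tag)
	}
}
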
diff --git a/examples/hip/hip.go b/examples/hip/hip.go
index 11d4a8397..5f83fc8fa 100644
--- a/examples/hip/hip.go
+++ b/examples/hip/hip.go
@@ -5,6 +5,8 @@
// hip runs a hippocampus model for testing parameters and new learning ideas
package main
+//go:generate goki generate -add-types
+
import (
"fmt"
"log"
@@ -15,26 +17,26 @@ import (
"strings"
"github.com/emer/axon/axon"
- "github.com/emer/emergent/econfig"
- "github.com/emer/emergent/egui"
- "github.com/emer/emergent/elog"
- "github.com/emer/emergent/emer"
- "github.com/emer/emergent/env"
- "github.com/emer/emergent/erand"
- "github.com/emer/emergent/estats"
- "github.com/emer/emergent/etime"
- "github.com/emer/emergent/looper"
- "github.com/emer/emergent/netview"
- "github.com/emer/emergent/patgen"
- "github.com/emer/emergent/prjn"
- "github.com/emer/empi/mpi"
- "github.com/emer/etable/etable"
- "github.com/emer/etable/etensor"
- "github.com/emer/etable/metric"
- "github.com/goki/gi/gi"
- "github.com/goki/gi/gimain"
- "github.com/goki/ki/bools"
- "github.com/goki/mat32"
+ "github.com/emer/emergent/v2/econfig"
+ "github.com/emer/emergent/v2/egui"
+ "github.com/emer/emergent/v2/elog"
+ "github.com/emer/emergent/v2/emer"
+ "github.com/emer/emergent/v2/env"
+ "github.com/emer/emergent/v2/erand"
+ "github.com/emer/emergent/v2/estats"
+ "github.com/emer/emergent/v2/etime"
+ "github.com/emer/emergent/v2/looper"
+ "github.com/emer/emergent/v2/netview"
+ "github.com/emer/emergent/v2/patgen"
+ "github.com/emer/emergent/v2/prjn"
+ "github.com/emer/empi/v2/mpi"
+ "goki.dev/etable/v2/etable"
+ "goki.dev/etable/v2/etensor"
+ "goki.dev/etable/v2/metric"
+ "goki.dev/gi/v2/gi"
+ "goki.dev/gi/v2/gimain"
+ "goki.dev/glop/num"
+ "goki.dev/mat32/v2"
)
func main() {
@@ -42,7 +44,7 @@ func main() {
sim.New()
sim.ConfigAll()
if sim.Config.GUI {
- gimain.Main(func() {
+ gimain.Run(func() {
sim.RunGUI()
})
} else {
@@ -60,67 +62,67 @@ func main() {
type Sim struct {
// simulation configuration parameters -- set by .toml config file and / or args
- Config Config `desc:"simulation configuration parameters -- set by .toml config file and / or args"`
+ Config Config
- // [view: no-inline] the network -- click to view / edit parameters for layers, prjns, etc
- Net *axon.Network `view:"no-inline" desc:"the network -- click to view / edit parameters for layers, prjns, etc"`
+ // the network -- click to view / edit parameters for layers, prjns, etc
+ Net *axon.Network `view:"no-inline"`
- // [view: inline] all parameter management
- Params emer.NetParams `view:"inline" desc:"all parameter management"`
+ // all parameter management
+ Params emer.NetParams `view:"inline"`
- // [view: no-inline] contains looper control loops for running sim
- Loops *looper.Manager `view:"no-inline" desc:"contains looper control loops for running sim"`
+ // contains looper control loops for running sim
+ Loops *looper.Manager `view:"no-inline"`
// contains computed statistic values
- Stats estats.Stats `desc:"contains computed statistic values"`
+ Stats estats.Stats
// Contains all the logs and information about the logs.
- Logs elog.Logs `desc:"Contains all the logs and information about the logs.'"`
+ Logs elog.Logs
// if true, run in pretrain mode
- PretrainMode bool `desc:"if true, run in pretrain mode"`
+ PretrainMode bool
- // [view: no-inline] pool patterns vocabulary
- PoolVocab patgen.Vocab `view:"no-inline" desc:"pool patterns vocabulary"`
+ // pool patterns vocabulary
+ PoolVocab patgen.Vocab `view:"no-inline"`
- // [view: no-inline] AB training patterns to use
- TrainAB *etable.Table `view:"no-inline" desc:"AB training patterns to use"`
+ // AB training patterns to use
+ TrainAB *etable.Table `view:"no-inline"`
- // [view: no-inline] AC training patterns to use
- TrainAC *etable.Table `view:"no-inline" desc:"AC training patterns to use"`
+ // AC training patterns to use
+ TrainAC *etable.Table `view:"no-inline"`
- // [view: no-inline] AB testing patterns to use
- TestAB *etable.Table `view:"no-inline" desc:"AB testing patterns to use"`
+ // AB testing patterns to use
+ TestAB *etable.Table `view:"no-inline"`
- // [view: no-inline] AC testing patterns to use
- TestAC *etable.Table `view:"no-inline" desc:"AC testing patterns to use"`
+ // AC testing patterns to use
+ TestAC *etable.Table `view:"no-inline"`
- // [view: no-inline] Lure pretrain patterns to use
- PreTrainLure *etable.Table `view:"no-inline" desc:"Lure pretrain patterns to use"`
+ // Lure pretrain patterns to use
+ PreTrainLure *etable.Table `view:"no-inline"`
- // [view: no-inline] Lure testing patterns to use
- TestLure *etable.Table `view:"no-inline" desc:"Lure testing patterns to use"`
+ // Lure testing patterns to use
+ TestLure *etable.Table `view:"no-inline"`
- // [view: no-inline] all training patterns -- for pretrain
- TrainAll *etable.Table `view:"no-inline" desc:"all training patterns -- for pretrain"`
+ // all training patterns -- for pretrain
+ TrainAll *etable.Table `view:"no-inline"`
- // [view: no-inline] TestAB + TestAC
- TestABAC *etable.Table `view:"no-inline" desc:"TestAB + TestAC"`
+ // TestAB + TestAC
+ TestABAC *etable.Table `view:"no-inline"`
- // [view: no-inline] Environments
- Envs env.Envs `view:"no-inline" desc:"Environments"`
+ // Environments
+ Envs env.Envs `view:"no-inline"`
// axon timing parameters and state
- Context axon.Context `desc:"axon timing parameters and state"`
+ Context axon.Context
- // [view: inline] netview update parameters
- ViewUpdt netview.ViewUpdt `view:"inline" desc:"netview update parameters"`
+ // netview update parameters
+ ViewUpdt netview.ViewUpdt `view:"inline"`
- // [view: -] manages all the gui elements
- GUI egui.GUI `view:"-" desc:"manages all the gui elements"`
+ // manages all the gui elements
+ GUI egui.GUI `view:"-"`
- // [view: -] a list of random seeds to use for each run
- RndSeeds erand.Seeds `view:"-" desc:"a list of random seeds to use for each run"`
+ // a list of random seeds to use for each run
+ RndSeeds erand.Seeds `view:"-"`
}
// New creates new blank elements and initializes defaults
@@ -656,9 +658,9 @@ func (ss *Sim) MemStats(mode etime.Modes, di int) {
ss.Stats.SetInt("RecallItem", mostSimilar)
if isAB {
- ss.Stats.SetFloat("ABRecMem", bools.ToFloat64(mostSimilar == correctIdx))
+ ss.Stats.SetFloat("ABRecMem", num.FromBool[float64](mostSimilar == correctIdx))
} else {
- ss.Stats.SetFloat("ACRecMem", bools.ToFloat64(mostSimilar == correctIdx))
+ ss.Stats.SetFloat("ACRecMem", num.FromBool[float64](mostSimilar == correctIdx))
}
}
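
The bools.ToFloat64 helper is replaced here by the generic num.FromBool[float64] from goki.dev/glop/num, which converts a bool to any numeric type. A local sketch of the same idea (fromBool below is my own copy for illustration, not the glop/num source):

package main

import "fmt"

// fromBool mirrors the generic bool-to-number conversion used above:
// a type-parameterized replacement for per-type helpers like
// bools.ToFloat64.
func fromBool[T int | int32 | int64 | float32 | float64](b bool) T {
	if b {
		return 1
	}
	return 0
}

func main() {
	mostSimilar, correctIdx := 3, 3
	fmt.Println(fromBool[float64](mostSimilar == correctIdx)) // 1
}
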
@@ -776,10 +778,10 @@ func (ss *Sim) Log(mode etime.Modes, time etime.Times) {
////////////////////////////////////////////////////////////////////////////////////////////
// Gui
-// ConfigGui configures the GoGi gui interface for this simulation,
-func (ss *Sim) ConfigGui() *gi.Window {
+// ConfigGUI configures the GoGi gui interface for this simulation.
+func (ss *Sim) ConfigGUI() {
title := "Axon Hippocampus"
- ss.GUI.MakeWindow(ss, "hip", title, `Benchmarking`)
+ ss.GUI.MakeBody(ss, "hip", title, `Benchmarking`)
ss.GUI.CycleUpdateInterval = 10
nv := ss.GUI.AddNetView("NetView")
@@ -788,59 +790,61 @@ func (ss *Sim) ConfigGui() *gi.Window {
ss.ViewUpdt.Config(nv, etime.Phase, etime.Phase)
ss.GUI.ViewUpdt = &ss.ViewUpdt
- nv.Scene().Camera.Pose.Pos.Set(0, 1, 2.75) // more "head on" than default which is more "top down"
- nv.Scene().Camera.LookAt(mat32.Vec3{0, 0, 0}, mat32.Vec3{0, 1, 0})
+ nv.SceneXYZ().Camera.Pose.Pos.Set(0, 1, 2.75) // more "head on" than default which is more "top down"
+ nv.SceneXYZ().Camera.LookAt(mat32.V3(0, 0, 0), mat32.V3(0, 1, 0))
ss.GUI.AddPlots(title, &ss.Logs)
- ss.GUI.AddToolbarItem(egui.ToolbarItem{Label: "Init", Icon: "update",
- Tooltip: "Initialize everything including network weights, and start over. Also applies current params.",
- Active: egui.ActiveStopped,
- Func: func() {
- ss.Init()
- ss.GUI.UpdateWindow()
- },
- })
+ ss.GUI.Body.AddAppBar(func(tb *gi.Toolbar) {
+ ss.GUI.AddToolbarItem(tb, egui.ToolbarItem{Label: "Init", Icon: "update",
+ Tooltip: "Initialize everything including network weights, and start over. Also applies current params.",
+ Active: egui.ActiveStopped,
+ Func: func() {
+ ss.Init()
+ ss.GUI.UpdateWindow()
+ },
+ })
- ss.GUI.AddToolbarItem(egui.ToolbarItem{Label: "Test Init", Icon: "update",
- Tooltip: "Call ResetCountersByMode with test mode and update GUI.",
- Active: egui.ActiveStopped,
- Func: func() {
- ss.TestInit()
- ss.GUI.UpdateWindow()
- },
- })
+ ss.GUI.AddToolbarItem(tb, egui.ToolbarItem{Label: "Test Init", Icon: "update",
+ Tooltip: "Call ResetCountersByMode with test mode and update GUI.",
+ Active: egui.ActiveStopped,
+ Func: func() {
+ ss.TestInit()
+ ss.GUI.UpdateWindow()
+ },
+ })
- ss.GUI.AddLooperCtrl(ss.Loops, []etime.Modes{etime.Train, etime.Test})
-
- ////////////////////////////////////////////////
- ss.GUI.ToolBar.AddSeparator("log")
- ss.GUI.AddToolbarItem(egui.ToolbarItem{Label: "Reset RunLog",
- Icon: "reset",
- Tooltip: "Reset the accumulated log of all Runs, which are tagged with the ParamSet used",
- Active: egui.ActiveAlways,
- Func: func() {
- ss.Logs.ResetLog(etime.Train, etime.Run)
- ss.GUI.UpdatePlot(etime.Train, etime.Run)
- },
- })
- ////////////////////////////////////////////////
- ss.GUI.ToolBar.AddSeparator("misc")
- ss.GUI.AddToolbarItem(egui.ToolbarItem{Label: "New Seed",
- Icon: "new",
- Tooltip: "Generate a new initial random seed to get different results. By default, Init re-establishes the same initial seed every time.",
- Active: egui.ActiveAlways,
- Func: func() {
- ss.RndSeeds.NewSeeds()
- },
- })
- ss.GUI.AddToolbarItem(egui.ToolbarItem{Label: "README",
- Icon: "file-markdown",
- Tooltip: "Opens your browser on the README file that contains instructions for how to run this model.",
- Active: egui.ActiveAlways,
- Func: func() {
- gi.OpenURL("https://github.com/emer/axon/blob/master/examples/hip/README.md")
- },
+ ss.GUI.AddLooperCtrl(tb, ss.Loops, []etime.Modes{etime.Train, etime.Test})
+
+ ////////////////////////////////////////////////
+ gi.NewSeparator(tb)
+ ss.GUI.AddToolbarItem(tb, egui.ToolbarItem{Label: "Reset RunLog",
+ Icon: "reset",
+ Tooltip: "Reset the accumulated log of all Runs, which are tagged with the ParamSet used",
+ Active: egui.ActiveAlways,
+ Func: func() {
+ ss.Logs.ResetLog(etime.Train, etime.Run)
+ ss.GUI.UpdatePlot(etime.Train, etime.Run)
+ },
+ })
+ ////////////////////////////////////////////////
+ gi.NewSeparator(tb)
+ ss.GUI.AddToolbarItem(tb, egui.ToolbarItem{Label: "New Seed",
+ Icon: "new",
+ Tooltip: "Generate a new initial random seed to get different results. By default, Init re-establishes the same initial seed every time.",
+ Active: egui.ActiveAlways,
+ Func: func() {
+ ss.RndSeeds.NewSeeds()
+ },
+ })
+ ss.GUI.AddToolbarItem(tb, egui.ToolbarItem{Label: "README",
+ Icon: "file-markdown",
+ Tooltip: "Opens your browser on the README file that contains instructions for how to run this model.",
+ Active: egui.ActiveAlways,
+ Func: func() {
+ gi.OpenURL("https://github.com/emer/axon/blob/master/examples/hip/README.md")
+ },
+ })
})
ss.GUI.FinalizeGUI(false)
if ss.Config.Run.GPU {
@@ -850,13 +854,12 @@ func (ss *Sim) ConfigGui() *gi.Window {
ss.Net.GPU.Destroy()
})
}
- return ss.GUI.Win
}
func (ss *Sim) RunGUI() {
ss.Init()
- win := ss.ConfigGui()
- win.StartEventLoop()
+ ss.ConfigGUI()
+ ss.GUI.Body.NewWindow().Run().Wait()
}
func (ss *Sim) RunNoGUI() {
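
The GUI lifecycle changes shape in this file: gimain.Main becomes gimain.Run, MakeWindow becomes MakeBody, ConfigGUI no longer returns a *gi.Window, and the event loop is started by chaining Body.NewWindow().Run().Wait(). The stub below only mimics that call chain with placeholder types, not the real goki.dev/gi/v2 API:

package main

import "fmt"

// window and body are placeholders that mimic the shape of the v2
// builder chain; they are not the real gi.Body / window types.
type window struct{}

func (w *window) Run() *window { fmt.Println("event loop running"); return w }
func (w *window) Wait()        { fmt.Println("blocked until window closes") }

type body struct{}

func (b *body) NewWindow() *window { return &window{} }

func main() {
	b := &body{}
	// v1 shape: win := ss.ConfigGui(); win.StartEventLoop()
	// v2 shape: configure the body first, then chain create/run/wait.
	b.NewWindow().Run().Wait()
}
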
diff --git a/examples/hip/orig_params.go b/examples/hip/orig_params.go
index 7cc9c0115..7faf2a05a 100644
--- a/examples/hip/orig_params.go
+++ b/examples/hip/orig_params.go
@@ -6,7 +6,7 @@
package main
-import "github.com/emer/emergent/params"
+import "github.com/emer/emergent/v2/params"
// OrigParamSets is the original hip model params, prior to optimization in 2/2020
var OrigParamSets = params.Sets{
diff --git a/examples/hip/params_good/config.toml b/examples/hip/params_good/config.toml
index ed76a7266..4c521c852 100644
--- a/examples/hip/params_good/config.toml
+++ b/examples/hip/params_good/config.toml
@@ -20,8 +20,9 @@ Debug = false
MossyDeltaTest = 0.75
ThetaLow = 0.9
ThetaHigh = 1.0
+ EC5Clamp = true
EC5ClampSrc = "EC3"
- EC5ClampTest = true
+ EC5ClampTest = false
EC5ClampThr = 0.1
[Hip.EC2Size]
X = 21
diff --git a/examples/hip/params_good/params_all.txt b/examples/hip/params_good/params_all.txt
index 590da24f8..94266ed2e 100644
--- a/examples/hip/params_good/params_all.txt
+++ b/examples/hip/params_good/params_all.txt
@@ -11,13 +11,13 @@ Act: {
Clamp: { IsInput: true IsTarget: false Ge: 1.5 Add: false ErrThr: 0.5 }
Noise: { On: false GeHz: 100 Ge: 0.001 GiHz: 200 Gi: 0.001 }
VmRange: { Min: 0.1 Max: 1 }
- Mahp: { Gbar: 0.02 Voff: -30 Vslope: 9 TauMax: 1000 Tadj: 3.2093637 DtMax: 0.001 }
+ Mahp: { Gbar: 0.02 Voff: -30 Vslope: 9 TauMax: 1000 Tadj: 3.209364 DtMax: 0.001 }
Sahp: { Gbar: 0.05 CaTau: 5 Off: 0.8 Slope: 0.02 TauMax: 1 CaDt: 0.2 DtMax: 1 }
KNa: { On: true TrialSlow: false Med: { On: true Rise: 0.02 Max: 0.2 Tau: 200 Dt: 0.005 } Slow: { On: true Rise: 0.001 Max: 0.2 Tau: 1000 Dt: 0.001
} }
NMDA: { Gbar: 0.006 Tau: 100 ITau: 1 MgC: 1.4 Voff: 0 }
MaintNMDA: { Gbar: 0.007 Tau: 200 ITau: 1 MgC: 1.4 Voff: 0 }
- GabaB: { Gbar: 0.015 RiseTau: 45 DecayTau: 50 Gbase: 0.2 GiSpike: 10 MaxTime: 47.41225 TauFact: 2.5811758 RiseDt: 0.022222223 DecayDt: 0.02 }
+ GabaB: { Gbar: 0.015 RiseTau: 45 DecayTau: 50 Gbase: 0.2 GiSpike: 10 MaxTime: 47.41225 TauFact: 2.5811756 RiseDt: 0.022222223 DecayDt: 0.02 }
VGCC: { Gbar: 0.02 Ca: 25 }
AK: { Gbar: 0.1 Hf: 0.076 Mf: 0.075 Voff: 2 Vmax: -37 }
SKCa: { Gbar: 0 C50: 0.5 ActTau: 15 DeTau: 30 KCaR: 0.8 CaRDecayTau: 150 CaInThr: 0.01 CaInTau: 50 }
@@ -37,7 +37,7 @@ Learn: {
TrgAvgAct: { On: true GiBaseInit: 0 ErrLRate: 0.02 SynScaleRate: 0.005 SubMean: 0 Permute: true Pool: true TrgRange: { Min: 0.5 Max: 2
} }
RLRate: { On: true SigmoidMin: 0.05 Diff: true SpkThr: 0.1 DiffThr: 0.02 Min: 0.001 }
- NeuroMod: { DAMod: 0 Valence: 0 DAModGain: 0.5 DALRateSign: false DALRateMod: 0 AChLRateMod: 0 AChDisInhib: 0 BurstGain: 1 DipGain: 1 }
+ NeuroMod: { DAMod: NoDAMod Valence: Positive DAModGain: 0.5 DALRateSign: false DALRateMod: 0 AChLRateMod: 0 AChDisInhib: 0 BurstGain: 1 DipGain: 1 }
}
/////////////////////////////////////////////////
Layer: EC2
@@ -52,13 +52,13 @@ Act: {
Clamp: { IsInput: false IsTarget: false Ge: 0.8 Add: false ErrThr: 0.5 }
Noise: { On: false GeHz: 100 Ge: 0.001 GiHz: 200 Gi: 0.001 }
VmRange: { Min: 0.1 Max: 1 }
- Mahp: { Gbar: 0.02 Voff: -30 Vslope: 9 TauMax: 1000 Tadj: 3.2093637 DtMax: 0.001 }
+ Mahp: { Gbar: 0.02 Voff: -30 Vslope: 9 TauMax: 1000 Tadj: 3.209364 DtMax: 0.001 }
Sahp: { Gbar: 0.05 CaTau: 5 Off: 0.8 Slope: 0.02 TauMax: 1 CaDt: 0.2 DtMax: 1 }
KNa: { On: true TrialSlow: false Med: { On: true Rise: 0.02 Max: 0.2 Tau: 200 Dt: 0.005 } Slow: { On: true Rise: 0.001 Max: 0.2 Tau: 1000 Dt: 0.001
} }
NMDA: { Gbar: 0.006 Tau: 100 ITau: 1 MgC: 1.4 Voff: 0 }
MaintNMDA: { Gbar: 0.007 Tau: 200 ITau: 1 MgC: 1.4 Voff: 0 }
- GabaB: { Gbar: 0.015 RiseTau: 45 DecayTau: 50 Gbase: 0.2 GiSpike: 10 MaxTime: 47.41225 TauFact: 2.5811758 RiseDt: 0.022222223 DecayDt: 0.02 }
+ GabaB: { Gbar: 0.015 RiseTau: 45 DecayTau: 50 Gbase: 0.2 GiSpike: 10 MaxTime: 47.41225 TauFact: 2.5811756 RiseDt: 0.022222223 DecayDt: 0.02 }
VGCC: { Gbar: 0.02 Ca: 25 }
AK: { Gbar: 0.1 Hf: 0.076 Mf: 0.075 Voff: 2 Vmax: -37 }
SKCa: { Gbar: 0 C50: 0.5 ActTau: 15 DeTau: 30 KCaR: 0.8 CaRDecayTau: 150 CaInThr: 0.01 CaInTau: 50 }
@@ -78,7 +78,7 @@ Learn: {
TrgAvgAct: { On: true GiBaseInit: 0 ErrLRate: 0.02 SynScaleRate: 0.0002 SubMean: 1 Permute: true Pool: true TrgRange: { Min: 0.5 Max: 2
} }
RLRate: { On: true SigmoidMin: 0.05 Diff: true SpkThr: 0.1 DiffThr: 0.02 Min: 0.001 }
- NeuroMod: { DAMod: 0 Valence: 0 DAModGain: 0.5 DALRateSign: false DALRateMod: 0 AChLRateMod: 0 AChDisInhib: 0 BurstGain: 1 DipGain: 1 }
+ NeuroMod: { DAMod: NoDAMod Valence: Positive DAModGain: 0.5 DALRateSign: false DALRateMod: 0 AChLRateMod: 0 AChDisInhib: 0 BurstGain: 1 DipGain: 1 }
}
Burst: {
ThrRel: 0.1 ThrAbs: 0.1
@@ -156,13 +156,13 @@ Act: {
Clamp: { IsInput: false IsTarget: false Ge: 0.8 Add: false ErrThr: 0.5 }
Noise: { On: false GeHz: 100 Ge: 0.001 GiHz: 200 Gi: 0.001 }
VmRange: { Min: 0.1 Max: 1 }
- Mahp: { Gbar: 0.02 Voff: -30 Vslope: 9 TauMax: 1000 Tadj: 3.2093637 DtMax: 0.001 }
+ Mahp: { Gbar: 0.02 Voff: -30 Vslope: 9 TauMax: 1000 Tadj: 3.209364 DtMax: 0.001 }
Sahp: { Gbar: 0.05 CaTau: 5 Off: 0.8 Slope: 0.02 TauMax: 1 CaDt: 0.2 DtMax: 1 }
KNa: { On: true TrialSlow: false Med: { On: true Rise: 0.02 Max: 0.2 Tau: 200 Dt: 0.005 } Slow: { On: true Rise: 0.001 Max: 0.2 Tau: 1000 Dt: 0.001
} }
NMDA: { Gbar: 0.006 Tau: 100 ITau: 1 MgC: 1.4 Voff: 0 }
MaintNMDA: { Gbar: 0.007 Tau: 200 ITau: 1 MgC: 1.4 Voff: 0 }
- GabaB: { Gbar: 0.015 RiseTau: 45 DecayTau: 50 Gbase: 0.2 GiSpike: 10 MaxTime: 47.41225 TauFact: 2.5811758 RiseDt: 0.022222223 DecayDt: 0.02 }
+ GabaB: { Gbar: 0.015 RiseTau: 45 DecayTau: 50 Gbase: 0.2 GiSpike: 10 MaxTime: 47.41225 TauFact: 2.5811756 RiseDt: 0.022222223 DecayDt: 0.02 }
VGCC: { Gbar: 0.02 Ca: 25 }
AK: { Gbar: 0.1 Hf: 0.076 Mf: 0.075 Voff: 2 Vmax: -37 }
SKCa: { Gbar: 0 C50: 0.5 ActTau: 15 DeTau: 30 KCaR: 0.8 CaRDecayTau: 150 CaInThr: 0.01 CaInTau: 50 }
@@ -182,7 +182,7 @@ Learn: {
TrgAvgAct: { On: true GiBaseInit: 0 ErrLRate: 0.02 SynScaleRate: 0.0002 SubMean: 1 Permute: true Pool: true TrgRange: { Min: 0.5 Max: 2
} }
RLRate: { On: true SigmoidMin: 0.05 Diff: true SpkThr: 0.1 DiffThr: 0.02 Min: 0.001 }
- NeuroMod: { DAMod: 0 Valence: 0 DAModGain: 0.5 DALRateSign: false DALRateMod: 0 AChLRateMod: 0 AChDisInhib: 0 BurstGain: 1 DipGain: 1 }
+ NeuroMod: { DAMod: NoDAMod Valence: Positive DAModGain: 0.5 DALRateSign: false DALRateMod: 0 AChLRateMod: 0 AChDisInhib: 0 BurstGain: 1 DipGain: 1 }
}
Burst: {
ThrRel: 0.1 ThrAbs: 0.1
@@ -223,13 +223,13 @@ Act: {
Clamp: { IsInput: false IsTarget: false Ge: 0.8 Add: false ErrThr: 0.5 }
Noise: { On: false GeHz: 100 Ge: 0.001 GiHz: 200 Gi: 0.001 }
VmRange: { Min: 0.1 Max: 1 }
- Mahp: { Gbar: 0.02 Voff: -30 Vslope: 9 TauMax: 1000 Tadj: 3.2093637 DtMax: 0.001 }
+ Mahp: { Gbar: 0.02 Voff: -30 Vslope: 9 TauMax: 1000 Tadj: 3.209364 DtMax: 0.001 }
Sahp: { Gbar: 0.05 CaTau: 5 Off: 0.8 Slope: 0.02 TauMax: 1 CaDt: 0.2 DtMax: 1 }
KNa: { On: true TrialSlow: false Med: { On: true Rise: 0.02 Max: 0.2 Tau: 200 Dt: 0.005 } Slow: { On: true Rise: 0.001 Max: 0.2 Tau: 1000 Dt: 0.001
} }
NMDA: { Gbar: 0.006 Tau: 100 ITau: 1 MgC: 1.4 Voff: 0 }
MaintNMDA: { Gbar: 0.007 Tau: 200 ITau: 1 MgC: 1.4 Voff: 0 }
- GabaB: { Gbar: 0.015 RiseTau: 45 DecayTau: 50 Gbase: 0.2 GiSpike: 10 MaxTime: 47.41225 TauFact: 2.5811758 RiseDt: 0.022222223 DecayDt: 0.02 }
+ GabaB: { Gbar: 0.015 RiseTau: 45 DecayTau: 50 Gbase: 0.2 GiSpike: 10 MaxTime: 47.41225 TauFact: 2.5811756 RiseDt: 0.022222223 DecayDt: 0.02 }
VGCC: { Gbar: 0.02 Ca: 25 }
AK: { Gbar: 0.1 Hf: 0.076 Mf: 0.075 Voff: 2 Vmax: -37 }
SKCa: { Gbar: 0 C50: 0.5 ActTau: 15 DeTau: 30 KCaR: 0.8 CaRDecayTau: 150 CaInThr: 0.01 CaInTau: 50 }
@@ -249,7 +249,7 @@ Learn: {
TrgAvgAct: { On: true GiBaseInit: 0 ErrLRate: 0.02 SynScaleRate: 0.0002 SubMean: 1 Permute: true Pool: true TrgRange: { Min: 0.5 Max: 2
} }
RLRate: { On: true SigmoidMin: 0.05 Diff: true SpkThr: 0.1 DiffThr: 0.02 Min: 0.001 }
- NeuroMod: { DAMod: 0 Valence: 0 DAModGain: 0.5 DALRateSign: false DALRateMod: 0 AChLRateMod: 0 AChDisInhib: 0 BurstGain: 1 DipGain: 1 }
+ NeuroMod: { DAMod: NoDAMod Valence: Positive DAModGain: 0.5 DALRateSign: false DALRateMod: 0 AChLRateMod: 0 AChDisInhib: 0 BurstGain: 1 DipGain: 1 }
}
Burst: {
ThrRel: 0.1 ThrAbs: 0.1
@@ -333,13 +333,13 @@ Act: {
Clamp: { IsInput: false IsTarget: false Ge: 1.4 Add: false ErrThr: 0.5 }
Noise: { On: false GeHz: 100 Ge: 0.001 GiHz: 200 Gi: 0.001 }
VmRange: { Min: 0.1 Max: 1 }
- Mahp: { Gbar: 0.02 Voff: -30 Vslope: 9 TauMax: 1000 Tadj: 3.2093637 DtMax: 0.001 }
+ Mahp: { Gbar: 0.02 Voff: -30 Vslope: 9 TauMax: 1000 Tadj: 3.209364 DtMax: 0.001 }
Sahp: { Gbar: 0.05 CaTau: 5 Off: 0.8 Slope: 0.02 TauMax: 1 CaDt: 0.2 DtMax: 1 }
KNa: { On: true TrialSlow: false Med: { On: true Rise: 0.02 Max: 0.2 Tau: 200 Dt: 0.005 } Slow: { On: true Rise: 0.001 Max: 0.2 Tau: 1000 Dt: 0.001
} }
NMDA: { Gbar: 0.006 Tau: 100 ITau: 1 MgC: 1.4 Voff: 0 }
MaintNMDA: { Gbar: 0.007 Tau: 200 ITau: 1 MgC: 1.4 Voff: 0 }
- GabaB: { Gbar: 0.015 RiseTau: 45 DecayTau: 50 Gbase: 0.2 GiSpike: 10 MaxTime: 47.41225 TauFact: 2.5811758 RiseDt: 0.022222223 DecayDt: 0.02 }
+ GabaB: { Gbar: 0.015 RiseTau: 45 DecayTau: 50 Gbase: 0.2 GiSpike: 10 MaxTime: 47.41225 TauFact: 2.5811756 RiseDt: 0.022222223 DecayDt: 0.02 }
VGCC: { Gbar: 0.02 Ca: 25 }
AK: { Gbar: 0.1 Hf: 0.076 Mf: 0.075 Voff: 2 Vmax: -37 }
SKCa: { Gbar: 0 C50: 0.5 ActTau: 15 DeTau: 30 KCaR: 0.8 CaRDecayTau: 150 CaInThr: 0.01 CaInTau: 50 }
@@ -359,7 +359,7 @@ Learn: {
TrgAvgAct: { On: true GiBaseInit: 0 ErrLRate: 0.02 SynScaleRate: 0.0002 SubMean: 1 Permute: true Pool: true TrgRange: { Min: 0.5 Max: 2
} }
RLRate: { On: true SigmoidMin: 0.05 Diff: true SpkThr: 0.1 DiffThr: 0.02 Min: 0.001 }
- NeuroMod: { DAMod: 0 Valence: 0 DAModGain: 0.5 DALRateSign: false DALRateMod: 0 AChLRateMod: 0 AChDisInhib: 0 BurstGain: 1 DipGain: 1 }
+ NeuroMod: { DAMod: NoDAMod Valence: Positive DAModGain: 0.5 DALRateSign: false DALRateMod: 0 AChLRateMod: 0 AChDisInhib: 0 BurstGain: 1 DipGain: 1 }
}
Burst: {
ThrRel: 0.1 ThrAbs: 0.1
@@ -417,13 +417,13 @@ Act: {
Clamp: { IsInput: false IsTarget: false Ge: 0.8 Add: false ErrThr: 0.5 }
Noise: { On: false GeHz: 100 Ge: 0.001 GiHz: 200 Gi: 0.001 }
VmRange: { Min: 0.1 Max: 1 }
- Mahp: { Gbar: 0.02 Voff: -30 Vslope: 9 TauMax: 1000 Tadj: 3.2093637 DtMax: 0.001 }
+ Mahp: { Gbar: 0.02 Voff: -30 Vslope: 9 TauMax: 1000 Tadj: 3.209364 DtMax: 0.001 }
Sahp: { Gbar: 0.05 CaTau: 5 Off: 0.8 Slope: 0.02 TauMax: 1 CaDt: 0.2 DtMax: 1 }
KNa: { On: true TrialSlow: false Med: { On: true Rise: 0.02 Max: 0.2 Tau: 200 Dt: 0.005 } Slow: { On: true Rise: 0.001 Max: 0.2 Tau: 1000 Dt: 0.001
} }
NMDA: { Gbar: 0.006 Tau: 100 ITau: 1 MgC: 1.4 Voff: 0 }
MaintNMDA: { Gbar: 0.007 Tau: 200 ITau: 1 MgC: 1.4 Voff: 0 }
- GabaB: { Gbar: 0.015 RiseTau: 45 DecayTau: 50 Gbase: 0.2 GiSpike: 10 MaxTime: 47.41225 TauFact: 2.5811758 RiseDt: 0.022222223 DecayDt: 0.02 }
+ GabaB: { Gbar: 0.015 RiseTau: 45 DecayTau: 50 Gbase: 0.2 GiSpike: 10 MaxTime: 47.41225 TauFact: 2.5811756 RiseDt: 0.022222223 DecayDt: 0.02 }
VGCC: { Gbar: 0.02 Ca: 25 }
AK: { Gbar: 0.1 Hf: 0.076 Mf: 0.075 Voff: 2 Vmax: -37 }
SKCa: { Gbar: 0 C50: 0.5 ActTau: 15 DeTau: 30 KCaR: 0.8 CaRDecayTau: 150 CaInThr: 0.01 CaInTau: 50 }
@@ -443,7 +443,7 @@ Learn: {
TrgAvgAct: { On: true GiBaseInit: 0 ErrLRate: 0.02 SynScaleRate: 0.0002 SubMean: 1 Permute: true Pool: true TrgRange: { Min: 0.5 Max: 2
} }
RLRate: { On: true SigmoidMin: 0.05 Diff: true SpkThr: 0.1 DiffThr: 0.02 Min: 0.001 }
- NeuroMod: { DAMod: 0 Valence: 0 DAModGain: 0.5 DALRateSign: false DALRateMod: 0 AChLRateMod: 0 AChDisInhib: 0 BurstGain: 1 DipGain: 1 }
+ NeuroMod: { DAMod: NoDAMod Valence: Positive DAModGain: 0.5 DALRateSign: false DALRateMod: 0 AChLRateMod: 0 AChDisInhib: 0 BurstGain: 1 DipGain: 1 }
}
Burst: {
ThrRel: 0.1 ThrAbs: 0.1
@@ -530,13 +530,13 @@ Act: {
Clamp: { IsInput: false IsTarget: true Ge: 1.4 Add: false ErrThr: 0.5 }
Noise: { On: false GeHz: 100 Ge: 0.001 GiHz: 200 Gi: 0.001 }
VmRange: { Min: 0.1 Max: 1 }
- Mahp: { Gbar: 0.02 Voff: -30 Vslope: 9 TauMax: 1000 Tadj: 3.2093637 DtMax: 0.001 }
+ Mahp: { Gbar: 0.02 Voff: -30 Vslope: 9 TauMax: 1000 Tadj: 3.209364 DtMax: 0.001 }
Sahp: { Gbar: 0.05 CaTau: 5 Off: 0.8 Slope: 0.02 TauMax: 1 CaDt: 0.2 DtMax: 1 }
KNa: { On: true TrialSlow: false Med: { On: true Rise: 0.02 Max: 0.2 Tau: 200 Dt: 0.005 } Slow: { On: true Rise: 0.001 Max: 0.2 Tau: 1000 Dt: 0.001
} }
NMDA: { Gbar: 0.006 Tau: 100 ITau: 1 MgC: 1.4 Voff: 0 }
MaintNMDA: { Gbar: 0.007 Tau: 200 ITau: 1 MgC: 1.4 Voff: 0 }
- GabaB: { Gbar: 0.015 RiseTau: 45 DecayTau: 50 Gbase: 0.2 GiSpike: 10 MaxTime: 47.41225 TauFact: 2.5811758 RiseDt: 0.022222223 DecayDt: 0.02 }
+ GabaB: { Gbar: 0.015 RiseTau: 45 DecayTau: 50 Gbase: 0.2 GiSpike: 10 MaxTime: 47.41225 TauFact: 2.5811756 RiseDt: 0.022222223 DecayDt: 0.02 }
VGCC: { Gbar: 0.02 Ca: 25 }
AK: { Gbar: 0.1 Hf: 0.076 Mf: 0.075 Voff: 2 Vmax: -37 }
SKCa: { Gbar: 0 C50: 0.5 ActTau: 15 DeTau: 30 KCaR: 0.8 CaRDecayTau: 150 CaInThr: 0.01 CaInTau: 50 }
@@ -556,7 +556,7 @@ Learn: {
TrgAvgAct: { On: true GiBaseInit: 0 ErrLRate: 0.02 SynScaleRate: 0.0002 SubMean: 0 Permute: true Pool: true TrgRange: { Min: 0.5 Max: 2
} }
RLRate: { On: true SigmoidMin: 0.05 Diff: true SpkThr: 0.1 DiffThr: 0.02 Min: 0.001 }
- NeuroMod: { DAMod: 0 Valence: 0 DAModGain: 0.5 DALRateSign: false DALRateMod: 0 AChLRateMod: 0 AChDisInhib: 0 BurstGain: 1 DipGain: 1 }
+ NeuroMod: { DAMod: NoDAMod Valence: Positive DAModGain: 0.5 DALRateSign: false DALRateMod: 0 AChLRateMod: 0 AChDisInhib: 0 BurstGain: 1 DipGain: 1 }
}
///////////////////////////////////////////////////
Prjn: CA1ToEC5
diff --git a/examples/hip/params_good/params_layers.txt b/examples/hip/params_good/params_layers.txt
index af94f6b63..abe602b62 100644
--- a/examples/hip/params_good/params_layers.txt
+++ b/examples/hip/params_good/params_layers.txt
@@ -7,8 +7,8 @@
DG Nominal: 0.01 Params: Base:#DG: 0.01
Layer.Gi: 2.40 Params: #DG: 2.4
- CA3 Nominal: 0.01 Params: Base:#CA3: 0.01
- Layer.Gi: 1.20 Params: #CA3: 1.2
+ CA3 Nominal: 0.01 Params: #CA3: 0.01
+ Layer.Gi: 1.20 Params: Base:#CA3: 1.2
EC3 Nominal: 0.05 Params: .EC: 0.05
Pool.Gi: 1.10 Params: .EC: 1.1
diff --git a/examples/inhib/config.go b/examples/inhib/config.go
index 0afe00107..b1a76aea4 100644
--- a/examples/inhib/config.go
+++ b/examples/inhib/config.go
@@ -4,7 +4,7 @@
package main
-import "github.com/emer/emergent/evec"
+import "github.com/emer/emergent/v2/evec"
// EnvConfig has config params for environment
// note: only adding fields for key Env params that matter for both Network and Env
@@ -12,86 +12,86 @@ import "github.com/emer/emergent/evec"
type EnvConfig struct {
// env parameters -- can set any field/subfield on Env struct, using standard TOML formatting
- Env map[string]any `desc:"env parameters -- can set any field/subfield on Env struct, using standard TOML formatting"`
+ Env map[string]any
- // [def: 15] [min: 5] [max: 50] [step: 1] percent of active units in input layer (literally number of active units, because input has 100 units total)
- InputPct float32 `def:"15" min:"5" max:"50" step:"1" desc:"percent of active units in input layer (literally number of active units, because input has 100 units total)"`
+ // percent of active units in input layer (literally number of active units, because input has 100 units total)
+ InputPct float32 `def:"15" min:"5" max:"50" step:"1"`
}
// ParamConfig has config parameters related to sim params
type ParamConfig struct {
// network parameters
- Network map[string]any `desc:"network parameters"`
+ Network map[string]any
- // [def: 2] [min: 1] number of hidden layers to add
- NLayers int `def:"2" min:"1" desc:"number of hidden layers to add"`
+ // number of hidden layers to add
+ NLayers int `def:"2" min:"1"`
- // [def: {'X':10,'Y':10}] size of hidden layers
- HidSize evec.Vec2i `def:"{'X':10,'Y':10}" desc:"size of hidden layers"`
+ // size of hidden layers
+ HidSize evec.Vec2i `def:"{'X':10,'Y':10}"`
// Extra Param Sheet name(s) to use (space separated if multiple) -- must be valid name as listed in compiled-in params or loaded params
- Sheet string `desc:"Extra Param Sheet name(s) to use (space separated if multiple) -- must be valid name as listed in compiled-in params or loaded params"`
+ Sheet string
// extra tag to add to file names and logs saved from this run
- Tag string `desc:"extra tag to add to file names and logs saved from this run"`
+ Tag string
// user note -- describe the run params etc -- like a git commit message for the run
- Note string `desc:"user note -- describe the run params etc -- like a git commit message for the run"`
+ Note string
// Name of the JSON file to input saved parameters from.
- File string `nest:"+" desc:"Name of the JSON file to input saved parameters from."`
+ File string `nest:"+"`
// Save a snapshot of all current param and config settings in a directory named params_ (or _good if Good is true), then quit -- useful for comparing to later changes and seeing multiple views of current params
- SaveAll bool `nest:"+" desc:"Save a snapshot of all current param and config settings in a directory named params_ (or _good if Good is true), then quit -- useful for comparing to later changes and seeing multiple views of current params"`
+ SaveAll bool `nest:"+"`
// for SaveAll, save to params_good for a known good params state. This can be done prior to making a new release after all tests are passing -- add results to git to provide a full diff record of all params over time.
- Good bool `nest:"+" desc:"for SaveAll, save to params_good for a known good params state. This can be done prior to making a new release after all tests are passing -- add results to git to provide a full diff record of all params over time."`
+ Good bool `nest:"+"`
}
// RunConfig has config parameters related to running the sim
type RunConfig struct {
- // [def: true] use the GPU for computation -- generally faster even for small models if NData ~16
- GPU bool `def:"true" desc:"use the GPU for computation -- generally faster even for small models if NData ~16"`
+ // use the GPU for computation -- generally faster even for small models if NData ~16
+ GPU bool `def:"true"`
}
// LogConfig has config parameters related to logging data
type LogConfig struct {
- // [def: true] if true, save train epoch log to file, as .epc.tsv typically
- Epoch bool `def:"true" nest:"+" desc:"if true, save train epoch log to file, as .epc.tsv typically"`
+ // if true, save train epoch log to file, as .epc.tsv typically
+ Epoch bool `def:"true" nest:"+"`
- // [def: false] if true, save train trial log to file, as .trl.tsv typically. May be large.
- Trial bool `def:"false" nest:"+" desc:"if true, save train trial log to file, as .trl.tsv typically. May be large."`
+ // if true, save train trial log to file, as .trl.tsv typically. May be large.
+ Trial bool `def:"false" nest:"+"`
// if true, save network activation etc data from testing trials, for later viewing in netview
- NetData bool `desc:"if true, save network activation etc data from testing trials, for later viewing in netview"`
+ NetData bool
}
// Config is a standard Sim config -- use as a starting point.
type Config struct {
// specify include files here, and after configuration, it contains list of include files added
- Includes []string `desc:"specify include files here, and after configuration, it contains list of include files added"`
+ Includes []string
- // [def: true] open the GUI -- does not automatically run -- if false, then runs automatically and quits
- GUI bool `def:"true" desc:"open the GUI -- does not automatically run -- if false, then runs automatically and quits"`
+ // open the GUI -- does not automatically run -- if false, then runs automatically and quits
+ GUI bool `def:"true"`
// log debugging information
- Debug bool `desc:"log debugging information"`
+ Debug bool
- // [view: add-fields] environment configuration options
- Env EnvConfig `view:"add-fields" desc:"environment configuration options"`
+ // environment configuration options
+ Env EnvConfig `view:"add-fields"`
- // [view: add-fields] parameter related configuration options
- Params ParamConfig `view:"add-fields" desc:"parameter related configuration options"`
+ // parameter related configuration options
+ Params ParamConfig `view:"add-fields"`
- // [view: add-fields] sim running related configuration options
- Run RunConfig `view:"add-fields" desc:"sim running related configuration options"`
+ // sim running related configuration options
+ Run RunConfig `view:"add-fields"`
- // [view: add-fields] data logging related configuration options
- Log LogConfig `view:"add-fields" desc:"data logging related configuration options"`
+ // data logging related configuration options
+ Log LogConfig `view:"add-fields"`
}
func (cfg *Config) IncludesPtr() *[]string { return &cfg.Includes }
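
IncludesPtr is what lets econfig splice extra config files into Includes generically, without knowing the concrete Config type. A self-contained sketch of that accessor pattern (the includeser interface name below is assumed for illustration, not necessarily econfig's):

package main

import "fmt"

// includeser is an assumed name for the accessor interface; the
// IncludesPtr signature matches the method defined in config.go.
type includeser interface {
	IncludesPtr() *[]string
}

// demoConfig mirrors the Includes field pattern from config.go.
type demoConfig struct {
	Includes []string
}

func (cfg *demoConfig) IncludesPtr() *[]string { return &cfg.Includes }

// addIncludes appends include files through the interface, without
// knowing the concrete config type -- the point of the accessor.
func addIncludes(c includeser, files ...string) {
	p := c.IncludesPtr()
	*p = append(*p, files...)
}

func main() {
	cfg := &demoConfig{}
	addIncludes(cfg, "base.toml", "gpu.toml")
	fmt.Println(cfg.Includes)
}
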
diff --git a/examples/inhib/gtigen.go b/examples/inhib/gtigen.go
new file mode 100644
index 000000000..2a0768e51
--- /dev/null
+++ b/examples/inhib/gtigen.go
@@ -0,0 +1,113 @@
+// Code generated by "goki generate -add-types"; DO NOT EDIT.
+
+package main
+
+import (
+ "goki.dev/gti"
+ "goki.dev/ordmap"
+)
+
+var _ = gti.AddType(&gti.Type{
+ Name: "main.EnvConfig",
+ ShortName: "main.EnvConfig",
+ IDName: "env-config",
+ Doc: "EnvConfig has config params for environment\nnote: only adding fields for key Env params that matter for both Network and Env\nother params are set via the Env map data mechanism.",
+ Directives: gti.Directives{},
+ Fields: ordmap.Make([]ordmap.KeyVal[string, *gti.Field]{
+ {"Env", >i.Field{Name: "Env", Type: "map[string]any", LocalType: "map[string]any", Doc: "env parameters -- can set any field/subfield on Env struct, using standard TOML formatting", Directives: gti.Directives{}, Tag: ""}},
+ {"InputPct", >i.Field{Name: "InputPct", Type: "float32", LocalType: "float32", Doc: "percent of active units in input layer (literally number of active units, because input has 100 units total)", Directives: gti.Directives{}, Tag: "def:\"15\" min:\"5\" max:\"50\" step:\"1\""}},
+ }),
+ Embeds: ordmap.Make([]ordmap.KeyVal[string, *gti.Field]{}),
+ Methods: ordmap.Make([]ordmap.KeyVal[string, *gti.Method]{}),
+})
+
+var _ = gti.AddType(&gti.Type{
+ Name: "main.ParamConfig",
+ ShortName: "main.ParamConfig",
+ IDName: "param-config",
+ Doc: "ParamConfig has config parameters related to sim params",
+ Directives: gti.Directives{},
+ Fields: ordmap.Make([]ordmap.KeyVal[string, *gti.Field]{
+ {"Network", >i.Field{Name: "Network", Type: "map[string]any", LocalType: "map[string]any", Doc: "network parameters", Directives: gti.Directives{}, Tag: ""}},
+ {"NLayers", >i.Field{Name: "NLayers", Type: "int", LocalType: "int", Doc: "number of hidden layers to add", Directives: gti.Directives{}, Tag: "def:\"2\" min:\"1\""}},
+ {"HidSize", >i.Field{Name: "HidSize", Type: "github.com/emer/emergent/v2/evec.Vec2i", LocalType: "evec.Vec2i", Doc: "size of hidden layers", Directives: gti.Directives{}, Tag: "def:\"{'X':10,'Y':10}\""}},
+ {"Sheet", >i.Field{Name: "Sheet", Type: "string", LocalType: "string", Doc: "Extra Param Sheet name(s) to use (space separated if multiple) -- must be valid name as listed in compiled-in params or loaded params", Directives: gti.Directives{}, Tag: ""}},
+ {"Tag", >i.Field{Name: "Tag", Type: "string", LocalType: "string", Doc: "extra tag to add to file names and logs saved from this run", Directives: gti.Directives{}, Tag: ""}},
+ {"Note", >i.Field{Name: "Note", Type: "string", LocalType: "string", Doc: "user note -- describe the run params etc -- like a git commit message for the run", Directives: gti.Directives{}, Tag: ""}},
+ {"File", >i.Field{Name: "File", Type: "string", LocalType: "string", Doc: "Name of the JSON file to input saved parameters from.", Directives: gti.Directives{}, Tag: "nest:\"+\""}},
+ {"SaveAll", >i.Field{Name: "SaveAll", Type: "bool", LocalType: "bool", Doc: "Save a snapshot of all current param and config settings in a directory named params_ (or _good if Good is true), then quit -- useful for comparing to later changes and seeing multiple views of current params", Directives: gti.Directives{}, Tag: "nest:\"+\""}},
+ {"Good", >i.Field{Name: "Good", Type: "bool", LocalType: "bool", Doc: "for SaveAll, save to params_good for a known good params state. This can be done prior to making a new release after all tests are passing -- add results to git to provide a full diff record of all params over time.", Directives: gti.Directives{}, Tag: "nest:\"+\""}},
+ }),
+ Embeds: ordmap.Make([]ordmap.KeyVal[string, *gti.Field]{}),
+ Methods: ordmap.Make([]ordmap.KeyVal[string, *gti.Method]{}),
+})
+
+var _ = gti.AddType(&gti.Type{
+ Name: "main.RunConfig",
+ ShortName: "main.RunConfig",
+ IDName: "run-config",
+ Doc: "RunConfig has config parameters related to running the sim",
+ Directives: gti.Directives{},
+ Fields: ordmap.Make([]ordmap.KeyVal[string, *gti.Field]{
+ {"GPU", >i.Field{Name: "GPU", Type: "bool", LocalType: "bool", Doc: "use the GPU for computation -- generally faster even for small models if NData ~16", Directives: gti.Directives{}, Tag: "def:\"true\""}},
+ }),
+ Embeds: ordmap.Make([]ordmap.KeyVal[string, *gti.Field]{}),
+ Methods: ordmap.Make([]ordmap.KeyVal[string, *gti.Method]{}),
+})
+
+var _ = gti.AddType(&gti.Type{
+ Name: "main.LogConfig",
+ ShortName: "main.LogConfig",
+ IDName: "log-config",
+ Doc: "LogConfig has config parameters related to logging data",
+ Directives: gti.Directives{},
+ Fields: ordmap.Make([]ordmap.KeyVal[string, *gti.Field]{
+ {"Epoch", >i.Field{Name: "Epoch", Type: "bool", LocalType: "bool", Doc: "if true, save train epoch log to file, as .epc.tsv typically", Directives: gti.Directives{}, Tag: "def:\"true\" nest:\"+\""}},
+ {"Trial", >i.Field{Name: "Trial", Type: "bool", LocalType: "bool", Doc: "if true, save train trial log to file, as .trl.tsv typically. May be large.", Directives: gti.Directives{}, Tag: "def:\"false\" nest:\"+\""}},
+ {"NetData", >i.Field{Name: "NetData", Type: "bool", LocalType: "bool", Doc: "if true, save network activation etc data from testing trials, for later viewing in netview", Directives: gti.Directives{}, Tag: ""}},
+ }),
+ Embeds: ordmap.Make([]ordmap.KeyVal[string, *gti.Field]{}),
+ Methods: ordmap.Make([]ordmap.KeyVal[string, *gti.Method]{}),
+})
+
+var _ = gti.AddType(&gti.Type{
+ Name: "main.Config",
+ ShortName: "main.Config",
+ IDName: "config",
+ Doc: "Config is a standard Sim config -- use as a starting point.",
+ Directives: gti.Directives{},
+ Fields: ordmap.Make([]ordmap.KeyVal[string, *gti.Field]{
+ {"Includes", >i.Field{Name: "Includes", Type: "[]string", LocalType: "[]string", Doc: "specify include files here, and after configuration, it contains list of include files added", Directives: gti.Directives{}, Tag: ""}},
+ {"GUI", >i.Field{Name: "GUI", Type: "bool", LocalType: "bool", Doc: "open the GUI -- does not automatically run -- if false, then runs automatically and quits", Directives: gti.Directives{}, Tag: "def:\"true\""}},
+ {"Debug", >i.Field{Name: "Debug", Type: "bool", LocalType: "bool", Doc: "log debugging information", Directives: gti.Directives{}, Tag: ""}},
+ {"Env", >i.Field{Name: "Env", Type: "github.com/emer/axon/examples/inhib.EnvConfig", LocalType: "EnvConfig", Doc: "environment configuration options", Directives: gti.Directives{}, Tag: "view:\"add-fields\""}},
+ {"Params", >i.Field{Name: "Params", Type: "github.com/emer/axon/examples/inhib.ParamConfig", LocalType: "ParamConfig", Doc: "parameter related configuration options", Directives: gti.Directives{}, Tag: "view:\"add-fields\""}},
+ {"Run", >i.Field{Name: "Run", Type: "github.com/emer/axon/examples/inhib.RunConfig", LocalType: "RunConfig", Doc: "sim running related configuration options", Directives: gti.Directives{}, Tag: "view:\"add-fields\""}},
+ {"Log", >i.Field{Name: "Log", Type: "github.com/emer/axon/examples/inhib.LogConfig", LocalType: "LogConfig", Doc: "data logging related configuration options", Directives: gti.Directives{}, Tag: "view:\"add-fields\""}},
+ }),
+ Embeds: ordmap.Make([]ordmap.KeyVal[string, *gti.Field]{}),
+ Methods: ordmap.Make([]ordmap.KeyVal[string, *gti.Method]{}),
+})
+
+var _ = gti.AddType(&gti.Type{
+ Name: "main.Sim",
+ ShortName: "main.Sim",
+ IDName: "sim",
+ Doc: "Sim encapsulates the entire simulation model, and we define all the\nfunctionality as methods on this struct. This structure keeps all relevant\nstate information organized and available without having to pass everything around\nas arguments to methods, and provides the core GUI interface (note the view tags\nfor the fields which provide hints to how things should be displayed).",
+ Directives: gti.Directives{},
+ Fields: ordmap.Make([]ordmap.KeyVal[string, *gti.Field]{
+ {"Config", >i.Field{Name: "Config", Type: "github.com/emer/axon/examples/inhib.Config", LocalType: "Config", Doc: "simulation configuration parameters -- set by .toml config file and / or args", Directives: gti.Directives{}, Tag: ""}},
+ {"Net", >i.Field{Name: "Net", Type: "*github.com/emer/axon/axon.Network", LocalType: "*axon.Network", Doc: "the network -- click to view / edit parameters for layers, prjns, etc", Directives: gti.Directives{}, Tag: "view:\"no-inline\""}},
+ {"Params", >i.Field{Name: "Params", Type: "github.com/emer/emergent/v2/emer.NetParams", LocalType: "emer.NetParams", Doc: "all parameter management", Directives: gti.Directives{}, Tag: "view:\"inline\""}},
+ {"Loops", >i.Field{Name: "Loops", Type: "*github.com/emer/emergent/v2/looper.Manager", LocalType: "*looper.Manager", Doc: "contains looper control loops for running sim", Directives: gti.Directives{}, Tag: "view:\"no-inline\""}},
+ {"Stats", >i.Field{Name: "Stats", Type: "github.com/emer/emergent/v2/estats.Stats", LocalType: "estats.Stats", Doc: "contains computed statistic values", Directives: gti.Directives{}, Tag: ""}},
+ {"Logs", >i.Field{Name: "Logs", Type: "github.com/emer/emergent/v2/elog.Logs", LocalType: "elog.Logs", Doc: "Contains all the logs and information about the logs.'", Directives: gti.Directives{}, Tag: ""}},
+ {"Pats", >i.Field{Name: "Pats", Type: "*goki.dev/etable/v2/etable.Table", LocalType: "*etable.Table", Doc: "the training patterns to use", Directives: gti.Directives{}, Tag: "view:\"no-inline\""}},
+ {"Context", >i.Field{Name: "Context", Type: "github.com/emer/axon/axon.Context", LocalType: "axon.Context", Doc: "axon timing parameters and state", Directives: gti.Directives{}, Tag: ""}},
+ {"ViewUpdt", >i.Field{Name: "ViewUpdt", Type: "github.com/emer/emergent/v2/netview.ViewUpdt", LocalType: "netview.ViewUpdt", Doc: "netview update parameters", Directives: gti.Directives{}, Tag: "view:\"inline\""}},
+ {"GUI", >i.Field{Name: "GUI", Type: "github.com/emer/emergent/v2/egui.GUI", LocalType: "egui.GUI", Doc: "manages all the gui elements", Directives: gti.Directives{}, Tag: "view:\"-\""}},
+ {"RndSeeds", >i.Field{Name: "RndSeeds", Type: "github.com/emer/emergent/v2/erand.Seeds", LocalType: "erand.Seeds", Doc: "a list of random seeds to use for each run", Directives: gti.Directives{}, Tag: "view:\"-\""}},
+ }),
+ Embeds: ordmap.Make([]ordmap.KeyVal[string, *gti.Field]{}),
+ Methods: ordmap.Make([]ordmap.KeyVal[string, *gti.Method]{}),
+})
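This generated file is what the new `//go:generate goki generate -add-types` directive (added to inhib.go below) emits: each type's doc comment becomes the `Doc` string, and the remaining struct tags are carried into the `gti.Field` entries that the v2 GUI uses to build editors, replacing the deleted `desc` tags. A minimal sketch of the input convention, with a hypothetical `DemoConfig` standing in for the Config structs in this diff:

```go
// DemoConfig illustrates the v2 documentation convention adopted here:
// field docs live in ordinary Go comments (parsed by goki generate),
// and only machine-readable attributes remain in the struct tag.
type DemoConfig struct {
	// percent of active units in the input layer
	InputPct float32 `def:"15" min:"5" max:"50" step:"1"`
}
```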
diff --git a/examples/inhib/inhib.go b/examples/inhib/inhib.go
index 6c4d35290..3825217f0 100644
--- a/examples/inhib/inhib.go
+++ b/examples/inhib/inhib.go
@@ -9,31 +9,33 @@ feedforward and feedback inhibition to excitatory pyramidal neurons.
*/
package main
+//go:generate goki generate -add-types
+
import (
"fmt"
"math/rand"
"os"
"github.com/emer/axon/axon"
- "github.com/emer/emergent/econfig"
- "github.com/emer/emergent/egui"
- "github.com/emer/emergent/elog"
- "github.com/emer/emergent/emer"
- "github.com/emer/emergent/erand"
- "github.com/emer/emergent/estats"
- "github.com/emer/emergent/etime"
- "github.com/emer/emergent/looper"
- "github.com/emer/emergent/netview"
- "github.com/emer/emergent/patgen"
- "github.com/emer/emergent/prjn"
- "github.com/emer/emergent/relpos"
- "github.com/emer/empi/mpi"
- "github.com/emer/etable/etable"
- "github.com/emer/etable/etensor"
- _ "github.com/emer/etable/etview" // include to get gui views
- "github.com/goki/gi/gi"
- "github.com/goki/gi/gimain"
- "github.com/goki/mat32"
+ "github.com/emer/emergent/v2/econfig"
+ "github.com/emer/emergent/v2/egui"
+ "github.com/emer/emergent/v2/elog"
+ "github.com/emer/emergent/v2/emer"
+ "github.com/emer/emergent/v2/erand"
+ "github.com/emer/emergent/v2/estats"
+ "github.com/emer/emergent/v2/etime"
+ "github.com/emer/emergent/v2/looper"
+ "github.com/emer/emergent/v2/netview"
+ "github.com/emer/emergent/v2/patgen"
+ "github.com/emer/emergent/v2/prjn"
+ "github.com/emer/emergent/v2/relpos"
+ "github.com/emer/empi/v2/mpi"
+ "goki.dev/etable/v2/etable"
+ "goki.dev/etable/v2/etensor"
+ _ "goki.dev/etable/v2/etview" // include to get gui views
+ "goki.dev/gi/v2/gi"
+ "goki.dev/gi/v2/gimain"
+ "goki.dev/mat32/v2"
)
func main() {
@@ -41,7 +43,7 @@ func main() {
sim.New()
sim.ConfigAll()
if sim.Config.GUI {
- gimain.Main(sim.RunGUI)
+ gimain.Run(sim.RunGUI)
} else {
sim.RunNoGUI()
}
@@ -57,37 +59,37 @@ func main() {
type Sim struct {
// simulation configuration parameters -- set by .toml config file and / or args
- Config Config `desc:"simulation configuration parameters -- set by .toml config file and / or args"`
+ Config Config
- // [view: no-inline] the network -- click to view / edit parameters for layers, prjns, etc
- Net *axon.Network `view:"no-inline" desc:"the network -- click to view / edit parameters for layers, prjns, etc"`
+ // the network -- click to view / edit parameters for layers, prjns, etc
+ Net *axon.Network `view:"no-inline"`
- // [view: inline] all parameter management
- Params emer.NetParams `view:"inline" desc:"all parameter management"`
+ // all parameter management
+ Params emer.NetParams `view:"inline"`
- // [view: no-inline] contains looper control loops for running sim
- Loops *looper.Manager `view:"no-inline" desc:"contains looper control loops for running sim"`
+ // contains looper control loops for running sim
+ Loops *looper.Manager `view:"no-inline"`
// contains computed statistic values
- Stats estats.Stats `desc:"contains computed statistic values"`
+ Stats estats.Stats
// Contains all the logs and information about the logs.'
- Logs elog.Logs `desc:"Contains all the logs and information about the logs.'"`
+ Logs elog.Logs
- // [view: no-inline] the training patterns to use
- Pats *etable.Table `view:"no-inline" desc:"the training patterns to use"`
+ // the training patterns to use
+ Pats *etable.Table `view:"no-inline"`
// axon timing parameters and state
- Context axon.Context `desc:"axon timing parameters and state"`
+ Context axon.Context
- // [view: inline] netview update parameters
- ViewUpdt netview.ViewUpdt `view:"inline" desc:"netview update parameters"`
+ // netview update parameters
+ ViewUpdt netview.ViewUpdt `view:"inline"`
- // [view: -] manages all the gui elements
- GUI egui.GUI `view:"-" desc:"manages all the gui elements"`
+ // manages all the gui elements
+ GUI egui.GUI `view:"-"`
- // [view: -] a list of random seeds to use for each run
- RndSeeds erand.Seeds `view:"-" desc:"a list of random seeds to use for each run"`
+ // a list of random seeds to use for each run
+ RndSeeds erand.Seeds `view:"-"`
}
// New creates new blank elements and initializes defaults
@@ -345,10 +347,10 @@ func (ss *Sim) InitStats() {
// StatCounters saves current counters to Stats, so they are available for logging etc
// Also saves a string rep of them for ViewUpdt.Text
func (ss *Sim) StatCounters() {
- var mode etime.Modes
- mode.FromString(ss.Context.Mode.String())
+ ctx := &ss.Context
+ mode := ctx.Mode
ss.Loops.Stacks[mode].CtrsToStats(&ss.Stats)
- ss.Stats.SetInt("Cycle", int(ss.Context.Cycle))
+ ss.Stats.SetInt("Cycle", int(ctx.Cycle))
}
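This simplification works because `Context.Mode` now holds an `etime.Modes` value directly; the removed `FromString` round-trip suggests it was previously a differently typed enum that only matched by name. A minimal sketch of the resulting pattern, using the same calls as above:

```go
ctx := &ss.Context
// ctx.Mode is already the etime.Modes key into Loops.Stacks;
// no string conversion is needed in the v2 Context
ss.Loops.Stacks[ctx.Mode].CtrsToStats(&ss.Stats)
ss.Stats.SetInt("Cycle", int(ctx.Cycle))
```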
func (ss *Sim) NetViewCounters(tm etime.Times) {
@@ -510,10 +512,10 @@ func (ss *Sim) Log(mode etime.Modes, time etime.Times) {
////////////////////////////////////////////////////////////////////////////////////////////
// Gui
-// ConfigGui configures the GoGi gui interface for this simulation,
-func (ss *Sim) ConfigGui() *gi.Window {
+// ConfigGUI configures the GoGi gui interface for this simulation,
+func (ss *Sim) ConfigGUI() {
title := "Axon Inhibition Test"
- ss.GUI.MakeWindow(ss, "inhib", title, `This tests inhibition based on interneurons and inhibition functions. See emergent on GitHub.`)
+ ss.GUI.MakeBody(ss, "inhib", title, `This tests inhibition based on interneurons and inhibition functions. See emergent on GitHub.`)
ss.GUI.CycleUpdateInterval = 10
nv := ss.GUI.AddNetView("NetView")
@@ -522,59 +524,60 @@ func (ss *Sim) ConfigGui() *gi.Window {
ss.ViewUpdt.Config(nv, etime.AlphaCycle, etime.AlphaCycle)
ss.GUI.ViewUpdt = &ss.ViewUpdt
- nv.Scene().Camera.Pose.Pos.Set(0, 1, 2.75) // more "head on" than default which is more "top down"
- nv.Scene().Camera.LookAt(mat32.Vec3{0, 0, 0}, mat32.Vec3{0, 1, 0})
+ nv.SceneXYZ().Camera.Pose.Pos.Set(0, 1, 2.75) // more "head on" than default which is more "top down"
+ nv.SceneXYZ().Camera.LookAt(mat32.V3(0, 0, 0), mat32.V3(0, 1, 0))
ss.GUI.AddPlots(title, &ss.Logs)
- ss.GUI.AddToolbarItem(egui.ToolbarItem{Label: "Init", Icon: "update",
- Tooltip: "Initialize everything including network weights, and start over. Also applies current params.",
- Active: egui.ActiveStopped,
- Func: func() {
- ss.Init()
- ss.GUI.UpdateWindow()
- },
- })
+ ss.GUI.Body.AddAppBar(func(tb *gi.Toolbar) {
+ ss.GUI.AddToolbarItem(tb, egui.ToolbarItem{Label: "Init", Icon: "update",
+ Tooltip: "Initialize everything including network weights, and start over. Also applies current params.",
+ Active: egui.ActiveStopped,
+ Func: func() {
+ ss.Init()
+ ss.GUI.UpdateWindow()
+ },
+ })
- ss.GUI.AddLooperCtrl(ss.Loops, []etime.Modes{etime.Test})
-
- ////////////////////////////////////////////////
- ss.GUI.ToolBar.AddSeparator("log")
- ss.GUI.AddToolbarItem(egui.ToolbarItem{Label: "Reset RunLog",
- Icon: "reset",
- Tooltip: "Reset the accumulated log of all Runs, which are tagged with the ParamSet used",
- Active: egui.ActiveAlways,
- Func: func() {
- ss.Logs.ResetLog(etime.Train, etime.Run)
- ss.GUI.UpdatePlot(etime.Train, etime.Run)
- },
- })
- ////////////////////////////////////////////////
- ss.GUI.ToolBar.AddSeparator("misc")
- ss.GUI.AddToolbarItem(egui.ToolbarItem{Label: "New Seed",
- Icon: "new",
- Tooltip: "Generate a new initial random seed to get different results. By default, Init re-establishes the same initial seed every time.",
- Active: egui.ActiveAlways,
- Func: func() {
- ss.RndSeeds.NewSeeds()
- },
- })
- ss.GUI.AddToolbarItem(egui.ToolbarItem{Label: "README",
- Icon: "file-markdown",
- Tooltip: "Opens your browser on the README file that contains instructions for how to run this model.",
- Active: egui.ActiveAlways,
- Func: func() {
- gi.OpenURL("https://github.com/emer/axon/blob/master/examples/inhib/README.md")
- },
+ ss.GUI.AddLooperCtrl(tb, ss.Loops, []etime.Modes{etime.Test})
+
+ ////////////////////////////////////////////////
+ gi.NewSeparator(tb)
+ ss.GUI.AddToolbarItem(tb, egui.ToolbarItem{Label: "Reset RunLog",
+ Icon: "reset",
+ Tooltip: "Reset the accumulated log of all Runs, which are tagged with the ParamSet used",
+ Active: egui.ActiveAlways,
+ Func: func() {
+ ss.Logs.ResetLog(etime.Train, etime.Run)
+ ss.GUI.UpdatePlot(etime.Train, etime.Run)
+ },
+ })
+ ////////////////////////////////////////////////
+ gi.NewSeparator(tb)
+ ss.GUI.AddToolbarItem(tb, egui.ToolbarItem{Label: "New Seed",
+ Icon: "new",
+ Tooltip: "Generate a new initial random seed to get different results. By default, Init re-establishes the same initial seed every time.",
+ Active: egui.ActiveAlways,
+ Func: func() {
+ ss.RndSeeds.NewSeeds()
+ },
+ })
+ ss.GUI.AddToolbarItem(tb, egui.ToolbarItem{Label: "README",
+ Icon: "file-markdown",
+ Tooltip: "Opens your browser on the README file that contains instructions for how to run this model.",
+ Active: egui.ActiveAlways,
+ Func: func() {
+ gi.OpenURL("https://github.com/emer/axon/blob/master/examples/inhib/README.md")
+ },
+ })
})
ss.GUI.FinalizeGUI(false)
- return ss.GUI.Win
}
func (ss *Sim) RunGUI() {
ss.Init()
- win := ss.ConfigGui()
- win.StartEventLoop()
+ ss.ConfigGUI()
+ ss.GUI.Body.NewWindow().Run().Wait()
}
func (ss *Sim) RunNoGUI() {
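The GUI migration in inhib.go follows the GoGi v2 window lifecycle: `MakeBody` replaces `MakeWindow`, toolbar items are registered inside a `Body.AddAppBar` callback (with `gi.NewSeparator(tb)` replacing `ToolBar.AddSeparator`), and `NewWindow().Run().Wait()` replaces returning a `*gi.Window` and calling `StartEventLoop`. A condensed sketch of the pattern, using only calls that appear in this diff (a fragment from within a `Sim` method, not a complete program):

```go
// build the body and register toolbar items via the AddAppBar callback
ss.GUI.MakeBody(ss, "demo", "Demo", `hypothetical app description`)
ss.GUI.Body.AddAppBar(func(tb *gi.Toolbar) {
	ss.GUI.AddToolbarItem(tb, egui.ToolbarItem{Label: "Init", Icon: "update",
		Active: egui.ActiveStopped,
		Func: func() {
			ss.Init()
			ss.GUI.UpdateWindow()
		},
	})
	gi.NewSeparator(tb) // replaces ToolBar.AddSeparator
})
ss.GUI.FinalizeGUI(false)
ss.GUI.Body.NewWindow().Run().Wait() // blocks until the window closes
```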
diff --git a/examples/inhib/params.go b/examples/inhib/params.go
index 8c7103991..b2cef968c 100644
--- a/examples/inhib/params.go
+++ b/examples/inhib/params.go
@@ -5,8 +5,8 @@
package main
import (
- "github.com/emer/emergent/netparams"
- "github.com/emer/emergent/params"
+ "github.com/emer/emergent/v2/netparams"
+ "github.com/emer/emergent/v2/params"
)
// ParamSets is the default set of parameters -- Base is always applied, and others can be optionally
diff --git a/examples/inhib/params_good/params_all.txt b/examples/inhib/params_good/params_all.txt
index e1f13c425..bf6ef3c74 100644
--- a/examples/inhib/params_good/params_all.txt
+++ b/examples/inhib/params_good/params_all.txt
@@ -11,13 +11,13 @@ Act: {
Clamp: { IsInput: true IsTarget: false Ge: 0.6 Add: false ErrThr: 0.5 }
Noise: { On: true GeHz: 100 Ge: 0.002 GiHz: 200 Gi: 0.002 }
VmRange: { Min: 0.1 Max: 1 }
- Mahp: { Gbar: 0.02 Voff: -30 Vslope: 9 TauMax: 1000 Tadj: 3.2093637 DtMax: 0.001 }
+ Mahp: { Gbar: 0.02 Voff: -30 Vslope: 9 TauMax: 1000 Tadj: 3.209364 DtMax: 0.001 }
Sahp: { Gbar: 0.05 CaTau: 5 Off: 0.8 Slope: 0.02 TauMax: 1 CaDt: 0.2 DtMax: 1 }
KNa: { On: true TrialSlow: false Med: { On: true Rise: 0.02 Max: 0.2 Tau: 200 Dt: 0.005 } Slow: { On: true Rise: 0.001 Max: 0.2 Tau: 1000 Dt: 0.001
} }
NMDA: { Gbar: 0.006 Tau: 100 ITau: 1 MgC: 1.4 Voff: 0 }
MaintNMDA: { Gbar: 0.007 Tau: 200 ITau: 1 MgC: 1.4 Voff: 0 }
- GabaB: { Gbar: 0.015 RiseTau: 45 DecayTau: 50 Gbase: 0.2 GiSpike: 10 MaxTime: 47.41225 TauFact: 2.5811758 RiseDt: 0.022222223 DecayDt: 0.02 }
+ GabaB: { Gbar: 0.015 RiseTau: 45 DecayTau: 50 Gbase: 0.2 GiSpike: 10 MaxTime: 47.41225 TauFact: 2.5811756 RiseDt: 0.022222223 DecayDt: 0.02 }
VGCC: { Gbar: 0.02 Ca: 25 }
AK: { Gbar: 0.1 Hf: 0.076 Mf: 0.075 Voff: 2 Vmax: -37 }
SKCa: { Gbar: 0 C50: 0.5 ActTau: 15 DeTau: 30 KCaR: 0.8 CaRDecayTau: 150 CaInThr: 0.01 CaInTau: 50 }
@@ -37,7 +37,7 @@ Learn: {
TrgAvgAct: { On: true GiBaseInit: 0 ErrLRate: 0.02 SynScaleRate: 0.005 SubMean: 0 Permute: true Pool: true TrgRange: { Min: 0.5 Max: 2
} }
RLRate: { On: true SigmoidMin: 0.05 Diff: true SpkThr: 0.1 DiffThr: 0.02 Min: 0.001 }
- NeuroMod: { DAMod: 0 Valence: 0 DAModGain: 0.5 DALRateSign: false DALRateMod: 0 AChLRateMod: 0 AChDisInhib: 0 BurstGain: 1 DipGain: 1 }
+ NeuroMod: { DAMod: NoDAMod Valence: Positive DAModGain: 0.5 DALRateSign: false DALRateMod: 0 AChLRateMod: 0 AChDisInhib: 0 BurstGain: 1 DipGain: 1 }
}
/////////////////////////////////////////////////
Layer: Layer1
@@ -52,13 +52,13 @@ Act: {
Clamp: { IsInput: false IsTarget: false Ge: 0.8 Add: false ErrThr: 0.5 }
Noise: { On: false GeHz: 100 Ge: 0.002 GiHz: 200 Gi: 0.002 }
VmRange: { Min: 0.1 Max: 1 }
- Mahp: { Gbar: 0.02 Voff: -30 Vslope: 9 TauMax: 1000 Tadj: 3.2093637 DtMax: 0.001 }
+ Mahp: { Gbar: 0.02 Voff: -30 Vslope: 9 TauMax: 1000 Tadj: 3.209364 DtMax: 0.001 }
Sahp: { Gbar: 0.05 CaTau: 5 Off: 0.8 Slope: 0.02 TauMax: 1 CaDt: 0.2 DtMax: 1 }
KNa: { On: true TrialSlow: false Med: { On: true Rise: 0.02 Max: 0.2 Tau: 200 Dt: 0.005 } Slow: { On: true Rise: 0.001 Max: 0.2 Tau: 1000 Dt: 0.001
} }
NMDA: { Gbar: 0.006 Tau: 100 ITau: 1 MgC: 1.4 Voff: 0 }
MaintNMDA: { Gbar: 0.007 Tau: 200 ITau: 1 MgC: 1.4 Voff: 0 }
- GabaB: { Gbar: 0.015 RiseTau: 45 DecayTau: 50 Gbase: 0.2 GiSpike: 10 MaxTime: 47.41225 TauFact: 2.5811758 RiseDt: 0.022222223 DecayDt: 0.02 }
+ GabaB: { Gbar: 0.015 RiseTau: 45 DecayTau: 50 Gbase: 0.2 GiSpike: 10 MaxTime: 47.41225 TauFact: 2.5811756 RiseDt: 0.022222223 DecayDt: 0.02 }
VGCC: { Gbar: 0.02 Ca: 25 }
AK: { Gbar: 0.1 Hf: 0.076 Mf: 0.075 Voff: 2 Vmax: -37 }
SKCa: { Gbar: 0 C50: 0.5 ActTau: 15 DeTau: 30 KCaR: 0.8 CaRDecayTau: 150 CaInThr: 0.01 CaInTau: 50 }
@@ -78,7 +78,7 @@ Learn: {
TrgAvgAct: { On: true GiBaseInit: 0 ErrLRate: 0.02 SynScaleRate: 0.005 SubMean: 1 Permute: true Pool: true TrgRange: { Min: 0.5 Max: 2
} }
RLRate: { On: true SigmoidMin: 0.05 Diff: true SpkThr: 0.1 DiffThr: 0.02 Min: 0.001 }
- NeuroMod: { DAMod: 0 Valence: 0 DAModGain: 0.5 DALRateSign: false DALRateMod: 0 AChLRateMod: 0 AChDisInhib: 0 BurstGain: 1 DipGain: 1 }
+ NeuroMod: { DAMod: NoDAMod Valence: Positive DAModGain: 0.5 DALRateSign: false DALRateMod: 0 AChLRateMod: 0 AChDisInhib: 0 BurstGain: 1 DipGain: 1 }
}
Burst: {
ThrRel: 0.1 ThrAbs: 0.1
@@ -156,13 +156,13 @@ Act: {
Clamp: { IsInput: false IsTarget: false Ge: 0.8 Add: false ErrThr: 0.5 }
Noise: { On: false GeHz: 100 Ge: 0.01 GiHz: 200 Gi: 0 }
VmRange: { Min: 0.1 Max: 1 }
- Mahp: { Gbar: 0.02 Voff: -30 Vslope: 9 TauMax: 1000 Tadj: 3.2093637 DtMax: 0.001 }
+ Mahp: { Gbar: 0.02 Voff: -30 Vslope: 9 TauMax: 1000 Tadj: 3.209364 DtMax: 0.001 }
Sahp: { Gbar: 0.05 CaTau: 5 Off: 0.8 Slope: 0.02 TauMax: 1 CaDt: 0.2 DtMax: 1 }
KNa: { On: false TrialSlow: false Med: { On: true Rise: 0.02 Max: 0.2 Tau: 200 Dt: 0.005 } Slow: { On: true Rise: 0.001 Max: 0.2 Tau: 1000 Dt: 0.001
} }
NMDA: { Gbar: 0 Tau: 100 ITau: 1 MgC: 1.4 Voff: 0 }
MaintNMDA: { Gbar: 0.007 Tau: 200 ITau: 1 MgC: 1.4 Voff: 0 }
- GabaB: { Gbar: 0 RiseTau: 45 DecayTau: 50 Gbase: 0.2 GiSpike: 10 MaxTime: 47.41225 TauFact: 2.5811758 RiseDt: 0.022222223 DecayDt: 0.02 }
+ GabaB: { Gbar: 0 RiseTau: 45 DecayTau: 50 Gbase: 0.2 GiSpike: 10 MaxTime: 47.41225 TauFact: 2.5811756 RiseDt: 0.022222223 DecayDt: 0.02 }
VGCC: { Gbar: 0.02 Ca: 25 }
AK: { Gbar: 0.1 Hf: 0.076 Mf: 0.075 Voff: 2 Vmax: -37 }
SKCa: { Gbar: 0 C50: 0.5 ActTau: 15 DeTau: 30 KCaR: 0.8 CaRDecayTau: 150 CaInThr: 0.01 CaInTau: 50 }
@@ -182,7 +182,7 @@ Learn: {
TrgAvgAct: { On: true GiBaseInit: 0 ErrLRate: 0.02 SynScaleRate: 0.005 SubMean: 1 Permute: true Pool: true TrgRange: { Min: 0.5 Max: 2
} }
RLRate: { On: true SigmoidMin: 0.05 Diff: true SpkThr: 0.1 DiffThr: 0.02 Min: 0.001 }
- NeuroMod: { DAMod: 0 Valence: 0 DAModGain: 0.5 DALRateSign: false DALRateMod: 0 AChLRateMod: 0 AChDisInhib: 0 BurstGain: 1 DipGain: 1 }
+ NeuroMod: { DAMod: NoDAMod Valence: Positive DAModGain: 0.5 DALRateSign: false DALRateMod: 0 AChLRateMod: 0 AChDisInhib: 0 BurstGain: 1 DipGain: 1 }
}
Burst: {
ThrRel: 0.1 ThrAbs: 0.1
@@ -280,13 +280,13 @@ Act: {
Clamp: { IsInput: false IsTarget: false Ge: 0.8 Add: false ErrThr: 0.5 }
Noise: { On: false GeHz: 100 Ge: 0.002 GiHz: 200 Gi: 0.002 }
VmRange: { Min: 0.1 Max: 1 }
- Mahp: { Gbar: 0.02 Voff: -30 Vslope: 9 TauMax: 1000 Tadj: 3.2093637 DtMax: 0.001 }
+ Mahp: { Gbar: 0.02 Voff: -30 Vslope: 9 TauMax: 1000 Tadj: 3.209364 DtMax: 0.001 }
Sahp: { Gbar: 0.05 CaTau: 5 Off: 0.8 Slope: 0.02 TauMax: 1 CaDt: 0.2 DtMax: 1 }
KNa: { On: true TrialSlow: false Med: { On: true Rise: 0.02 Max: 0.2 Tau: 200 Dt: 0.005 } Slow: { On: true Rise: 0.001 Max: 0.2 Tau: 1000 Dt: 0.001
} }
NMDA: { Gbar: 0.006 Tau: 100 ITau: 1 MgC: 1.4 Voff: 0 }
MaintNMDA: { Gbar: 0.007 Tau: 200 ITau: 1 MgC: 1.4 Voff: 0 }
- GabaB: { Gbar: 0.015 RiseTau: 45 DecayTau: 50 Gbase: 0.2 GiSpike: 10 MaxTime: 47.41225 TauFact: 2.5811758 RiseDt: 0.022222223 DecayDt: 0.02 }
+ GabaB: { Gbar: 0.015 RiseTau: 45 DecayTau: 50 Gbase: 0.2 GiSpike: 10 MaxTime: 47.41225 TauFact: 2.5811756 RiseDt: 0.022222223 DecayDt: 0.02 }
VGCC: { Gbar: 0.02 Ca: 25 }
AK: { Gbar: 0.1 Hf: 0.076 Mf: 0.075 Voff: 2 Vmax: -37 }
SKCa: { Gbar: 0 C50: 0.5 ActTau: 15 DeTau: 30 KCaR: 0.8 CaRDecayTau: 150 CaInThr: 0.01 CaInTau: 50 }
@@ -306,7 +306,7 @@ Learn: {
TrgAvgAct: { On: true GiBaseInit: 0 ErrLRate: 0.02 SynScaleRate: 0.005 SubMean: 1 Permute: true Pool: true TrgRange: { Min: 0.5 Max: 2
} }
RLRate: { On: true SigmoidMin: 0.05 Diff: true SpkThr: 0.1 DiffThr: 0.02 Min: 0.001 }
- NeuroMod: { DAMod: 0 Valence: 0 DAModGain: 0.5 DALRateSign: false DALRateMod: 0 AChLRateMod: 0 AChDisInhib: 0 BurstGain: 1 DipGain: 1 }
+ NeuroMod: { DAMod: NoDAMod Valence: Positive DAModGain: 0.5 DALRateSign: false DALRateMod: 0 AChLRateMod: 0 AChDisInhib: 0 BurstGain: 1 DipGain: 1 }
}
Burst: {
ThrRel: 0.1 ThrAbs: 0.1
@@ -384,13 +384,13 @@ Act: {
Clamp: { IsInput: false IsTarget: false Ge: 0.8 Add: false ErrThr: 0.5 }
Noise: { On: false GeHz: 100 Ge: 0.01 GiHz: 200 Gi: 0 }
VmRange: { Min: 0.1 Max: 1 }
- Mahp: { Gbar: 0.02 Voff: -30 Vslope: 9 TauMax: 1000 Tadj: 3.2093637 DtMax: 0.001 }
+ Mahp: { Gbar: 0.02 Voff: -30 Vslope: 9 TauMax: 1000 Tadj: 3.209364 DtMax: 0.001 }
Sahp: { Gbar: 0.05 CaTau: 5 Off: 0.8 Slope: 0.02 TauMax: 1 CaDt: 0.2 DtMax: 1 }
KNa: { On: false TrialSlow: false Med: { On: true Rise: 0.02 Max: 0.2 Tau: 200 Dt: 0.005 } Slow: { On: true Rise: 0.001 Max: 0.2 Tau: 1000 Dt: 0.001
} }
NMDA: { Gbar: 0 Tau: 100 ITau: 1 MgC: 1.4 Voff: 0 }
MaintNMDA: { Gbar: 0.007 Tau: 200 ITau: 1 MgC: 1.4 Voff: 0 }
- GabaB: { Gbar: 0 RiseTau: 45 DecayTau: 50 Gbase: 0.2 GiSpike: 10 MaxTime: 47.41225 TauFact: 2.5811758 RiseDt: 0.022222223 DecayDt: 0.02 }
+ GabaB: { Gbar: 0 RiseTau: 45 DecayTau: 50 Gbase: 0.2 GiSpike: 10 MaxTime: 47.41225 TauFact: 2.5811756 RiseDt: 0.022222223 DecayDt: 0.02 }
VGCC: { Gbar: 0.02 Ca: 25 }
AK: { Gbar: 0.1 Hf: 0.076 Mf: 0.075 Voff: 2 Vmax: -37 }
SKCa: { Gbar: 0 C50: 0.5 ActTau: 15 DeTau: 30 KCaR: 0.8 CaRDecayTau: 150 CaInThr: 0.01 CaInTau: 50 }
@@ -410,7 +410,7 @@ Learn: {
TrgAvgAct: { On: true GiBaseInit: 0 ErrLRate: 0.02 SynScaleRate: 0.005 SubMean: 1 Permute: true Pool: true TrgRange: { Min: 0.5 Max: 2
} }
RLRate: { On: true SigmoidMin: 0.05 Diff: true SpkThr: 0.1 DiffThr: 0.02 Min: 0.001 }
- NeuroMod: { DAMod: 0 Valence: 0 DAModGain: 0.5 DALRateSign: false DALRateMod: 0 AChLRateMod: 0 AChDisInhib: 0 BurstGain: 1 DipGain: 1 }
+ NeuroMod: { DAMod: NoDAMod Valence: Positive DAModGain: 0.5 DALRateSign: false DALRateMod: 0 AChLRateMod: 0 AChDisInhib: 0 BurstGain: 1 DipGain: 1 }
}
Burst: {
ThrRel: 0.1 ThrAbs: 0.1
diff --git a/examples/inhib/params_good/params_layers.txt b/examples/inhib/params_good/params_layers.txt
index 325180e24..d1a011a21 100644
--- a/examples/inhib/params_good/params_layers.txt
+++ b/examples/inhib/params_good/params_layers.txt
@@ -1,14 +1,14 @@
Layer0 Nominal: 0.10 Params: Layer: 0.1
- Layer.Gi: 1.00 Params: Layer: 1.0 | Base:Layer: 1.0
+ Layer.Gi: 1.00 Params: Layer: 1.0 | Layer: 1.0
Layer1 Nominal: 0.10 Params: Layer: 0.1
- Layer.Gi: 1.00 Params: FSFFFB:Layer: 1.0 | Layer: 1.0
+ Layer.Gi: 1.00 Params: Layer: 1.0 | Layer: 1.0
Inhib1 Nominal: 0.50 Params: .InhibLay: 0.5 | Layer: 0.1
- Layer.Gi: 1.00 Params: Layer: 1.0 | Base:Layer: 1.0
+ Layer.Gi: 1.00 Params: FSFFFB:Layer: 1.0 | Layer: 1.0
Layer2 Nominal: 0.10 Params: Layer: 0.1
- Layer.Gi: 1.00 Params: Layer: 1.0 | Layer: 1.0
+ Layer.Gi: 1.00 Params: Layer: 1.0 | Base:Layer: 1.0
Inhib2 Nominal: 0.50 Params: .InhibLay: 0.5 | Layer: 0.1
Layer.Gi: 1.00 Params: Layer: 1.0 | Layer: 1.0
diff --git a/examples/kinaseq/kinaseq.go b/examples/kinaseq/kinaseq.go
index 99af23e91..770301b5a 100644
--- a/examples/kinaseq/kinaseq.go
+++ b/examples/kinaseq/kinaseq.go
@@ -12,16 +12,16 @@ import (
"math/rand"
"github.com/emer/axon/axon"
- "github.com/emer/emergent/emer"
- "github.com/emer/etable/agg"
- "github.com/emer/etable/eplot"
- "github.com/emer/etable/etable"
- _ "github.com/emer/etable/etview" // include to get gui views
- "github.com/goki/gi/gi"
- "github.com/goki/gi/gimain"
- "github.com/goki/gi/giv"
+ "github.com/emer/emergent/v2/emer"
"github.com/goki/ki/ki"
- "github.com/goki/mat32"
+ "goki.dev/etable/v2/agg"
+ "goki.dev/etable/v2/eplot"
+ "goki.dev/etable/v2/etable"
+ _ "goki.dev/etable/v2/etview" // include to get gui views
+ "goki.dev/gi/v2/gi"
+ "goki.dev/gi/v2/gimain"
+ "goki.dev/gi/v2/giv"
+ "goki.dev/mat32/v2"
)
func main() {
@@ -32,7 +32,7 @@ func main() {
}
func guirun() {
- win := TheSim.ConfigGui()
+ win := TheSim.ConfigGUI()
win.StartEventLoop()
}
@@ -42,95 +42,95 @@ const LogPrec = 4
// Sim holds the params, table, etc
type Sim struct {
- // [view: no-inline] the network -- click to view / edit parameters for layers, prjns, etc
- Net *axon.Network `view:"no-inline" desc:"the network -- click to view / edit parameters for layers, prjns, etc"`
+ // the network -- click to view / edit parameters for layers, prjns, etc
+ Net *axon.Network `view:"no-inline"`
- // [view: no-inline] the sending neuron
- SendNeur *axon.Neuron `view:"no-inline" desc:"the sending neuron"`
+ // the sending neuron
+ SendNeur *axon.Neuron `view:"no-inline"`
- // [view: no-inline] the receiving neuron
- RecvNeur *axon.Neuron `view:"no-inline" desc:"the receiving neuron"`
+ // the receiving neuron
+ RecvNeur *axon.Neuron `view:"no-inline"`
- // [view: no-inline] prjn-level parameters -- for intializing synapse -- other params not used
- Prjn *axon.Prjn `view:"no-inline" desc:"prjn-level parameters -- for intializing synapse -- other params not used"`
+ // prjn-level parameters -- for initializing synapse -- other params not used
+ Prjn *axon.Prjn `view:"no-inline"`
- // [view: no-inline] extra neuron state
- NeuronEx NeuronEx `view:"no-inline" desc:"extra neuron state"`
+ // extra neuron state
+ NeuronEx NeuronEx `view:"no-inline"`
- // [view: inline] all parameter management
- Params emer.Params `view:"inline" desc:"all parameter management"`
+ // all parameter management
+ Params emer.Params `view:"inline"`
// multiplier on product factor to equate to SynC
- PGain float32 `desc:"multiplier on product factor to equate to SynC"`
+ PGain float32
// spike multiplier for display purposes
- SpikeDisp float32 `desc:"spike multiplier for display purposes"`
+ SpikeDisp float32
// use current Ge clamping for recv neuron -- otherwise spikes driven externally
- RGeClamp bool `desc:"use current Ge clamping for recv neuron -- otherwise spikes driven externally"`
+ RGeClamp bool
// gain multiplier for RGe clamp
- RGeGain float32 `desc:"gain multiplier for RGe clamp"`
+ RGeGain float32
// baseline recv Ge level
- RGeBase float32 `desc:"baseline recv Ge level"`
+ RGeBase float32
// baseline recv Gi level
- RGiBase float32 `desc:"baseline recv Gi level"`
+ RGiBase float32
// number of repetitions -- if > 1 then only final @ end of Dur shown
- NTrials int `desc:"number of repetitions -- if > 1 then only final @ end of Dur shown"`
+ NTrials int
// number of msec in minus phase
- MinusMsec int `desc:"number of msec in minus phase"`
+ MinusMsec int
// number of msec in plus phase
- PlusMsec int `desc:"number of msec in plus phase"`
+ PlusMsec int
// quiet space between spiking
- ISIMsec int `desc:"quiet space between spiking"`
+ ISIMsec int
- // [view: -] total trial msec: minus, plus isi
- TrialMsec int `view:"-" desc:"total trial msec: minus, plus isi"`
+ // total trial msec: minus, plus isi
+ TrialMsec int `view:"-"`
// minus phase firing frequency
- MinusHz int `desc:"minus phase firing frequency"`
+ MinusHz int
// plus phase firing frequency
- PlusHz int `desc:"plus phase firing frequency"`
+ PlusHz int
// additive difference in sending firing frequency relative to recv (recv has basic minus, plus)
- SendDiffHz int `desc:"additive difference in sending firing frequency relative to recv (recv has basic minus, plus)"`
+ SendDiffHz int
- // [view: no-inline] synapse state values, NST_ in log
- SynNeurTheta axon.Synapse `view:"no-inline" desc:"synapse state values, NST_ in log"`
+ // synapse state values, NST_ in log
+ SynNeurTheta axon.Synapse `view:"no-inline"`
- // [view: no-inline] synapse state values, SST_ in log
- SynSpkTheta axon.Synapse `view:"no-inline" desc:"synapse state values, SST_ in log"`
+ // synapse state values, SST_ in log
+ SynSpkTheta axon.Synapse `view:"no-inline"`
- // [view: no-inline] synapse state values, SSC_ in log
- SynSpkCont axon.Synapse `view:"no-inline" desc:"synapse state values, SSC_ in log"`
+ // synapse state values, SSC_ in log
+ SynSpkCont axon.Synapse `view:"no-inline"`
- // [view: no-inline] synapse state values, SNC_ in log
- SynNMDACont axon.Synapse `view:"no-inline" desc:"synapse state values, SNC_ in log"`
+ // synapse state values, SNC_ in log
+ SynNMDACont axon.Synapse `view:"no-inline"`
// axon time recording
- Context axon.Context `desc:"axon time recording"`
+ Context axon.Context
- // [view: no-inline] all logs
- Logs map[string]*etable.Table `view:"no-inline" desc:"all logs"`
+ // all logs
+ Logs map[string]*etable.Table `view:"no-inline"`
- // [view: -] all plots
- Plots map[string]*eplot.Plot2D `view:"-" desc:"all plots"`
+ // all plots
+ Plots map[string]*eplot.Plot2D `view:"-"`
- // [view: -] main GUI window
- Win *gi.Window `view:"-" desc:"main GUI window"`
+ // main GUI window
+ Win *gi.Window `view:"-"`
- // [view: -] the master toolbar
- ToolBar *gi.ToolBar `view:"-" desc:"the master toolbar"`
+ // the master toolbar
+ ToolBar *gi.ToolBar `view:"-"`
- // [view: -] stop button
- StopNow bool `view:"-" desc:"stop button"`
+ // stop button
+ StopNow bool `view:"-"`
}
// TheSim is the overall state for this simulation
@@ -339,8 +339,8 @@ func (ss *Sim) TrialImpl(minusHz, plusHz int) {
}
}
-// ConfigGui configures the GoGi gui interface for this simulation,
-func (ss *Sim) ConfigGui() *gi.Window {
+// ConfigGUI configures the GoGi gui interface for this simulation,
+func (ss *Sim) ConfigGUI() *gi.Window {
width := 1600
height := 1200
diff --git a/examples/kinaseq/neuron.go b/examples/kinaseq/neuron.go
index c46bc5138..04df2f73f 100644
--- a/examples/kinaseq/neuron.go
+++ b/examples/kinaseq/neuron.go
@@ -12,12 +12,12 @@ import (
"strings"
"github.com/emer/axon/axon"
- "github.com/emer/emergent/emer"
- "github.com/emer/emergent/params"
- "github.com/emer/emergent/prjn"
- "github.com/emer/etable/eplot"
- "github.com/emer/etable/etable"
- "github.com/emer/etable/etensor"
+ "github.com/emer/emergent/v2/emer"
+ "github.com/emer/emergent/v2/params"
+ "github.com/emer/emergent/v2/prjn"
+ "goki.dev/etable/v2/eplot"
+ "goki.dev/etable/v2/etable"
+ "goki.dev/etable/v2/etensor"
)
// ParamSets for basic parameters
@@ -81,22 +81,22 @@ var ParamSets = params.Sets{
type NeuronEx struct {
// time of last sending spike
- SCaUpT int `desc:"time of last sending spike"`
+ SCaUpT int
// time of last recv spike
- RCaUpT int `desc:"time of last recv spike"`
+ RCaUpT int
// sending poisson firing probability accumulator
- Sp float32 `desc:"sending poisson firing probability accumulator"`
+ Sp float32
// recv poisson firing probability accumulator
- Rp float32 `desc:"recv poisson firing probability accumulator"`
+ Rp float32
// NMDA mg-based blocking conductance
- NMDAGmg float32 `desc:"NMDA mg-based blocking conductance"`
+ NMDAGmg float32
// when 0, it is time to learn according to theta cycle, otherwise increments up unless still -1 from init
- LearnNow float32 `desc:"when 0, it is time to learn according to theta cycle, otherwise increments up unless still -1 from init"`
+ LearnNow float32
}
func (nex *NeuronEx) Init() {
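`Sp` and `Rp` above are accumulators for Poisson spike generation. A generic, self-contained sketch of one common scheme: load the accumulator with an exponentially distributed inter-spike interval, decrement it by the expected spike count each timestep, and fire when it crosses zero. This illustrates the idea only, not axon's implementation; the function name is hypothetical:

```go
package main

import (
	"math"
	"math/rand"
)

// poissonSpike advances a probability accumulator p (like Sp / Rp above)
// by one 1 msec timestep for a neuron firing at hz, returning true on a
// spike. The accumulator holds an Exp(1)-distributed inter-spike interval
// in units of expected intervals, redrawn after each spike.
func poissonSpike(p *float32, hz float32, rnd *rand.Rand) bool {
	if *p <= 0 { // need a new interval (init, or just spiked)
		*p = float32(-math.Log(1 - rnd.Float64()))
	}
	*p -= hz * 0.001 // expected number of spikes in 1 msec
	return *p <= 0
}
```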
diff --git a/examples/mpi/Makefile b/examples/mpi/Makefile
index 234bf88c0..6e568f284 100644
--- a/examples/mpi/Makefile
+++ b/examples/mpi/Makefile
@@ -6,30 +6,30 @@ all:
# CPU
nompi_cpu_nd1:
- ./mpi -nogui -ndata=1 -threads=4 -tag=nompi_cpu_nd1 &
+ ./mpi -nogui -ndata=1 -nthreads=4 -tag=nompi_cpu_nd1 &
nompi_cpu_nd4:
- ./mpi -nogui -ndata=4 -threads=4 -tag=nompi_cpu_nd4 &
+ ./mpi -nogui -ndata=4 -nthreads=4 -tag=nompi_cpu_nd4 &
nompi_cpu_nd12:
- ./mpi -nogui -ndata=12 -threads=4 -tag=nompi_cpu_nd12 &
+ ./mpi -nogui -ndata=12 -nthreads=4 -tag=nompi_cpu_nd12 &
mpi_cpu2:
- mpirun -np 2 ./mpi -nogui -mpi -ndata=4 -threads=4 -tag=mpi2_cpu_nd4 &
+ mpirun -np 2 ./mpi -nogui -mpi -ndata=4 -nthreads=4 -tag=mpi2_cpu_nd4 &
mpi_cpu4:
- mpirun -np 4 ./mpi -nogui -mpi -ndata=3 -threads=2 -tag=mpi4_cpu_nd3 &
+ mpirun -np 4 ./mpi -nogui -mpi -ndata=3 -nthreads=2 -tag=mpi4_cpu_nd3 &
# this is the fastest config on macbookpro 8 cores
mpi_cpu8_th1:
- mpirun -np 8 ./mpi -nogui -mpi -ndata=1 -threads=1 -tag=mpi8_cpu_nd1_th1 &
+ mpirun -np 8 ./mpi -nogui -mpi -ndata=1 -nthreads=1 -tag=mpi8_cpu_nd1_th1 &
# too many threads is deadly!
mpi_cpu8_th2:
- mpirun -np 8 ./mpi -nogui -mpi -ndata=1 -threads=2 -tag=mpi8_cpu_nd1_th2 &
+ mpirun -np 8 ./mpi -nogui -mpi -ndata=1 -nthreads=2 -tag=mpi8_cpu_nd1_th2 &
mpi_cpu8_th4:
- mpirun -np 8 ./mpi -nogui -mpi -ndata=1 -threads=4 -tag=mpi8_cpu_nd1_th4 &
+ mpirun -np 8 ./mpi -nogui -mpi -ndata=1 -nthreads=4 -tag=mpi8_cpu_nd1_th4 &
# GPU
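These Makefile targets track the flag rename from `-threads` to `-nthreads`, matching the `NThreads` field in `RunConfig` (registered in gtigen.go below); econfig derives command-line flags from the config field names. A hedged sketch of the load path, assuming the `econfig.Config` entry point from the imports above, with return values and error handling elided:

```go
cfg := &Config{}
// fill cfg from config.toml (if present) and from command-line flags
// derived from field names, so -nthreads sets cfg.Run.NThreads;
// per the field doc, 0 means "use the default thread count"
econfig.Config(cfg, "config.toml")
fmt.Println("threads:", cfg.Run.NThreads)
```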
diff --git a/examples/mpi/gtigen.go b/examples/mpi/gtigen.go
new file mode 100644
index 000000000..61ddfbe68
--- /dev/null
+++ b/examples/mpi/gtigen.go
@@ -0,0 +1,115 @@
+// Code generated by "goki generate -add-types"; DO NOT EDIT.
+
+package main
+
+import (
+ "goki.dev/gti"
+ "goki.dev/ordmap"
+)
+
+var _ = gti.AddType(&gti.Type{
+ Name: "main.ParamConfig",
+ ShortName: "main.ParamConfig",
+ IDName: "param-config",
+ Doc: "ParamConfig has config parameters related to sim params",
+ Directives: gti.Directives{},
+ Fields: ordmap.Make([]ordmap.KeyVal[string, *gti.Field]{
+ {"Network", >i.Field{Name: "Network", Type: "map[string]any", LocalType: "map[string]any", Doc: "network parameters", Directives: gti.Directives{}, Tag: ""}},
+ {"Hidden1Size", >i.Field{Name: "Hidden1Size", Type: "github.com/emer/emergent/v2/evec.Vec2i", LocalType: "evec.Vec2i", Doc: "size of hidden layer -- can use emer.LaySize for 4D layers", Directives: gti.Directives{}, Tag: "def:\"{'X':10,'Y':10}\" nest:\"+\""}},
+ {"Hidden2Size", >i.Field{Name: "Hidden2Size", Type: "github.com/emer/emergent/v2/evec.Vec2i", LocalType: "evec.Vec2i", Doc: "size of hidden layer -- can use emer.LaySize for 4D layers", Directives: gti.Directives{}, Tag: "def:\"{'X':10,'Y':10}\" nest:\"+\""}},
+ {"Sheet", >i.Field{Name: "Sheet", Type: "string", LocalType: "string", Doc: "Extra Param Sheet name(s) to use (space separated if multiple) -- must be valid name as listed in compiled-in params or loaded params", Directives: gti.Directives{}, Tag: ""}},
+ {"Tag", >i.Field{Name: "Tag", Type: "string", LocalType: "string", Doc: "extra tag to add to file names and logs saved from this run", Directives: gti.Directives{}, Tag: ""}},
+ {"Note", >i.Field{Name: "Note", Type: "string", LocalType: "string", Doc: "user note -- describe the run params etc -- like a git commit message for the run", Directives: gti.Directives{}, Tag: ""}},
+ {"File", >i.Field{Name: "File", Type: "string", LocalType: "string", Doc: "Name of the JSON file to input saved parameters from.", Directives: gti.Directives{}, Tag: "nest:\"+\""}},
+ {"SaveAll", >i.Field{Name: "SaveAll", Type: "bool", LocalType: "bool", Doc: "Save a snapshot of all current param and config settings in a directory named params_ (or _good if Good is true), then quit -- useful for comparing to later changes and seeing multiple views of current params", Directives: gti.Directives{}, Tag: "nest:\"+\""}},
+ {"Good", >i.Field{Name: "Good", Type: "bool", LocalType: "bool", Doc: "for SaveAll, save to params_good for a known good params state. This can be done prior to making a new release after all tests are passing -- add results to git to provide a full diff record of all params over time.", Directives: gti.Directives{}, Tag: "nest:\"+\""}},
+ }),
+ Embeds: ordmap.Make([]ordmap.KeyVal[string, *gti.Field]{}),
+ Methods: ordmap.Make([]ordmap.KeyVal[string, *gti.Method]{}),
+})
+
+var _ = gti.AddType(&gti.Type{
+ Name: "main.RunConfig",
+ ShortName: "main.RunConfig",
+ IDName: "run-config",
+ Doc: "RunConfig has config parameters related to running the sim",
+ Directives: gti.Directives{},
+ Fields: ordmap.Make([]ordmap.KeyVal[string, *gti.Field]{
+ {"MPI", >i.Field{Name: "MPI", Type: "bool", LocalType: "bool", Doc: "use MPI message passing interface for data parallel computation between nodes running identical copies of the same sim, sharing DWt changes", Directives: gti.Directives{}, Tag: ""}},
+ {"GPU", >i.Field{Name: "GPU", Type: "bool", LocalType: "bool", Doc: "use the GPU for computation -- generally faster even for small models if NData ~16", Directives: gti.Directives{}, Tag: "def:\"false\""}},
+ {"NData", >i.Field{Name: "NData", Type: "int", LocalType: "int", Doc: "number of data-parallel items to process in parallel per trial -- works (and is significantly faster) for both CPU and GPU. Results in an effective mini-batch of learning.", Directives: gti.Directives{}, Tag: "def:\"16\" min:\"1\""}},
+ {"NThreads", >i.Field{Name: "NThreads", Type: "int", LocalType: "int", Doc: "number of parallel threads for CPU computation -- 0 = use default", Directives: gti.Directives{}, Tag: "def:\"0\""}},
+ {"Run", >i.Field{Name: "Run", Type: "int", LocalType: "int", Doc: "starting run number -- determines the random seed -- runs counts from there -- can do all runs in parallel by launching separate jobs with each run, runs = 1", Directives: gti.Directives{}, Tag: "def:\"0\""}},
+ {"NRuns", >i.Field{Name: "NRuns", Type: "int", LocalType: "int", Doc: "total number of runs to do when running Train", Directives: gti.Directives{}, Tag: "def:\"5\" min:\"1\""}},
+ {"NEpochs", >i.Field{Name: "NEpochs", Type: "int", LocalType: "int", Doc: "total number of epochs per run", Directives: gti.Directives{}, Tag: "def:\"100\""}},
+ {"NZero", >i.Field{Name: "NZero", Type: "int", LocalType: "int", Doc: "stop run after this number of perfect, zero-error epochs", Directives: gti.Directives{}, Tag: "def:\"2\""}},
+ {"NTrials", >i.Field{Name: "NTrials", Type: "int", LocalType: "int", Doc: "total number of trials per epoch. Should be an even multiple of NData.", Directives: gti.Directives{}, Tag: "def:\"32\""}},
+ {"TestInterval", >i.Field{Name: "TestInterval", Type: "int", LocalType: "int", Doc: "how often to run through all the test patterns, in terms of training epochs -- can use 0 or -1 for no testing", Directives: gti.Directives{}, Tag: "def:\"5\""}},
+ {"PCAInterval", >i.Field{Name: "PCAInterval", Type: "int", LocalType: "int", Doc: "how frequently (in epochs) to compute PCA on hidden representations to measure variance?", Directives: gti.Directives{}, Tag: "def:\"5\""}},
+ }),
+ Embeds: ordmap.Make([]ordmap.KeyVal[string, *gti.Field]{}),
+ Methods: ordmap.Make([]ordmap.KeyVal[string, *gti.Method]{}),
+})
+
+var _ = gti.AddType(&gti.Type{
+ Name: "main.LogConfig",
+ ShortName: "main.LogConfig",
+ IDName: "log-config",
+ Doc: "LogConfig has config parameters related to logging data",
+ Directives: gti.Directives{},
+ Fields: ordmap.Make([]ordmap.KeyVal[string, *gti.Field]{
+ {"SaveWts", >i.Field{Name: "SaveWts", Type: "bool", LocalType: "bool", Doc: "if true, save final weights after each run", Directives: gti.Directives{}, Tag: ""}},
+ {"Epoch", >i.Field{Name: "Epoch", Type: "bool", LocalType: "bool", Doc: "if true, save train epoch log to file, as .epc.tsv typically", Directives: gti.Directives{}, Tag: "def:\"true\" nest:\"+\""}},
+ {"Run", >i.Field{Name: "Run", Type: "bool", LocalType: "bool", Doc: "if true, save run log to file, as .run.tsv typically", Directives: gti.Directives{}, Tag: "def:\"true\" nest:\"+\""}},
+ {"Trial", >i.Field{Name: "Trial", Type: "bool", LocalType: "bool", Doc: "if true, save train trial log to file, as .trl.tsv typically. May be large.", Directives: gti.Directives{}, Tag: "def:\"false\" nest:\"+\""}},
+ {"TestEpoch", >i.Field{Name: "TestEpoch", Type: "bool", LocalType: "bool", Doc: "if true, save testing epoch log to file, as .tst_epc.tsv typically. In general it is better to copy testing items over to the training epoch log and record there.", Directives: gti.Directives{}, Tag: "def:\"false\" nest:\"+\""}},
+ {"TestTrial", >i.Field{Name: "TestTrial", Type: "bool", LocalType: "bool", Doc: "if true, save testing trial log to file, as .tst_trl.tsv typically. May be large.", Directives: gti.Directives{}, Tag: "def:\"false\" nest:\"+\""}},
+ {"NetData", >i.Field{Name: "NetData", Type: "bool", LocalType: "bool", Doc: "if true, save network activation etc data from testing trials, for later viewing in netview", Directives: gti.Directives{}, Tag: ""}},
+ }),
+ Embeds: ordmap.Make([]ordmap.KeyVal[string, *gti.Field]{}),
+ Methods: ordmap.Make([]ordmap.KeyVal[string, *gti.Method]{}),
+})
+
+var _ = gti.AddType(&gti.Type{
+ Name: "main.Config",
+ ShortName: "main.Config",
+ IDName: "config",
+ Doc: "Config is a standard Sim config -- use as a starting point.",
+ Directives: gti.Directives{},
+ Fields: ordmap.Make([]ordmap.KeyVal[string, *gti.Field]{
+ {"Includes", >i.Field{Name: "Includes", Type: "[]string", LocalType: "[]string", Doc: "specify include files here, and after configuration, it contains list of include files added", Directives: gti.Directives{}, Tag: ""}},
+ {"GUI", >i.Field{Name: "GUI", Type: "bool", LocalType: "bool", Doc: "open the GUI -- does not automatically run -- if false, then runs automatically and quits", Directives: gti.Directives{}, Tag: "def:\"true\""}},
+ {"Debug", >i.Field{Name: "Debug", Type: "bool", LocalType: "bool", Doc: "log debugging information", Directives: gti.Directives{}, Tag: ""}},
+ {"Params", >i.Field{Name: "Params", Type: "github.com/emer/axon/examples/mpi.ParamConfig", LocalType: "ParamConfig", Doc: "parameter related configuration options", Directives: gti.Directives{}, Tag: "view:\"add-fields\""}},
+ {"Run", >i.Field{Name: "Run", Type: "github.com/emer/axon/examples/mpi.RunConfig", LocalType: "RunConfig", Doc: "sim running related configuration options", Directives: gti.Directives{}, Tag: "view:\"add-fields\""}},
+ {"Log", >i.Field{Name: "Log", Type: "github.com/emer/axon/examples/mpi.LogConfig", LocalType: "LogConfig", Doc: "data logging related configuration options", Directives: gti.Directives{}, Tag: "view:\"add-fields\""}},
+ }),
+ Embeds: ordmap.Make([]ordmap.KeyVal[string, *gti.Field]{}),
+ Methods: ordmap.Make([]ordmap.KeyVal[string, *gti.Method]{}),
+})
+
+var _ = gti.AddType(&gti.Type{
+ Name: "main.Sim",
+ ShortName: "main.Sim",
+ IDName: "sim",
+ Doc: "Sim encapsulates the entire simulation model, and we define all the\nfunctionality as methods on this struct. This structure keeps all relevant\nstate information organized and available without having to pass everything around\nas arguments to methods, and provides the core GUI interface (note the view tags\nfor the fields which provide hints to how things should be displayed).",
+ Directives: gti.Directives{},
+ Fields: ordmap.Make([]ordmap.KeyVal[string, *gti.Field]{
+ {"Config", >i.Field{Name: "Config", Type: "github.com/emer/axon/examples/mpi.Config", LocalType: "Config", Doc: "simulation configuration parameters -- set by .toml config file and / or args", Directives: gti.Directives{}, Tag: ""}},
+ {"Net", >i.Field{Name: "Net", Type: "*github.com/emer/axon/axon.Network", LocalType: "*axon.Network", Doc: "the network -- click to view / edit parameters for layers, prjns, etc", Directives: gti.Directives{}, Tag: "view:\"no-inline\""}},
+ {"Params", >i.Field{Name: "Params", Type: "github.com/emer/emergent/v2/emer.NetParams", LocalType: "emer.NetParams", Doc: "network parameter management", Directives: gti.Directives{}, Tag: "view:\"inline\""}},
+ {"Loops", >i.Field{Name: "Loops", Type: "*github.com/emer/emergent/v2/looper.Manager", LocalType: "*looper.Manager", Doc: "contains looper control loops for running sim", Directives: gti.Directives{}, Tag: "view:\"no-inline\""}},
+ {"Stats", >i.Field{Name: "Stats", Type: "github.com/emer/emergent/v2/estats.Stats", LocalType: "estats.Stats", Doc: "contains computed statistic values", Directives: gti.Directives{}, Tag: ""}},
+ {"Logs", >i.Field{Name: "Logs", Type: "github.com/emer/emergent/v2/elog.Logs", LocalType: "elog.Logs", Doc: "Contains all the logs and information about the logs.'", Directives: gti.Directives{}, Tag: ""}},
+ {"Pats", >i.Field{Name: "Pats", Type: "*goki.dev/etable/v2/etable.Table", LocalType: "*etable.Table", Doc: "the training patterns to use", Directives: gti.Directives{}, Tag: "view:\"no-inline\""}},
+ {"Envs", >i.Field{Name: "Envs", Type: "github.com/emer/emergent/v2/env.Envs", LocalType: "env.Envs", Doc: "Environments", Directives: gti.Directives{}, Tag: "view:\"no-inline\""}},
+ {"Context", >i.Field{Name: "Context", Type: "github.com/emer/axon/axon.Context", LocalType: "axon.Context", Doc: "axon timing parameters and state", Directives: gti.Directives{}, Tag: ""}},
+ {"ViewUpdt", >i.Field{Name: "ViewUpdt", Type: "github.com/emer/emergent/v2/netview.ViewUpdt", LocalType: "netview.ViewUpdt", Doc: "netview update parameters", Directives: gti.Directives{}, Tag: "view:\"inline\""}},
+ {"GUI", >i.Field{Name: "GUI", Type: "github.com/emer/emergent/v2/egui.GUI", LocalType: "egui.GUI", Doc: "manages all the gui elements", Directives: gti.Directives{}, Tag: "view:\"-\""}},
+ {"RndSeeds", >i.Field{Name: "RndSeeds", Type: "github.com/emer/emergent/v2/erand.Seeds", LocalType: "erand.Seeds", Doc: "a list of random seeds to use for each run", Directives: gti.Directives{}, Tag: "view:\"-\""}},
+ {"Comm", >i.Field{Name: "Comm", Type: "*github.com/emer/empi/v2/mpi.Comm", LocalType: "*mpi.Comm", Doc: "mpi communicator", Directives: gti.Directives{}, Tag: "view:\"-\""}},
+ {"AllDWts", >i.Field{Name: "AllDWts", Type: "[]float32", LocalType: "[]float32", Doc: "buffer of all dwt weight changes -- for mpi sharing", Directives: gti.Directives{}, Tag: "view:\"-\""}},
+ }),
+ Embeds: ordmap.Make([]ordmap.KeyVal[string, *gti.Field]{}),
+ Methods: ordmap.Make([]ordmap.KeyVal[string, *gti.Method]{}),
+})
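The `Comm` and `AllDWts` fields registered above are the MPI machinery described in their docs: each rank collects its local weight changes into the flat `AllDWts` buffer, an all-reduce combines the buffers across ranks, and the normalized result is applied identically on every replica. The arithmetic of that reduction, as a self-contained sketch (the actual transport goes through `empi/mpi` and is not shown in this diff; `averageDWts` is a hypothetical name):

```go
// averageDWts performs the reduction that an MPI all-reduce does on the
// per-rank DWt buffers: elementwise sum across ranks, divided by the
// number of ranks, so every replica applies the same averaged update.
func averageDWts(ranks [][]float32) []float32 {
	avg := make([]float32, len(ranks[0]))
	for _, dw := range ranks {
		for i, v := range dw {
			avg[i] += v
		}
	}
	for i := range avg {
		avg[i] /= float32(len(ranks))
	}
	return avg
}
```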
diff --git a/examples/mpi/params.go b/examples/mpi/params.go
index 6d80d1043..ce366033a 100644
--- a/examples/mpi/params.go
+++ b/examples/mpi/params.go
@@ -5,8 +5,8 @@
package main
import (
- "github.com/emer/emergent/netparams"
- "github.com/emer/emergent/params"
+ "github.com/emer/emergent/v2/netparams"
+ "github.com/emer/emergent/v2/params"
)
// ParamSets sets the minimal non-default params
diff --git a/examples/mpi/params_good/params_all.txt b/examples/mpi/params_good/params_all.txt
index e0f3795aa..13e396238 100644
--- a/examples/mpi/params_good/params_all.txt
+++ b/examples/mpi/params_good/params_all.txt
@@ -11,13 +11,13 @@ Act: {
Clamp: { IsInput: true IsTarget: false Ge: 1.5 Add: false ErrThr: 0.5 }
Noise: { On: false GeHz: 100 Ge: 0.001 GiHz: 200 Gi: 0.001 }
VmRange: { Min: 0.1 Max: 1 }
- Mahp: { Gbar: 0.02 Voff: -30 Vslope: 9 TauMax: 1000 Tadj: 3.2093637 DtMax: 0.001 }
+ Mahp: { Gbar: 0.02 Voff: -30 Vslope: 9 TauMax: 1000 Tadj: 3.209364 DtMax: 0.001 }
Sahp: { Gbar: 0.05 CaTau: 5 Off: 0.8 Slope: 0.02 TauMax: 1 CaDt: 0.2 DtMax: 1 }
KNa: { On: true TrialSlow: false Med: { On: true Rise: 0.02 Max: 0.2 Tau: 200 Dt: 0.005 } Slow: { On: true Rise: 0.001 Max: 0.2 Tau: 1000 Dt: 0.001
} }
NMDA: { Gbar: 0.006 Tau: 100 ITau: 1 MgC: 1.2 Voff: 0 }
MaintNMDA: { Gbar: 0.007 Tau: 200 ITau: 1 MgC: 1.4 Voff: 0 }
- GabaB: { Gbar: 0.015 RiseTau: 45 DecayTau: 50 Gbase: 0.2 GiSpike: 10 MaxTime: 47.41225 TauFact: 2.5811758 RiseDt: 0.022222223 DecayDt: 0.02 }
+ GabaB: { Gbar: 0.015 RiseTau: 45 DecayTau: 50 Gbase: 0.2 GiSpike: 10 MaxTime: 47.41225 TauFact: 2.5811756 RiseDt: 0.022222223 DecayDt: 0.02 }
VGCC: { Gbar: 0.02 Ca: 25 }
AK: { Gbar: 0.1 Hf: 0.076 Mf: 0.075 Voff: 2 Vmax: -37 }
SKCa: { Gbar: 0 C50: 0.5 ActTau: 15 DeTau: 30 KCaR: 0.8 CaRDecayTau: 150 CaInThr: 0.01 CaInTau: 50 }
@@ -37,7 +37,7 @@ Learn: {
TrgAvgAct: { On: true GiBaseInit: 0 ErrLRate: 0.02 SynScaleRate: 0.005 SubMean: 0 Permute: true Pool: true TrgRange: { Min: 0.5 Max: 2
} }
RLRate: { On: true SigmoidMin: 0.05 Diff: true SpkThr: 0.1 DiffThr: 0.02 Min: 0.001 }
- NeuroMod: { DAMod: 0 Valence: 0 DAModGain: 0.5 DALRateSign: false DALRateMod: 0 AChLRateMod: 0 AChDisInhib: 0 BurstGain: 1 DipGain: 1 }
+ NeuroMod: { DAMod: NoDAMod Valence: Positive DAModGain: 0.5 DALRateSign: false DALRateMod: 0 AChLRateMod: 0 AChDisInhib: 0 BurstGain: 1 DipGain: 1 }
}
/////////////////////////////////////////////////
Layer: Hidden1
@@ -52,13 +52,13 @@ Act: {
Clamp: { IsInput: false IsTarget: false Ge: 0.8 Add: false ErrThr: 0.5 }
Noise: { On: false GeHz: 100 Ge: 0.001 GiHz: 200 Gi: 0.001 }
VmRange: { Min: 0.1 Max: 1 }
- Mahp: { Gbar: 0.02 Voff: -30 Vslope: 9 TauMax: 1000 Tadj: 3.2093637 DtMax: 0.001 }
+ Mahp: { Gbar: 0.02 Voff: -30 Vslope: 9 TauMax: 1000 Tadj: 3.209364 DtMax: 0.001 }
Sahp: { Gbar: 0.05 CaTau: 5 Off: 0.8 Slope: 0.02 TauMax: 1 CaDt: 0.2 DtMax: 1 }
KNa: { On: true TrialSlow: false Med: { On: true Rise: 0.02 Max: 0.2 Tau: 200 Dt: 0.005 } Slow: { On: true Rise: 0.001 Max: 0.2 Tau: 1000 Dt: 0.001
} }
NMDA: { Gbar: 0.006 Tau: 100 ITau: 1 MgC: 1.2 Voff: 0 }
MaintNMDA: { Gbar: 0.007 Tau: 200 ITau: 1 MgC: 1.4 Voff: 0 }
- GabaB: { Gbar: 0.015 RiseTau: 45 DecayTau: 50 Gbase: 0.2 GiSpike: 10 MaxTime: 47.41225 TauFact: 2.5811758 RiseDt: 0.022222223 DecayDt: 0.02 }
+ GabaB: { Gbar: 0.015 RiseTau: 45 DecayTau: 50 Gbase: 0.2 GiSpike: 10 MaxTime: 47.41225 TauFact: 2.5811756 RiseDt: 0.022222223 DecayDt: 0.02 }
VGCC: { Gbar: 0.02 Ca: 25 }
AK: { Gbar: 0.1 Hf: 0.076 Mf: 0.075 Voff: 2 Vmax: -37 }
SKCa: { Gbar: 0 C50: 0.5 ActTau: 15 DeTau: 30 KCaR: 0.8 CaRDecayTau: 150 CaInThr: 0.01 CaInTau: 50 }
@@ -78,7 +78,7 @@ Learn: {
TrgAvgAct: { On: true GiBaseInit: 0 ErrLRate: 0.02 SynScaleRate: 0.005 SubMean: 1 Permute: true Pool: true TrgRange: { Min: 0.5 Max: 2
} }
RLRate: { On: true SigmoidMin: 0.05 Diff: true SpkThr: 0.1 DiffThr: 0.02 Min: 0.001 }
- NeuroMod: { DAMod: 0 Valence: 0 DAModGain: 0.5 DALRateSign: false DALRateMod: 0 AChLRateMod: 0 AChDisInhib: 0 BurstGain: 1 DipGain: 1 }
+ NeuroMod: { DAMod: NoDAMod Valence: Positive DAModGain: 0.5 DALRateSign: false DALRateMod: 0 AChLRateMod: 0 AChDisInhib: 0 BurstGain: 1 DipGain: 1 }
}
Burst: {
ThrRel: 0.1 ThrAbs: 0.1
@@ -136,13 +136,13 @@ Act: {
Clamp: { IsInput: false IsTarget: false Ge: 0.8 Add: false ErrThr: 0.5 }
Noise: { On: false GeHz: 100 Ge: 0.001 GiHz: 200 Gi: 0.001 }
VmRange: { Min: 0.1 Max: 1 }
- Mahp: { Gbar: 0.02 Voff: -30 Vslope: 9 TauMax: 1000 Tadj: 3.2093637 DtMax: 0.001 }
+ Mahp: { Gbar: 0.02 Voff: -30 Vslope: 9 TauMax: 1000 Tadj: 3.209364 DtMax: 0.001 }
Sahp: { Gbar: 0.05 CaTau: 5 Off: 0.8 Slope: 0.02 TauMax: 1 CaDt: 0.2 DtMax: 1 }
KNa: { On: true TrialSlow: false Med: { On: true Rise: 0.02 Max: 0.2 Tau: 200 Dt: 0.005 } Slow: { On: true Rise: 0.001 Max: 0.2 Tau: 1000 Dt: 0.001
} }
NMDA: { Gbar: 0.006 Tau: 100 ITau: 1 MgC: 1.2 Voff: 0 }
MaintNMDA: { Gbar: 0.007 Tau: 200 ITau: 1 MgC: 1.4 Voff: 0 }
- GabaB: { Gbar: 0.015 RiseTau: 45 DecayTau: 50 Gbase: 0.2 GiSpike: 10 MaxTime: 47.41225 TauFact: 2.5811758 RiseDt: 0.022222223 DecayDt: 0.02 }
+ GabaB: { Gbar: 0.015 RiseTau: 45 DecayTau: 50 Gbase: 0.2 GiSpike: 10 MaxTime: 47.41225 TauFact: 2.5811756 RiseDt: 0.022222223 DecayDt: 0.02 }
VGCC: { Gbar: 0.02 Ca: 25 }
AK: { Gbar: 0.1 Hf: 0.076 Mf: 0.075 Voff: 2 Vmax: -37 }
SKCa: { Gbar: 0 C50: 0.5 ActTau: 15 DeTau: 30 KCaR: 0.8 CaRDecayTau: 150 CaInThr: 0.01 CaInTau: 50 }
@@ -162,7 +162,7 @@ Learn: {
TrgAvgAct: { On: true GiBaseInit: 0 ErrLRate: 0.02 SynScaleRate: 0.005 SubMean: 1 Permute: true Pool: true TrgRange: { Min: 0.5 Max: 2
} }
RLRate: { On: true SigmoidMin: 0.05 Diff: true SpkThr: 0.1 DiffThr: 0.02 Min: 0.001 }
- NeuroMod: { DAMod: 0 Valence: 0 DAModGain: 0.5 DALRateSign: false DALRateMod: 0 AChLRateMod: 0 AChDisInhib: 0 BurstGain: 1 DipGain: 1 }
+ NeuroMod: { DAMod: NoDAMod Valence: Positive DAModGain: 0.5 DALRateSign: false DALRateMod: 0 AChLRateMod: 0 AChDisInhib: 0 BurstGain: 1 DipGain: 1 }
}
Burst: {
ThrRel: 0.1 ThrAbs: 0.1
@@ -220,13 +220,13 @@ Act: {
Clamp: { IsInput: false IsTarget: true Ge: 0.8 Add: false ErrThr: 0.5 }
Noise: { On: false GeHz: 100 Ge: 0.001 GiHz: 200 Gi: 0.001 }
VmRange: { Min: 0.1 Max: 1 }
- Mahp: { Gbar: 0.02 Voff: -30 Vslope: 9 TauMax: 1000 Tadj: 3.2093637 DtMax: 0.001 }
+ Mahp: { Gbar: 0.02 Voff: -30 Vslope: 9 TauMax: 1000 Tadj: 3.209364 DtMax: 0.001 }
Sahp: { Gbar: 0.05 CaTau: 5 Off: 0.8 Slope: 0.02 TauMax: 1 CaDt: 0.2 DtMax: 1 }
KNa: { On: true TrialSlow: false Med: { On: true Rise: 0.02 Max: 0.2 Tau: 200 Dt: 0.005 } Slow: { On: true Rise: 0.001 Max: 0.2 Tau: 1000 Dt: 0.001
} }
NMDA: { Gbar: 0.006 Tau: 100 ITau: 1 MgC: 1.2 Voff: 0 }
MaintNMDA: { Gbar: 0.007 Tau: 200 ITau: 1 MgC: 1.4 Voff: 0 }
- GabaB: { Gbar: 0.015 RiseTau: 45 DecayTau: 50 Gbase: 0.2 GiSpike: 10 MaxTime: 47.41225 TauFact: 2.5811758 RiseDt: 0.022222223 DecayDt: 0.02 }
+ GabaB: { Gbar: 0.015 RiseTau: 45 DecayTau: 50 Gbase: 0.2 GiSpike: 10 MaxTime: 47.41225 TauFact: 2.5811756 RiseDt: 0.022222223 DecayDt: 0.02 }
VGCC: { Gbar: 0.02 Ca: 25 }
AK: { Gbar: 0.1 Hf: 0.076 Mf: 0.075 Voff: 2 Vmax: -37 }
SKCa: { Gbar: 0 C50: 0.5 ActTau: 15 DeTau: 30 KCaR: 0.8 CaRDecayTau: 150 CaInThr: 0.01 CaInTau: 50 }
@@ -246,7 +246,7 @@ Learn: {
TrgAvgAct: { On: true GiBaseInit: 0 ErrLRate: 0.02 SynScaleRate: 0.005 SubMean: 0 Permute: true Pool: true TrgRange: { Min: 0.5 Max: 2
} }
RLRate: { On: true SigmoidMin: 0.05 Diff: true SpkThr: 0.1 DiffThr: 0.02 Min: 0.001 }
- NeuroMod: { DAMod: 0 Valence: 0 DAModGain: 0.5 DALRateSign: false DALRateMod: 0 AChLRateMod: 0 AChDisInhib: 0 BurstGain: 1 DipGain: 1 }
+ NeuroMod: { DAMod: NoDAMod Valence: Positive DAModGain: 0.5 DALRateSign: false DALRateMod: 0 AChLRateMod: 0 AChDisInhib: 0 BurstGain: 1 DipGain: 1 }
}
///////////////////////////////////////////////////
Prjn: Hidden2ToOutput
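
The DAMod and Valence changes above (0 -> NoDAMod, 0 -> Positive) reflect enum fields now printing by name instead of by number, via enumgen-style generated String methods. A minimal sketch of the mechanism; every value other than NoDAMod here is illustrative rather than taken from axon:

    package main

    import "fmt"

    // DAModTypes stands in for an enum whose values print by name, as the
    // params dumps above now do; only NoDAMod appears in the diff itself.
    type DAModTypes int32

    const (
        NoDAMod DAModTypes = iota
        OtherMod // hypothetical second value, for illustration only
    )

    // String gives the name-based rendering that generated enum code provides.
    func (dm DAModTypes) String() string {
        switch dm {
        case NoDAMod:
            return "NoDAMod"
        case OtherMod:
            return "OtherMod"
        }
        return fmt.Sprintf("DAModTypes(%d)", int32(dm))
    }

    func main() {
        fmt.Println(NoDAMod) // prints "NoDAMod" rather than "0"
    }
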
diff --git a/examples/mpi/params_good/params_layers.txt b/examples/mpi/params_good/params_layers.txt
index 24e784e74..401556c38 100644
--- a/examples/mpi/params_good/params_layers.txt
+++ b/examples/mpi/params_good/params_layers.txt
@@ -5,8 +5,8 @@
Layer.Gi: 1.05 Params: Layer: 1.05
Hidden2 Nominal: 0.06 Params: Layer: 0.06
- Layer.Gi: 1.05 Params: Layer: 1.05
+ Layer.Gi: 1.05 Params: Base:Layer: 1.05
Output Nominal: 0.24 Params: #Output: 0.24 | Layer: 0.06
- Layer.Gi: 0.65 Params: #Output: 0.65 | Base:Layer: 1.05
+ Layer.Gi: 0.65 Params: #Output: 0.65 | Layer: 1.05
diff --git a/examples/mpi/ra25.go b/examples/mpi/ra25.go
index 76b4e9b3d..adae6cc13 100644
--- a/examples/mpi/ra25.go
+++ b/examples/mpi/ra25.go
@@ -6,32 +6,34 @@
// across multiple nodes, sharing DWt changes via MPI.
package main
+//go:generate goki generate -add-types
+
import (
"fmt"
"log"
"os"
"github.com/emer/axon/axon"
- "github.com/emer/emergent/econfig"
- "github.com/emer/emergent/egui"
- "github.com/emer/emergent/elog"
- "github.com/emer/emergent/emer"
- "github.com/emer/emergent/env"
- "github.com/emer/emergent/erand"
- "github.com/emer/emergent/estats"
- "github.com/emer/emergent/etime"
- "github.com/emer/emergent/evec"
- "github.com/emer/emergent/looper"
- "github.com/emer/emergent/netview"
- "github.com/emer/emergent/patgen"
- "github.com/emer/emergent/prjn"
- "github.com/emer/empi/empi"
- "github.com/emer/empi/mpi"
- "github.com/emer/etable/etable"
- "github.com/emer/etable/etensor"
- "github.com/goki/gi/gi"
- "github.com/goki/gi/gimain"
- "github.com/goki/mat32"
+ "github.com/emer/emergent/v2/econfig"
+ "github.com/emer/emergent/v2/egui"
+ "github.com/emer/emergent/v2/elog"
+ "github.com/emer/emergent/v2/emer"
+ "github.com/emer/emergent/v2/env"
+ "github.com/emer/emergent/v2/erand"
+ "github.com/emer/emergent/v2/estats"
+ "github.com/emer/emergent/v2/etime"
+ "github.com/emer/emergent/v2/evec"
+ "github.com/emer/emergent/v2/looper"
+ "github.com/emer/emergent/v2/netview"
+ "github.com/emer/emergent/v2/patgen"
+ "github.com/emer/emergent/v2/prjn"
+ "github.com/emer/empi/v2/empi"
+ "github.com/emer/empi/v2/mpi"
+ "goki.dev/etable/v2/etable"
+ "goki.dev/etable/v2/etensor"
+ "goki.dev/gi/v2/gi"
+ "goki.dev/gi/v2/gimain"
+ "goki.dev/mat32/v2"
)
func main() {
@@ -39,7 +41,7 @@ func main() {
sim.New()
sim.ConfigAll()
if sim.Config.GUI {
- gimain.Main(sim.RunGUI)
+ gimain.Run(sim.RunGUI)
} else {
sim.RunNoGUI()
}
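
gimain.Main is renamed to gimain.Run in the v2 API, with the same run-function signature, as the hunk above shows. A minimal standalone sketch:

    package main

    import "goki.dev/gi/v2/gimain"

    func main() {
        gimain.Run(app) // v1 spelling was gimain.Main(app)
    }

    // app would construct the GUI and run its window, as Sim.RunGUI does here.
    func app() {}
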
@@ -51,115 +53,115 @@ func main() {
type ParamConfig struct {
// network parameters
- Network map[string]any `desc:"network parameters"`
+ Network map[string]any
- // [def: {'X':10,'Y':10}] size of hidden layer -- can use emer.LaySize for 4D layers
- Hidden1Size evec.Vec2i `def:"{'X':10,'Y':10}" nest:"+" desc:"size of hidden layer -- can use emer.LaySize for 4D layers"`
+ // size of hidden layer -- can use emer.LaySize for 4D layers
+ Hidden1Size evec.Vec2i `def:"{'X':10,'Y':10}" nest:"+"`
- // [def: {'X':10,'Y':10}] size of hidden layer -- can use emer.LaySize for 4D layers
- Hidden2Size evec.Vec2i `def:"{'X':10,'Y':10}" nest:"+" desc:"size of hidden layer -- can use emer.LaySize for 4D layers"`
+ // size of hidden layer -- can use emer.LaySize for 4D layers
+ Hidden2Size evec.Vec2i `def:"{'X':10,'Y':10}" nest:"+"`
// Extra Param Sheet name(s) to use (space separated if multiple) -- must be valid name as listed in compiled-in params or loaded params
- Sheet string `desc:"Extra Param Sheet name(s) to use (space separated if multiple) -- must be valid name as listed in compiled-in params or loaded params"`
+ Sheet string
// extra tag to add to file names and logs saved from this run
- Tag string `desc:"extra tag to add to file names and logs saved from this run"`
+ Tag string
// user note -- describe the run params etc -- like a git commit message for the run
- Note string `desc:"user note -- describe the run params etc -- like a git commit message for the run"`
+ Note string
// Name of the JSON file to input saved parameters from.
- File string `nest:"+" desc:"Name of the JSON file to input saved parameters from."`
+ File string `nest:"+"`
// Save a snapshot of all current param and config settings in a directory named params_ (or _good if Good is true), then quit -- useful for comparing to later changes and seeing multiple views of current params
- SaveAll bool `nest:"+" desc:"Save a snapshot of all current param and config settings in a directory named params_ (or _good if Good is true), then quit -- useful for comparing to later changes and seeing multiple views of current params"`
+ SaveAll bool `nest:"+"`
// for SaveAll, save to params_good for a known good params state. This can be done prior to making a new release after all tests are passing -- add results to git to provide a full diff record of all params over time.
- Good bool `nest:"+" desc:"for SaveAll, save to params_good for a known good params state. This can be done prior to making a new release after all tests are passing -- add results to git to provide a full diff record of all params over time."`
+ Good bool `nest:"+"`
}
// RunConfig has config parameters related to running the sim
type RunConfig struct {
// use MPI message passing interface for data parallel computation between nodes running identical copies of the same sim, sharing DWt changes
- MPI bool `desc:"use MPI message passing interface for data parallel computation between nodes running identical copies of the same sim, sharing DWt changes"`
+ MPI bool
- // [def: false] use the GPU for computation -- generally faster even for small models if NData ~16
- GPU bool `def:"false" desc:"use the GPU for computation -- generally faster even for small models if NData ~16"`
+ // use the GPU for computation -- generally faster even for small models if NData ~16
+ GPU bool `def:"false"`
- // [def: 16] [min: 1] number of data-parallel items to process in parallel per trial -- works (and is significantly faster) for both CPU and GPU. Results in an effective mini-batch of learning.
- NData int `def:"16" min:"1" desc:"number of data-parallel items to process in parallel per trial -- works (and is significantly faster) for both CPU and GPU. Results in an effective mini-batch of learning."`
+ // number of data-parallel items to process in parallel per trial -- works (and is significantly faster) for both CPU and GPU. Results in an effective mini-batch of learning.
+ NData int `def:"16" min:"1"`
- // [def: 0] number of parallel threads for CPU computation -- 0 = use default
- NThreads int `def:"0" desc:"number of parallel threads for CPU computation -- 0 = use default"`
+ // number of parallel threads for CPU computation -- 0 = use default
+ NThreads int `def:"0"`
- // [def: 0] starting run number -- determines the random seed -- runs counts from there -- can do all runs in parallel by launching separate jobs with each run, runs = 1
- Run int `def:"0" desc:"starting run number -- determines the random seed -- runs counts from there -- can do all runs in parallel by launching separate jobs with each run, runs = 1"`
+ // starting run number -- determines the random seed -- runs counts from there -- can do all runs in parallel by launching separate jobs with each run, runs = 1
+ Run int `def:"0"`
- // [def: 5] [min: 1] total number of runs to do when running Train
- NRuns int `def:"5" min:"1" desc:"total number of runs to do when running Train"`
+ // total number of runs to do when running Train
+ NRuns int `def:"5" min:"1"`
- // [def: 100] total number of epochs per run
- NEpochs int `def:"100" desc:"total number of epochs per run"`
+ // total number of epochs per run
+ NEpochs int `def:"100"`
- // [def: 2] stop run after this number of perfect, zero-error epochs
- NZero int `def:"2" desc:"stop run after this number of perfect, zero-error epochs"`
+ // stop run after this number of perfect, zero-error epochs
+ NZero int `def:"2"`
- // [def: 32] total number of trials per epoch. Should be an even multiple of NData.
- NTrials int `def:"32" desc:"total number of trials per epoch. Should be an even multiple of NData."`
+ // total number of trials per epoch. Should be an even multiple of NData.
+ NTrials int `def:"32"`
- // [def: 5] how often to run through all the test patterns, in terms of training epochs -- can use 0 or -1 for no testing
- TestInterval int `def:"5" desc:"how often to run through all the test patterns, in terms of training epochs -- can use 0 or -1 for no testing"`
+ // how often to run through all the test patterns, in terms of training epochs -- can use 0 or -1 for no testing
+ TestInterval int `def:"5"`
- // [def: 5] how frequently (in epochs) to compute PCA on hidden representations to measure variance?
- PCAInterval int `def:"5" desc:"how frequently (in epochs) to compute PCA on hidden representations to measure variance?"`
+ // how frequently (in epochs) to compute PCA on hidden representations to measure variance?
+ PCAInterval int `def:"5"`
}
// LogConfig has config parameters related to logging data
type LogConfig struct {
// if true, save final weights after each run
- SaveWts bool `desc:"if true, save final weights after each run"`
+ SaveWts bool
- // [def: true] if true, save train epoch log to file, as .epc.tsv typically
- Epoch bool `def:"true" nest:"+" desc:"if true, save train epoch log to file, as .epc.tsv typically"`
+ // if true, save train epoch log to file, as .epc.tsv typically
+ Epoch bool `def:"true" nest:"+"`
- // [def: true] if true, save run log to file, as .run.tsv typically
- Run bool `def:"true" nest:"+" desc:"if true, save run log to file, as .run.tsv typically"`
+ // if true, save run log to file, as .run.tsv typically
+ Run bool `def:"true" nest:"+"`
- // [def: false] if true, save train trial log to file, as .trl.tsv typically. May be large.
- Trial bool `def:"false" nest:"+" desc:"if true, save train trial log to file, as .trl.tsv typically. May be large."`
+ // if true, save train trial log to file, as .trl.tsv typically. May be large.
+ Trial bool `def:"false" nest:"+"`
- // [def: false] if true, save testing epoch log to file, as .tst_epc.tsv typically. In general it is better to copy testing items over to the training epoch log and record there.
- TestEpoch bool `def:"false" nest:"+" desc:"if true, save testing epoch log to file, as .tst_epc.tsv typically. In general it is better to copy testing items over to the training epoch log and record there."`
+ // if true, save testing epoch log to file, as .tst_epc.tsv typically. In general it is better to copy testing items over to the training epoch log and record there.
+ TestEpoch bool `def:"false" nest:"+"`
- // [def: false] if true, save testing trial log to file, as .tst_trl.tsv typically. May be large.
- TestTrial bool `def:"false" nest:"+" desc:"if true, save testing trial log to file, as .tst_trl.tsv typically. May be large."`
+ // if true, save testing trial log to file, as .tst_trl.tsv typically. May be large.
+ TestTrial bool `def:"false" nest:"+"`
// if true, save network activation etc data from testing trials, for later viewing in netview
- NetData bool `desc:"if true, save network activation etc data from testing trials, for later viewing in netview"`
+ NetData bool
}
// Config is a standard Sim config -- use as a starting point.
type Config struct {
// specify include files here, and after configuration, it contains list of include files added
- Includes []string `desc:"specify include files here, and after configuration, it contains list of include files added"`
+ Includes []string
- // [def: true] open the GUI -- does not automatically run -- if false, then runs automatically and quits
- GUI bool `def:"true" desc:"open the GUI -- does not automatically run -- if false, then runs automatically and quits"`
+ // open the GUI -- does not automatically run -- if false, then runs automatically and quits
+ GUI bool `def:"true"`
// log debugging information
- Debug bool `desc:"log debugging information"`
+ Debug bool
- // [view: add-fields] parameter related configuration options
- Params ParamConfig `view:"add-fields" desc:"parameter related configuration options"`
+ // parameter related configuration options
+ Params ParamConfig `view:"add-fields"`
- // [view: add-fields] sim running related configuration options
- Run RunConfig `view:"add-fields" desc:"sim running related configuration options"`
+ // sim running related configuration options
+ Run RunConfig `view:"add-fields"`
- // [view: add-fields] data logging related configuration options
- Log LogConfig `view:"add-fields" desc:"data logging related configuration options"`
+ // data logging related configuration options
+ Log LogConfig `view:"add-fields"`
}
func (cfg *Config) IncludesPtr() *[]string { return &cfg.Includes }
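
The recurring pattern in these config structs: the desc struct tag is dropped and its text lives only in the doc comment, which goki generate harvests into gtigen.go. A before/after sketch using one field from RunConfig above:

    // v1 style: description duplicated between comment and desc tag
    type RunConfigV1 struct {
        // [def: 5] [min: 1] total number of runs to do when running Train
        NRuns int `def:"5" min:"1" desc:"total number of runs to do when running Train"`
    }

    // v2 style: the doc comment is the single source of the description
    type RunConfigV2 struct {
        // total number of runs to do when running Train
        NRuns int `def:"5" min:"1"`
    }
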
@@ -172,46 +174,46 @@ func (cfg *Config) IncludesPtr() *[]string { return &cfg.Includes }
type Sim struct {
// simulation configuration parameters -- set by .toml config file and / or args
- Config Config `desc:"simulation configuration parameters -- set by .toml config file and / or args"`
+ Config Config
- // [view: no-inline] the network -- click to view / edit parameters for layers, prjns, etc
- Net *axon.Network `view:"no-inline" desc:"the network -- click to view / edit parameters for layers, prjns, etc"`
+ // the network -- click to view / edit parameters for layers, prjns, etc
+ Net *axon.Network `view:"no-inline"`
- // [view: inline] network parameter management
- Params emer.NetParams `view:"inline" desc:"network parameter management"`
+ // network parameter management
+ Params emer.NetParams `view:"inline"`
- // [view: no-inline] contains looper control loops for running sim
- Loops *looper.Manager `view:"no-inline" desc:"contains looper control loops for running sim"`
+ // contains looper control loops for running sim
+ Loops *looper.Manager `view:"no-inline"`
// contains computed statistic values
- Stats estats.Stats `desc:"contains computed statistic values"`
+ Stats estats.Stats
// Contains all the logs and information about the logs.'
- Logs elog.Logs `desc:"Contains all the logs and information about the logs.'"`
+ Logs elog.Logs
- // [view: no-inline] the training patterns to use
- Pats *etable.Table `view:"no-inline" desc:"the training patterns to use"`
+ // the training patterns to use
+ Pats *etable.Table `view:"no-inline"`
- // [view: no-inline] Environments
- Envs env.Envs `view:"no-inline" desc:"Environments"`
+ // Environments
+ Envs env.Envs `view:"no-inline"`
// axon timing parameters and state
- Context axon.Context `desc:"axon timing parameters and state"`
+ Context axon.Context
- // [view: inline] netview update parameters
- ViewUpdt netview.ViewUpdt `view:"inline" desc:"netview update parameters"`
+ // netview update parameters
+ ViewUpdt netview.ViewUpdt `view:"inline"`
- // [view: -] manages all the gui elements
- GUI egui.GUI `view:"-" desc:"manages all the gui elements"`
+ // manages all the gui elements
+ GUI egui.GUI `view:"-"`
- // [view: -] a list of random seeds to use for each run
- RndSeeds erand.Seeds `view:"-" desc:"a list of random seeds to use for each run"`
+ // a list of random seeds to use for each run
+ RndSeeds erand.Seeds `view:"-"`
- // [view: -] mpi communicator
- Comm *mpi.Comm `view:"-" desc:"mpi communicator"`
+ // mpi communicator
+ Comm *mpi.Comm `view:"-"`
- // [view: -] buffer of all dwt weight changes -- for mpi sharing
- AllDWts []float32 `view:"-" desc:"buffer of all dwt weight changes -- for mpi sharing"`
+ // buffer of all dwt weight changes -- for mpi sharing
+ AllDWts []float32 `view:"-"`
}
// New creates new blank elements and initializes defaults
@@ -696,10 +698,10 @@ func (ss *Sim) Log(mode etime.Modes, time etime.Times) {
////////////////////////////////////////////////////////////////////////////////////////////
// Gui
-// ConfigGui configures the GoGi gui interface for this simulation,
-func (ss *Sim) ConfigGui() *gi.Window {
+// ConfigGUI configures the GoGi gui interface for this simulation,
+func (ss *Sim) ConfigGUI() {
title := "Axon Random Associator"
- ss.GUI.MakeWindow(ss, "ra25", title, `This demonstrates a basic Axon model. See emergent on GitHub.`)
+ ss.GUI.MakeBody(ss, "ra25", title, `This demonstrates a basic Axon model. See emergent on GitHub.`)
ss.GUI.CycleUpdateInterval = 10
nv := ss.GUI.AddNetView("NetView")
@@ -708,50 +710,52 @@ func (ss *Sim) ConfigGui() *gi.Window {
ss.ViewUpdt.Config(nv, etime.Phase, etime.Phase)
ss.GUI.ViewUpdt = &ss.ViewUpdt
- nv.Scene().Camera.Pose.Pos.Set(0, 1, 2.75) // more "head on" than default which is more "top down"
- nv.Scene().Camera.LookAt(mat32.Vec3{0, 0, 0}, mat32.Vec3{0, 1, 0})
+ nv.SceneXYZ().Camera.Pose.Pos.Set(0, 1, 2.75) // more "head on" than default which is more "top down"
+ nv.SceneXYZ().Camera.LookAt(mat32.V3(0, 0, 0), mat32.V3(0, 1, 0))
ss.GUI.AddPlots(title, &ss.Logs)
- ss.GUI.AddToolbarItem(egui.ToolbarItem{Label: "Init", Icon: "update",
- Tooltip: "Initialize everything including network weights, and start over. Also applies current params.",
- Active: egui.ActiveStopped,
- Func: func() {
- ss.Init()
- ss.GUI.UpdateWindow()
- },
- })
+ ss.GUI.Body.AddAppBar(func(tb *gi.Toolbar) {
+ ss.GUI.AddToolbarItem(tb, egui.ToolbarItem{Label: "Init", Icon: "update",
+ Tooltip: "Initialize everything including network weights, and start over. Also applies current params.",
+ Active: egui.ActiveStopped,
+ Func: func() {
+ ss.Init()
+ ss.GUI.UpdateWindow()
+ },
+ })
- ss.GUI.AddLooperCtrl(ss.Loops, []etime.Modes{etime.Train, etime.Test})
-
- ////////////////////////////////////////////////
- ss.GUI.ToolBar.AddSeparator("log")
- ss.GUI.AddToolbarItem(egui.ToolbarItem{Label: "Reset RunLog",
- Icon: "reset",
- Tooltip: "Reset the accumulated log of all Runs, which are tagged with the ParamSet used",
- Active: egui.ActiveAlways,
- Func: func() {
- ss.Logs.ResetLog(etime.Train, etime.Run)
- ss.GUI.UpdatePlot(etime.Train, etime.Run)
- },
- })
- ////////////////////////////////////////////////
- ss.GUI.ToolBar.AddSeparator("misc")
- ss.GUI.AddToolbarItem(egui.ToolbarItem{Label: "New Seed",
- Icon: "new",
- Tooltip: "Generate a new initial random seed to get different results. By default, Init re-establishes the same initial seed every time.",
- Active: egui.ActiveAlways,
- Func: func() {
- ss.RndSeeds.NewSeeds()
- },
- })
- ss.GUI.AddToolbarItem(egui.ToolbarItem{Label: "README",
- Icon: "file-markdown",
- Tooltip: "Opens your browser on the README file that contains instructions for how to run this model.",
- Active: egui.ActiveAlways,
- Func: func() {
- gi.OpenURL("https://github.com/emer/axon/blob/master/examples/mpi/README.md")
- },
+ ss.GUI.AddLooperCtrl(tb, ss.Loops, []etime.Modes{etime.Train, etime.Test})
+
+ ////////////////////////////////////////////////
+ gi.NewSeparator(tb)
+ ss.GUI.AddToolbarItem(tb, egui.ToolbarItem{Label: "Reset RunLog",
+ Icon: "reset",
+ Tooltip: "Reset the accumulated log of all Runs, which are tagged with the ParamSet used",
+ Active: egui.ActiveAlways,
+ Func: func() {
+ ss.Logs.ResetLog(etime.Train, etime.Run)
+ ss.GUI.UpdatePlot(etime.Train, etime.Run)
+ },
+ })
+ ////////////////////////////////////////////////
+ gi.NewSeparator(tb)
+ ss.GUI.AddToolbarItem(tb, egui.ToolbarItem{Label: "New Seed",
+ Icon: "new",
+ Tooltip: "Generate a new initial random seed to get different results. By default, Init re-establishes the same initial seed every time.",
+ Active: egui.ActiveAlways,
+ Func: func() {
+ ss.RndSeeds.NewSeeds()
+ },
+ })
+ ss.GUI.AddToolbarItem(tb, egui.ToolbarItem{Label: "README",
+ Icon: "file-markdown",
+ Tooltip: "Opens your browser on the README file that contains instructions for how to run this model.",
+ Active: egui.ActiveAlways,
+ Func: func() {
+ gi.OpenURL("https://github.com/emer/axon/blob/master/examples/mpi/README.md")
+ },
+ })
})
ss.GUI.FinalizeGUI(false)
if ss.Config.Run.GPU {
@@ -761,13 +765,12 @@ func (ss *Sim) ConfigGui() *gi.Window {
ss.Net.GPU.Destroy()
})
}
- return ss.GUI.Win
}
func (ss *Sim) RunGUI() {
ss.Init()
- win := ss.ConfigGui()
- win.StartEventLoop()
+ ss.ConfigGUI()
+ ss.GUI.Body.NewWindow().Run().Wait()
}
func (ss *Sim) RunNoGUI() {
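
The window lifecycle changes accordingly: instead of ConfigGui returning a *gi.Window whose event loop is started explicitly, ConfigGUI populates ss.GUI.Body and RunGUI runs it. A sketch using only calls that appear in this diff:

    func (ss *Sim) RunGUI() {
        ss.Init()
        ss.ConfigGUI()                       // builds ss.GUI.Body via MakeBody
        ss.GUI.Body.NewWindow().Run().Wait() // replaces win.StartEventLoop()
    }
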
diff --git a/examples/neuron/config.go b/examples/neuron/config.go
index ef6ae3da9..12af1e2a3 100644
--- a/examples/neuron/config.go
+++ b/examples/neuron/config.go
@@ -8,130 +8,130 @@ package main
type ParamConfig struct {
// network parameters
- Network map[string]any `desc:"network parameters"`
+ Network map[string]any
// Extra Param Sheet name(s) to use (space separated if multiple) -- must be valid name as listed in compiled-in params or loaded params
- Sheet string `desc:"Extra Param Sheet name(s) to use (space separated if multiple) -- must be valid name as listed in compiled-in params or loaded params"`
+ Sheet string
// extra tag to add to file names and logs saved from this run
- Tag string `desc:"extra tag to add to file names and logs saved from this run"`
+ Tag string
// user note -- describe the run params etc -- like a git commit message for the run
- Note string `desc:"user note -- describe the run params etc -- like a git commit message for the run"`
+ Note string
// Name of the JSON file to input saved parameters from.
- File string `nest:"+" desc:"Name of the JSON file to input saved parameters from."`
+ File string `nest:"+"`
// Save a snapshot of all current param and config settings in a directory named params_ (or _good if Good is true), then quit -- useful for comparing to later changes and seeing multiple views of current params
- SaveAll bool `nest:"+" desc:"Save a snapshot of all current param and config settings in a directory named params_ (or _good if Good is true), then quit -- useful for comparing to later changes and seeing multiple views of current params"`
+ SaveAll bool `nest:"+"`
// for SaveAll, save to params_good for a known good params state. This can be done prior to making a new release after all tests are passing -- add results to git to provide a full diff record of all params over time.
- Good bool `nest:"+" desc:"for SaveAll, save to params_good for a known good params state. This can be done prior to making a new release after all tests are passing -- add results to git to provide a full diff record of all params over time."`
+ Good bool `nest:"+"`
}
// RunConfig has config parameters related to running the sim
type RunConfig struct {
- // [def: false] use the GPU for computation -- only for testing in this model -- not faster
- GPU bool `def:"false" desc:"use the GPU for computation -- only for testing in this model -- not faster"`
+ // use the GPU for computation -- only for testing in this model -- not faster
+ GPU bool `def:"false"`
- // [def: 2] number of parallel threads for CPU computation -- 0 = use default
- NThreads int `def:"2" desc:"number of parallel threads for CPU computation -- 0 = use default"`
+ // number of parallel threads for CPU computation -- 0 = use default
+ NThreads int `def:"2"`
- // [def: 0] starting run number -- determines the random seed -- runs counts from there -- can do all runs in parallel by launching separate jobs with each run, runs = 1
- Run int `def:"0" desc:"starting run number -- determines the random seed -- runs counts from there -- can do all runs in parallel by launching separate jobs with each run, runs = 1"`
+ // starting run number -- determines the random seed -- runs counts from there -- can do all runs in parallel by launching separate jobs with each run, runs = 1
+ Run int `def:"0"`
- // [def: 1] [min: 1] total number of runs to do when running Train
- NRuns int `def:"1" min:"1" desc:"total number of runs to do when running Train"`
+ // total number of runs to do when running Train
+ NRuns int `def:"1" min:"1"`
- // [def: 1] total number of epochs per run
- NEpochs int `def:"1" desc:"total number of epochs per run"`
+ // total number of epochs per run
+ NEpochs int `def:"1"`
}
// LogConfig has config parameters related to logging data
type LogConfig struct {
// if true, save final weights after each run
- SaveWts bool `desc:"if true, save final weights after each run"`
+ SaveWts bool
- // [def: true] if true, save cycle log to file, as .cyc.tsv typically
- Cycle bool `def:"true" nest:"+" desc:"if true, save cycle log to file, as .cyc.tsv typically"`
+ // if true, save cycle log to file, as .cyc.tsv typically
+ Cycle bool `def:"true" nest:"+"`
// if true, save network activation etc data from testing trials, for later viewing in netview
- NetData bool `desc:"if true, save network activation etc data from testing trials, for later viewing in netview"`
+ NetData bool
}
// Config is a standard Sim config -- use as a starting point.
type Config struct {
- // [def: true] clamp constant Ge value -- otherwise drive discrete spiking input
- GeClamp bool `def:"true" desc:"clamp constant Ge value -- otherwise drive discrete spiking input"`
+ // clamp constant Ge value -- otherwise drive discrete spiking input
+ GeClamp bool `def:"true"`
- // [def: 50] frequency of input spiking for !GeClamp mode
- SpikeHz float32 `def:"50" desc:"frequency of input spiking for !GeClamp mode"`
+ // frequency of input spiking for !GeClamp mode
+ SpikeHz float32 `def:"50"`
- // [def: 0.1] [min: 0] [step: 0.01] Raw synaptic excitatory conductance
- Ge float32 `min:"0" step:"0.01" def:"0.1" desc:"Raw synaptic excitatory conductance"`
+ // Raw synaptic excitatory conductance
+ Ge float32 `min:"0" step:"0.01" def:"0.1"`
- // [def: 0.1] [min: 0] [step: 0.01] Inhibitory conductance
- Gi float32 `min:"0" step:"0.01" def:"0.1" desc:"Inhibitory conductance "`
+ // Inhibitory conductance
+ Gi float32 `min:"0" step:"0.01" def:"0.1"`
- // [def: 1] [min: 0] [max: 1] [step: 0.01] excitatory reversal (driving) potential -- determines where excitation pushes Vm up to
- ErevE float32 `min:"0" max:"1" step:"0.01" def:"1" desc:"excitatory reversal (driving) potential -- determines where excitation pushes Vm up to"`
+ // excitatory reversal (driving) potential -- determines where excitation pushes Vm up to
+ ErevE float32 `min:"0" max:"1" step:"0.01" def:"1"`
- // [def: 0.3] [min: 0] [max: 1] [step: 0.01] leak reversal (driving) potential -- determines where excitation pulls Vm down to
- ErevI float32 `min:"0" max:"1" step:"0.01" def:"0.3" desc:"leak reversal (driving) potential -- determines where excitation pulls Vm down to"`
+ // leak reversal (driving) potential -- determines where excitation pulls Vm down to
+ ErevI float32 `min:"0" max:"1" step:"0.01" def:"0.3"`
- // [min: 0] [step: 0.01] the variance parameter for Gaussian noise added to unit activations on every cycle
- Noise float32 `min:"0" step:"0.01" desc:"the variance parameter for Gaussian noise added to unit activations on every cycle"`
+ // the variance parameter for Gaussian noise added to unit activations on every cycle
+ Noise float32 `min:"0" step:"0.01"`
- // [def: true] apply sodium-gated potassium adaptation mechanisms that cause the neuron to reduce spiking over time
- KNaAdapt bool `def:"true" desc:"apply sodium-gated potassium adaptation mechanisms that cause the neuron to reduce spiking over time"`
+ // apply sodium-gated potassium adaptation mechanisms that cause the neuron to reduce spiking over time
+ KNaAdapt bool `def:"true"`
- // [def: 0.05] strength of mAHP M-type channel -- used to be implemented by KNa but now using the more standard M-type channel mechanism
- MahpGbar float32 `def:"0.05" desc:"strength of mAHP M-type channel -- used to be implemented by KNa but now using the more standard M-type channel mechanism"`
+ // strength of mAHP M-type channel -- used to be implemented by KNa but now using the more standard M-type channel mechanism
+ MahpGbar float32 `def:"0.05"`
- // [def: 0.006] strength of NMDA current -- 0.006 default for posterior cortex
- NMDAGbar float32 `def:"0.006" desc:"strength of NMDA current -- 0.006 default for posterior cortex"`
+ // strength of NMDA current -- 0.006 default for posterior cortex
+ NMDAGbar float32 `def:"0.006"`
- // [def: 0.015] strength of GABAB current -- 0.015 default for posterior cortex
- GABABGbar float32 `def:"0.015" desc:"strength of GABAB current -- 0.015 default for posterior cortex"`
+ // strength of GABAB current -- 0.015 default for posterior cortex
+ GABABGbar float32 `def:"0.015"`
- // [def: 0.02] strength of VGCC voltage gated calcium current -- only activated during spikes -- this is now an essential part of Ca-driven learning to reflect recv spiking in the Ca signal -- but if too strong leads to runaway excitatory bursting.
- VGCCGbar float32 `def:"0.02" desc:"strength of VGCC voltage gated calcium current -- only activated during spikes -- this is now an essential part of Ca-driven learning to reflect recv spiking in the Ca signal -- but if too strong leads to runaway excitatory bursting."`
+ // strength of VGCC voltage gated calcium current -- only activated during spikes -- this is now an essential part of Ca-driven learning to reflect recv spiking in the Ca signal -- but if too strong leads to runaway excitatory bursting.
+ VGCCGbar float32 `def:"0.02"`
- // [def: 0.1] strength of A-type potassium channel -- this is only active at high (depolarized) membrane potentials -- only during spikes -- useful to counteract VGCC's
- AKGbar float32 `def:"0.1" desc:"strength of A-type potassium channel -- this is only active at high (depolarized) membrane potentials -- only during spikes -- useful to counteract VGCC's"`
+ // strength of A-type potassium channel -- this is only active at high (depolarized) membrane potentials -- only during spikes -- useful to counteract VGCC's
+ AKGbar float32 `def:"0.1"`
- // [def: 200] [min: 10] total number of cycles to run
- NCycles int `min:"10" def:"200" desc:"total number of cycles to run"`
+ // total number of cycles to run
+ NCycles int `min:"10" def:"200"`
- // [def: 10] [min: 0] when does excitatory input into neuron come on?
- OnCycle int `min:"0" def:"10" desc:"when does excitatory input into neuron come on?"`
+ // when does excitatory input into neuron come on?
+ OnCycle int `min:"0" def:"10"`
- // [def: 160] [min: 0] when does excitatory input into neuron go off?
- OffCycle int `min:"0" def:"160" desc:"when does excitatory input into neuron go off?"`
+ // when does excitatory input into neuron go off?
+ OffCycle int `min:"0" def:"160"`
- // [def: 10] [min: 1] how often to update display (in cycles)
- UpdtInterval int `min:"1" def:"10" desc:"how often to update display (in cycles)"`
+ // how often to update display (in cycles)
+ UpdtInterval int `min:"1" def:"10" `
// specify include files here, and after configuration, it contains list of include files added
- Includes []string `desc:"specify include files here, and after configuration, it contains list of include files added"`
+ Includes []string
- // [def: true] open the GUI -- does not automatically run -- if false, then runs automatically and quits
- GUI bool `def:"true" desc:"open the GUI -- does not automatically run -- if false, then runs automatically and quits"`
+ // open the GUI -- does not automatically run -- if false, then runs automatically and quits
+ GUI bool `def:"true"`
// log debugging information
- Debug bool `desc:"log debugging information"`
+ Debug bool
- // [view: add-fields] parameter related configuration options
- Params ParamConfig `view:"add-fields" desc:"parameter related configuration options"`
+ // parameter related configuration options
+ Params ParamConfig `view:"add-fields"`
- // [view: add-fields] sim running related configuration options
- Run RunConfig `view:"add-fields" desc:"sim running related configuration options"`
+ // sim running related configuration options
+ Run RunConfig `view:"add-fields"`
- // [view: add-fields] data logging related configuration options
- Log LogConfig `view:"add-fields" desc:"data logging related configuration options"`
+ // data logging related configuration options
+ Log LogConfig `view:"add-fields"`
}
func (cfg *Config) IncludesPtr() *[]string { return &cfg.Includes }
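
The def tags retained above are what the config system uses to seed defaults before reading the .toml file and command-line args. Purely as an illustration (this is not the econfig implementation), a reflect-based applier for the simple scalar cases:

    package main

    import (
        "fmt"
        "reflect"
        "strconv"
    )

    // applyDefaults seeds int, bool, and float32 fields from their def tag.
    // Illustrative only: composite defaults like {'X':10,'Y':10} are skipped.
    func applyDefaults(cfg any) {
        v := reflect.ValueOf(cfg).Elem()
        t := v.Type()
        for i := 0; i < t.NumField(); i++ {
            def, ok := t.Field(i).Tag.Lookup("def")
            if !ok || def == "" {
                continue
            }
            f := v.Field(i)
            switch f.Kind() {
            case reflect.Int:
                if n, err := strconv.Atoi(def); err == nil {
                    f.SetInt(int64(n))
                }
            case reflect.Bool:
                if b, err := strconv.ParseBool(def); err == nil {
                    f.SetBool(b)
                }
            case reflect.Float32:
                if x, err := strconv.ParseFloat(def, 32); err == nil {
                    f.SetFloat(x)
                }
            }
        }
    }

    func main() {
        cfg := struct {
            NCycles int     `def:"200"`
            GeClamp bool    `def:"true"`
            Ge      float32 `def:"0.1"`
        }{}
        applyDefaults(&cfg)
        fmt.Println(cfg) // {200 true 0.1}
    }
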
diff --git a/examples/neuron/gtigen.go b/examples/neuron/gtigen.go
new file mode 100644
index 000000000..a31aae6d0
--- /dev/null
+++ b/examples/neuron/gtigen.go
@@ -0,0 +1,131 @@
+// Code generated by "goki generate -add-types"; DO NOT EDIT.
+
+package main
+
+import (
+ "goki.dev/gti"
+ "goki.dev/ordmap"
+)
+
+var _ = gti.AddType(&gti.Type{
+ Name: "main.ParamConfig",
+ ShortName: "main.ParamConfig",
+ IDName: "param-config",
+ Doc: "ParamConfig has config parameters related to sim params",
+ Directives: gti.Directives{},
+ Fields: ordmap.Make([]ordmap.KeyVal[string, *gti.Field]{
+ {"Network", >i.Field{Name: "Network", Type: "map[string]any", LocalType: "map[string]any", Doc: "network parameters", Directives: gti.Directives{}, Tag: ""}},
+ {"Sheet", >i.Field{Name: "Sheet", Type: "string", LocalType: "string", Doc: "Extra Param Sheet name(s) to use (space separated if multiple) -- must be valid name as listed in compiled-in params or loaded params", Directives: gti.Directives{}, Tag: ""}},
+ {"Tag", >i.Field{Name: "Tag", Type: "string", LocalType: "string", Doc: "extra tag to add to file names and logs saved from this run", Directives: gti.Directives{}, Tag: ""}},
+ {"Note", >i.Field{Name: "Note", Type: "string", LocalType: "string", Doc: "user note -- describe the run params etc -- like a git commit message for the run", Directives: gti.Directives{}, Tag: ""}},
+ {"File", >i.Field{Name: "File", Type: "string", LocalType: "string", Doc: "Name of the JSON file to input saved parameters from.", Directives: gti.Directives{}, Tag: "nest:\"+\""}},
+ {"SaveAll", >i.Field{Name: "SaveAll", Type: "bool", LocalType: "bool", Doc: "Save a snapshot of all current param and config settings in a directory named params_ (or _good if Good is true), then quit -- useful for comparing to later changes and seeing multiple views of current params", Directives: gti.Directives{}, Tag: "nest:\"+\""}},
+ {"Good", >i.Field{Name: "Good", Type: "bool", LocalType: "bool", Doc: "for SaveAll, save to params_good for a known good params state. This can be done prior to making a new release after all tests are passing -- add results to git to provide a full diff record of all params over time.", Directives: gti.Directives{}, Tag: "nest:\"+\""}},
+ }),
+ Embeds: ordmap.Make([]ordmap.KeyVal[string, *gti.Field]{}),
+ Methods: ordmap.Make([]ordmap.KeyVal[string, *gti.Method]{}),
+})
+
+var _ = gti.AddType(&gti.Type{
+ Name: "main.RunConfig",
+ ShortName: "main.RunConfig",
+ IDName: "run-config",
+ Doc: "RunConfig has config parameters related to running the sim",
+ Directives: gti.Directives{},
+ Fields: ordmap.Make([]ordmap.KeyVal[string, *gti.Field]{
+ {"GPU", >i.Field{Name: "GPU", Type: "bool", LocalType: "bool", Doc: "use the GPU for computation -- only for testing in this model -- not faster", Directives: gti.Directives{}, Tag: "def:\"false\""}},
+ {"NThreads", >i.Field{Name: "NThreads", Type: "int", LocalType: "int", Doc: "number of parallel threads for CPU computation -- 0 = use default", Directives: gti.Directives{}, Tag: "def:\"2\""}},
+ {"Run", >i.Field{Name: "Run", Type: "int", LocalType: "int", Doc: "starting run number -- determines the random seed -- runs counts from there -- can do all runs in parallel by launching separate jobs with each run, runs = 1", Directives: gti.Directives{}, Tag: "def:\"0\""}},
+ {"NRuns", >i.Field{Name: "NRuns", Type: "int", LocalType: "int", Doc: "total number of runs to do when running Train", Directives: gti.Directives{}, Tag: "def:\"1\" min:\"1\""}},
+ {"NEpochs", >i.Field{Name: "NEpochs", Type: "int", LocalType: "int", Doc: "total number of epochs per run", Directives: gti.Directives{}, Tag: "def:\"1\""}},
+ }),
+ Embeds: ordmap.Make([]ordmap.KeyVal[string, *gti.Field]{}),
+ Methods: ordmap.Make([]ordmap.KeyVal[string, *gti.Method]{}),
+})
+
+var _ = gti.AddType(&gti.Type{
+ Name: "main.LogConfig",
+ ShortName: "main.LogConfig",
+ IDName: "log-config",
+ Doc: "LogConfig has config parameters related to logging data",
+ Directives: gti.Directives{},
+ Fields: ordmap.Make([]ordmap.KeyVal[string, *gti.Field]{
+ {"SaveWts", >i.Field{Name: "SaveWts", Type: "bool", LocalType: "bool", Doc: "if true, save final weights after each run", Directives: gti.Directives{}, Tag: ""}},
+ {"Cycle", >i.Field{Name: "Cycle", Type: "bool", LocalType: "bool", Doc: "if true, save cycle log to file, as .cyc.tsv typically", Directives: gti.Directives{}, Tag: "def:\"true\" nest:\"+\""}},
+ {"NetData", >i.Field{Name: "NetData", Type: "bool", LocalType: "bool", Doc: "if true, save network activation etc data from testing trials, for later viewing in netview", Directives: gti.Directives{}, Tag: ""}},
+ }),
+ Embeds: ordmap.Make([]ordmap.KeyVal[string, *gti.Field]{}),
+ Methods: ordmap.Make([]ordmap.KeyVal[string, *gti.Method]{}),
+})
+
+var _ = gti.AddType(&gti.Type{
+ Name: "main.Config",
+ ShortName: "main.Config",
+ IDName: "config",
+ Doc: "Config is a standard Sim config -- use as a starting point.",
+ Directives: gti.Directives{},
+ Fields: ordmap.Make([]ordmap.KeyVal[string, *gti.Field]{
+ {"GeClamp", >i.Field{Name: "GeClamp", Type: "bool", LocalType: "bool", Doc: "clamp constant Ge value -- otherwise drive discrete spiking input", Directives: gti.Directives{}, Tag: "def:\"true\""}},
+ {"SpikeHz", >i.Field{Name: "SpikeHz", Type: "float32", LocalType: "float32", Doc: "frequency of input spiking for !GeClamp mode", Directives: gti.Directives{}, Tag: "def:\"50\""}},
+ {"Ge", >i.Field{Name: "Ge", Type: "float32", LocalType: "float32", Doc: "Raw synaptic excitatory conductance", Directives: gti.Directives{}, Tag: "min:\"0\" step:\"0.01\" def:\"0.1\""}},
+ {"Gi", >i.Field{Name: "Gi", Type: "float32", LocalType: "float32", Doc: "Inhibitory conductance", Directives: gti.Directives{}, Tag: "min:\"0\" step:\"0.01\" def:\"0.1\""}},
+ {"ErevE", >i.Field{Name: "ErevE", Type: "float32", LocalType: "float32", Doc: "excitatory reversal (driving) potential -- determines where excitation pushes Vm up to", Directives: gti.Directives{}, Tag: "min:\"0\" max:\"1\" step:\"0.01\" def:\"1\""}},
+ {"ErevI", >i.Field{Name: "ErevI", Type: "float32", LocalType: "float32", Doc: "leak reversal (driving) potential -- determines where excitation pulls Vm down to", Directives: gti.Directives{}, Tag: "min:\"0\" max:\"1\" step:\"0.01\" def:\"0.3\""}},
+ {"Noise", >i.Field{Name: "Noise", Type: "float32", LocalType: "float32", Doc: "the variance parameter for Gaussian noise added to unit activations on every cycle", Directives: gti.Directives{}, Tag: "min:\"0\" step:\"0.01\""}},
+ {"KNaAdapt", >i.Field{Name: "KNaAdapt", Type: "bool", LocalType: "bool", Doc: "apply sodium-gated potassium adaptation mechanisms that cause the neuron to reduce spiking over time", Directives: gti.Directives{}, Tag: "def:\"true\""}},
+ {"MahpGbar", >i.Field{Name: "MahpGbar", Type: "float32", LocalType: "float32", Doc: "strength of mAHP M-type channel -- used to be implemented by KNa but now using the more standard M-type channel mechanism", Directives: gti.Directives{}, Tag: "def:\"0.05\""}},
+ {"NMDAGbar", >i.Field{Name: "NMDAGbar", Type: "float32", LocalType: "float32", Doc: "strength of NMDA current -- 0.006 default for posterior cortex", Directives: gti.Directives{}, Tag: "def:\"0.006\""}},
+ {"GABABGbar", >i.Field{Name: "GABABGbar", Type: "float32", LocalType: "float32", Doc: "strength of GABAB current -- 0.015 default for posterior cortex", Directives: gti.Directives{}, Tag: "def:\"0.015\""}},
+ {"VGCCGbar", >i.Field{Name: "VGCCGbar", Type: "float32", LocalType: "float32", Doc: "strength of VGCC voltage gated calcium current -- only activated during spikes -- this is now an essential part of Ca-driven learning to reflect recv spiking in the Ca signal -- but if too strong leads to runaway excitatory bursting.", Directives: gti.Directives{}, Tag: "def:\"0.02\""}},
+ {"AKGbar", >i.Field{Name: "AKGbar", Type: "float32", LocalType: "float32", Doc: "strength of A-type potassium channel -- this is only active at high (depolarized) membrane potentials -- only during spikes -- useful to counteract VGCC's", Directives: gti.Directives{}, Tag: "def:\"0.1\""}},
+ {"NCycles", >i.Field{Name: "NCycles", Type: "int", LocalType: "int", Doc: "total number of cycles to run", Directives: gti.Directives{}, Tag: "min:\"10\" def:\"200\""}},
+ {"OnCycle", >i.Field{Name: "OnCycle", Type: "int", LocalType: "int", Doc: "when does excitatory input into neuron come on?", Directives: gti.Directives{}, Tag: "min:\"0\" def:\"10\""}},
+ {"OffCycle", >i.Field{Name: "OffCycle", Type: "int", LocalType: "int", Doc: "when does excitatory input into neuron go off?", Directives: gti.Directives{}, Tag: "min:\"0\" def:\"160\""}},
+ {"UpdtInterval", >i.Field{Name: "UpdtInterval", Type: "int", LocalType: "int", Doc: "how often to update display (in cycles)", Directives: gti.Directives{}, Tag: "min:\"1\" def:\"10\" "}},
+ {"Includes", >i.Field{Name: "Includes", Type: "[]string", LocalType: "[]string", Doc: "specify include files here, and after configuration, it contains list of include files added", Directives: gti.Directives{}, Tag: ""}},
+ {"GUI", >i.Field{Name: "GUI", Type: "bool", LocalType: "bool", Doc: "open the GUI -- does not automatically run -- if false, then runs automatically and quits", Directives: gti.Directives{}, Tag: "def:\"true\""}},
+ {"Debug", >i.Field{Name: "Debug", Type: "bool", LocalType: "bool", Doc: "log debugging information", Directives: gti.Directives{}, Tag: ""}},
+ {"Params", >i.Field{Name: "Params", Type: "github.com/emer/axon/examples/neuron.ParamConfig", LocalType: "ParamConfig", Doc: "parameter related configuration options", Directives: gti.Directives{}, Tag: "view:\"add-fields\""}},
+ {"Run", >i.Field{Name: "Run", Type: "github.com/emer/axon/examples/neuron.RunConfig", LocalType: "RunConfig", Doc: "sim running related configuration options", Directives: gti.Directives{}, Tag: "view:\"add-fields\""}},
+ {"Log", >i.Field{Name: "Log", Type: "github.com/emer/axon/examples/neuron.LogConfig", LocalType: "LogConfig", Doc: "data logging related configuration options", Directives: gti.Directives{}, Tag: "view:\"add-fields\""}},
+ }),
+ Embeds: ordmap.Make([]ordmap.KeyVal[string, *gti.Field]{}),
+ Methods: ordmap.Make([]ordmap.KeyVal[string, *gti.Method]{}),
+})
+
+var _ = gti.AddType(&gti.Type{
+ Name: "main.NeuronEx",
+ ShortName: "main.NeuronEx",
+ IDName: "neuron-ex",
+ Doc: "Extra state for neuron",
+ Directives: gti.Directives{},
+ Fields: ordmap.Make([]ordmap.KeyVal[string, *gti.Field]{
+ {"InISI", >i.Field{Name: "InISI", Type: "float32", LocalType: "float32", Doc: "input ISI countdown for spiking mode -- counts up", Directives: gti.Directives{}, Tag: ""}},
+ }),
+ Embeds: ordmap.Make([]ordmap.KeyVal[string, *gti.Field]{}),
+ Methods: ordmap.Make([]ordmap.KeyVal[string, *gti.Method]{}),
+})
+
+var _ = gti.AddType(&gti.Type{
+ Name: "main.Sim",
+ ShortName: "main.Sim",
+ IDName: "sim",
+ Doc: "Sim encapsulates the entire simulation model, and we define all the\nfunctionality as methods on this struct. This structure keeps all relevant\nstate information organized and available without having to pass everything around\nas arguments to methods, and provides the core GUI interface (note the view tags\nfor the fields which provide hints to how things should be displayed).",
+ Directives: gti.Directives{},
+ Fields: ordmap.Make([]ordmap.KeyVal[string, *gti.Field]{
+ {"Config", >i.Field{Name: "Config", Type: "github.com/emer/axon/examples/neuron.Config", LocalType: "Config", Doc: "simulation configuration parameters -- set by .toml config file and / or args", Directives: gti.Directives{}, Tag: ""}},
+ {"Net", >i.Field{Name: "Net", Type: "*github.com/emer/axon/axon.Network", LocalType: "*axon.Network", Doc: "the network -- click to view / edit parameters for layers, prjns, etc", Directives: gti.Directives{}, Tag: "view:\"no-inline\""}},
+ {"NeuronEx", >i.Field{Name: "NeuronEx", Type: "github.com/emer/axon/examples/neuron.NeuronEx", LocalType: "NeuronEx", Doc: "extra neuron state for additional channels: VGCC, AK", Directives: gti.Directives{}, Tag: "view:\"no-inline\""}},
+ {"Context", >i.Field{Name: "Context", Type: "github.com/emer/axon/axon.Context", LocalType: "axon.Context", Doc: "axon timing parameters and state", Directives: gti.Directives{}, Tag: ""}},
+ {"Stats", >i.Field{Name: "Stats", Type: "github.com/emer/emergent/v2/estats.Stats", LocalType: "estats.Stats", Doc: "contains computed statistic values", Directives: gti.Directives{}, Tag: ""}},
+ {"Logs", >i.Field{Name: "Logs", Type: "github.com/emer/emergent/v2/elog.Logs", LocalType: "elog.Logs", Doc: "logging", Directives: gti.Directives{}, Tag: "view:\"no-inline\""}},
+ {"Params", >i.Field{Name: "Params", Type: "github.com/emer/emergent/v2/emer.NetParams", LocalType: "emer.NetParams", Doc: "all parameter management", Directives: gti.Directives{}, Tag: "view:\"inline\""}},
+ {"Cycle", >i.Field{Name: "Cycle", Type: "int", LocalType: "int", Doc: "current cycle of updating", Directives: gti.Directives{}, Tag: "inactive:\"+\""}},
+ {"ViewUpdt", >i.Field{Name: "ViewUpdt", Type: "github.com/emer/emergent/v2/netview.ViewUpdt", LocalType: "netview.ViewUpdt", Doc: "netview update parameters", Directives: gti.Directives{}, Tag: "view:\"inline\""}},
+ {"GUI", >i.Field{Name: "GUI", Type: "github.com/emer/emergent/v2/egui.GUI", LocalType: "egui.GUI", Doc: "manages all the gui elements", Directives: gti.Directives{}, Tag: "view:\"-\""}},
+ {"TstCycPlot", >i.Field{Name: "TstCycPlot", Type: "*goki.dev/etable/v2/eplot.Plot2D", LocalType: "*eplot.Plot2D", Doc: "the test-trial plot", Directives: gti.Directives{}, Tag: "view:\"-\""}},
+ {"ValMap", >i.Field{Name: "ValMap", Type: "map[string]float32", LocalType: "map[string]float32", Doc: "map of values for detailed debugging / testing", Directives: gti.Directives{}, Tag: "view:\"-\""}},
+ }),
+ Embeds: ordmap.Make([]ordmap.KeyVal[string, *gti.Field]{}),
+ Methods: ordmap.Make([]ordmap.KeyVal[string, *gti.Method]{}),
+})
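
This generated file registers static type metadata (names, docs, field tags) so downstream GUI and config tooling can introspect types without runtime reflection. The minimal shape of one registration, mirroring the generated code above; main.MyType and its field are hypothetical:

    var _ = gti.AddType(&gti.Type{
        Name:       "main.MyType",
        ShortName:  "main.MyType",
        IDName:     "my-type",
        Doc:        "MyType is a hypothetical example type",
        Directives: gti.Directives{},
        Fields: ordmap.Make([]ordmap.KeyVal[string, *gti.Field]{
            {"Value", &gti.Field{Name: "Value", Type: "int", LocalType: "int", Doc: "a hypothetical field", Directives: gti.Directives{}, Tag: ""}},
        }),
        Embeds:  ordmap.Make([]ordmap.KeyVal[string, *gti.Field]{}),
        Methods: ordmap.Make([]ordmap.KeyVal[string, *gti.Method]{}),
    })
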
diff --git a/examples/neuron/neuron.go b/examples/neuron/neuron.go
index 85952da8a..c69e2095b 100644
--- a/examples/neuron/neuron.go
+++ b/examples/neuron/neuron.go
@@ -9,34 +9,32 @@ influences (including leak and synaptic inhibition).
*/
package main
+//go:generate goki generate -add-types
+
import (
"fmt"
"log"
"os"
"github.com/emer/axon/axon"
- "github.com/emer/emergent/ecmd"
- "github.com/emer/emergent/econfig"
- "github.com/emer/emergent/egui"
- "github.com/emer/emergent/elog"
- "github.com/emer/emergent/emer"
- "github.com/emer/emergent/estats"
- "github.com/emer/emergent/etime"
- "github.com/emer/emergent/netparams"
- "github.com/emer/emergent/netview"
- "github.com/emer/emergent/params"
- "github.com/emer/emergent/prjn"
- "github.com/emer/empi/mpi"
- "github.com/emer/etable/eplot"
- "github.com/emer/etable/etable"
- "github.com/emer/etable/etensor"
- _ "github.com/emer/etable/etview" // include to get gui views
- "github.com/emer/etable/minmax"
- "github.com/goki/gi/gi"
- "github.com/goki/gi/gimain"
- "github.com/goki/gi/giv"
- "github.com/goki/ki/ki"
- "github.com/goki/mat32"
+ "github.com/emer/emergent/v2/ecmd"
+ "github.com/emer/emergent/v2/econfig"
+ "github.com/emer/emergent/v2/egui"
+ "github.com/emer/emergent/v2/elog"
+ "github.com/emer/emergent/v2/emer"
+ "github.com/emer/emergent/v2/estats"
+ "github.com/emer/emergent/v2/etime"
+ "github.com/emer/emergent/v2/netparams"
+ "github.com/emer/emergent/v2/netview"
+ "github.com/emer/emergent/v2/params"
+ "github.com/emer/emergent/v2/prjn"
+ "github.com/emer/empi/v2/mpi"
+ "goki.dev/etable/v2/etable"
+ "goki.dev/etable/v2/etensor"
+ _ "goki.dev/etable/v2/etview" // include to get gui views
+ "goki.dev/etable/v2/minmax"
+ "goki.dev/gi/v2/gi"
+ "goki.dev/gi/v2/gimain"
)
func main() {
@@ -44,7 +42,7 @@ func main() {
sim.New()
sim.ConfigAll()
if sim.Config.GUI {
- gimain.Main(sim.RunGUI)
+ gimain.Run(sim.RunGUI)
} else {
sim.RunNoGUI()
}
@@ -79,7 +77,7 @@ var ParamSets = netparams.Sets{
type NeuronEx struct {
// input ISI countdown for spiking mode -- counts up
- InISI float32 `desc:"input ISI countdown for spiking mode -- counts up"`
+ InISI float32
}
func (nrn *NeuronEx) Init() {
@@ -94,49 +92,37 @@ func (nrn *NeuronEx) Init() {
type Sim struct {
// simulation configuration parameters -- set by .toml config file and / or args
- Config Config `desc:"simulation configuration parameters -- set by .toml config file and / or args"`
+ Config Config
- // [view: no-inline] the network -- click to view / edit parameters for layers, prjns, etc
- Net *axon.Network `view:"no-inline" desc:"the network -- click to view / edit parameters for layers, prjns, etc"`
+ // the network -- click to view / edit parameters for layers, prjns, etc
+ Net *axon.Network `view:"no-inline"`
- // [view: no-inline] extra neuron state for additional channels: VGCC, AK
- NeuronEx NeuronEx `view:"no-inline" desc:"extra neuron state for additional channels: VGCC, AK"`
+ // extra neuron state for additional channels: VGCC, AK
+ NeuronEx NeuronEx `view:"no-inline"`
// axon timing parameters and state
- Context axon.Context `desc:"axon timing parameters and state"`
+ Context axon.Context
// contains computed statistic values
- Stats estats.Stats `desc:"contains computed statistic values"`
+ Stats estats.Stats
- // [view: no-inline] logging
- Logs elog.Logs `view:"no-inline" desc:"logging"`
+ // logging
+ Logs elog.Logs `view:"no-inline"`
- // [view: inline] all parameter management
- Params emer.NetParams `view:"inline" desc:"all parameter management"`
+ // all parameter management
+ Params emer.NetParams `view:"inline"`
// current cycle of updating
- Cycle int `inactive:"+" desc:"current cycle of updating"`
-
- // [view: -] main GUI window
- Win *gi.Window `view:"-" desc:"main GUI window"`
-
- // [view: -] the network viewer
- NetView *netview.NetView `view:"-" desc:"the network viewer"`
+ Cycle int `inactive:"+"`
- // [view: -] the master toolbar
- ToolBar *gi.ToolBar `view:"-" desc:"the master toolbar"`
+ // netview update parameters
+ ViewUpdt netview.ViewUpdt `view:"inline"`
- // [view: -] the test-trial plot
- TstCycPlot *eplot.Plot2D `view:"-" desc:"the test-trial plot"`
+ // manages all the gui elements
+ GUI egui.GUI `view:"-"`
- // [view: -] map of values for detailed debugging / testing
- ValMap map[string]float32 `view:"-" desc:"map of values for detailed debugging / testing"`
-
- // [view: -] true if sim is running
- IsRunning bool `view:"-" desc:"true if sim is running"`
-
- // [view: -] flag to stop running
- StopNow bool `view:"-" desc:"flag to stop running"`
+ // map of values for detailed debugging / testing
+ ValMap map[string]float32 `view:"-"`
}
// New creates new blank elements and initializes defaults
@@ -199,9 +185,8 @@ func (ss *Sim) Init() {
ss.Context.Reset()
ss.InitWts(ss.Net)
ss.NeuronEx.Init()
- ss.StopNow = false
+ ss.GUI.StopNow = false
ss.SetParams("", false) // all sheets
- ss.UpdateView()
}
// Counters returns a string of the current counter state
@@ -212,12 +197,9 @@ func (ss *Sim) Counters() string {
}
func (ss *Sim) UpdateView() {
- ss.TstCycPlot.UpdatePlot()
- if ss.NetView != nil && ss.NetView.IsVisible() {
- ss.NetView.Record(ss.Counters(), int(ss.Context.Cycle))
- // note: essential to use Go version of update when called from another goroutine
- ss.NetView.GoUpdate() // note: using counters is significantly slower..
- }
+ ss.GUI.UpdatePlot(etime.Test, etime.Cycle)
+ ss.GUI.ViewUpdt.Text = ss.Counters()
+ ss.GUI.ViewUpdt.UpdateCycle(int(ss.Context.Cycle))
}
////////////////////////////////////////////////////////////////////////////////
@@ -227,7 +209,7 @@ func (ss *Sim) UpdateView() {
func (ss *Sim) RunCycles() {
ctx := &ss.Context
ss.Init()
- ss.StopNow = false
+ ss.GUI.StopNow = false
ss.Net.InitActs(ctx)
ctx.NewState(etime.Train)
ss.SetParams("", false)
@@ -249,7 +231,7 @@ func (ss *Sim) RunCycles() {
ss.UpdateView()
}
ss.Context.CycleInc()
- if ss.StopNow {
+ if ss.GUI.StopNow {
break
}
}
@@ -314,7 +296,7 @@ func (ss *Sim) NeuronUpdt(nt *axon.Network, inputOn bool) {
// Stop tells the sim to stop running
func (ss *Sim) Stop() {
- ss.StopNow = true
+ ss.GUI.StopNow = true
}
/////////////////////////////////////////////////////////////////////////
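
Run-state flags move from Sim fields into the shared egui.GUI struct, so toolbar actions and the cycle loop consult one place. The resulting control pattern, using only fields shown in this diff (ncycles is illustrative):

    // inside a RunCycles-style loop: the Stop toolbar action sets GUI.StopNow
    for cyc := 0; cyc < ncycles; cyc++ {
        // ... update the network for one cycle ...
        if ss.GUI.StopNow {
            break
        }
    }
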
@@ -387,7 +369,7 @@ func (ss *Sim) ConfigLogItems() {
func (ss *Sim) ResetTstCycPlot() {
ss.Logs.ResetLog(etime.Test, etime.Cycle)
- ss.TstCycPlot.Update()
+ ss.GUI.UpdatePlot(etime.Test, etime.Cycle)
}
////////////////////////////////////////////////////////////////////////////////////////////
@@ -397,116 +379,84 @@ func (ss *Sim) ConfigNetView(nv *netview.NetView) {
nv.ViewDefaults()
}
-// ConfigGui configures the GoGi gui interface for this simulation,
-func (ss *Sim) ConfigGui() *gi.Window {
- width := 1600
- height := 1200
-
- gi.SetAppName("neuron")
- gi.SetAppAbout(`This simulation illustrates the basic properties of neural spiking and
-rate-code activation, reflecting a balance of excitatory and inhibitory
-influences (including leak and synaptic inhibition).
-See README.md on GitHub.`)
-
- win := gi.NewMainWindow("neuron", "Neuron", width, height)
- ss.Win = win
+// ConfigGUI configures the GoGi gui interface for this simulation,
+func (ss *Sim) ConfigGUI() {
+ title := "Neuron"
+ ss.GUI.MakeBody(ss, "neuron", title, `This simulation illustrates the basic properties of neural spiking and rate-code activation, reflecting a balance of excitatory and inhibitory influences (including leak and synaptic inhibition). See README.md on GitHub.`)
+ ss.GUI.CycleUpdateInterval = 10
- vp := win.WinViewport2D()
- updt := vp.UpdateStart()
-
- mfr := win.SetMainFrame()
-
- tbar := gi.AddNewToolBar(mfr, "tbar")
- tbar.SetStretchMaxWidth()
- ss.ToolBar = tbar
-
- split := gi.AddNewSplitView(mfr, "split")
- split.Dim = mat32.X
- split.SetStretchMaxWidth()
- split.SetStretchMaxHeight()
-
- sv := giv.AddNewStructView(split, "sv")
- sv.SetStruct(ss)
-
- tv := gi.AddNewTabView(split, "tv")
-
- nv := tv.AddNewTab(netview.KiT_NetView, "NetView").(*netview.NetView)
+ nv := ss.GUI.AddNetView("NetView")
nv.Var = "Act"
nv.SetNet(ss.Net)
- ss.NetView = nv
ss.ConfigNetView(nv) // add labels etc
-
- plt := tv.AddNewTab(eplot.KiT_Plot2D, "TstCycPlot").(*eplot.Plot2D)
- key := etime.Scope(etime.Test, etime.Cycle)
- plt.SetTable(ss.Logs.Table(etime.Test, etime.Cycle))
- egui.ConfigPlotFromLog("Neuron", plt, &ss.Logs, key)
- ss.TstCycPlot = plt
-
- split.SetSplits(.2, .8)
-
- tbar.AddAction(gi.ActOpts{Label: "Init", Icon: "update", Tooltip: "Initialize everything including network weights, and start over. Also applies current params.", UpdateFunc: func(act *gi.Action) {
- act.SetActiveStateUpdt(!ss.IsRunning)
- }}, win.This(), func(recv, send ki.Ki, sig int64, data interface{}) {
- ss.Init()
- vp.SetNeedsFullRender()
- })
-
- tbar.AddAction(gi.ActOpts{Label: "Stop", Icon: "stop", Tooltip: "Interrupts running. Hitting Train again will pick back up where it left off.", UpdateFunc: func(act *gi.Action) {
- act.SetActiveStateUpdt(ss.IsRunning)
- }}, win.This(), func(recv, send ki.Ki, sig int64, data interface{}) {
- ss.Stop()
- })
-
- tbar.AddAction(gi.ActOpts{Label: "Run Cycles", Icon: "step-fwd", Tooltip: "Runs neuron updating over NCycles.", UpdateFunc: func(act *gi.Action) {
- act.SetActiveStateUpdt(!ss.IsRunning)
- }}, win.This(), func(recv, send ki.Ki, sig int64, data interface{}) {
- if !ss.IsRunning {
- ss.IsRunning = true
- ss.RunCycles()
- ss.IsRunning = false
- vp.SetNeedsFullRender()
- }
- })
-
- tbar.AddSeparator("run-sep")
-
- tbar.AddAction(gi.ActOpts{Label: "Reset Plot", Icon: "update", Tooltip: "Reset TstCycPlot.", UpdateFunc: func(act *gi.Action) {
- act.SetActiveStateUpdt(!ss.IsRunning)
- }}, win.This(), func(recv, send ki.Ki, sig int64, data interface{}) {
- if !ss.IsRunning {
- ss.ResetTstCycPlot()
- }
- })
-
- tbar.AddAction(gi.ActOpts{Label: "Defaults", Icon: "update", Tooltip: "Restore initial default parameters.", UpdateFunc: func(act *gi.Action) {
- act.SetActiveStateUpdt(!ss.IsRunning)
- }}, win.This(), func(recv, send ki.Ki, sig int64, data interface{}) {
- ss.Defaults()
- ss.Init()
- vp.SetNeedsFullRender()
- })
-
- tbar.AddAction(gi.ActOpts{Label: "README", Icon: "file-markdown", Tooltip: "Opens your browser on the README file that contains instructions for how to run this model."}, win.This(),
- func(recv, send ki.Ki, sig int64, data interface{}) {
- gi.OpenURL("https://github.com/emer/axon/blob/master/examples/neuron/README.md")
+ ss.ViewUpdt.Config(nv, etime.AlphaCycle, etime.AlphaCycle)
+ ss.GUI.ViewUpdt = &ss.ViewUpdt
+
+ ss.GUI.AddPlots(title, &ss.Logs)
+ // key := etime.Scope(etime.Test, etime.Cycle)
+ // plt := ss.GUI.NewPlot(key, ss.GUI.Tabs.NewTab("TstCycPlot"))
+ // plt.SetTable(ss.Logs.Table(etime.Test, etime.Cycle))
+ // egui.ConfigPlotFromLog("Neuron", plt, &ss.Logs, key)
+ // ss.TstCycPlot = plt
+
+ ss.GUI.Body.AddAppBar(func(tb *gi.Toolbar) {
+ ss.GUI.AddToolbarItem(tb, egui.ToolbarItem{Label: "Init", Icon: "update",
+ Tooltip: "Initialize everything including network weights, and start over. Also applies current params.",
+ Active: egui.ActiveStopped,
+ Func: func() {
+ ss.Init()
+ ss.GUI.UpdateWindow()
+ },
+ })
+ ss.GUI.AddToolbarItem(tb, egui.ToolbarItem{Label: "Stop", Icon: "stop",
+ Tooltip: "Stops running.",
+ Active: egui.ActiveRunning,
+ Func: func() {
+ ss.Stop()
+ ss.GUI.UpdateWindow()
+ },
+ })
+ ss.GUI.AddToolbarItem(tb, egui.ToolbarItem{Label: "Run Cycles", Icon: "step-fwd",
+ Tooltip: "Runs neuron updating over NCycles.",
+ Active: egui.ActiveStopped,
+ Func: func() {
+ if !ss.GUI.IsRunning {
+ ss.GUI.IsRunning = true
+ ss.RunCycles()
+ ss.GUI.IsRunning = false
+ ss.GUI.UpdateWindow()
+ }
+ },
+ })
+ gi.NewSeparator(tb)
+ ss.GUI.AddToolbarItem(tb, egui.ToolbarItem{Label: "Reset Plot", Icon: "update",
+ Tooltip: "Reset TstCycPlot.",
+ Active: egui.ActiveStopped,
+ Func: func() {
+ ss.ResetTstCycPlot()
+ ss.GUI.UpdateWindow()
+ },
})
- vp.UpdateEndNoSig(updt)
-
- // main menu
- appnm := gi.AppName()
- mmen := win.MainMenu
- mmen.ConfigMenus([]string{appnm, "File", "Edit", "Window"})
-
- amen := win.MainMenu.ChildByName(appnm, 0).(*gi.Action)
- amen.Menu.AddAppMenu(win)
-
- emen := win.MainMenu.ChildByName("Edit", 1).(*gi.Action)
- emen.Menu.AddCopyCutPaste(win)
-
- win.SetCloseCleanFunc(func(w *gi.Window) {
- go gi.Quit() // once main window is closed, quit
+ ss.GUI.AddToolbarItem(tb, egui.ToolbarItem{Label: "Defaults", Icon: "update",
+ Tooltip: "Restore initial default parameters.",
+ Active: egui.ActiveStopped,
+ Func: func() {
+ ss.Defaults()
+ ss.Init()
+ ss.GUI.UpdateWindow()
+ },
+ })
+ ss.GUI.AddToolbarItem(tb, egui.ToolbarItem{Label: "README",
+ Icon: "file-markdown",
+ Tooltip: "Opens your browser on the README file that contains instructions for how to run this model.",
+ Active: egui.ActiveAlways,
+ Func: func() {
+ gi.OpenURL("https://github.com/emer/axon/blob/master/examples/neuron/README.md")
+ },
+ })
})
+ ss.GUI.FinalizeGUI(false)
if ss.Config.Run.GPU {
ss.Net.ConfigGPUwithGUI(&ss.Context)
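
Aside on the toolbar migration above: the old per-action UpdateFunc closures (each calling act.SetActiveStateUpdt against ss.IsRunning) are replaced by the declarative Active field on egui.ToolbarItem. A minimal sketch of what that enable-state pattern amounts to -- hypothetical names, not the actual egui internals:

package main

import "fmt"

// ActiveState mirrors the semantics of egui.ActiveAlways /
// ActiveStopped / ActiveRunning as used in the diff above.
type ActiveState int

const (
	ActiveAlways  ActiveState = iota
	ActiveStopped // enabled only when the sim is not running
	ActiveRunning // enabled only while the sim is running
)

// enabled reports whether a toolbar item with the given state should be
// clickable, given the current running flag -- replacing per-item closures.
func enabled(st ActiveState, running bool) bool {
	switch st {
	case ActiveStopped:
		return !running
	case ActiveRunning:
		return running
	default: // ActiveAlways
		return true
	}
}

func main() {
	fmt.Println(enabled(ActiveStopped, false)) // true: Init clickable when stopped
	fmt.Println(enabled(ActiveRunning, false)) // false: Stop disabled when stopped
}

The declarative form lets the framework recompute enabled states centrally, instead of every action polling the sim in its own update callback.
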
@@ -514,15 +464,12 @@
- // [def: 1] number of pools in BG / PFC -- if > 1 then does selection among options presented in parallel (not yet supported / tested) -- otherwise does go / no on a single optoin (default)
- NPools int `def:"1" desc:"number of pools in BG / PFC -- if > 1 then does selection among options presented in parallel (not yet supported / tested) -- otherwise does go / no on a single optoin (default)"`
+ // number of pools in BG / PFC -- if > 1 then does selection among options presented in parallel (not yet supported / tested) -- otherwise does go / no on a single option (default)
+ NPools int `def:"1"`
}
// ParamConfig has config parameters related to sim params
type ParamConfig struct {
// network parameters
- Network map[string]any `desc:"network parameters"`
+ Network map[string]any
// Extra Param Sheet name(s) to use (space separated if multiple) -- must be valid name as listed in compiled-in params or loaded params
- Sheet string `desc:"Extra Param Sheet name(s) to use (space separated if multiple) -- must be valid name as listed in compiled-in params or loaded params"`
+ Sheet string
// extra tag to add to file names and logs saved from this run
- Tag string `desc:"extra tag to add to file names and logs saved from this run"`
+ Tag string
// user note -- describe the run params etc -- like a git commit message for the run
- Note string `desc:"user note -- describe the run params etc -- like a git commit message for the run"`
+ Note string
// Name of the JSON file to input saved parameters from.
- File string `nest:"+" desc:"Name of the JSON file to input saved parameters from."`
+ File string `nest:"+"`
// Save a snapshot of all current param and config settings in a directory named params_ (or _good if Good is true), then quit -- useful for comparing to later changes and seeing multiple views of current params
- SaveAll bool `nest:"+" desc:"Save a snapshot of all current param and config settings in a directory named params_ (or _good if Good is true), then quit -- useful for comparing to later changes and seeing multiple views of current params"`
+ SaveAll bool `nest:"+"`
// for SaveAll, save to params_good for a known good params state. This can be done prior to making a new release after all tests are passing -- add results to git to provide a full diff record of all params over time.
- Good bool `nest:"+" desc:"for SaveAll, save to params_good for a known good params state. This can be done prior to making a new release after all tests are passing -- add results to git to provide a full diff record of all params over time."`
+ Good bool `nest:"+"`
}
// RunConfig has config parameters related to running the sim
type RunConfig struct {
- // [def: true] use the GPU for computation -- generally faster even for small models if NData ~16
- GPU bool `def:"true" desc:"use the GPU for computation -- generally faster even for small models if NData ~16"`
+ // use the GPU for computation -- generally faster even for small models if NData ~16
+ GPU bool `def:"true"`
- // [def: 16] [min: 1] number of data-parallel items to process in parallel per trial -- works (and is significantly faster) for both CPU and GPU. Results in an effective mini-batch of learning.
- NData int `def:"16" min:"1" desc:"number of data-parallel items to process in parallel per trial -- works (and is significantly faster) for both CPU and GPU. Results in an effective mini-batch of learning."`
+ // number of data-parallel items to process in parallel per trial -- works (and is significantly faster) for both CPU and GPU. Results in an effective mini-batch of learning.
+ NData int `def:"16" min:"1"`
- // [def: 0] number of parallel threads for CPU computation -- 0 = use default
- NThreads int `def:"0" desc:"number of parallel threads for CPU computation -- 0 = use default"`
+ // number of parallel threads for CPU computation -- 0 = use default
+ NThreads int `def:"0"`
- // [def: 0] starting run number -- determines the random seed -- runs counts from there -- can do all runs in parallel by launching separate jobs with each run, runs = 1
- Run int `def:"0" desc:"starting run number -- determines the random seed -- runs counts from there -- can do all runs in parallel by launching separate jobs with each run, runs = 1"`
+ // starting run number -- determines the random seed -- runs counts from there -- can do all runs in parallel by launching separate jobs with each run, runs = 1
+ Run int `def:"0"`
- // [def: 1] [min: 1] total number of runs to do when running Train
- NRuns int `def:"1" min:"1" desc:"total number of runs to do when running Train"`
+ // total number of runs to do when running Train
+ NRuns int `def:"1" min:"1"`
- // [def: 30] total number of epochs per run
- NEpochs int `def:"30" desc:"total number of epochs per run"`
+ // total number of epochs per run
+ NEpochs int `def:"30"`
- // [def: 128] total number of trials per epoch. Should be an even multiple of NData.
- NTrials int `def:"128" desc:"total number of trials per epoch. Should be an even multiple of NData."`
+ // total number of trials per epoch. Should be an even multiple of NData.
+ NTrials int `def:"128"`
}
// LogConfig has config parameters related to logging data
type LogConfig struct {
// if true, save final weights after each run
- SaveWts bool `desc:"if true, save final weights after each run"`
+ SaveWts bool
- // [def: true] if true, save train epoch log to file, as .epc.tsv typically
- Epoch bool `def:"true" nest:"+" desc:"if true, save train epoch log to file, as .epc.tsv typically"`
+ // if true, save train epoch log to file, as .epc.tsv typically
+ Epoch bool `def:"true" nest:"+"`
- // [def: true] if true, save run log to file, as .run.tsv typically
- Run bool `def:"true" nest:"+" desc:"if true, save run log to file, as .run.tsv typically"`
+ // if true, save run log to file, as .run.tsv typically
+ Run bool `def:"true" nest:"+"`
- // [def: false] if true, save train trial log to file, as .trl.tsv typically. May be large.
- Trial bool `def:"false" nest:"+" desc:"if true, save train trial log to file, as .trl.tsv typically. May be large."`
+ // if true, save train trial log to file, as .trl.tsv typically. May be large.
+ Trial bool `def:"false" nest:"+"`
- // [def: false] if true, save testing epoch log to file, as .tst_epc.tsv typically. In general it is better to copy testing items over to the training epoch log and record there.
- TestEpoch bool `def:"false" nest:"+" desc:"if true, save testing epoch log to file, as .tst_epc.tsv typically. In general it is better to copy testing items over to the training epoch log and record there."`
+ // if true, save testing epoch log to file, as .tst_epc.tsv typically. In general it is better to copy testing items over to the training epoch log and record there.
+ TestEpoch bool `def:"false" nest:"+"`
- // [def: false] if true, save testing trial log to file, as .tst_trl.tsv typically. May be large.
- TestTrial bool `def:"false" nest:"+" desc:"if true, save testing trial log to file, as .tst_trl.tsv typically. May be large."`
+ // if true, save testing trial log to file, as .tst_trl.tsv typically. May be large.
+ TestTrial bool `def:"false" nest:"+"`
// if true, save network activation etc data from testing trials, for later viewing in netview
- NetData bool `desc:"if true, save network activation etc data from testing trials, for later viewing in netview"`
+ NetData bool
// activates testing mode -- records detailed data for Go CI tests (not the same as running test mode on network, via Looper)
- Testing bool `desc:"activates testing mode -- records detailed data for Go CI tests (not the same as running test mode on network, via Looper)"`
+ Testing bool
}
// Config is a standard Sim config -- use as a starting point.
type Config struct {
// specify include files here, and after configuration, it contains list of include files added
- Includes []string `desc:"specify include files here, and after configuration, it contains list of include files added"`
+ Includes []string
- // [def: true] open the GUI -- does not automatically run -- if false, then runs automatically and quits
- GUI bool `def:"true" desc:"open the GUI -- does not automatically run -- if false, then runs automatically and quits"`
+ // open the GUI -- does not automatically run -- if false, then runs automatically and quits
+ GUI bool `def:"true"`
// log debugging information
- Debug bool `desc:"log debugging information"`
+ Debug bool
- // [view: add-fields] environment configuration options
- Env EnvConfig `view:"add-fields" desc:"environment configuration options"`
+ // environment configuration options
+ Env EnvConfig `view:"add-fields"`
- // [view: add-fields] parameter related configuration options
- Params ParamConfig `view:"add-fields" desc:"parameter related configuration options"`
+ // parameter related configuration options
+ Params ParamConfig `view:"add-fields"`
- // [view: add-fields] sim running related configuration options
- Run RunConfig `view:"add-fields" desc:"sim running related configuration options"`
+ // sim running related configuration options
+ Run RunConfig `view:"add-fields"`
- // [view: add-fields] data logging related configuration options
- Log LogConfig `view:"add-fields" desc:"data logging related configuration options"`
+ // data logging related configuration options
+ Log LogConfig `view:"add-fields"`
}
func (cfg *Config) IncludesPtr() *[]string { return &cfg.Includes }
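
The struct tags above (def:, min:, nest:) carry machine-readable metadata that the config system reads via reflection. A stdlib-only sketch of reading def: tags to fill in defaults -- an illustration under that assumption, not the actual econfig implementation:

package main

import (
	"fmt"
	"reflect"
	"strconv"
)

// ApplyIntDefaults sets int fields of cfg (a pointer to a struct) that
// are still zero to the value in their `def:` tag. Illustrative only:
// the real config system handles all types, nesting, and TOML input.
func ApplyIntDefaults(cfg any) error {
	v := reflect.ValueOf(cfg).Elem()
	t := v.Type()
	for i := 0; i < t.NumField(); i++ {
		f := t.Field(i)
		def, ok := f.Tag.Lookup("def")
		if !ok || f.Type.Kind() != reflect.Int {
			continue
		}
		if v.Field(i).Int() != 0 {
			continue // already set explicitly
		}
		n, err := strconv.Atoi(def)
		if err != nil {
			return fmt.Errorf("%s: bad def tag %q: %w", f.Name, def, err)
		}
		v.Field(i).SetInt(int64(n))
	}
	return nil
}

func main() {
	type RunConfig struct {
		NData   int `def:"16" min:"1"`
		NEpochs int `def:"30"`
	}
	rc := RunConfig{}
	if err := ApplyIntDefaults(&rc); err != nil {
		fmt.Println(err)
	}
	fmt.Println(rc.NData, rc.NEpochs) // prints: 16 30
}
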
diff --git a/examples/pcore/gono_env.go b/examples/pcore/gono_env.go
index 832721a4b..0626e470c 100644
--- a/examples/pcore/gono_env.go
+++ b/examples/pcore/gono_env.go
@@ -7,91 +7,91 @@ package main
import (
"math/rand"
- "github.com/emer/emergent/env"
- "github.com/emer/emergent/erand"
- "github.com/emer/emergent/etime"
- "github.com/emer/emergent/popcode"
- "github.com/emer/etable/etensor"
- "github.com/goki/mat32"
+ "github.com/emer/emergent/v2/env"
+ "github.com/emer/emergent/v2/erand"
+ "github.com/emer/emergent/v2/etime"
+ "github.com/emer/emergent/v2/popcode"
+ "goki.dev/etable/v2/etensor"
+ "goki.dev/mat32/v2"
)
// GoNoEnv implements simple Go vs. NoGo input patterns to test BG learning.
type GoNoEnv struct {
// name of environment -- Train or Test
- Nm string `desc:"name of environment -- Train or Test"`
+ Nm string
// training or testing env?
- Mode etime.Modes `desc:"training or testing env?"`
+ Mode etime.Modes
// trial counter -- set by caller for testing
- Trial env.Ctr `desc:"trial counter -- set by caller for testing"`
+ Trial env.Ctr
// activation of ACC positive valence -- drives go
- ACCPos float32 `desc:"activation of ACC positive valence -- drives go"`
+ ACCPos float32
// activation of ACC neg valence -- drives nogo
- ACCNeg float32 `desc:"activation of ACC neg valence -- drives nogo"`
+ ACCNeg float32
// threshold on diff between ACCPos - ACCNeg for counting as a Go trial
- PosNegThr float32 `desc:"threshold on diff between ACCPos - ACCNeg for counting as a Go trial"`
+ PosNegThr float32
// ACCPos and Neg are set manually -- do not generate random vals for training or auto-increment ACCPos / Neg values during test
- ManualVals bool `desc:"ACCPos and Neg are set manually -- do not generate random vals for training or auto-increment ACCPos / Neg values during test"`
+ ManualVals bool
// increment in testing activation for test all
- TestInc float32 `desc:"increment in testing activation for test all"`
+ TestInc float32
// number of repetitions per testing level
- TestReps int `desc:"number of repetitions per testing level"`
+ TestReps int
- // [view: -] number of pools for representing multiple different options to be evaluated in parallel, vs. 1 pool with a simple go nogo overall choice -- currently tested / configured for the 1 pool case
- NPools int `view:"-" desc:"number of pools for representing multiple different options to be evaluated in parallel, vs. 1 pool with a simple go nogo overall choice -- currently tested / configured for the 1 pool case"`
+ // number of pools for representing multiple different options to be evaluated in parallel, vs. 1 pool with a simple go nogo overall choice -- currently tested / configured for the 1 pool case
+ NPools int `view:"-"`
 // for case with multiple pools evaluated in parallel (not currently used), this is the across-pools multiplier in activation of ACC positive valence -- e.g., .9 decrements subsequent units by 10%
- ACCPosInc float32 `desc:"for case with multiple pools evaluated in parallel (not currently used), this is the across-pools multiplier in activation of ACC positive valence -- e.g., .9 daecrements subsequent units by 10%"`
+ ACCPosInc float32
// for case with multiple pools evaluated in parallel (not currently used), this is the across-pools multiplier in activation of ACC neg valence, e.g., 1.1 increments subsequent units by 10%
- ACCNegInc float32 `desc:"for case with multiple pools evaluated in parallel (not currently used), this is the across-pools multiplier in activation of ACC neg valence, e.g., 1.1 increments subsequent units by 10%"`
+ ACCNegInc float32
- // [view: -] number of units within each pool, Y
- NUnitsY int `view:"-" desc:"number of units within each pool, Y"`
+ // number of units within each pool, Y
+ NUnitsY int `view:"-"`
- // [view: -] number of units within each pool, X
- NUnitsX int `view:"-" desc:"number of units within each pool, X"`
+ // number of units within each pool, X
+ NUnitsX int `view:"-"`
- // [view: -] total number of units within each pool
- NUnits int `view:"-" desc:"total number of units within each pool"`
+ // total number of units within each pool
+ NUnits int `view:"-"`
// number of different values for PFC to learn in input layer -- gives PFC network something to do
- InN int `desc:"number of different values for PFC to learn in input layer -- gives PFC network something to do"`
+ InN int
// pop code the values in ACCPos and Neg
- PopCode popcode.OneD `desc:"pop code the values in ACCPos and Neg"`
+ PopCode popcode.OneD
- // [view: -] random number generator for the env -- all random calls must use this
- Rand erand.SysRand `view:"-" desc:"random number generator for the env -- all random calls must use this"`
+ // random number generator for the env -- all random calls must use this
+ Rand erand.SysRand `view:"-"`
// random seed
- RndSeed int64 `inactive:"+" desc:"random seed"`
+ RndSeed int64 `inactive:"+"`
// named states: ACCPos, ACCNeg
- States map[string]*etensor.Float32 `desc:"named states: ACCPos, ACCNeg"`
+ States map[string]*etensor.Float32
// true if Pos - Neg > Thr
- Should bool `inactive:"+" desc:"true if Pos - Neg > Thr"`
+ Should bool `inactive:"+"`
// true if model gated on this trial
- Gated bool `inactive:"+" desc:"true if model gated on this trial"`
+ Gated bool `inactive:"+"`
// true if gated == should
- Match bool `inactive:"+" desc:"true if gated == should"`
+ Match bool `inactive:"+"`
// reward based on match between Should vs. Gated
- Rew float32 `inactive:"+" desc:"reward based on match between Should vs. Gated"`
+ Rew float32 `inactive:"+"`
// input counter -- gives PFC network something to do
- InCtr int `inactive:"+" desc:"input counter -- gives PFC network something to do"`
+ InCtr int `inactive:"+"`
}
func (ev *GoNoEnv) Name() string {
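
The trial bookkeeping fields above imply a simple decision rule: Should is true when ACCPos - ACCNeg exceeds PosNegThr, Match records whether the model's gating agreed with Should, and Rew is derived from that match. A hedged sketch of this rule; the function name and the +1/-1 reward values are illustrative assumptions, not the env's actual code:

package main

import "fmt"

// evalTrial sketches the Go/NoGo trial evaluation implied by the
// GoNoEnv field docs; name and reward magnitudes are assumptions.
func evalTrial(accPos, accNeg, posNegThr float32, gated bool) (should, match bool, rew float32) {
	should = accPos-accNeg > posNegThr // "true if Pos - Neg > Thr"
	match = gated == should            // "true if gated == should"
	if match {
		rew = 1 // illustrative reward for a correct decision
	} else {
		rew = -1 // illustrative penalty for a mismatch
	}
	return
}

func main() {
	should, match, rew := evalTrial(0.8, 0.2, 0.1, true) // clear Go trial, model gated
	fmt.Println(should, match, rew)                      // true true 1
}
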
diff --git a/examples/pcore/gtigen.go b/examples/pcore/gtigen.go
new file mode 100644
index 000000000..1e23b898c
--- /dev/null
+++ b/examples/pcore/gtigen.go
@@ -0,0 +1,160 @@
+// Code generated by "goki generate -add-types"; DO NOT EDIT.
+
+package main
+
+import (
+ "goki.dev/gti"
+ "goki.dev/ordmap"
+)
+
+var _ = gti.AddType(&gti.Type{
+ Name: "main.EnvConfig",
+ ShortName: "main.EnvConfig",
+ IDName: "env-config",
+ Doc: "EnvConfig has config params for environment\nnote: only adding fields for key Env params that matter for both Network and Env\nother params are set via the Env map data mechanism.",
+ Directives: gti.Directives{},
+ Fields: ordmap.Make([]ordmap.KeyVal[string, *gti.Field]{
+ {"Env", >i.Field{Name: "Env", Type: "map[string]any", LocalType: "map[string]any", Doc: "env parameters -- can set any field/subfield on Env struct, using standard TOML formatting", Directives: gti.Directives{}, Tag: ""}},
+ {"ZeroTest", >i.Field{Name: "ZeroTest", Type: "bool", LocalType: "bool", Doc: "test with no ACC activity at all -- params need to prevent gating in this situation too", Directives: gti.Directives{}, Tag: ""}},
+ {"NPools", >i.Field{Name: "NPools", Type: "int", LocalType: "int", Doc: "number of pools in BG / PFC -- if > 1 then does selection among options presented in parallel (not yet supported / tested) -- otherwise does go / no on a single optoin (default)", Directives: gti.Directives{}, Tag: "def:\"1\""}},
+ }),
+ Embeds: ordmap.Make([]ordmap.KeyVal[string, *gti.Field]{}),
+ Methods: ordmap.Make([]ordmap.KeyVal[string, *gti.Method]{}),
+})
+
+var _ = gti.AddType(&gti.Type{
+ Name: "main.ParamConfig",
+ ShortName: "main.ParamConfig",
+ IDName: "param-config",
+ Doc: "ParamConfig has config parameters related to sim params",
+ Directives: gti.Directives{},
+ Fields: ordmap.Make([]ordmap.KeyVal[string, *gti.Field]{
+ {"Network", >i.Field{Name: "Network", Type: "map[string]any", LocalType: "map[string]any", Doc: "network parameters", Directives: gti.Directives{}, Tag: ""}},
+ {"Sheet", >i.Field{Name: "Sheet", Type: "string", LocalType: "string", Doc: "Extra Param Sheet name(s) to use (space separated if multiple) -- must be valid name as listed in compiled-in params or loaded params", Directives: gti.Directives{}, Tag: ""}},
+ {"Tag", >i.Field{Name: "Tag", Type: "string", LocalType: "string", Doc: "extra tag to add to file names and logs saved from this run", Directives: gti.Directives{}, Tag: ""}},
+ {"Note", >i.Field{Name: "Note", Type: "string", LocalType: "string", Doc: "user note -- describe the run params etc -- like a git commit message for the run", Directives: gti.Directives{}, Tag: ""}},
+ {"File", >i.Field{Name: "File", Type: "string", LocalType: "string", Doc: "Name of the JSON file to input saved parameters from.", Directives: gti.Directives{}, Tag: "nest:\"+\""}},
+ {"SaveAll", >i.Field{Name: "SaveAll", Type: "bool", LocalType: "bool", Doc: "Save a snapshot of all current param and config settings in a directory named params_ (or _good if Good is true), then quit -- useful for comparing to later changes and seeing multiple views of current params", Directives: gti.Directives{}, Tag: "nest:\"+\""}},
+ {"Good", >i.Field{Name: "Good", Type: "bool", LocalType: "bool", Doc: "for SaveAll, save to params_good for a known good params state. This can be done prior to making a new release after all tests are passing -- add results to git to provide a full diff record of all params over time.", Directives: gti.Directives{}, Tag: "nest:\"+\""}},
+ }),
+ Embeds: ordmap.Make([]ordmap.KeyVal[string, *gti.Field]{}),
+ Methods: ordmap.Make([]ordmap.KeyVal[string, *gti.Method]{}),
+})
+
+var _ = gti.AddType(&gti.Type{
+ Name: "main.RunConfig",
+ ShortName: "main.RunConfig",
+ IDName: "run-config",
+ Doc: "RunConfig has config parameters related to running the sim",
+ Directives: gti.Directives{},
+ Fields: ordmap.Make([]ordmap.KeyVal[string, *gti.Field]{
+ {"GPU", >i.Field{Name: "GPU", Type: "bool", LocalType: "bool", Doc: "use the GPU for computation -- generally faster even for small models if NData ~16", Directives: gti.Directives{}, Tag: "def:\"true\""}},
+ {"NData", >i.Field{Name: "NData", Type: "int", LocalType: "int", Doc: "number of data-parallel items to process in parallel per trial -- works (and is significantly faster) for both CPU and GPU. Results in an effective mini-batch of learning.", Directives: gti.Directives{}, Tag: "def:\"16\" min:\"1\""}},
+ {"NThreads", >i.Field{Name: "NThreads", Type: "int", LocalType: "int", Doc: "number of parallel threads for CPU computation -- 0 = use default", Directives: gti.Directives{}, Tag: "def:\"0\""}},
+ {"Run", >i.Field{Name: "Run", Type: "int", LocalType: "int", Doc: "starting run number -- determines the random seed -- runs counts from there -- can do all runs in parallel by launching separate jobs with each run, runs = 1", Directives: gti.Directives{}, Tag: "def:\"0\""}},
+ {"NRuns", >i.Field{Name: "NRuns", Type: "int", LocalType: "int", Doc: "total number of runs to do when running Train", Directives: gti.Directives{}, Tag: "def:\"1\" min:\"1\""}},
+ {"NEpochs", >i.Field{Name: "NEpochs", Type: "int", LocalType: "int", Doc: "total number of epochs per run", Directives: gti.Directives{}, Tag: "def:\"30\""}},
+ {"NTrials", >i.Field{Name: "NTrials", Type: "int", LocalType: "int", Doc: "total number of trials per epoch. Should be an even multiple of NData.", Directives: gti.Directives{}, Tag: "def:\"128\""}},
+ }),
+ Embeds: ordmap.Make([]ordmap.KeyVal[string, *gti.Field]{}),
+ Methods: ordmap.Make([]ordmap.KeyVal[string, *gti.Method]{}),
+})
+
+var _ = gti.AddType(&gti.Type{
+ Name: "main.LogConfig",
+ ShortName: "main.LogConfig",
+ IDName: "log-config",
+ Doc: "LogConfig has config parameters related to logging data",
+ Directives: gti.Directives{},
+ Fields: ordmap.Make([]ordmap.KeyVal[string, *gti.Field]{
+ {"SaveWts", >i.Field{Name: "SaveWts", Type: "bool", LocalType: "bool", Doc: "if true, save final weights after each run", Directives: gti.Directives{}, Tag: ""}},
+ {"Epoch", >i.Field{Name: "Epoch", Type: "bool", LocalType: "bool", Doc: "if true, save train epoch log to file, as .epc.tsv typically", Directives: gti.Directives{}, Tag: "def:\"true\" nest:\"+\""}},
+ {"Run", >i.Field{Name: "Run", Type: "bool", LocalType: "bool", Doc: "if true, save run log to file, as .run.tsv typically", Directives: gti.Directives{}, Tag: "def:\"true\" nest:\"+\""}},
+ {"Trial", >i.Field{Name: "Trial", Type: "bool", LocalType: "bool", Doc: "if true, save train trial log to file, as .trl.tsv typically. May be large.", Directives: gti.Directives{}, Tag: "def:\"false\" nest:\"+\""}},
+ {"TestEpoch", >i.Field{Name: "TestEpoch", Type: "bool", LocalType: "bool", Doc: "if true, save testing epoch log to file, as .tst_epc.tsv typically. In general it is better to copy testing items over to the training epoch log and record there.", Directives: gti.Directives{}, Tag: "def:\"false\" nest:\"+\""}},
+ {"TestTrial", >i.Field{Name: "TestTrial", Type: "bool", LocalType: "bool", Doc: "if true, save testing trial log to file, as .tst_trl.tsv typically. May be large.", Directives: gti.Directives{}, Tag: "def:\"false\" nest:\"+\""}},
+ {"NetData", >i.Field{Name: "NetData", Type: "bool", LocalType: "bool", Doc: "if true, save network activation etc data from testing trials, for later viewing in netview", Directives: gti.Directives{}, Tag: ""}},
+ {"Testing", >i.Field{Name: "Testing", Type: "bool", LocalType: "bool", Doc: "activates testing mode -- records detailed data for Go CI tests (not the same as running test mode on network, via Looper)", Directives: gti.Directives{}, Tag: ""}},
+ }),
+ Embeds: ordmap.Make([]ordmap.KeyVal[string, *gti.Field]{}),
+ Methods: ordmap.Make([]ordmap.KeyVal[string, *gti.Method]{}),
+})
+
+var _ = gti.AddType(&gti.Type{
+ Name: "main.Config",
+ ShortName: "main.Config",
+ IDName: "config",
+ Doc: "Config is a standard Sim config -- use as a starting point.",
+ Directives: gti.Directives{},
+ Fields: ordmap.Make([]ordmap.KeyVal[string, *gti.Field]{
+ {"Includes", >i.Field{Name: "Includes", Type: "[]string", LocalType: "[]string", Doc: "specify include files here, and after configuration, it contains list of include files added", Directives: gti.Directives{}, Tag: ""}},
+ {"GUI", >i.Field{Name: "GUI", Type: "bool", LocalType: "bool", Doc: "open the GUI -- does not automatically run -- if false, then runs automatically and quits", Directives: gti.Directives{}, Tag: "def:\"true\""}},
+ {"Debug", >i.Field{Name: "Debug", Type: "bool", LocalType: "bool", Doc: "log debugging information", Directives: gti.Directives{}, Tag: ""}},
+ {"Env", >i.Field{Name: "Env", Type: "github.com/emer/axon/examples/pcore.EnvConfig", LocalType: "EnvConfig", Doc: "environment configuration options", Directives: gti.Directives{}, Tag: "view:\"add-fields\""}},
+ {"Params", >i.Field{Name: "Params", Type: "github.com/emer/axon/examples/pcore.ParamConfig", LocalType: "ParamConfig", Doc: "parameter related configuration options", Directives: gti.Directives{}, Tag: "view:\"add-fields\""}},
+ {"Run", >i.Field{Name: "Run", Type: "github.com/emer/axon/examples/pcore.RunConfig", LocalType: "RunConfig", Doc: "sim running related configuration options", Directives: gti.Directives{}, Tag: "view:\"add-fields\""}},
+ {"Log", >i.Field{Name: "Log", Type: "github.com/emer/axon/examples/pcore.LogConfig", LocalType: "LogConfig", Doc: "data logging related configuration options", Directives: gti.Directives{}, Tag: "view:\"add-fields\""}},
+ }),
+ Embeds: ordmap.Make([]ordmap.KeyVal[string, *gti.Field]{}),
+ Methods: ordmap.Make([]ordmap.KeyVal[string, *gti.Method]{}),
+})
+
+var _ = gti.AddType(&gti.Type{
+ Name: "main.GoNoEnv",
+ ShortName: "main.GoNoEnv",
+ IDName: "go-no-env",
+ Doc: "GoNoEnv implements simple Go vs. NoGo input patterns to test BG learning.",
+ Directives: gti.Directives{},
+ Fields: ordmap.Make([]ordmap.KeyVal[string, *gti.Field]{
+ {"Nm", >i.Field{Name: "Nm", Type: "string", LocalType: "string", Doc: "name of environment -- Train or Test", Directives: gti.Directives{}, Tag: ""}},
+ {"Mode", >i.Field{Name: "Mode", Type: "github.com/emer/emergent/v2/etime.Modes", LocalType: "etime.Modes", Doc: "training or testing env?", Directives: gti.Directives{}, Tag: ""}},
+ {"Trial", >i.Field{Name: "Trial", Type: "github.com/emer/emergent/v2/env.Ctr", LocalType: "env.Ctr", Doc: "trial counter -- set by caller for testing", Directives: gti.Directives{}, Tag: ""}},
+ {"ACCPos", >i.Field{Name: "ACCPos", Type: "float32", LocalType: "float32", Doc: "activation of ACC positive valence -- drives go", Directives: gti.Directives{}, Tag: ""}},
+ {"ACCNeg", >i.Field{Name: "ACCNeg", Type: "float32", LocalType: "float32", Doc: "activation of ACC neg valence -- drives nogo", Directives: gti.Directives{}, Tag: ""}},
+ {"PosNegThr", >i.Field{Name: "PosNegThr", Type: "float32", LocalType: "float32", Doc: "threshold on diff between ACCPos - ACCNeg for counting as a Go trial", Directives: gti.Directives{}, Tag: ""}},
+ {"ManualVals", >i.Field{Name: "ManualVals", Type: "bool", LocalType: "bool", Doc: "ACCPos and Neg are set manually -- do not generate random vals for training or auto-increment ACCPos / Neg values during test", Directives: gti.Directives{}, Tag: ""}},
+ {"TestInc", >i.Field{Name: "TestInc", Type: "float32", LocalType: "float32", Doc: "increment in testing activation for test all", Directives: gti.Directives{}, Tag: ""}},
+ {"TestReps", >i.Field{Name: "TestReps", Type: "int", LocalType: "int", Doc: "number of repetitions per testing level", Directives: gti.Directives{}, Tag: ""}},
+ {"NPools", >i.Field{Name: "NPools", Type: "int", LocalType: "int", Doc: "number of pools for representing multiple different options to be evaluated in parallel, vs. 1 pool with a simple go nogo overall choice -- currently tested / configured for the 1 pool case", Directives: gti.Directives{}, Tag: "view:\"-\""}},
+ {"ACCPosInc", >i.Field{Name: "ACCPosInc", Type: "float32", LocalType: "float32", Doc: "for case with multiple pools evaluated in parallel (not currently used), this is the across-pools multiplier in activation of ACC positive valence -- e.g., .9 daecrements subsequent units by 10%", Directives: gti.Directives{}, Tag: ""}},
+ {"ACCNegInc", >i.Field{Name: "ACCNegInc", Type: "float32", LocalType: "float32", Doc: "for case with multiple pools evaluated in parallel (not currently used), this is the across-pools multiplier in activation of ACC neg valence, e.g., 1.1 increments subsequent units by 10%", Directives: gti.Directives{}, Tag: ""}},
+ {"NUnitsY", >i.Field{Name: "NUnitsY", Type: "int", LocalType: "int", Doc: "number of units within each pool, Y", Directives: gti.Directives{}, Tag: "view:\"-\""}},
+ {"NUnitsX", >i.Field{Name: "NUnitsX", Type: "int", LocalType: "int", Doc: "number of units within each pool, X", Directives: gti.Directives{}, Tag: "view:\"-\""}},
+ {"NUnits", >i.Field{Name: "NUnits", Type: "int", LocalType: "int", Doc: "total number of units within each pool", Directives: gti.Directives{}, Tag: "view:\"-\""}},
+ {"InN", >i.Field{Name: "InN", Type: "int", LocalType: "int", Doc: "number of different values for PFC to learn in input layer -- gives PFC network something to do", Directives: gti.Directives{}, Tag: ""}},
+ {"PopCode", >i.Field{Name: "PopCode", Type: "github.com/emer/emergent/v2/popcode.OneD", LocalType: "popcode.OneD", Doc: "pop code the values in ACCPos and Neg", Directives: gti.Directives{}, Tag: ""}},
+ {"Rand", >i.Field{Name: "Rand", Type: "github.com/emer/emergent/v2/erand.SysRand", LocalType: "erand.SysRand", Doc: "random number generator for the env -- all random calls must use this", Directives: gti.Directives{}, Tag: "view:\"-\""}},
+ {"RndSeed", >i.Field{Name: "RndSeed", Type: "int64", LocalType: "int64", Doc: "random seed", Directives: gti.Directives{}, Tag: "inactive:\"+\""}},
+ {"States", >i.Field{Name: "States", Type: "map[string]*goki.dev/etable/v2/etensor.Float32", LocalType: "map[string]*etensor.Float32", Doc: "named states: ACCPos, ACCNeg", Directives: gti.Directives{}, Tag: ""}},
+ {"Should", >i.Field{Name: "Should", Type: "bool", LocalType: "bool", Doc: "true if Pos - Neg > Thr", Directives: gti.Directives{}, Tag: "inactive:\"+\""}},
+ {"Gated", >i.Field{Name: "Gated", Type: "bool", LocalType: "bool", Doc: "true if model gated on this trial", Directives: gti.Directives{}, Tag: "inactive:\"+\""}},
+ {"Match", >i.Field{Name: "Match", Type: "bool", LocalType: "bool", Doc: "true if gated == should", Directives: gti.Directives{}, Tag: "inactive:\"+\""}},
+ {"Rew", >i.Field{Name: "Rew", Type: "float32", LocalType: "float32", Doc: "reward based on match between Should vs. Gated", Directives: gti.Directives{}, Tag: "inactive:\"+\""}},
+ {"InCtr", >i.Field{Name: "InCtr", Type: "int", LocalType: "int", Doc: "input counter -- gives PFC network something to do", Directives: gti.Directives{}, Tag: "inactive:\"+\""}},
+ }),
+ Embeds: ordmap.Make([]ordmap.KeyVal[string, *gti.Field]{}),
+ Methods: ordmap.Make([]ordmap.KeyVal[string, *gti.Method]{}),
+})
+
+var _ = gti.AddType(&gti.Type{
+ Name: "main.Sim",
+ ShortName: "main.Sim",
+ IDName: "sim",
+ Doc: "Sim encapsulates the entire simulation model, and we define all the\nfunctionality as methods on this struct. This structure keeps all relevant\nstate information organized and available without having to pass everything around\nas arguments to methods, and provides the core GUI interface (note the view tags\nfor the fields which provide hints to how things should be displayed).",
+ Directives: gti.Directives{},
+ Fields: ordmap.Make([]ordmap.KeyVal[string, *gti.Field]{
+ {"Config", >i.Field{Name: "Config", Type: "github.com/emer/axon/examples/pcore.Config", LocalType: "Config", Doc: "simulation configuration parameters -- set by .toml config file and / or args", Directives: gti.Directives{}, Tag: ""}},
+ {"Net", >i.Field{Name: "Net", Type: "*github.com/emer/axon/axon.Network", LocalType: "*axon.Network", Doc: "the network -- click to view / edit parameters for layers, prjns, etc", Directives: gti.Directives{}, Tag: "view:\"no-inline\""}},
+ {"Params", >i.Field{Name: "Params", Type: "github.com/emer/emergent/v2/emer.NetParams", LocalType: "emer.NetParams", Doc: "all parameter management", Directives: gti.Directives{}, Tag: "view:\"inline\""}},
+ {"Loops", >i.Field{Name: "Loops", Type: "*github.com/emer/emergent/v2/looper.Manager", LocalType: "*looper.Manager", Doc: "contains looper control loops for running sim", Directives: gti.Directives{}, Tag: "view:\"no-inline\""}},
+ {"Stats", >i.Field{Name: "Stats", Type: "github.com/emer/emergent/v2/estats.Stats", LocalType: "estats.Stats", Doc: "contains computed statistic values", Directives: gti.Directives{}, Tag: ""}},
+ {"Logs", >i.Field{Name: "Logs", Type: "github.com/emer/emergent/v2/elog.Logs", LocalType: "elog.Logs", Doc: "Contains all the logs and information about the logs.'", Directives: gti.Directives{}, Tag: ""}},
+ {"Envs", >i.Field{Name: "Envs", Type: "github.com/emer/emergent/v2/env.Envs", LocalType: "env.Envs", Doc: "Environments", Directives: gti.Directives{}, Tag: "view:\"no-inline\""}},
+ {"Context", >i.Field{Name: "Context", Type: "github.com/emer/axon/axon.Context", LocalType: "axon.Context", Doc: "axon timing parameters and state", Directives: gti.Directives{}, Tag: ""}},
+ {"ViewUpdt", >i.Field{Name: "ViewUpdt", Type: "github.com/emer/emergent/v2/netview.ViewUpdt", LocalType: "netview.ViewUpdt", Doc: "netview update parameters", Directives: gti.Directives{}, Tag: "view:\"inline\""}},
+ {"GUI", >i.Field{Name: "GUI", Type: "github.com/emer/emergent/v2/egui.GUI", LocalType: "egui.GUI", Doc: "manages all the gui elements", Directives: gti.Directives{}, Tag: "view:\"-\""}},
+ {"RndSeeds", >i.Field{Name: "RndSeeds", Type: "github.com/emer/emergent/v2/erand.Seeds", LocalType: "erand.Seeds", Doc: "a list of random seeds to use for each run", Directives: gti.Directives{}, Tag: "view:\"-\""}},
+ }),
+ Embeds: ordmap.Make([]ordmap.KeyVal[string, *gti.Field]{}),
+ Methods: ordmap.Make([]ordmap.KeyVal[string, *gti.Method]{}),
+})
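
The generated metadata above uses ordmap.Make over a slice of key/value pairs rather than a plain Go map, since Go map iteration order is unspecified while field declaration order matters for display and codegen. A stdlib-only sketch of that ordered-map idea (not the goki.dev/ordmap API):

package main

import "fmt"

// kv pairs a key with its value, preserving insertion order in a slice.
type kv struct{ Key, Val string }

// OrderedMap keeps both an ordered slice (deterministic iteration) and
// an index map (O(1) lookup) -- the same need ordmap serves here.
type OrderedMap struct {
	order []kv
	index map[string]int
}

func (m *OrderedMap) Add(k, v string) {
	if m.index == nil {
		m.index = map[string]int{}
	}
	m.index[k] = len(m.order)
	m.order = append(m.order, kv{k, v})
}

func (m *OrderedMap) Get(k string) (string, bool) {
	i, ok := m.index[k]
	if !ok {
		return "", false
	}
	return m.order[i].Val, true
}

func main() {
	var fields OrderedMap
	fields.Add("Nm", "string")
	fields.Add("Mode", "etime.Modes")
	for _, f := range fields.order { // iterates in declaration order
		fmt.Println(f.Key, f.Val)
	}
}
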
diff --git a/examples/pcore/params.go b/examples/pcore/params.go
index c68028e9f..682804234 100644
--- a/examples/pcore/params.go
+++ b/examples/pcore/params.go
@@ -5,8 +5,8 @@
package main
import (
- "github.com/emer/emergent/netparams"
- "github.com/emer/emergent/params"
+ "github.com/emer/emergent/v2/netparams"
+ "github.com/emer/emergent/v2/params"
)
// ParamSets is the default set of parameters -- Base is always applied,
@@ -33,8 +33,9 @@ var ParamSets = netparams.Sets{
// Prjns
{Sel: ".MatrixPrjn", Desc: "",
Params: params.Params{
- "Prjn.Matrix.NoGateLRate": "1", // 1 is good -- drives learning on nogate which is rewarded -- more closely tracks
- "Prjn.Learn.LRate.Base": "0.02", // .02 default
+ "Prjn.Matrix.NoGateLRate": "1", // 1 is good -- drives learning on nogate which is rewarded -- more closely tracks
+ "Prjn.Learn.LRate.Base": "0.02", // .02 default
+ "Prjn.Learn.Trace.LearnThr": "0.75",
}},
{Sel: "#UrgencyToMtxGo", Desc: "strong urgency factor",
Params: params.Params{
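
For reference on the params change above: each selector (e.g. .MatrixPrjn) maps dotted parameter paths such as "Prjn.Learn.Trace.LearnThr" to string values. A stdlib reflection sketch of applying one dotted path to a nested struct -- assuming float fields, and not the actual emergent params code:

package main

import (
	"fmt"
	"reflect"
	"strconv"
	"strings"
)

// setByPath walks a dotted path like "Prjn.Learn.LRate.Base" into root
// (a pointer to a struct) and sets the final float field from val.
// The leading path element names the receiver type and is skipped.
func setByPath(root any, path, val string) error {
	v := reflect.ValueOf(root).Elem()
	parts := strings.Split(path, ".")
	for _, p := range parts[1:] {
		v = v.FieldByName(p)
		if !v.IsValid() {
			return fmt.Errorf("no field %q in path %q", p, path)
		}
	}
	switch v.Kind() {
	case reflect.Float32, reflect.Float64:
		f, err := strconv.ParseFloat(val, 64)
		if err != nil {
			return err
		}
		v.SetFloat(f)
	default:
		return fmt.Errorf("unhandled kind %v for path %q", v.Kind(), path)
	}
	return nil
}

func main() {
	// Hypothetical mini structs standing in for the real Prjn params.
	type LRate struct{ Base float32 }
	type Learn struct{ LRate LRate }
	type Prjn struct{ Learn Learn }
	pj := Prjn{}
	if err := setByPath(&pj, "Prjn.Learn.LRate.Base", "0.02"); err != nil {
		fmt.Println(err)
	}
	fmt.Println(pj.Learn.LRate.Base) // 0.02
}
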
diff --git a/examples/pcore/params_good/params.toml b/examples/pcore/params_good/params.toml
index 84f1444d4..fa0d5dc30 100644
--- a/examples/pcore/params_good/params.toml
+++ b/examples/pcore/params_good/params.toml
@@ -25,6 +25,7 @@
Desc = ""
[Base.Params]
"Prjn.Learn.LRate.Base" = "0.02"
+ "Prjn.Learn.Trace.LearnThr" = "0.75"
"Prjn.Matrix.NoGateLRate" = "1"
[[Base]]
diff --git a/examples/pcore/params_good/params_all.txt b/examples/pcore/params_good/params_all.txt
index fa4aec392..d5f4279cf 100644
--- a/examples/pcore/params_good/params_all.txt
+++ b/examples/pcore/params_good/params_all.txt
@@ -11,13 +11,13 @@ Act: {
Clamp: { IsInput: false IsTarget: false Ge: 1 Add: false ErrThr: 0.5 }
Noise: { On: false GeHz: 100 Ge: 0.001 GiHz: 200 Gi: 0.001 }
VmRange: { Min: 0.1 Max: 1 }
- Mahp: { Gbar: 0.02 Voff: -30 Vslope: 9 TauMax: 1000 Tadj: 3.2093637 DtMax: 0.001 }
+ Mahp: { Gbar: 0.02 Voff: -30 Vslope: 9 TauMax: 1000 Tadj: 3.209364 DtMax: 0.001 }
Sahp: { Gbar: 0.05 CaTau: 5 Off: 0.8 Slope: 0.02 TauMax: 1 CaDt: 0.2 DtMax: 1 }
KNa: { On: true TrialSlow: false Med: { On: true Rise: 0.02 Max: 0.2 Tau: 200 Dt: 0.005 } Slow: { On: true Rise: 0.001 Max: 0.2 Tau: 1000 Dt: 0.001
} }
NMDA: { Gbar: 0 Tau: 100 ITau: 1 MgC: 1.4 Voff: 0 }
MaintNMDA: { Gbar: 0.007 Tau: 200 ITau: 1 MgC: 1.4 Voff: 0 }
- GabaB: { Gbar: 0 RiseTau: 45 DecayTau: 50 Gbase: 0.2 GiSpike: 10 MaxTime: 47.41225 TauFact: 2.5811758 RiseDt: 0.022222223 DecayDt: 0.02 }
+ GabaB: { Gbar: 0 RiseTau: 45 DecayTau: 50 Gbase: 0.2 GiSpike: 10 MaxTime: 47.41225 TauFact: 2.5811756 RiseDt: 0.022222223 DecayDt: 0.02 }
VGCC: { Gbar: 0.02 Ca: 25 }
AK: { Gbar: 0.1 Hf: 0.076 Mf: 0.075 Voff: 2 Vmax: -37 }
SKCa: { Gbar: 0 C50: 0.5 ActTau: 15 DeTau: 30 KCaR: 0.8 CaRDecayTau: 150 CaInThr: 0.01 CaInTau: 50 }
@@ -37,7 +37,7 @@ Learn: {
TrgAvgAct: { On: true GiBaseInit: 0 ErrLRate: 0.02 SynScaleRate: 0.005 SubMean: 1 Permute: true Pool: true TrgRange: { Min: 0.5 Max: 2
} }
RLRate: { On: true SigmoidMin: 0.05 Diff: true SpkThr: 0.1 DiffThr: 0.02 Min: 0.001 }
- NeuroMod: { DAMod: 0 Valence: 0 DAModGain: 0.5 DALRateSign: false DALRateMod: 0 AChLRateMod: 0 AChDisInhib: 0 BurstGain: 1 DipGain: 1 }
+ NeuroMod: { DAMod: NoDAMod Valence: Positive DAModGain: 0.5 DALRateSign: false DALRateMod: 0 AChLRateMod: 0 AChDisInhib: 0 BurstGain: 1 DipGain: 1 }
}
GP: {
GPType: GPi
@@ -135,13 +135,13 @@ Act: {
Clamp: { IsInput: false IsTarget: false Ge: 1 Add: false ErrThr: 0.5 }
Noise: { On: false GeHz: 100 Ge: 0.001 GiHz: 200 Gi: 0.001 }
VmRange: { Min: 0.1 Max: 1 }
- Mahp: { Gbar: 0.02 Voff: -30 Vslope: 9 TauMax: 1000 Tadj: 3.2093637 DtMax: 0.001 }
+ Mahp: { Gbar: 0.02 Voff: -30 Vslope: 9 TauMax: 1000 Tadj: 3.209364 DtMax: 0.001 }
Sahp: { Gbar: 0.05 CaTau: 5 Off: 0.8 Slope: 0.02 TauMax: 1 CaDt: 0.2 DtMax: 1 }
KNa: { On: true TrialSlow: false Med: { On: true Rise: 0.02 Max: 0.2 Tau: 200 Dt: 0.005 } Slow: { On: true Rise: 0.001 Max: 0.2 Tau: 1000 Dt: 0.001
} }
NMDA: { Gbar: 0 Tau: 100 ITau: 1 MgC: 1.4 Voff: 0 }
MaintNMDA: { Gbar: 0.007 Tau: 200 ITau: 1 MgC: 1.4 Voff: 0 }
- GabaB: { Gbar: 0 RiseTau: 45 DecayTau: 50 Gbase: 0.2 GiSpike: 10 MaxTime: 47.41225 TauFact: 2.5811758 RiseDt: 0.022222223 DecayDt: 0.02 }
+ GabaB: { Gbar: 0 RiseTau: 45 DecayTau: 50 Gbase: 0.2 GiSpike: 10 MaxTime: 47.41225 TauFact: 2.5811756 RiseDt: 0.022222223 DecayDt: 0.02 }
VGCC: { Gbar: 0.02 Ca: 25 }
AK: { Gbar: 0.1 Hf: 0.076 Mf: 0.075 Voff: 2 Vmax: -37 }
SKCa: { Gbar: 0 C50: 0.5 ActTau: 15 DeTau: 30 KCaR: 0.8 CaRDecayTau: 150 CaInThr: 0.01 CaInTau: 50 }
@@ -161,7 +161,7 @@ Learn: {
TrgAvgAct: { On: true GiBaseInit: 0 ErrLRate: 0.02 SynScaleRate: 0.005 SubMean: 1 Permute: true Pool: true TrgRange: { Min: 0.5 Max: 2
} }
RLRate: { On: true SigmoidMin: 0.05 Diff: true SpkThr: 0.1 DiffThr: 0.02 Min: 0.001 }
- NeuroMod: { DAMod: 0 Valence: 0 DAModGain: 0.5 DALRateSign: false DALRateMod: 0 AChLRateMod: 0 AChDisInhib: 0 BurstGain: 1 DipGain: 1 }
+ NeuroMod: { DAMod: NoDAMod Valence: Positive DAModGain: 0.5 DALRateSign: false DALRateMod: 0 AChLRateMod: 0 AChDisInhib: 0 BurstGain: 1 DipGain: 1 }
}
GP: {
GPType: GPeOut
@@ -219,13 +219,13 @@ Act: {
Clamp: { IsInput: false IsTarget: false Ge: 1 Add: false ErrThr: 0.5 }
Noise: { On: false GeHz: 100 Ge: 0.001 GiHz: 200 Gi: 0.001 }
VmRange: { Min: 0.1 Max: 1 }
- Mahp: { Gbar: 0.02 Voff: -30 Vslope: 9 TauMax: 1000 Tadj: 3.2093637 DtMax: 0.001 }
+ Mahp: { Gbar: 0.02 Voff: -30 Vslope: 9 TauMax: 1000 Tadj: 3.209364 DtMax: 0.001 }
Sahp: { Gbar: 0.05 CaTau: 5 Off: 0.8 Slope: 0.02 TauMax: 1 CaDt: 0.2 DtMax: 1 }
KNa: { On: true TrialSlow: false Med: { On: true Rise: 0.02 Max: 0.2 Tau: 200 Dt: 0.005 } Slow: { On: true Rise: 0.001 Max: 0.2 Tau: 1000 Dt: 0.001
} }
NMDA: { Gbar: 0 Tau: 100 ITau: 1 MgC: 1.4 Voff: 0 }
MaintNMDA: { Gbar: 0.007 Tau: 200 ITau: 1 MgC: 1.4 Voff: 0 }
- GabaB: { Gbar: 0 RiseTau: 45 DecayTau: 50 Gbase: 0.2 GiSpike: 10 MaxTime: 47.41225 TauFact: 2.5811758 RiseDt: 0.022222223 DecayDt: 0.02 }
+ GabaB: { Gbar: 0 RiseTau: 45 DecayTau: 50 Gbase: 0.2 GiSpike: 10 MaxTime: 47.41225 TauFact: 2.5811756 RiseDt: 0.022222223 DecayDt: 0.02 }
VGCC: { Gbar: 0.02 Ca: 25 }
AK: { Gbar: 0.1 Hf: 0.076 Mf: 0.075 Voff: 2 Vmax: -37 }
SKCa: { Gbar: 0 C50: 0.5 ActTau: 15 DeTau: 30 KCaR: 0.8 CaRDecayTau: 150 CaInThr: 0.01 CaInTau: 50 }
@@ -245,7 +245,7 @@ Learn: {
TrgAvgAct: { On: true GiBaseInit: 0 ErrLRate: 0.02 SynScaleRate: 0.005 SubMean: 1 Permute: true Pool: true TrgRange: { Min: 0.5 Max: 2
} }
RLRate: { On: true SigmoidMin: 0.05 Diff: true SpkThr: 0.1 DiffThr: 0.02 Min: 0.001 }
- NeuroMod: { DAMod: 0 Valence: 0 DAModGain: 0.5 DALRateSign: false DALRateMod: 0 AChLRateMod: 0 AChDisInhib: 0 BurstGain: 1 DipGain: 1 }
+ NeuroMod: { DAMod: NoDAMod Valence: Positive DAModGain: 0.5 DALRateSign: false DALRateMod: 0 AChLRateMod: 0 AChDisInhib: 0 BurstGain: 1 DipGain: 1 }
}
GP: {
GPType: GPeIn
@@ -323,13 +323,13 @@ Act: {
Clamp: { IsInput: false IsTarget: false Ge: 1 Add: false ErrThr: 0.5 }
Noise: { On: false GeHz: 100 Ge: 0.001 GiHz: 200 Gi: 0.001 }
VmRange: { Min: 0.1 Max: 1 }
- Mahp: { Gbar: 0.02 Voff: -30 Vslope: 9 TauMax: 1000 Tadj: 3.2093637 DtMax: 0.001 }
+ Mahp: { Gbar: 0.02 Voff: -30 Vslope: 9 TauMax: 1000 Tadj: 3.209364 DtMax: 0.001 }
Sahp: { Gbar: 0.05 CaTau: 5 Off: 0.8 Slope: 0.02 TauMax: 1 CaDt: 0.2 DtMax: 1 }
KNa: { On: true TrialSlow: false Med: { On: true Rise: 0.02 Max: 0.2 Tau: 200 Dt: 0.005 } Slow: { On: true Rise: 0.001 Max: 0.2 Tau: 1000 Dt: 0.001
} }
NMDA: { Gbar: 0 Tau: 100 ITau: 1 MgC: 1.4 Voff: 0 }
MaintNMDA: { Gbar: 0.007 Tau: 200 ITau: 1 MgC: 1.4 Voff: 0 }
- GabaB: { Gbar: 0 RiseTau: 45 DecayTau: 50 Gbase: 0.2 GiSpike: 10 MaxTime: 47.41225 TauFact: 2.5811758 RiseDt: 0.022222223 DecayDt: 0.02 }
+ GabaB: { Gbar: 0 RiseTau: 45 DecayTau: 50 Gbase: 0.2 GiSpike: 10 MaxTime: 47.41225 TauFact: 2.5811756 RiseDt: 0.022222223 DecayDt: 0.02 }
VGCC: { Gbar: 0.02 Ca: 25 }
AK: { Gbar: 0.1 Hf: 0.076 Mf: 0.075 Voff: 2 Vmax: -37 }
SKCa: { Gbar: 0 C50: 0.5 ActTau: 15 DeTau: 30 KCaR: 0.8 CaRDecayTau: 150 CaInThr: 0.01 CaInTau: 50 }
@@ -349,7 +349,7 @@ Learn: {
TrgAvgAct: { On: true GiBaseInit: 0 ErrLRate: 0.02 SynScaleRate: 0.005 SubMean: 1 Permute: true Pool: true TrgRange: { Min: 0.5 Max: 2
} }
RLRate: { On: true SigmoidMin: 0.05 Diff: true SpkThr: 0.1 DiffThr: 0.02 Min: 0.001 }
- NeuroMod: { DAMod: 0 Valence: 0 DAModGain: 0.5 DALRateSign: false DALRateMod: 0 AChLRateMod: 0 AChDisInhib: 0 BurstGain: 1 DipGain: 1 }
+ NeuroMod: { DAMod: NoDAMod Valence: Positive DAModGain: 0.5 DALRateSign: false DALRateMod: 0 AChLRateMod: 0 AChDisInhib: 0 BurstGain: 1 DipGain: 1 }
}
GP: {
GPType: GPeTA
@@ -407,13 +407,13 @@ Act: {
Clamp: { IsInput: false IsTarget: false Ge: 1 Add: false ErrThr: 0.5 }
Noise: { On: false GeHz: 100 Ge: 0.001 GiHz: 200 Gi: 0.001 }
VmRange: { Min: 0.1 Max: 1 }
- Mahp: { Gbar: 0.02 Voff: -30 Vslope: 9 TauMax: 1000 Tadj: 3.2093637 DtMax: 0.001 }
+ Mahp: { Gbar: 0.02 Voff: -30 Vslope: 9 TauMax: 1000 Tadj: 3.209364 DtMax: 0.001 }
Sahp: { Gbar: 0.05 CaTau: 5 Off: 0.8 Slope: 0.02 TauMax: 1 CaDt: 0.2 DtMax: 1 }
KNa: { On: true TrialSlow: false Med: { On: true Rise: 0.02 Max: 0.2 Tau: 200 Dt: 0.005 } Slow: { On: true Rise: 0.001 Max: 0.2 Tau: 1000 Dt: 0.001
} }
NMDA: { Gbar: 0 Tau: 100 ITau: 1 MgC: 1.4 Voff: 0 }
MaintNMDA: { Gbar: 0.007 Tau: 200 ITau: 1 MgC: 1.4 Voff: 0 }
- GabaB: { Gbar: 0 RiseTau: 45 DecayTau: 50 Gbase: 0.2 GiSpike: 10 MaxTime: 47.41225 TauFact: 2.5811758 RiseDt: 0.022222223 DecayDt: 0.02 }
+ GabaB: { Gbar: 0 RiseTau: 45 DecayTau: 50 Gbase: 0.2 GiSpike: 10 MaxTime: 47.41225 TauFact: 2.5811756 RiseDt: 0.022222223 DecayDt: 0.02 }
VGCC: { Gbar: 0.02 Ca: 25 }
AK: { Gbar: 0.1 Hf: 0.076 Mf: 0.075 Voff: 2 Vmax: -37 }
SKCa: { Gbar: 3 C50: 0.5 ActTau: 15 DeTau: 30 KCaR: 0.8 CaRDecayTau: 150 CaInThr: 0.01 CaInTau: 50 }
@@ -433,7 +433,7 @@ Learn: {
TrgAvgAct: { On: true GiBaseInit: 0 ErrLRate: 0.02 SynScaleRate: 0.005 SubMean: 1 Permute: true Pool: true TrgRange: { Min: 0.5 Max: 2
} }
RLRate: { On: true SigmoidMin: 0.05 Diff: true SpkThr: 0.1 DiffThr: 0.02 Min: 0.001 }
- NeuroMod: { DAMod: 0 Valence: 0 DAModGain: 0.5 DALRateSign: false DALRateMod: 0 AChLRateMod: 0 AChDisInhib: 2 BurstGain: 1 DipGain: 1 }
+ NeuroMod: { DAMod: NoDAMod Valence: Positive DAModGain: 0.5 DALRateSign: false DALRateMod: 0 AChLRateMod: 0 AChDisInhib: 2 BurstGain: 1 DipGain: 1 }
}
///////////////////////////////////////////////////
Prjn: GPeInToSTNp
@@ -488,13 +488,13 @@ Act: {
Clamp: { IsInput: false IsTarget: false Ge: 1 Add: false ErrThr: 0.5 }
Noise: { On: false GeHz: 100 Ge: 0.001 GiHz: 200 Gi: 0.001 }
VmRange: { Min: 0.1 Max: 1 }
- Mahp: { Gbar: 0.02 Voff: -30 Vslope: 9 TauMax: 1000 Tadj: 3.2093637 DtMax: 0.001 }
+ Mahp: { Gbar: 0.02 Voff: -30 Vslope: 9 TauMax: 1000 Tadj: 3.209364 DtMax: 0.001 }
Sahp: { Gbar: 0.05 CaTau: 5 Off: 0.8 Slope: 0.02 TauMax: 1 CaDt: 0.2 DtMax: 1 }
KNa: { On: true TrialSlow: false Med: { On: true Rise: 0.02 Max: 0.2 Tau: 200 Dt: 0.005 } Slow: { On: true Rise: 0.001 Max: 0.2 Tau: 1000 Dt: 0.001
} }
NMDA: { Gbar: 0 Tau: 100 ITau: 1 MgC: 1.4 Voff: 0 }
MaintNMDA: { Gbar: 0.007 Tau: 200 ITau: 1 MgC: 1.4 Voff: 0 }
- GabaB: { Gbar: 0 RiseTau: 45 DecayTau: 50 Gbase: 0.2 GiSpike: 10 MaxTime: 47.41225 TauFact: 2.5811758 RiseDt: 0.022222223 DecayDt: 0.02 }
+ GabaB: { Gbar: 0 RiseTau: 45 DecayTau: 50 Gbase: 0.2 GiSpike: 10 MaxTime: 47.41225 TauFact: 2.5811756 RiseDt: 0.022222223 DecayDt: 0.02 }
VGCC: { Gbar: 0.02 Ca: 25 }
AK: { Gbar: 0.1 Hf: 0.076 Mf: 0.075 Voff: 2 Vmax: -37 }
SKCa: { Gbar: 3 C50: 0.4 ActTau: 15 DeTau: 30 KCaR: 0.4 CaRDecayTau: 200 CaInThr: 0.01 CaInTau: 50 }
@@ -514,7 +514,7 @@ Learn: {
TrgAvgAct: { On: true GiBaseInit: 0 ErrLRate: 0.02 SynScaleRate: 0.005 SubMean: 1 Permute: true Pool: true TrgRange: { Min: 0.5 Max: 2
} }
RLRate: { On: true SigmoidMin: 0.05 Diff: true SpkThr: 0.1 DiffThr: 0.02 Min: 0.001 }
- NeuroMod: { DAMod: 0 Valence: 0 DAModGain: 0.5 DALRateSign: false DALRateMod: 0 AChLRateMod: 0 AChDisInhib: 2 BurstGain: 1 DipGain: 1 }
+ NeuroMod: { DAMod: NoDAMod Valence: Positive DAModGain: 0.5 DALRateSign: false DALRateMod: 0 AChLRateMod: 0 AChDisInhib: 2 BurstGain: 1 DipGain: 1 }
}
///////////////////////////////////////////////////
Prjn: PFCToSTNs
@@ -549,13 +549,13 @@ Act: {
Clamp: { IsInput: false IsTarget: false Ge: 1 Add: false ErrThr: 0.5 }
Noise: { On: false GeHz: 100 Ge: 0.001 GiHz: 200 Gi: 0.001 }
VmRange: { Min: 0.1 Max: 1 }
- Mahp: { Gbar: 0.02 Voff: -30 Vslope: 9 TauMax: 1000 Tadj: 3.2093637 DtMax: 0.001 }
+ Mahp: { Gbar: 0.02 Voff: -30 Vslope: 9 TauMax: 1000 Tadj: 3.209364 DtMax: 0.001 }
Sahp: { Gbar: 0.05 CaTau: 5 Off: 0.8 Slope: 0.02 TauMax: 1 CaDt: 0.2 DtMax: 1 }
KNa: { On: true TrialSlow: false Med: { On: true Rise: 0.02 Max: 0.2 Tau: 200 Dt: 0.005 } Slow: { On: true Rise: 0.001 Max: 0.2 Tau: 1000 Dt: 0.001
} }
NMDA: { Gbar: 0.006 Tau: 100 ITau: 1 MgC: 1.4 Voff: 0 }
MaintNMDA: { Gbar: 0.007 Tau: 200 ITau: 1 MgC: 1.4 Voff: 0 }
- GabaB: { Gbar: 0.015 RiseTau: 45 DecayTau: 50 Gbase: 0.2 GiSpike: 10 MaxTime: 47.41225 TauFact: 2.5811758 RiseDt: 0.022222223 DecayDt: 0.02 }
+ GabaB: { Gbar: 0.015 RiseTau: 45 DecayTau: 50 Gbase: 0.2 GiSpike: 10 MaxTime: 47.41225 TauFact: 2.5811756 RiseDt: 0.022222223 DecayDt: 0.02 }
VGCC: { Gbar: 0.02 Ca: 25 }
AK: { Gbar: 0.1 Hf: 0.076 Mf: 0.075 Voff: 2 Vmax: -37 }
SKCa: { Gbar: 0 C50: 0.5 ActTau: 15 DeTau: 30 KCaR: 0.8 CaRDecayTau: 150 CaInThr: 0.01 CaInTau: 50 }
@@ -575,7 +575,7 @@ Learn: {
TrgAvgAct: { On: true GiBaseInit: 0 ErrLRate: 0.02 SynScaleRate: 0.005 SubMean: 1 Permute: true Pool: true TrgRange: { Min: 0.5 Max: 2
} }
RLRate: { On: false SigmoidMin: 0.05 Diff: true SpkThr: 0.1 DiffThr: 0.02 Min: 0.001 }
- NeuroMod: { DAMod: 1 Valence: 0 DAModGain: 0.5 DALRateSign: true DALRateMod: 1 AChLRateMod: 1 AChDisInhib: 5 BurstGain: 1 DipGain: 1 }
+ NeuroMod: { DAMod: D1Mod Valence: Positive DAModGain: 0.5 DALRateSign: true DALRateMod: 1 AChLRateMod: 1 AChDisInhib: 5 BurstGain: 1 DipGain: 1 }
}
Matrix: {
GateThr: 0.05 IsVS: false OtherMatrixIdx: 7 ThalLay1Idx: 17 ThalLay2Idx: -1 ThalLay3Idx: -1 ThalLay4Idx: -1 ThalLay5Idx: -1 ThalLay6Idx: -1
@@ -702,13 +702,13 @@ Act: {
Clamp: { IsInput: false IsTarget: false Ge: 1 Add: false ErrThr: 0.5 }
Noise: { On: false GeHz: 100 Ge: 0.001 GiHz: 200 Gi: 0.001 }
VmRange: { Min: 0.1 Max: 1 }
- Mahp: { Gbar: 0.02 Voff: -30 Vslope: 9 TauMax: 1000 Tadj: 3.2093637 DtMax: 0.001 }
+ Mahp: { Gbar: 0.02 Voff: -30 Vslope: 9 TauMax: 1000 Tadj: 3.209364 DtMax: 0.001 }
Sahp: { Gbar: 0.05 CaTau: 5 Off: 0.8 Slope: 0.02 TauMax: 1 CaDt: 0.2 DtMax: 1 }
KNa: { On: true TrialSlow: false Med: { On: true Rise: 0.02 Max: 0.2 Tau: 200 Dt: 0.005 } Slow: { On: true Rise: 0.001 Max: 0.2 Tau: 1000 Dt: 0.001
} }
NMDA: { Gbar: 0.006 Tau: 100 ITau: 1 MgC: 1.4 Voff: 0 }
MaintNMDA: { Gbar: 0.007 Tau: 200 ITau: 1 MgC: 1.4 Voff: 0 }
- GabaB: { Gbar: 0.015 RiseTau: 45 DecayTau: 50 Gbase: 0.2 GiSpike: 10 MaxTime: 47.41225 TauFact: 2.5811758 RiseDt: 0.022222223 DecayDt: 0.02 }
+ GabaB: { Gbar: 0.015 RiseTau: 45 DecayTau: 50 Gbase: 0.2 GiSpike: 10 MaxTime: 47.41225 TauFact: 2.5811756 RiseDt: 0.022222223 DecayDt: 0.02 }
VGCC: { Gbar: 0.02 Ca: 25 }
AK: { Gbar: 0.1 Hf: 0.076 Mf: 0.075 Voff: 2 Vmax: -37 }
SKCa: { Gbar: 0 C50: 0.5 ActTau: 15 DeTau: 30 KCaR: 0.8 CaRDecayTau: 150 CaInThr: 0.01 CaInTau: 50 }
@@ -728,7 +728,7 @@ Learn: {
TrgAvgAct: { On: true GiBaseInit: 0 ErrLRate: 0.02 SynScaleRate: 0.005 SubMean: 1 Permute: true Pool: true TrgRange: { Min: 0.5 Max: 2
} }
RLRate: { On: false SigmoidMin: 0.05 Diff: true SpkThr: 0.1 DiffThr: 0.02 Min: 0.001 }
- NeuroMod: { DAMod: 2 Valence: 0 DAModGain: 0.5 DALRateSign: true DALRateMod: 1 AChLRateMod: 1 AChDisInhib: 5 BurstGain: 1 DipGain: 1 }
+ NeuroMod: { DAMod: D2Mod Valence: Positive DAModGain: 0.5 DALRateSign: true DALRateMod: 1 AChLRateMod: 1 AChDisInhib: 5 BurstGain: 1 DipGain: 1 }
}
Matrix: {
GateThr: 0.05 IsVS: false OtherMatrixIdx: 6 ThalLay1Idx: 17 ThalLay2Idx: -1 ThalLay3Idx: -1 ThalLay4Idx: -1 ThalLay5Idx: -1 ThalLay6Idx: -1
@@ -832,13 +832,13 @@ Act: {
Clamp: { IsInput: true IsTarget: false Ge: 1 Add: false ErrThr: 0.5 }
Noise: { On: false GeHz: 100 Ge: 0.001 GiHz: 200 Gi: 0.001 }
VmRange: { Min: 0.1 Max: 1 }
- Mahp: { Gbar: 0.02 Voff: -30 Vslope: 9 TauMax: 1000 Tadj: 3.2093637 DtMax: 0.001 }
+ Mahp: { Gbar: 0.02 Voff: -30 Vslope: 9 TauMax: 1000 Tadj: 3.209364 DtMax: 0.001 }
Sahp: { Gbar: 0.05 CaTau: 5 Off: 0.8 Slope: 0.02 TauMax: 1 CaDt: 0.2 DtMax: 1 }
KNa: { On: true TrialSlow: false Med: { On: true Rise: 0.02 Max: 0.2 Tau: 200 Dt: 0.005 } Slow: { On: true Rise: 0.001 Max: 0.2 Tau: 1000 Dt: 0.001
} }
NMDA: { Gbar: 0.006 Tau: 100 ITau: 1 MgC: 1.4 Voff: 0 }
MaintNMDA: { Gbar: 0.007 Tau: 200 ITau: 1 MgC: 1.4 Voff: 0 }
- GabaB: { Gbar: 0.015 RiseTau: 45 DecayTau: 50 Gbase: 0.2 GiSpike: 10 MaxTime: 47.41225 TauFact: 2.5811758 RiseDt: 0.022222223 DecayDt: 0.02 }
+ GabaB: { Gbar: 0.015 RiseTau: 45 DecayTau: 50 Gbase: 0.2 GiSpike: 10 MaxTime: 47.41225 TauFact: 2.5811756 RiseDt: 0.022222223 DecayDt: 0.02 }
VGCC: { Gbar: 0.02 Ca: 25 }
AK: { Gbar: 0.1 Hf: 0.076 Mf: 0.075 Voff: 2 Vmax: -37 }
SKCa: { Gbar: 0 C50: 0.5 ActTau: 15 DeTau: 30 KCaR: 0.8 CaRDecayTau: 150 CaInThr: 0.01 CaInTau: 50 }
@@ -858,7 +858,7 @@ Learn: {
TrgAvgAct: { On: true GiBaseInit: 0 ErrLRate: 0.02 SynScaleRate: 0.005 SubMean: 0 Permute: true Pool: true TrgRange: { Min: 0.5 Max: 2
} }
RLRate: { On: true SigmoidMin: 0.05 Diff: true SpkThr: 0.1 DiffThr: 0.02 Min: 0.001 }
- NeuroMod: { DAMod: 0 Valence: 0 DAModGain: 0.5 DALRateSign: false DALRateMod: 0 AChLRateMod: 0 AChDisInhib: 0 BurstGain: 1 DipGain: 1 }
+ NeuroMod: { DAMod: NoDAMod Valence: Positive DAModGain: 0.5 DALRateSign: false DALRateMod: 0 AChLRateMod: 0 AChDisInhib: 0 BurstGain: 1 DipGain: 1 }
}
/////////////////////////////////////////////////
Layer: Urgency
@@ -873,13 +873,13 @@ Act: {
Clamp: { IsInput: false IsTarget: false Ge: 1 Add: false ErrThr: 0.5 }
Noise: { On: false GeHz: 100 Ge: 0.001 GiHz: 200 Gi: 0.001 }
VmRange: { Min: 0.1 Max: 1 }
- Mahp: { Gbar: 0.02 Voff: -30 Vslope: 9 TauMax: 1000 Tadj: 3.2093637 DtMax: 0.001 }
+ Mahp: { Gbar: 0.02 Voff: -30 Vslope: 9 TauMax: 1000 Tadj: 3.209364 DtMax: 0.001 }
Sahp: { Gbar: 0.05 CaTau: 5 Off: 0.8 Slope: 0.02 TauMax: 1 CaDt: 0.2 DtMax: 1 }
KNa: { On: true TrialSlow: false Med: { On: true Rise: 0.02 Max: 0.2 Tau: 200 Dt: 0.005 } Slow: { On: true Rise: 0.001 Max: 0.2 Tau: 1000 Dt: 0.001
} }
NMDA: { Gbar: 0.006 Tau: 100 ITau: 1 MgC: 1.4 Voff: 0 }
MaintNMDA: { Gbar: 0.007 Tau: 200 ITau: 1 MgC: 1.4 Voff: 0 }
- GabaB: { Gbar: 0.015 RiseTau: 45 DecayTau: 50 Gbase: 0.2 GiSpike: 10 MaxTime: 47.41225 TauFact: 2.5811758 RiseDt: 0.022222223 DecayDt: 0.02 }
+ GabaB: { Gbar: 0.015 RiseTau: 45 DecayTau: 50 Gbase: 0.2 GiSpike: 10 MaxTime: 47.41225 TauFact: 2.5811756 RiseDt: 0.022222223 DecayDt: 0.02 }
VGCC: { Gbar: 0.02 Ca: 25 }
AK: { Gbar: 0.1 Hf: 0.076 Mf: 0.075 Voff: 2 Vmax: -37 }
SKCa: { Gbar: 0 C50: 0.5 ActTau: 15 DeTau: 30 KCaR: 0.8 CaRDecayTau: 150 CaInThr: 0.01 CaInTau: 50 }
@@ -899,7 +899,7 @@ Learn: {
TrgAvgAct: { On: false GiBaseInit: 0 ErrLRate: 0.02 SynScaleRate: 0.005 SubMean: 1 Permute: true Pool: true TrgRange: { Min: 0.5 Max: 2
} }
RLRate: { On: true SigmoidMin: 0.05 Diff: true SpkThr: 0.1 DiffThr: 0.02 Min: 0.001 }
- NeuroMod: { DAMod: 0 Valence: 0 DAModGain: 0.5 DALRateSign: false DALRateMod: 0 AChLRateMod: 0 AChDisInhib: 0 BurstGain: 1 DipGain: 1 }
+ NeuroMod: { DAMod: NoDAMod Valence: Positive DAModGain: 0.5 DALRateSign: false DALRateMod: 0 AChLRateMod: 0 AChDisInhib: 0 BurstGain: 1 DipGain: 1 }
}
/////////////////////////////////////////////////
Layer: ACCPos
@@ -914,13 +914,13 @@ Act: {
Clamp: { IsInput: true IsTarget: false Ge: 1 Add: false ErrThr: 0.5 }
Noise: { On: false GeHz: 100 Ge: 0.001 GiHz: 200 Gi: 0.001 }
VmRange: { Min: 0.1 Max: 1 }
- Mahp: { Gbar: 0.02 Voff: -30 Vslope: 9 TauMax: 1000 Tadj: 3.2093637 DtMax: 0.001 }
+ Mahp: { Gbar: 0.02 Voff: -30 Vslope: 9 TauMax: 1000 Tadj: 3.209364 DtMax: 0.001 }
Sahp: { Gbar: 0.05 CaTau: 5 Off: 0.8 Slope: 0.02 TauMax: 1 CaDt: 0.2 DtMax: 1 }
KNa: { On: true TrialSlow: false Med: { On: true Rise: 0.02 Max: 0.2 Tau: 200 Dt: 0.005 } Slow: { On: true Rise: 0.001 Max: 0.2 Tau: 1000 Dt: 0.001
} }
NMDA: { Gbar: 0.006 Tau: 100 ITau: 1 MgC: 1.4 Voff: 0 }
MaintNMDA: { Gbar: 0.007 Tau: 200 ITau: 1 MgC: 1.4 Voff: 0 }
- GabaB: { Gbar: 0.015 RiseTau: 45 DecayTau: 50 Gbase: 0.2 GiSpike: 10 MaxTime: 47.41225 TauFact: 2.5811758 RiseDt: 0.022222223 DecayDt: 0.02 }
+ GabaB: { Gbar: 0.015 RiseTau: 45 DecayTau: 50 Gbase: 0.2 GiSpike: 10 MaxTime: 47.41225 TauFact: 2.5811756 RiseDt: 0.022222223 DecayDt: 0.02 }
VGCC: { Gbar: 0.02 Ca: 25 }
AK: { Gbar: 0.1 Hf: 0.076 Mf: 0.075 Voff: 2 Vmax: -37 }
SKCa: { Gbar: 0 C50: 0.5 ActTau: 15 DeTau: 30 KCaR: 0.8 CaRDecayTau: 150 CaInThr: 0.01 CaInTau: 50 }
@@ -940,7 +940,7 @@ Learn: {
TrgAvgAct: { On: true GiBaseInit: 0 ErrLRate: 0.02 SynScaleRate: 0.005 SubMean: 0 Permute: true Pool: true TrgRange: { Min: 0.5 Max: 2
} }
RLRate: { On: true SigmoidMin: 0.05 Diff: true SpkThr: 0.1 DiffThr: 0.02 Min: 0.001 }
- NeuroMod: { DAMod: 0 Valence: 0 DAModGain: 0.5 DALRateSign: false DALRateMod: 0 AChLRateMod: 0 AChDisInhib: 0 BurstGain: 1 DipGain: 1 }
+ NeuroMod: { DAMod: NoDAMod Valence: Positive DAModGain: 0.5 DALRateSign: false DALRateMod: 0 AChLRateMod: 0 AChDisInhib: 0 BurstGain: 1 DipGain: 1 }
}
/////////////////////////////////////////////////
Layer: ACCNeg
@@ -955,13 +955,13 @@ Act: {
Clamp: { IsInput: true IsTarget: false Ge: 1 Add: false ErrThr: 0.5 }
Noise: { On: false GeHz: 100 Ge: 0.001 GiHz: 200 Gi: 0.001 }
VmRange: { Min: 0.1 Max: 1 }
- Mahp: { Gbar: 0.02 Voff: -30 Vslope: 9 TauMax: 1000 Tadj: 3.2093637 DtMax: 0.001 }
+ Mahp: { Gbar: 0.02 Voff: -30 Vslope: 9 TauMax: 1000 Tadj: 3.209364 DtMax: 0.001 }
Sahp: { Gbar: 0.05 CaTau: 5 Off: 0.8 Slope: 0.02 TauMax: 1 CaDt: 0.2 DtMax: 1 }
KNa: { On: true TrialSlow: false Med: { On: true Rise: 0.02 Max: 0.2 Tau: 200 Dt: 0.005 } Slow: { On: true Rise: 0.001 Max: 0.2 Tau: 1000 Dt: 0.001
} }
NMDA: { Gbar: 0.006 Tau: 100 ITau: 1 MgC: 1.4 Voff: 0 }
MaintNMDA: { Gbar: 0.007 Tau: 200 ITau: 1 MgC: 1.4 Voff: 0 }
- GabaB: { Gbar: 0.015 RiseTau: 45 DecayTau: 50 Gbase: 0.2 GiSpike: 10 MaxTime: 47.41225 TauFact: 2.5811758 RiseDt: 0.022222223 DecayDt: 0.02 }
+ GabaB: { Gbar: 0.015 RiseTau: 45 DecayTau: 50 Gbase: 0.2 GiSpike: 10 MaxTime: 47.41225 TauFact: 2.5811756 RiseDt: 0.022222223 DecayDt: 0.02 }
VGCC: { Gbar: 0.02 Ca: 25 }
AK: { Gbar: 0.1 Hf: 0.076 Mf: 0.075 Voff: 2 Vmax: -37 }
SKCa: { Gbar: 0 C50: 0.5 ActTau: 15 DeTau: 30 KCaR: 0.8 CaRDecayTau: 150 CaInThr: 0.01 CaInTau: 50 }
@@ -981,7 +981,7 @@ Learn: {
TrgAvgAct: { On: true GiBaseInit: 0 ErrLRate: 0.02 SynScaleRate: 0.005 SubMean: 0 Permute: true Pool: true TrgRange: { Min: 0.5 Max: 2
} }
RLRate: { On: true SigmoidMin: 0.05 Diff: true SpkThr: 0.1 DiffThr: 0.02 Min: 0.001 }
- NeuroMod: { DAMod: 0 Valence: 0 DAModGain: 0.5 DALRateSign: false DALRateMod: 0 AChLRateMod: 0 AChDisInhib: 0 BurstGain: 1 DipGain: 1 }
+ NeuroMod: { DAMod: NoDAMod Valence: Positive DAModGain: 0.5 DALRateSign: false DALRateMod: 0 AChLRateMod: 0 AChDisInhib: 0 BurstGain: 1 DipGain: 1 }
}
/////////////////////////////////////////////////
Layer: In
@@ -996,13 +996,13 @@ Act: {
Clamp: { IsInput: true IsTarget: false Ge: 1 Add: false ErrThr: 0.5 }
Noise: { On: false GeHz: 100 Ge: 0.001 GiHz: 200 Gi: 0.001 }
VmRange: { Min: 0.1 Max: 1 }
- Mahp: { Gbar: 0.02 Voff: -30 Vslope: 9 TauMax: 1000 Tadj: 3.2093637 DtMax: 0.001 }
+ Mahp: { Gbar: 0.02 Voff: -30 Vslope: 9 TauMax: 1000 Tadj: 3.209364 DtMax: 0.001 }
Sahp: { Gbar: 0.05 CaTau: 5 Off: 0.8 Slope: 0.02 TauMax: 1 CaDt: 0.2 DtMax: 1 }
KNa: { On: true TrialSlow: false Med: { On: true Rise: 0.02 Max: 0.2 Tau: 200 Dt: 0.005 } Slow: { On: true Rise: 0.001 Max: 0.2 Tau: 1000 Dt: 0.001
} }
NMDA: { Gbar: 0.006 Tau: 100 ITau: 1 MgC: 1.4 Voff: 0 }
MaintNMDA: { Gbar: 0.007 Tau: 200 ITau: 1 MgC: 1.4 Voff: 0 }
- GabaB: { Gbar: 0.015 RiseTau: 45 DecayTau: 50 Gbase: 0.2 GiSpike: 10 MaxTime: 47.41225 TauFact: 2.5811758 RiseDt: 0.022222223 DecayDt: 0.02 }
+ GabaB: { Gbar: 0.015 RiseTau: 45 DecayTau: 50 Gbase: 0.2 GiSpike: 10 MaxTime: 47.41225 TauFact: 2.5811756 RiseDt: 0.022222223 DecayDt: 0.02 }
VGCC: { Gbar: 0.02 Ca: 25 }
AK: { Gbar: 0.1 Hf: 0.076 Mf: 0.075 Voff: 2 Vmax: -37 }
SKCa: { Gbar: 0 C50: 0.5 ActTau: 15 DeTau: 30 KCaR: 0.8 CaRDecayTau: 150 CaInThr: 0.01 CaInTau: 50 }
@@ -1022,7 +1022,7 @@ Learn: {
TrgAvgAct: { On: true GiBaseInit: 0 ErrLRate: 0.02 SynScaleRate: 0.005 SubMean: 0 Permute: true Pool: true TrgRange: { Min: 0.5 Max: 2
} }
RLRate: { On: true SigmoidMin: 0.05 Diff: true SpkThr: 0.1 DiffThr: 0.02 Min: 0.001 }
- NeuroMod: { DAMod: 0 Valence: 0 DAModGain: 0.5 DALRateSign: false DALRateMod: 0 AChLRateMod: 0 AChDisInhib: 0 BurstGain: 1 DipGain: 1 }
+ NeuroMod: { DAMod: NoDAMod Valence: Positive DAModGain: 0.5 DALRateSign: false DALRateMod: 0 AChLRateMod: 0 AChDisInhib: 0 BurstGain: 1 DipGain: 1 }
}
/////////////////////////////////////////////////
Layer: InP
@@ -1037,13 +1037,13 @@ Act: {
Clamp: { IsInput: false IsTarget: true Ge: 1 Add: false ErrThr: 0.5 }
Noise: { On: false GeHz: 100 Ge: 0.001 GiHz: 200 Gi: 0.001 }
VmRange: { Min: 0.1 Max: 1 }
- Mahp: { Gbar: 0.02 Voff: -30 Vslope: 9 TauMax: 1000 Tadj: 3.2093637 DtMax: 0.001 }
+ Mahp: { Gbar: 0.02 Voff: -30 Vslope: 9 TauMax: 1000 Tadj: 3.209364 DtMax: 0.001 }
Sahp: { Gbar: 0.05 CaTau: 5 Off: 0.8 Slope: 0.02 TauMax: 1 CaDt: 0.2 DtMax: 1 }
KNa: { On: true TrialSlow: false Med: { On: true Rise: 0.02 Max: 0.2 Tau: 200 Dt: 0.005 } Slow: { On: true Rise: 0.001 Max: 0.2 Tau: 1000 Dt: 0.001
} }
NMDA: { Gbar: 0.006 Tau: 100 ITau: 1 MgC: 1.4 Voff: 0 }
MaintNMDA: { Gbar: 0.007 Tau: 200 ITau: 1 MgC: 1.4 Voff: 0 }
- GabaB: { Gbar: 0.015 RiseTau: 45 DecayTau: 50 Gbase: 0.2 GiSpike: 10 MaxTime: 47.41225 TauFact: 2.5811758 RiseDt: 0.022222223 DecayDt: 0.02 }
+ GabaB: { Gbar: 0.015 RiseTau: 45 DecayTau: 50 Gbase: 0.2 GiSpike: 10 MaxTime: 47.41225 TauFact: 2.5811756 RiseDt: 0.022222223 DecayDt: 0.02 }
VGCC: { Gbar: 0.02 Ca: 25 }
AK: { Gbar: 0.1 Hf: 0.076 Mf: 0.075 Voff: 2 Vmax: -37 }
SKCa: { Gbar: 0 C50: 0.5 ActTau: 15 DeTau: 30 KCaR: 0.8 CaRDecayTau: 150 CaInThr: 0.01 CaInTau: 50 }
@@ -1063,7 +1063,7 @@ Learn: {
TrgAvgAct: { On: true GiBaseInit: 0 ErrLRate: 0.02 SynScaleRate: 0.005 SubMean: 1 Permute: true Pool: true TrgRange: { Min: 0.5 Max: 2
} }
RLRate: { On: true SigmoidMin: 1 Diff: true SpkThr: 0.1 DiffThr: 0.02 Min: 0.001 }
- NeuroMod: { DAMod: 0 Valence: 0 DAModGain: 0.5 DALRateSign: false DALRateMod: 0 AChLRateMod: 0 AChDisInhib: 0 BurstGain: 1 DipGain: 1 }
+ NeuroMod: { DAMod: NoDAMod Valence: Positive DAModGain: 0.5 DALRateSign: false DALRateMod: 0 AChLRateMod: 0 AChDisInhib: 0 BurstGain: 1 DipGain: 1 }
}
Pulv: {
DriveScale: 0.1 FullDriveAct: 0.6 DriveLayIdx: 12
@@ -1101,13 +1101,13 @@ Act: {
Clamp: { IsInput: false IsTarget: false Ge: 1 Add: false ErrThr: 0.5 }
Noise: { On: false GeHz: 100 Ge: 0.001 GiHz: 200 Gi: 0.001 }
VmRange: { Min: 0.1 Max: 1 }
- Mahp: { Gbar: 0.02 Voff: -30 Vslope: 9 TauMax: 1000 Tadj: 3.2093637 DtMax: 0.001 }
+ Mahp: { Gbar: 0.02 Voff: -30 Vslope: 9 TauMax: 1000 Tadj: 3.209364 DtMax: 0.001 }
Sahp: { Gbar: 0.05 CaTau: 5 Off: 0.8 Slope: 0.02 TauMax: 1 CaDt: 0.2 DtMax: 1 }
KNa: { On: true TrialSlow: false Med: { On: true Rise: 0.02 Max: 0.2 Tau: 200 Dt: 0.005 } Slow: { On: true Rise: 0.001 Max: 0.2 Tau: 1000 Dt: 0.001
} }
NMDA: { Gbar: 0.006 Tau: 100 ITau: 1 MgC: 1.4 Voff: 0 }
MaintNMDA: { Gbar: 0.007 Tau: 200 ITau: 1 MgC: 1.4 Voff: 0 }
- GabaB: { Gbar: 0.015 RiseTau: 45 DecayTau: 50 Gbase: 0.2 GiSpike: 10 MaxTime: 47.41225 TauFact: 2.5811758 RiseDt: 0.022222223 DecayDt: 0.02 }
+ GabaB: { Gbar: 0.015 RiseTau: 45 DecayTau: 50 Gbase: 0.2 GiSpike: 10 MaxTime: 47.41225 TauFact: 2.5811756 RiseDt: 0.022222223 DecayDt: 0.02 }
VGCC: { Gbar: 0.02 Ca: 25 }
AK: { Gbar: 0.1 Hf: 0.076 Mf: 0.075 Voff: 2 Vmax: -37 }
SKCa: { Gbar: 0 C50: 0.5 ActTau: 15 DeTau: 30 KCaR: 0.8 CaRDecayTau: 150 CaInThr: 0.01 CaInTau: 50 }
@@ -1127,7 +1127,7 @@ Learn: {
TrgAvgAct: { On: true GiBaseInit: 0 ErrLRate: 0.02 SynScaleRate: 0.005 SubMean: 1 Permute: true Pool: true TrgRange: { Min: 0.5 Max: 2
} }
RLRate: { On: true SigmoidMin: 0.05 Diff: true SpkThr: 0.1 DiffThr: 0.02 Min: 0.001 }
- NeuroMod: { DAMod: 0 Valence: 0 DAModGain: 0.5 DALRateSign: false DALRateMod: 0 AChLRateMod: 0 AChDisInhib: 0 BurstGain: 1 DipGain: 1 }
+ NeuroMod: { DAMod: NoDAMod Valence: Positive DAModGain: 0.5 DALRateSign: false DALRateMod: 0 AChLRateMod: 0 AChDisInhib: 0 BurstGain: 1 DipGain: 1 }
}
Burst: {
ThrRel: 0.1 ThrAbs: 0.1
@@ -1185,13 +1185,13 @@ Act: {
Clamp: { IsInput: false IsTarget: false Ge: 1 Add: false ErrThr: 0.5 }
Noise: { On: false GeHz: 100 Ge: 0.001 GiHz: 200 Gi: 0.001 }
VmRange: { Min: 0.1 Max: 1 }
- Mahp: { Gbar: 0.02 Voff: -30 Vslope: 9 TauMax: 1000 Tadj: 3.2093637 DtMax: 0.001 }
+ Mahp: { Gbar: 0.02 Voff: -30 Vslope: 9 TauMax: 1000 Tadj: 3.209364 DtMax: 0.001 }
Sahp: { Gbar: 1 CaTau: 5 Off: 0.8 Slope: 0.02 TauMax: 1 CaDt: 0.2 DtMax: 1 }
KNa: { On: true TrialSlow: false Med: { On: true Rise: 0.02 Max: 0.2 Tau: 200 Dt: 0.005 } Slow: { On: true Rise: 0.001 Max: 0.2 Tau: 1000 Dt: 0.001
} }
NMDA: { Gbar: 0.008 Tau: 200 ITau: 1 MgC: 1.4 Voff: 0 }
MaintNMDA: { Gbar: 0.007 Tau: 200 ITau: 1 MgC: 1.4 Voff: 0 }
- GabaB: { Gbar: 0.009 RiseTau: 45 DecayTau: 50 Gbase: 0.2 GiSpike: 10 MaxTime: 47.41225 TauFact: 2.5811758 RiseDt: 0.022222223 DecayDt: 0.02 }
+ GabaB: { Gbar: 0.009 RiseTau: 45 DecayTau: 50 Gbase: 0.2 GiSpike: 10 MaxTime: 47.41225 TauFact: 2.5811756 RiseDt: 0.022222223 DecayDt: 0.02 }
VGCC: { Gbar: 0.02 Ca: 25 }
AK: { Gbar: 0.1 Hf: 0.076 Mf: 0.075 Voff: 2 Vmax: -37 }
SKCa: { Gbar: 0 C50: 0.5 ActTau: 15 DeTau: 30 KCaR: 0.8 CaRDecayTau: 150 CaInThr: 0.01 CaInTau: 50 }
@@ -1211,7 +1211,7 @@ Learn: {
TrgAvgAct: { On: true GiBaseInit: 0 ErrLRate: 0.02 SynScaleRate: 0.005 SubMean: 1 Permute: true Pool: true TrgRange: { Min: 0.5 Max: 2
} }
RLRate: { On: true SigmoidMin: 0.05 Diff: true SpkThr: 0.1 DiffThr: 0.02 Min: 0.001 }
- NeuroMod: { DAMod: 0 Valence: 0 DAModGain: 0.5 DALRateSign: false DALRateMod: 0 AChLRateMod: 0 AChDisInhib: 0 BurstGain: 1 DipGain: 1 }
+ NeuroMod: { DAMod: NoDAMod Valence: Positive DAModGain: 0.5 DALRateSign: false DALRateMod: 0 AChLRateMod: 0 AChDisInhib: 0 BurstGain: 1 DipGain: 1 }
}
CT: {
GeGain: 2 DecayTau: 50
@@ -1269,13 +1269,13 @@ Act: {
Clamp: { IsInput: false IsTarget: false Ge: 1 Add: false ErrThr: 0.5 }
Noise: { On: false GeHz: 100 Ge: 0.001 GiHz: 200 Gi: 0.001 }
VmRange: { Min: 0.1 Max: 1 }
- Mahp: { Gbar: 0.02 Voff: -30 Vslope: 9 TauMax: 1000 Tadj: 3.2093637 DtMax: 0.001 }
+ Mahp: { Gbar: 0.02 Voff: -30 Vslope: 9 TauMax: 1000 Tadj: 3.209364 DtMax: 0.001 }
Sahp: { Gbar: 0.01 CaTau: 5 Off: 0.8 Slope: 0.02 TauMax: 1 CaDt: 0.2 DtMax: 1 }
KNa: { On: true TrialSlow: false Med: { On: true Rise: 0.02 Max: 0.2 Tau: 200 Dt: 0.005 } Slow: { On: true Rise: 0.001 Max: 0.2 Tau: 1000 Dt: 0.001
} }
NMDA: { Gbar: 0.006 Tau: 100 ITau: 1 MgC: 1.4 Voff: 0 }
MaintNMDA: { Gbar: 0.007 Tau: 200 ITau: 1 MgC: 1.4 Voff: 0 }
- GabaB: { Gbar: 0.01 RiseTau: 45 DecayTau: 50 Gbase: 0.2 GiSpike: 10 MaxTime: 47.41225 TauFact: 2.5811758 RiseDt: 0.022222223 DecayDt: 0.02 }
+ GabaB: { Gbar: 0.01 RiseTau: 45 DecayTau: 50 Gbase: 0.2 GiSpike: 10 MaxTime: 47.41225 TauFact: 2.5811756 RiseDt: 0.022222223 DecayDt: 0.02 }
VGCC: { Gbar: 0.02 Ca: 25 }
AK: { Gbar: 0.1 Hf: 0.076 Mf: 0.075 Voff: 2 Vmax: -37 }
SKCa: { Gbar: 0 C50: 0.5 ActTau: 15 DeTau: 30 KCaR: 0.8 CaRDecayTau: 150 CaInThr: 0.01 CaInTau: 50 }
@@ -1295,7 +1295,7 @@ Learn: {
TrgAvgAct: { On: false GiBaseInit: 0 ErrLRate: 0.02 SynScaleRate: 0.005 SubMean: 1 Permute: true Pool: true TrgRange: { Min: 0.5 Max: 2
} }
RLRate: { On: true SigmoidMin: 0.05 Diff: true SpkThr: 0.1 DiffThr: 0.02 Min: 0.001 }
- NeuroMod: { DAMod: 0 Valence: 0 DAModGain: 0.5 DALRateSign: false DALRateMod: 0 AChLRateMod: 0 AChDisInhib: 0 BurstGain: 1 DipGain: 1 }
+ NeuroMod: { DAMod: NoDAMod Valence: Positive DAModGain: 0.5 DALRateSign: false DALRateMod: 0 AChLRateMod: 0 AChDisInhib: 0 BurstGain: 1 DipGain: 1 }
}
///////////////////////////////////////////////////
Prjn: PFCVMToPFCPT
@@ -1370,13 +1370,13 @@ Act: {
Clamp: { IsInput: false IsTarget: false Ge: 1 Add: false ErrThr: 0.5 }
Noise: { On: false GeHz: 100 Ge: 0.001 GiHz: 200 Gi: 0.001 }
VmRange: { Min: 0.1 Max: 1 }
- Mahp: { Gbar: 0.02 Voff: -30 Vslope: 9 TauMax: 1000 Tadj: 3.2093637 DtMax: 0.001 }
+ Mahp: { Gbar: 0.02 Voff: -30 Vslope: 9 TauMax: 1000 Tadj: 3.209364 DtMax: 0.001 }
Sahp: { Gbar: 0.05 CaTau: 5 Off: 0.8 Slope: 0.02 TauMax: 1 CaDt: 0.2 DtMax: 1 }
KNa: { On: true TrialSlow: false Med: { On: true Rise: 0.02 Max: 0.2 Tau: 200 Dt: 0.005 } Slow: { On: true Rise: 0.001 Max: 0.2 Tau: 1000 Dt: 0.001
} }
NMDA: { Gbar: 0.006 Tau: 100 ITau: 1 MgC: 1.4 Voff: 0 }
MaintNMDA: { Gbar: 0.007 Tau: 200 ITau: 1 MgC: 1.4 Voff: 0 }
- GabaB: { Gbar: 0.015 RiseTau: 45 DecayTau: 50 Gbase: 0.2 GiSpike: 10 MaxTime: 47.41225 TauFact: 2.5811758 RiseDt: 0.022222223 DecayDt: 0.02 }
+ GabaB: { Gbar: 0.015 RiseTau: 45 DecayTau: 50 Gbase: 0.2 GiSpike: 10 MaxTime: 47.41225 TauFact: 2.5811756 RiseDt: 0.022222223 DecayDt: 0.02 }
VGCC: { Gbar: 0.02 Ca: 25 }
AK: { Gbar: 0.1 Hf: 0.076 Mf: 0.075 Voff: 2 Vmax: -37 }
SKCa: { Gbar: 0 C50: 0.5 ActTau: 15 DeTau: 30 KCaR: 0.8 CaRDecayTau: 150 CaInThr: 0.01 CaInTau: 50 }
@@ -1396,7 +1396,7 @@ Learn: {
TrgAvgAct: { On: true GiBaseInit: 0 ErrLRate: 0.02 SynScaleRate: 0.005 SubMean: 1 Permute: true Pool: true TrgRange: { Min: 0.5 Max: 2
} }
RLRate: { On: true SigmoidMin: 0.05 Diff: true SpkThr: 0.1 DiffThr: 0.02 Min: 0.001 }
- NeuroMod: { DAMod: 0 Valence: 0 DAModGain: 0.5 DALRateSign: false DALRateMod: 0 AChLRateMod: 0 AChDisInhib: 1 BurstGain: 1 DipGain: 1 }
+ NeuroMod: { DAMod: NoDAMod Valence: Positive DAModGain: 0.5 DALRateSign: false DALRateMod: 0 AChLRateMod: 0 AChDisInhib: 1 BurstGain: 1 DipGain: 1 }
}
///////////////////////////////////////////////////
Prjn: PFCPTToPFCVM
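Two mechanical changes repeat across all the params-dump hunks above: enum-typed fields now print as names (DAMod: NoDAMod, Valence: Positive) rather than raw ints, and a few float32 constants move in their last digit (Tadj 3.2093637 -> 3.209364, TauFact 2.5811758 -> 2.5811756), consistent with the mat32 v2 migration recomputing them. The enum side follows from the type becoming a fmt.Stringer; a minimal sketch, with the second value name hypothetical:

package main

import "fmt"

// DAModTypes mirrors the pattern in axon; D1Mod is a placeholder name.
type DAModTypes int32

const (
	NoDAMod DAModTypes = iota
	D1Mod
)

// String makes DAModTypes a fmt.Stringer, so %v formats the name.
func (d DAModTypes) String() string {
	return [...]string{"NoDAMod", "D1Mod"}[d]
}

func main() {
	var m DAModTypes             // zero value
	fmt.Printf("DAMod: %v\n", m) // prints "DAMod: NoDAMod", not "DAMod: 0"
}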
diff --git a/examples/pcore/pcore.go b/examples/pcore/pcore.go
index 832c8df72..16921fcca 100644
--- a/examples/pcore/pcore.go
+++ b/examples/pcore/pcore.go
@@ -7,34 +7,36 @@ pcore: This project simulates the inhibitory dynamics in the STN and GPe leading
*/
package main
+//go:generate goki generate -add-types
+
import (
"fmt"
"os"
"strconv"
"github.com/emer/axon/axon"
- "github.com/emer/emergent/ecmd"
- "github.com/emer/emergent/econfig"
- "github.com/emer/emergent/egui"
- "github.com/emer/emergent/elog"
- "github.com/emer/emergent/emer"
- "github.com/emer/emergent/env"
- "github.com/emer/emergent/erand"
- "github.com/emer/emergent/estats"
- "github.com/emer/emergent/etime"
- "github.com/emer/emergent/looper"
- "github.com/emer/emergent/netview"
- "github.com/emer/emergent/params"
- "github.com/emer/emergent/prjn"
- "github.com/emer/empi/mpi"
- "github.com/emer/etable/agg"
- "github.com/emer/etable/eplot"
- "github.com/emer/etable/etable"
- "github.com/emer/etable/split"
- "github.com/goki/gi/gi"
- "github.com/goki/gi/gimain"
- "github.com/goki/ki/bools"
- "github.com/goki/mat32"
+ "github.com/emer/emergent/v2/ecmd"
+ "github.com/emer/emergent/v2/econfig"
+ "github.com/emer/emergent/v2/egui"
+ "github.com/emer/emergent/v2/elog"
+ "github.com/emer/emergent/v2/emer"
+ "github.com/emer/emergent/v2/env"
+ "github.com/emer/emergent/v2/erand"
+ "github.com/emer/emergent/v2/estats"
+ "github.com/emer/emergent/v2/etime"
+ "github.com/emer/emergent/v2/looper"
+ "github.com/emer/emergent/v2/netview"
+ "github.com/emer/emergent/v2/params"
+ "github.com/emer/emergent/v2/prjn"
+ "github.com/emer/empi/v2/mpi"
+ "goki.dev/etable/v2/agg"
+ "goki.dev/etable/v2/eplot"
+ "goki.dev/etable/v2/etable"
+ "goki.dev/etable/v2/split"
+ "goki.dev/gi/v2/gi"
+ "goki.dev/gi/v2/gimain"
+ "goki.dev/glop/num"
+ "goki.dev/mat32/v2"
)
func main() {
@@ -42,7 +44,7 @@ func main() {
sim.New()
sim.ConfigAll()
if sim.Config.GUI {
- gimain.Main(sim.RunGUI)
+ gimain.Run(sim.RunGUI)
} else {
sim.RunNoGUI()
}
@@ -58,37 +60,37 @@ func main() {
type Sim struct {
// simulation configuration parameters -- set by .toml config file and / or args
- Config Config `desc:"simulation configuration parameters -- set by .toml config file and / or args"`
+ Config Config
- // [view: no-inline] the network -- click to view / edit parameters for layers, prjns, etc
- Net *axon.Network `view:"no-inline" desc:"the network -- click to view / edit parameters for layers, prjns, etc"`
+ // the network -- click to view / edit parameters for layers, prjns, etc
+ Net *axon.Network `view:"no-inline"`
- // [view: inline] all parameter management
- Params emer.NetParams `view:"inline" desc:"all parameter management"`
+ // all parameter management
+ Params emer.NetParams `view:"inline"`
- // [view: no-inline] contains looper control loops for running sim
- Loops *looper.Manager `view:"no-inline" desc:"contains looper control loops for running sim"`
+ // contains looper control loops for running sim
+ Loops *looper.Manager `view:"no-inline"`
// contains computed statistic values
- Stats estats.Stats `desc:"contains computed statistic values"`
+ Stats estats.Stats
// Contains all the logs and information about the logs.
- Logs elog.Logs `desc:"Contains all the logs and information about the logs.'"`
+ Logs elog.Logs
- // [view: no-inline] Environments
- Envs env.Envs `view:"no-inline" desc:"Environments"`
+ // Environments
+ Envs env.Envs `view:"no-inline"`
// axon timing parameters and state
- Context axon.Context `desc:"axon timing parameters and state"`
+ Context axon.Context
- // [view: inline] netview update parameters
- ViewUpdt netview.ViewUpdt `view:"inline" desc:"netview update parameters"`
+ // netview update parameters
+ ViewUpdt netview.ViewUpdt `view:"inline"`
- // [view: -] manages all the gui elements
- GUI egui.GUI `view:"-" desc:"manages all the gui elements"`
+ // manages all the gui elements
+ GUI egui.GUI `view:"-"`
- // [view: -] a list of random seeds to use for each run
- RndSeeds erand.Seeds `view:"-" desc:"a list of random seeds to use for each run"`
+ // a list of random seeds to use for each run
+ RndSeeds erand.Seeds `view:"-"`
}
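The Sim struct rewrite above is the template applied to every struct in this PR: the desc tag is dropped and its text survives as an ordinary doc comment, which the new goki generators read directly; behavioral tags such as view stay. A self-contained before/after sketch (Network stubbed for compilation):

package main

type Network struct{}

// SimOld shows the old convention: doc text duplicated in a desc tag.
type SimOld struct {
	// the network -- click to view / edit parameters for layers, prjns, etc
	Net *Network `view:"no-inline" desc:"the network -- click to view / edit parameters for layers, prjns, etc"`
}

// SimNew shows the new convention: the doc comment is the single source.
type SimNew struct {
	// the network -- click to view / edit parameters for layers, prjns, etc
	Net *Network `view:"no-inline"`
}

func main() {}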
// New creates new blank elements and initializes defaults
@@ -531,9 +533,9 @@ func (ss *Sim) TrialStats(di int) {
ev := ss.Envs.ByModeDi(ctx.Mode, di).(*GoNoEnv)
ss.Stats.SetFloat32("ACCPos", ev.ACCPos)
ss.Stats.SetFloat32("ACCNeg", ev.ACCNeg)
- ss.Stats.SetFloat32("Gated", bools.ToFloat32(ev.Gated))
- ss.Stats.SetFloat32("Should", bools.ToFloat32(ev.Should))
- ss.Stats.SetFloat32("Match", bools.ToFloat32(ev.Match))
+ ss.Stats.SetFloat32("Gated", num.FromBool[float32](ev.Gated))
+ ss.Stats.SetFloat32("Should", num.FromBool[float32](ev.Should))
+ ss.Stats.SetFloat32("Match", num.FromBool[float32](ev.Match))
ss.Stats.SetFloat32("Rew", ev.Rew)
ss.Stats.SetFloat32("PFCVM_RT", ss.Stats.Float32Di("PFCVM_RT", di))
ss.Stats.SetFloat32("PFCVM_ActAvg", ss.Stats.Float32Di("PFCVM_ActAvg", di))
@@ -646,10 +648,10 @@ func (ss *Sim) TestStats() {
////////////////////////////////////////////////////////////////////////////////////////////
// Gui
-// ConfigGui configures the GoGi gui interface for this simulation,
-func (ss *Sim) ConfigGui() *gi.Window {
+// ConfigGUI configures the GoGi gui interface for this simulation.
+func (ss *Sim) ConfigGUI() {
title := "PCore Test"
- ss.GUI.MakeWindow(ss, "pcore", title, `This project simulates the inhibitory dynamics in the STN and GPe leading to integration of Go vs. NoGo signal in the basal ganglia. See axon on GitHub.`)
+ ss.GUI.MakeBody(ss, "pcore", title, `This project simulates the inhibitory dynamics in the STN and GPe leading to integration of Go vs. NoGo signal in the basal ganglia. See axon on GitHub.`)
ss.GUI.CycleUpdateInterval = 20
nv := ss.GUI.AddNetView("NetView")
@@ -659,7 +661,7 @@ func (ss *Sim) ConfigGui() *gi.Window {
ss.ViewUpdt.Config(nv, etime.Phase, etime.Phase)
// nv.Scene().Camera.Pose.Pos.Set(-0.028028872, 2.1134117, 2.3178313)
- // nv.Scene().Camera.LookAt(mat32.Vec3{0.00030842167, 0.045156803, -0.039506555}, mat32.Vec3{0, 1, 0})
+ // nv.Scene().Camera.LookAt(mat32.V3(0.00030842167, 0.045156803, -0.039506555), mat32.V3(0, 1, 0))
ss.GUI.ViewUpdt = &ss.ViewUpdt
@@ -667,59 +669,61 @@ func (ss *Sim) ConfigGui() *gi.Window {
tststnm := "TestTrialStats"
tstst := ss.Logs.MiscTable(tststnm)
- plt := ss.GUI.TabView.AddNewTab(eplot.KiT_Plot2D, tststnm+" Plot").(*eplot.Plot2D)
+ plt := eplot.NewPlot2D(ss.GUI.Tabs.NewTab(tststnm + " Plot"))
ss.GUI.Plots[etime.ScopeKey(tststnm)] = plt
plt.Params.Title = tststnm
plt.Params.XAxisCol = "Trial"
plt.SetTable(tstst)
- ss.GUI.AddToolbarItem(egui.ToolbarItem{Label: "Init", Icon: "update",
- Tooltip: "Initialize everything including network weights, and start over. Also applies current params.",
- Active: egui.ActiveStopped,
- Func: func() {
- ss.Init()
- ss.GUI.UpdateWindow()
- },
- })
+ ss.GUI.Body.AddAppBar(func(tb *gi.Toolbar) {
+ ss.GUI.AddToolbarItem(tb, egui.ToolbarItem{Label: "Init", Icon: "update",
+ Tooltip: "Initialize everything including network weights, and start over. Also applies current params.",
+ Active: egui.ActiveStopped,
+ Func: func() {
+ ss.Init()
+ ss.GUI.UpdateWindow()
+ },
+ })
- ss.GUI.AddLooperCtrl(ss.Loops, []etime.Modes{etime.Train, etime.Test})
- ss.GUI.AddToolbarItem(egui.ToolbarItem{Label: "TestInit", Icon: "update",
- Tooltip: "reinitialize the testing control so it re-runs.",
- Active: egui.ActiveStopped,
- Func: func() {
- ss.Loops.ResetCountersByMode(etime.Test)
- ss.GUI.UpdateWindow()
- },
- })
+ ss.GUI.AddLooperCtrl(tb, ss.Loops, []etime.Modes{etime.Train, etime.Test})
+ ss.GUI.AddToolbarItem(tb, egui.ToolbarItem{Label: "TestInit", Icon: "update",
+ Tooltip: "reinitialize the testing control so it re-runs.",
+ Active: egui.ActiveStopped,
+ Func: func() {
+ ss.Loops.ResetCountersByMode(etime.Test)
+ ss.GUI.UpdateWindow()
+ },
+ })
- ////////////////////////////////////////////////
- ss.GUI.ToolBar.AddSeparator("log")
- ss.GUI.AddToolbarItem(egui.ToolbarItem{Label: "Reset RunLog",
- Icon: "reset",
- Tooltip: "Reset the accumulated log of all Runs, which are tagged with the ParamSet used",
- Active: egui.ActiveAlways,
- Func: func() {
- ss.Logs.ResetLog(etime.Train, etime.Run)
- ss.GUI.UpdatePlot(etime.Train, etime.Run)
- },
- })
- ////////////////////////////////////////////////
- ss.GUI.ToolBar.AddSeparator("misc")
- ss.GUI.AddToolbarItem(egui.ToolbarItem{Label: "New Seed",
- Icon: "new",
- Tooltip: "Generate a new initial random seed to get different results. By default, Init re-establishes the same initial seed every time.",
- Active: egui.ActiveAlways,
- Func: func() {
- ss.RndSeeds.NewSeeds()
- },
- })
- ss.GUI.AddToolbarItem(egui.ToolbarItem{Label: "README",
- Icon: "file-markdown",
- Tooltip: "Opens your browser on the README file that contains instructions for how to run this model.",
- Active: egui.ActiveAlways,
- Func: func() {
- gi.OpenURL("https://github.com/emer/axon/blob/master/examples/pcore/README.md")
- },
+ ////////////////////////////////////////////////
+ gi.NewSeparator(tb)
+ ss.GUI.AddToolbarItem(tb, egui.ToolbarItem{Label: "Reset RunLog",
+ Icon: "reset",
+ Tooltip: "Reset the accumulated log of all Runs, which are tagged with the ParamSet used",
+ Active: egui.ActiveAlways,
+ Func: func() {
+ ss.Logs.ResetLog(etime.Train, etime.Run)
+ ss.GUI.UpdatePlot(etime.Train, etime.Run)
+ },
+ })
+ ////////////////////////////////////////////////
+ gi.NewSeparator(tb)
+ ss.GUI.AddToolbarItem(tb, egui.ToolbarItem{Label: "New Seed",
+ Icon: "new",
+ Tooltip: "Generate a new initial random seed to get different results. By default, Init re-establishes the same initial seed every time.",
+ Active: egui.ActiveAlways,
+ Func: func() {
+ ss.RndSeeds.NewSeeds()
+ },
+ })
+ ss.GUI.AddToolbarItem(tb, egui.ToolbarItem{Label: "README",
+ Icon: "file-markdown",
+ Tooltip: "Opens your browser on the README file that contains instructions for how to run this model.",
+ Active: egui.ActiveAlways,
+ Func: func() {
+ gi.OpenURL("https://github.com/emer/axon/blob/master/examples/pcore/README.md")
+ },
+ })
})
ss.GUI.FinalizeGUI(false)
if ss.Config.Run.GPU {
@@ -729,13 +733,12 @@ func (ss *Sim) ConfigGui() *gi.Window {
ss.Net.GPU.Destroy()
})
}
- return ss.GUI.Win
}
func (ss *Sim) RunGUI() {
ss.Init()
- win := ss.ConfigGui()
- win.StartEventLoop()
+ ss.ConfigGUI()
+ ss.GUI.Body.NewWindow().Run().Wait()
}
func (ss *Sim) RunNoGUI() {
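The GUI lifecycle edits above compose into a much smaller startup path: gimain.Run replaces gimain.Main as the entry point, ConfigGUI builds a gi.Body via MakeBody instead of returning a *gi.Window, and RunGUI blocks on Body.NewWindow().Run().Wait(). A standalone sketch of the same sequence; NewBody and NewLabel are assumed from the gi v2 API and do not appear in this diff:

package main

import (
	"goki.dev/gi/v2/gi"
	"goki.dev/gi/v2/gimain"
)

func main() {
	// gimain.Run invokes the given function on the main GUI goroutine,
	// as the OS windowing layer requires.
	gimain.Run(func() {
		b := gi.NewBody("demo") // Body replaces the old *gi.Window
		gi.NewLabel(b).SetText("hello, goki v2")
		b.NewWindow().Run().Wait() // start event loop, block until close
	})
}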
diff --git a/examples/pcore/pcore_test.go b/examples/pcore/pcore_test.go
index 607930802..401da2831 100644
--- a/examples/pcore/pcore_test.go
+++ b/examples/pcore/pcore_test.go
@@ -4,7 +4,7 @@ import (
"os"
"testing"
- "github.com/emer/etable/tsragg"
+ "goki.dev/etable/v2/tsragg"
)
func TestPCore(t *testing.T) {
diff --git a/examples/pvlv/cond/blocks_test.go b/examples/pvlv/cond/blocks_test.go
index 4b63b9a33..02fb23e86 100644
--- a/examples/pvlv/cond/blocks_test.go
+++ b/examples/pvlv/cond/blocks_test.go
@@ -8,8 +8,6 @@ import (
"fmt"
"strings"
"testing"
-
- "github.com/goki/ki/ints"
)
func TestCSContext(t *testing.T) {
@@ -22,7 +20,7 @@ func TestCSContext(t *testing.T) {
css[trl.CS] = cnt + 1
cnt = ctxs[trl.Context]
ctxs[trl.Context] = cnt + 1
- maxTicks = ints.MaxInt(maxTicks, trl.NTicks)
+ maxTicks = max(maxTicks, trl.NTicks)
if trl.CS == "" {
t.Errorf("CS is empty: %s in block: %s trial: %s\n", trl.CS, blnm, trl.Name)
diff --git a/examples/pvlv/cond/cond.go b/examples/pvlv/cond/cond.go
index 494ff9d6a..e0a174ffc 100644
--- a/examples/pvlv/cond/cond.go
+++ b/examples/pvlv/cond/cond.go
@@ -8,23 +8,23 @@ package cond
type Condition struct {
// identifier for this type of configuration
- Name string `desc:"identifier for this type of configuration"`
+ Name string
// description of this configuration
- Desc string `desc:"description of this configuration"`
+ Desc string
// mix of trial types per block to run -- must be listed in AllBlocks
- Block string `desc:"mix of trial types per block to run -- must be listed in AllBlocks"`
+ Block string
// use a permuted list to ensure an exact number of trials have US -- else random draw each time
- FixedProb bool `desc:"use a permuted list to ensure an exact number of trials have US -- else random draw each time"`
+ FixedProb bool
// number of full blocks of different trial types to run (like Epochs)
- NBlocks int `desc:"number of full blocks of different trial types to run (like Epochs)"`
+ NBlocks int
// number of behavioral trials per block -- blocks, with the different types of Trials specified in Block allocated across these Trials. More different trial types and greater stochasticity (lower probability) of US presentation requires more trials.
- NTrials int `desc:"number of behavioral trials per block -- blocks, with the different types of Trials specified in Block allocated across these Trials. More different trial types and greater stochasticity (lower probability) of US presentation requires more trials."`
+ NTrials int
// permute list of generated trials in random order after generation -- otherwise presented in order specified in the Block type
- Permute bool `desc:"permute list of generated trials in random order after generation -- otherwise presented in order specified in the Block type"`
+ Permute bool
}
diff --git a/examples/pvlv/cond/enumgen.go b/examples/pvlv/cond/enumgen.go
new file mode 100644
index 000000000..3b85eb9e0
--- /dev/null
+++ b/examples/pvlv/cond/enumgen.go
@@ -0,0 +1,121 @@
+// Code generated by "goki generate -add-types"; DO NOT EDIT.
+
+package cond
+
+import (
+ "errors"
+ "log"
+ "strconv"
+ "strings"
+
+ "goki.dev/enums"
+)
+
+var _ValenceValues = []Valence{0, 1}
+
+// ValenceN is the highest valid value
+// for type Valence, plus one.
+const ValenceN Valence = 2
+
+// An "invalid array index" compiler error signifies that the constant values have changed.
+// Re-run the enumgen command to generate them again.
+func _ValenceNoOp() {
+ var x [1]struct{}
+ _ = x[Pos-(0)]
+ _ = x[Neg-(1)]
+}
+
+var _ValenceNameToValueMap = map[string]Valence{
+ `Pos`: 0,
+ `pos`: 0,
+ `Neg`: 1,
+ `neg`: 1,
+}
+
+var _ValenceDescMap = map[Valence]string{
+ 0: ``,
+ 1: ``,
+}
+
+var _ValenceMap = map[Valence]string{
+ 0: `Pos`,
+ 1: `Neg`,
+}
+
+// String returns the string representation
+// of this Valence value.
+func (i Valence) String() string {
+ if str, ok := _ValenceMap[i]; ok {
+ return str
+ }
+ return strconv.FormatInt(int64(i), 10)
+}
+
+// SetString sets the Valence value from its
+// string representation, and returns an
+// error if the string is invalid.
+func (i *Valence) SetString(s string) error {
+ if val, ok := _ValenceNameToValueMap[s]; ok {
+ *i = val
+ return nil
+ }
+ if val, ok := _ValenceNameToValueMap[strings.ToLower(s)]; ok {
+ *i = val
+ return nil
+ }
+ return errors.New(s + " is not a valid value for type Valence")
+}
+
+// Int64 returns the Valence value as an int64.
+func (i Valence) Int64() int64 {
+ return int64(i)
+}
+
+// SetInt64 sets the Valence value from an int64.
+func (i *Valence) SetInt64(in int64) {
+ *i = Valence(in)
+}
+
+// Desc returns the description of the Valence value.
+func (i Valence) Desc() string {
+ if str, ok := _ValenceDescMap[i]; ok {
+ return str
+ }
+ return i.String()
+}
+
+// ValenceValues returns all possible values
+// for the type Valence.
+func ValenceValues() []Valence {
+ return _ValenceValues
+}
+
+// Values returns all possible values
+// for the type Valence.
+func (i Valence) Values() []enums.Enum {
+ res := make([]enums.Enum, len(_ValenceValues))
+ for i, d := range _ValenceValues {
+ res[i] = d
+ }
+ return res
+}
+
+// IsValid returns whether the value is a
+// valid option for type Valence.
+func (i Valence) IsValid() bool {
+ _, ok := _ValenceMap[i]
+ return ok
+}
+
+// MarshalText implements the [encoding.TextMarshaler] interface.
+func (i Valence) MarshalText() ([]byte, error) {
+ return []byte(i.String()), nil
+}
+
+// UnmarshalText implements the [encoding.TextUnmarshaler] interface.
+func (i *Valence) UnmarshalText(text []byte) error {
+ if err := i.SetString(string(text)); err != nil {
+ log.Println(err)
+ }
+ return nil
+}
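The generated file above gives Valence the complete goki.dev/enums surface. Typical use, relying only on methods defined in this file:

package main

import (
	"fmt"

	"github.com/emer/axon/examples/pvlv/cond"
)

func main() {
	v := cond.Pos
	fmt.Println(v) // "Pos", via the generated String method

	// SetString accepts exact and lower-case names ("Neg" or "neg").
	if err := v.SetString("neg"); err != nil {
		panic(err)
	}
	fmt.Println(v.Int64(), v.IsValid()) // 1 true
}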
diff --git a/examples/pvlv/cond/env.go b/examples/pvlv/cond/env.go
index 1df7be5a3..c55ab9620 100644
--- a/examples/pvlv/cond/env.go
+++ b/examples/pvlv/cond/env.go
@@ -4,12 +4,13 @@
package cond
+//go:generate goki generate -add-types
+
import (
"fmt"
- "github.com/emer/emergent/env"
- "github.com/emer/etable/etensor"
- "github.com/goki/ki/ints"
+ "github.com/emer/emergent/v2/env"
+ "goki.dev/etable/v2/etensor"
)
// CondEnv provides a flexible implementation of standard Pavlovian
@@ -26,61 +27,61 @@ import (
type CondEnv struct {
// name of this environment
- Nm string `desc:"name of this environment"`
+ Nm string
// description of this environment
- Dsc string `desc:"description of this environment"`
+ Dsc string
// number of Y repetitions for localist reps
- NYReps int `desc:"number of Y repetitions for localist reps"`
+ NYReps int
// current run name
- RunName string `desc:"current run name"`
+ RunName string
// description of current run
- RunDesc string `desc:"description of current run"`
+ RunDesc string
// name of current condition
- CondName string `desc:"name of current condition"`
+ CondName string
// description of current condition
- CondDesc string `desc:"description of current condition"`
+ CondDesc string
- // [view: inline] counter over runs
- Run env.Ctr `inactive:"+" view:"inline" desc:"counter over runs"`
+ // counter over runs
+ Run env.Ctr `inactive:"+" view:"inline"`
- // [view: inline] counter over Condition within a run -- Max depends on number of conditions specified in given Run
- Condition env.Ctr `inactive:"+" view:"inline" desc:"counter over Condition within a run -- Max depends on number of conditions specified in given Run"`
+ // counter over Condition within a run -- Max depends on number of conditions specified in given Run
+ Condition env.Ctr `inactive:"+" view:"inline"`
- // [view: inline] counter over full blocks of all trial types within a Condition -- like an Epoch
- Block env.Ctr `inactive:"+" view:"inline" desc:"counter over full blocks of all trial types within a Condition -- like an Epoch"`
+ // counter over full blocks of all trial types within a Condition -- like an Epoch
+ Block env.Ctr `inactive:"+" view:"inline"`
- // [view: inline] counter of behavioral trials within a Block
- Trial env.Ctr `inactive:"+" view:"inline" desc:"counter of behavioral trials within a Block"`
+ // counter of behavioral trials within a Block
+ Trial env.Ctr `inactive:"+" view:"inline"`
- // [view: inline] counter of discrete steps within a behavioral trial -- typically maps onto Alpha / Theta cycle in network
- Tick env.Ctr `inactive:"+" view:"inline" desc:"counter of discrete steps within a behavioral trial -- typically maps onto Alpha / Theta cycle in network"`
+ // counter of discrete steps within a behavioral trial -- typically maps onto Alpha / Theta cycle in network
+ Tick env.Ctr `inactive:"+" view:"inline"`
// name of current trial step
- TrialName string `inactive:"+" desc:"name of current trial step"`
+ TrialName string `inactive:"+"`
// type of current trial step
- TrialType string `inactive:"+" desc:"type of current trial step"`
+ TrialType string `inactive:"+"`
// decoded value of USTimeIn
- USTimeInStr string `inactive:"+" desc:"decoded value of USTimeIn"`
+ USTimeInStr string `inactive:"+"`
// current generated set of trials per Block
- Trials []*Trial `desc:"current generated set of trials per Block"`
+ Trials []*Trial
// copy of current run parameters
- CurRun Run `desc:"copy of current run parameters"`
+ CurRun Run
// copy of info for current trial
- CurTrial Trial `desc:"copy of info for current trial"`
+ CurTrial Trial
// current rendered state tensors -- extensible map
- CurStates map[string]*etensor.Float32 `desc:"current rendered state tensors -- extensible map"`
+ CurStates map[string]*etensor.Float32
}
func (ev *CondEnv) Name() string { return ev.Nm }
@@ -223,9 +224,9 @@ func (ev *CondEnv) RenderTrial(trli, tick int) {
}
minStart := trl.CSStart
if trl.CS2Start > 0 {
- minStart = ints.MinInt(minStart, trl.CS2Start)
+ minStart = min(minStart, trl.CS2Start)
}
- maxEnd := ints.MaxInt(trl.CSEnd, trl.CS2End)
+ maxEnd := max(trl.CSEnd, trl.CS2End)
if tick >= minStart && tick <= maxEnd {
SetContext(ctxt, ev.NYReps, trl.Context)
diff --git a/examples/pvlv/cond/gtigen.go b/examples/pvlv/cond/gtigen.go
new file mode 100644
index 000000000..77d1e1ecd
--- /dev/null
+++ b/examples/pvlv/cond/gtigen.go
@@ -0,0 +1,131 @@
+// Code generated by "goki generate -add-types"; DO NOT EDIT.
+
+package cond
+
+import (
+ "goki.dev/gti"
+ "goki.dev/ordmap"
+)
+
+var _ = gti.AddType(>i.Type{
+ Name: "github.com/emer/axon/examples/pvlv/cond.Condition",
+ ShortName: "cond.Condition",
+ IDName: "condition",
+ Doc: "Condition defines parameters for running a specific type of conditioning expt",
+ Directives: gti.Directives{},
+ Fields: ordmap.Make([]ordmap.KeyVal[string, *gti.Field]{
+ {"Name", >i.Field{Name: "Name", Type: "string", LocalType: "string", Doc: "identifier for this type of configuration", Directives: gti.Directives{}, Tag: ""}},
+ {"Desc", >i.Field{Name: "Desc", Type: "string", LocalType: "string", Doc: "description of this configuration", Directives: gti.Directives{}, Tag: ""}},
+ {"Block", >i.Field{Name: "Block", Type: "string", LocalType: "string", Doc: "mix of trial types per block to run -- must be listed in AllBlocks", Directives: gti.Directives{}, Tag: ""}},
+ {"FixedProb", >i.Field{Name: "FixedProb", Type: "bool", LocalType: "bool", Doc: "use a permuted list to ensure an exact number of trials have US -- else random draw each time", Directives: gti.Directives{}, Tag: ""}},
+ {"NBlocks", >i.Field{Name: "NBlocks", Type: "int", LocalType: "int", Doc: "number of full blocks of different trial types to run (like Epochs)", Directives: gti.Directives{}, Tag: ""}},
+ {"NTrials", >i.Field{Name: "NTrials", Type: "int", LocalType: "int", Doc: "number of behavioral trials per block -- blocks, with the different types of Trials specified in Block allocated across these Trials. More different trial types and greater stochasticity (lower probability) of US presentation requires more trials.", Directives: gti.Directives{}, Tag: ""}},
+ {"Permute", >i.Field{Name: "Permute", Type: "bool", LocalType: "bool", Doc: "permute list of generated trials in random order after generation -- otherwise presented in order specified in the Block type", Directives: gti.Directives{}, Tag: ""}},
+ }),
+ Embeds: ordmap.Make([]ordmap.KeyVal[string, *gti.Field]{}),
+ Methods: ordmap.Make([]ordmap.KeyVal[string, *gti.Method]{}),
+})
+
+var _ = gti.AddType(>i.Type{
+ Name: "github.com/emer/axon/examples/pvlv/cond.CondEnv",
+ ShortName: "cond.CondEnv",
+ IDName: "cond-env",
+ Doc: "CondEnv provides a flexible implementation of standard Pavlovian\nconditioning experiments involving CS -> US sequences (trials).\nHas a large database of standard conditioning paradigms\nparameterized in a controlled manner.\n\nTime hierarchy:\n* Run = sequence of 1 or more Conditions\n* Condition = specific mix of trial types, generated at start of Condition\n* Block = one full pass through all trial types generated for condition (like Epoch)\n* Trial = one behavioral trial consisting of CS -> US presentation over time steps (Ticks)\n* Tick = discrete time steps within behavioral Trial, typically one Network update (Alpha / Theta cycle)",
+ Directives: gti.Directives{},
+ Fields: ordmap.Make([]ordmap.KeyVal[string, *gti.Field]{
+ {"Nm", >i.Field{Name: "Nm", Type: "string", LocalType: "string", Doc: "name of this environment", Directives: gti.Directives{}, Tag: ""}},
+ {"Dsc", >i.Field{Name: "Dsc", Type: "string", LocalType: "string", Doc: "description of this environment", Directives: gti.Directives{}, Tag: ""}},
+ {"NYReps", >i.Field{Name: "NYReps", Type: "int", LocalType: "int", Doc: "number of Y repetitions for localist reps", Directives: gti.Directives{}, Tag: ""}},
+ {"RunName", >i.Field{Name: "RunName", Type: "string", LocalType: "string", Doc: "current run name", Directives: gti.Directives{}, Tag: ""}},
+ {"RunDesc", >i.Field{Name: "RunDesc", Type: "string", LocalType: "string", Doc: "description of current run", Directives: gti.Directives{}, Tag: ""}},
+ {"CondName", >i.Field{Name: "CondName", Type: "string", LocalType: "string", Doc: "name of current condition", Directives: gti.Directives{}, Tag: ""}},
+ {"CondDesc", >i.Field{Name: "CondDesc", Type: "string", LocalType: "string", Doc: "description of current condition", Directives: gti.Directives{}, Tag: ""}},
+ {"Run", >i.Field{Name: "Run", Type: "github.com/emer/emergent/v2/env.Ctr", LocalType: "env.Ctr", Doc: "counter over runs", Directives: gti.Directives{}, Tag: "inactive:\"+\" view:\"inline\""}},
+ {"Condition", >i.Field{Name: "Condition", Type: "github.com/emer/emergent/v2/env.Ctr", LocalType: "env.Ctr", Doc: "counter over Condition within a run -- Max depends on number of conditions specified in given Run", Directives: gti.Directives{}, Tag: "inactive:\"+\" view:\"inline\""}},
+ {"Block", >i.Field{Name: "Block", Type: "github.com/emer/emergent/v2/env.Ctr", LocalType: "env.Ctr", Doc: "counter over full blocks of all trial types within a Condition -- like an Epoch", Directives: gti.Directives{}, Tag: "inactive:\"+\" view:\"inline\""}},
+ {"Trial", >i.Field{Name: "Trial", Type: "github.com/emer/emergent/v2/env.Ctr", LocalType: "env.Ctr", Doc: "counter of behavioral trials within a Block", Directives: gti.Directives{}, Tag: "inactive:\"+\" view:\"inline\""}},
+ {"Tick", >i.Field{Name: "Tick", Type: "github.com/emer/emergent/v2/env.Ctr", LocalType: "env.Ctr", Doc: "counter of discrete steps within a behavioral trial -- typically maps onto Alpha / Theta cycle in network", Directives: gti.Directives{}, Tag: "inactive:\"+\" view:\"inline\""}},
+ {"TrialName", >i.Field{Name: "TrialName", Type: "string", LocalType: "string", Doc: "name of current trial step", Directives: gti.Directives{}, Tag: "inactive:\"+\""}},
+ {"TrialType", >i.Field{Name: "TrialType", Type: "string", LocalType: "string", Doc: "type of current trial step", Directives: gti.Directives{}, Tag: "inactive:\"+\""}},
+ {"USTimeInStr", >i.Field{Name: "USTimeInStr", Type: "string", LocalType: "string", Doc: "decoded value of USTimeIn", Directives: gti.Directives{}, Tag: "inactive:\"+\""}},
+ {"Trials", >i.Field{Name: "Trials", Type: "[]*github.com/emer/axon/examples/pvlv/cond.Trial", LocalType: "[]*Trial", Doc: "current generated set of trials per Block", Directives: gti.Directives{}, Tag: ""}},
+ {"CurRun", >i.Field{Name: "CurRun", Type: "github.com/emer/axon/examples/pvlv/cond.Run", LocalType: "Run", Doc: "copy of current run parameters", Directives: gti.Directives{}, Tag: ""}},
+ {"CurTrial", >i.Field{Name: "CurTrial", Type: "github.com/emer/axon/examples/pvlv/cond.Trial", LocalType: "Trial", Doc: "copy of info for current trial", Directives: gti.Directives{}, Tag: ""}},
+ {"CurStates", >i.Field{Name: "CurStates", Type: "map[string]*goki.dev/etable/v2/etensor.Float32", LocalType: "map[string]*etensor.Float32", Doc: "current rendered state tensors -- extensible map", Directives: gti.Directives{}, Tag: ""}},
+ }),
+ Embeds: ordmap.Make([]ordmap.KeyVal[string, *gti.Field]{}),
+ Methods: ordmap.Make([]ordmap.KeyVal[string, *gti.Method]{}),
+})
+
+var _ = gti.AddType(>i.Type{
+ Name: "github.com/emer/axon/examples/pvlv/cond.Run",
+ ShortName: "cond.Run",
+ IDName: "run",
+ Doc: "Run is a sequence of Conditions to run in order",
+ Directives: gti.Directives{},
+ Fields: ordmap.Make([]ordmap.KeyVal[string, *gti.Field]{
+ {"Name", >i.Field{Name: "Name", Type: "string", LocalType: "string", Doc: "Name of the run", Directives: gti.Directives{}, Tag: ""}},
+ {"Desc", >i.Field{Name: "Desc", Type: "string", LocalType: "string", Doc: "Description", Directives: gti.Directives{}, Tag: ""}},
+ {"Weights", >i.Field{Name: "Weights", Type: "string", LocalType: "string", Doc: "name of condition for weights file to load prior to starting -- allows faster testing but weights may be out of date", Directives: gti.Directives{}, Tag: ""}},
+ {"Cond1", >i.Field{Name: "Cond1", Type: "string", LocalType: "string", Doc: "name of condition 1", Directives: gti.Directives{}, Tag: ""}},
+ {"Cond2", >i.Field{Name: "Cond2", Type: "string", LocalType: "string", Doc: "name of condition 2", Directives: gti.Directives{}, Tag: ""}},
+ {"Cond3", >i.Field{Name: "Cond3", Type: "string", LocalType: "string", Doc: "name of condition 3", Directives: gti.Directives{}, Tag: ""}},
+ {"Cond4", >i.Field{Name: "Cond4", Type: "string", LocalType: "string", Doc: "name of condition 4", Directives: gti.Directives{}, Tag: ""}},
+ {"Cond5", >i.Field{Name: "Cond5", Type: "string", LocalType: "string", Doc: "name of condition 5", Directives: gti.Directives{}, Tag: ""}},
+ }),
+ Embeds: ordmap.Make([]ordmap.KeyVal[string, *gti.Field]{}),
+ Methods: ordmap.Make([]ordmap.KeyVal[string, *gti.Method]{}),
+})
+
+var _ = gti.AddType(>i.Type{
+ Name: "github.com/emer/axon/examples/pvlv/cond.Valence",
+ ShortName: "cond.Valence",
+ IDName: "valence",
+ Doc: "Valence",
+ Directives: gti.Directives{
+ >i.Directive{Tool: "enums", Directive: "enum", Args: []string{}},
+ },
+
+ Methods: ordmap.Make([]ordmap.KeyVal[string, *gti.Method]{}),
+})
+
+var _ = gti.AddType(>i.Type{
+ Name: "github.com/emer/axon/examples/pvlv/cond.Trial",
+ ShortName: "cond.Trial",
+ IDName: "trial",
+ Doc: "Trial represents one behavioral trial, unfolding over\nNTicks individual time steps, with one or more CS's (conditioned stimuli)\nand a US (unconditioned stimulus -- outcome).",
+ Directives: gti.Directives{},
+ Fields: ordmap.Make([]ordmap.KeyVal[string, *gti.Field]{
+ {"Name", >i.Field{Name: "Name", Type: "string", LocalType: "string", Doc: "conventional suffixes: _R = reward, _NR = non-reward; _test = test trial (no learning)", Directives: gti.Directives{}, Tag: ""}},
+ {"Test", >i.Field{Name: "Test", Type: "bool", LocalType: "bool", Doc: "true if testing only -- no learning", Directives: gti.Directives{}, Tag: ""}},
+ {"Pct", >i.Field{Name: "Pct", Type: "float32", LocalType: "float32", Doc: "Percent of all trials for this type", Directives: gti.Directives{}, Tag: ""}},
+ {"Valence", >i.Field{Name: "Valence", Type: "github.com/emer/axon/examples/pvlv/cond.Valence", LocalType: "Valence", Doc: "Positive or negative reward valence", Directives: gti.Directives{}, Tag: ""}},
+ {"USProb", >i.Field{Name: "USProb", Type: "float32", LocalType: "float32", Doc: "Probability of US", Directives: gti.Directives{}, Tag: ""}},
+ {"MixedUS", >i.Field{Name: "MixedUS", Type: "bool", LocalType: "bool", Doc: "Mixed US set?", Directives: gti.Directives{}, Tag: ""}},
+ {"USMag", >i.Field{Name: "USMag", Type: "float32", LocalType: "float32", Doc: "US magnitude", Directives: gti.Directives{}, Tag: ""}},
+ {"NTicks", >i.Field{Name: "NTicks", Type: "int", LocalType: "int", Doc: "Number of ticks for a trial", Directives: gti.Directives{}, Tag: ""}},
+ {"CS", >i.Field{Name: "CS", Type: "string", LocalType: "string", Doc: "Conditioned stimulus", Directives: gti.Directives{}, Tag: ""}},
+ {"CSStart", >i.Field{Name: "CSStart", Type: "int", LocalType: "int", Doc: "Tick of CS start", Directives: gti.Directives{}, Tag: ""}},
+ {"CSEnd", >i.Field{Name: "CSEnd", Type: "int", LocalType: "int", Doc: "Tick of CS end", Directives: gti.Directives{}, Tag: ""}},
+ {"CS2Start", >i.Field{Name: "CS2Start", Type: "int", LocalType: "int", Doc: "Tick of CS2 start: -1 for none", Directives: gti.Directives{}, Tag: ""}},
+ {"CS2End", >i.Field{Name: "CS2End", Type: "int", LocalType: "int", Doc: "Tick of CS2 end: -1 for none", Directives: gti.Directives{}, Tag: ""}},
+ {"US", >i.Field{Name: "US", Type: "int", LocalType: "int", Doc: "Unconditioned stimulus", Directives: gti.Directives{}, Tag: ""}},
+ {"USStart", >i.Field{Name: "USStart", Type: "int", LocalType: "int", Doc: "Tick for start of US presentation", Directives: gti.Directives{}, Tag: ""}},
+ {"USEnd", >i.Field{Name: "USEnd", Type: "int", LocalType: "int", Doc: "Tick for end of US presentation", Directives: gti.Directives{}, Tag: ""}},
+ {"Context", >i.Field{Name: "Context", Type: "string", LocalType: "string", Doc: "Context -- typically same as CS -- if blank CS will be copied -- different in certain extinguishing contexts", Directives: gti.Directives{}, Tag: ""}},
+ {"USOn", >i.Field{Name: "USOn", Type: "bool", LocalType: "bool", Doc: "for rendered trials, true if US active", Directives: gti.Directives{}, Tag: ""}},
+ {"CSOn", >i.Field{Name: "CSOn", Type: "bool", LocalType: "bool", Doc: "for rendered trials, true if CS active", Directives: gti.Directives{}, Tag: ""}},
+ }),
+ Embeds: ordmap.Make([]ordmap.KeyVal[string, *gti.Field]{}),
+ Methods: ordmap.Make([]ordmap.KeyVal[string, *gti.Method]{}),
+})
+
+var _ = gti.AddType(>i.Type{
+ Name: "github.com/emer/axon/examples/pvlv/cond.Block",
+ ShortName: "cond.Block",
+ IDName: "block",
+ Doc: "Block represents a set of trial types",
+ Directives: gti.Directives{},
+
+ Methods: ordmap.Make([]ordmap.KeyVal[string, *gti.Method]{}),
+})
diff --git a/examples/pvlv/cond/inputs.go b/examples/pvlv/cond/inputs.go
index 21ae8523e..db7c76d0c 100644
--- a/examples/pvlv/cond/inputs.go
+++ b/examples/pvlv/cond/inputs.go
@@ -4,7 +4,7 @@
package cond
-import "github.com/emer/etable/etensor"
+import "goki.dev/etable/v2/etensor"
var (
NUSs = 4
diff --git a/examples/pvlv/cond/run.go b/examples/pvlv/cond/run.go
index b68d5701f..abd015bee 100644
--- a/examples/pvlv/cond/run.go
+++ b/examples/pvlv/cond/run.go
@@ -8,28 +8,28 @@ package cond
type Run struct {
// Name of the run
- Name string `desc:"Name of the run"`
+ Name string
// Description
- Desc string `desc:"Description"`
+ Desc string
// name of condition for weights file to load prior to starting -- allows faster testing but weights may be out of date
- Weights string `desc:"name of condition for weights file to load prior to starting -- allows faster testing but weights may be out of date"`
+ Weights string
// name of condition 1
- Cond1 string `desc:"name of condition 1"`
+ Cond1 string
// name of condition 2
- Cond2 string `desc:"name of condition 2"`
+ Cond2 string
// name of condition 3
- Cond3 string `desc:"name of condition 3"`
+ Cond3 string
// name of condition 4
- Cond4 string `desc:"name of condition 4"`
+ Cond4 string
// name of condition 5
- Cond5 string `desc:"name of condition 5"`
+ Cond5 string
}
// NConds returns the number of conditions in this Run
diff --git a/examples/pvlv/cond/trial.go b/examples/pvlv/cond/trial.go
index 01c94742a..8962c478e 100644
--- a/examples/pvlv/cond/trial.go
+++ b/examples/pvlv/cond/trial.go
@@ -7,88 +7,79 @@ package cond
import (
"math/rand"
- "github.com/emer/emergent/erand"
- "github.com/goki/ki/kit"
- "github.com/goki/mat32"
+ "github.com/emer/emergent/v2/erand"
+ "goki.dev/mat32/v2"
)
-//go:generate stringer -type=Valence
-
// Valence
-type Valence int32
+type Valence int32 //enums:enum
const (
Pos Valence = iota
Neg
- ValenceN
)
-var KiT_Valence = kit.Enums.AddEnum(ValenceN, kit.NotBitFlag, nil)
-
-func (ev Valence) MarshalJSON() ([]byte, error) { return kit.EnumMarshalJSON(ev) }
-func (ev *Valence) UnmarshalJSON(b []byte) error { return kit.EnumUnmarshalJSON(ev, b) }
-
// Trial represents one behavioral trial, unfolding over
// NTicks individual time steps, with one or more CS's (conditioned stimuli)
// and a US (unconditioned stimulus -- outcome).
type Trial struct {
// conventional suffixes: _R = reward, _NR = non-reward; _test = test trial (no learning)
- Name string `desc:"conventional suffixes: _R = reward, _NR = non-reward; _test = test trial (no learning)"`
+ Name string
// true if testing only -- no learning
- Test bool `desc:"true if testing only -- no learning"`
+ Test bool
// Percent of all trials for this type
- Pct float32 `desc:"Percent of all trials for this type"`
+ Pct float32
// Positive or negative reward valence
- Valence Valence `desc:"Positive or negative reward valence"`
+ Valence Valence
// Probability of US
- USProb float32 `desc:"Probability of US"`
+ USProb float32
// Mixed US set?
- MixedUS bool `desc:"Mixed US set?"`
+ MixedUS bool
// US magnitude
- USMag float32 `desc:"US magnitude"`
+ USMag float32
// Number of ticks for a trial
- NTicks int `desc:"Number of ticks for a trial"`
+ NTicks int
// Conditioned stimulus
- CS string `desc:"Conditioned stimulus"`
+ CS string
// Tick of CS start
- CSStart int `desc:"Tick of CS start"`
+ CSStart int
// Tick of CS end
- CSEnd int `desc:"Tick of CS end"`
+ CSEnd int
// Tick of CS2 start: -1 for none
- CS2Start int `desc:"Tick of CS2 start: -1 for none"`
+ CS2Start int
// Tick of CS2 end: -1 for none
- CS2End int `desc:"Tick of CS2 end: -1 for none"`
+ CS2End int
// Unconditioned stimulus
- US int `desc:"Unconditioned stimulus"`
+ US int
// Tick for start of US presentation
- USStart int `desc:"Tick for start of US presentation"`
+ USStart int
// Tick for end of US presentation
- USEnd int `desc:"Tick for end of US presentation"`
+ USEnd int
// Context -- typically same as CS -- if blank CS will be copied -- different in certain extinguishing contexts
- Context string `desc:"Context -- typically same as CS -- if blank CS will be copied -- different in certain extinguishing contexts"`
+ Context string
// for rendered trials, true if US active
- USOn bool `desc:"for rendered trials, true if US active"`
+ USOn bool
// for rendered trials, true if CS active
- CSOn bool `desc:"for rendered trials, true if CS active"`
+ CSOn bool
}
// Block represents a set of trial types
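Net effect of the trial.go hunks: the stringer go:generate line, the hand-declared ValenceN sentinel, and the kit.Enums registration all disappear, replaced by a single //enums:enum comment directive plus the package-level //go:generate goki generate -add-types (added in env.go above), which emits enumgen.go and gtigen.go. The resulting declaration pattern:

package cond

//go:generate goki generate -add-types

// Valence
type Valence int32 //enums:enum

const (
	Pos Valence = iota
	Neg
	// ValenceN is now generated into enumgen.go rather than declared here.
)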
diff --git a/examples/pvlv/cond/valence_string.go b/examples/pvlv/cond/valence_string.go
deleted file mode 100644
index b60ca79b1..000000000
--- a/examples/pvlv/cond/valence_string.go
+++ /dev/null
@@ -1,40 +0,0 @@
-// Code generated by "stringer -type=Valence"; DO NOT EDIT.
-
-package cond
-
-import (
- "errors"
- "strconv"
-)
-
-var _ = errors.New("dummy error")
-
-func _() {
- // An "invalid array index" compiler error signifies that the constant values have changed.
- // Re-run the stringer command to generate them again.
- var x [1]struct{}
- _ = x[Pos-0]
- _ = x[Neg-1]
- _ = x[ValenceN-2]
-}
-
-const _Valence_name = "PosNegValenceN"
-
-var _Valence_index = [...]uint8{0, 3, 6, 14}
-
-func (i Valence) String() string {
- if i < 0 || i >= Valence(len(_Valence_index)-1) {
- return "Valence(" + strconv.FormatInt(int64(i), 10) + ")"
- }
- return _Valence_name[_Valence_index[i]:_Valence_index[i+1]]
-}
-
-func (i *Valence) FromString(s string) error {
- for j := 0; j < len(_Valence_index)-1; j++ {
- if s == _Valence_name[_Valence_index[j]:_Valence_index[j+1]] {
- *i = Valence(j)
- return nil
- }
- }
- return errors.New("String: " + s + " is not a valid option for type: Valence")
-}
diff --git a/examples/pvlv/config.go b/examples/pvlv/config.go
index 0f50df427..068807596 100644
--- a/examples/pvlv/config.go
+++ b/examples/pvlv/config.go
@@ -10,107 +10,107 @@ package main
type EnvConfig struct {
// env parameters -- can set any field/subfield on Env struct, using standard TOML formatting
- Env map[string]any `desc:"env parameters -- can set any field/subfield on Env struct, using standard TOML formatting"`
+ Env map[string]any
- // [def: PosAcq_A100B50] environment run name
- RunName string `def:"PosAcq_A100B50" desc:"environment run name"`
+ // environment run name
+ RunName string `def:"PosAcq_A100B50"`
// override the default number of blocks to run conditions with NBlocks
- SetNBlocks bool `desc:"override the default number of blocks to run conditions with NBlocks"`
+ SetNBlocks bool
- // [viewif: SetNBlocks] number of blocks to run if SetNBlocks is true
- NBlocks int `viewif:"SetNBlocks" desc:"number of blocks to run if SetNBlocks is true"`
+ // number of blocks to run if SetNBlocks is true
+ NBlocks int `viewif:"SetNBlocks"`
}
// ParamConfig has config parameters related to sim params
type ParamConfig struct {
// PVLV parameters -- can set any field/subfield on Net.PVLV params, using standard TOML formatting
- PVLV map[string]any `desc:"PVLV parameters -- can set any field/subfield on Net.PVLV params, using standard TOML formatting"`
+ PVLV map[string]any
// network parameters
- Network map[string]any `desc:"network parameters"`
+ Network map[string]any
// Extra Param Sheet name(s) to use (space separated if multiple) -- must be valid name as listed in compiled-in params or loaded params
- Sheet string `desc:"Extra Param Sheet name(s) to use (space separated if multiple) -- must be valid name as listed in compiled-in params or loaded params"`
+ Sheet string
// extra tag to add to file names and logs saved from this run
- Tag string `desc:"extra tag to add to file names and logs saved from this run"`
+ Tag string
// user note -- describe the run params etc -- like a git commit message for the run
- Note string `desc:"user note -- describe the run params etc -- like a git commit message for the run"`
+ Note string
// Name of the JSON file to input saved parameters from.
- File string `nest:"+" desc:"Name of the JSON file to input saved parameters from."`
+ File string `nest:"+"`
// Save a snapshot of all current param and config settings in a directory named params_ (or _good if Good is true), then quit -- useful for comparing to later changes and seeing multiple views of current params
- SaveAll bool `nest:"+" desc:"Save a snapshot of all current param and config settings in a directory named params_ (or _good if Good is true), then quit -- useful for comparing to later changes and seeing multiple views of current params"`
+ SaveAll bool `nest:"+"`
// for SaveAll, save to params_good for a known good params state. This can be done prior to making a new release after all tests are passing -- add results to git to provide a full diff record of all params over time.
- Good bool `nest:"+" desc:"for SaveAll, save to params_good for a known good params state. This can be done prior to making a new release after all tests are passing -- add results to git to provide a full diff record of all params over time."`
+ Good bool `nest:"+"`
}
// RunConfig has config parameters related to running the sim
type RunConfig struct {
- // [def: false] use the GPU for computation -- only for testing in this model -- not faster
- GPU bool `def:"false" desc:"use the GPU for computation -- only for testing in this model -- not faster"`
+ // use the GPU for computation -- only for testing in this model -- not faster
+ GPU bool `def:"false"`
- // [def: 0] number of parallel threads for CPU computation -- 0 = use default
- NThreads int `def:"0" desc:"number of parallel threads for CPU computation -- 0 = use default"`
+ // number of parallel threads for CPU computation -- 0 = use default
+ NThreads int `def:"0"`
- // [def: 0] starting run number -- determines the random seed -- runs counts from there -- can do all runs in parallel by launching separate jobs with each run, runs = 1
- Run int `def:"0" desc:"starting run number -- determines the random seed -- runs counts from there -- can do all runs in parallel by launching separate jobs with each run, runs = 1"`
+ // starting run number -- determines the random seed -- runs counts from there -- can do all runs in parallel by launching separate jobs with each run, runs = 1
+ Run int `def:"0"`
- // [def: 1] [min: 1] total number of runs to do when running Train
- NRuns int `def:"1" min:"1" desc:"total number of runs to do when running Train"`
+ // total number of runs to do when running Train
+ NRuns int `def:"1" min:"1"`
}
// LogConfig has config parameters related to logging data
type LogConfig struct {
- // [def: ['DA','VSPatch']] stats to aggregate at higher levels
- AggStats []string `def:"['DA','VSPatch']" desc:"stats to aggregate at higher levels"`
+ // stats to aggregate at higher levels
+ AggStats []string `def:"['DA','VSPatch']"`
// if true, save final weights after each run
- SaveWts bool `desc:"if true, save final weights after each run"`
+ SaveWts bool
- // [def: true] if true, save block log to file, as .blk.tsv typically
- Block bool `def:"true" nest:"+" desc:"if true, save block log to file, as .blk.tsv typically"`
+ // if true, save block log to file, as .blk.tsv typically
+ Block bool `def:"true" nest:"+"`
- // [def: true] if true, save condition log to file, as .cnd.tsv typically
- Cond bool `def:"true" nest:"+" desc:"if true, save condition log to file, as .cnd.tsv typically"`
+ // if true, save condition log to file, as .cnd.tsv typically
+ Cond bool `def:"true" nest:"+"`
- // [def: false] if true, save trial log to file, as .trl.tsv typically
- Trial bool `def:"false" nest:"+" desc:"if true, save trial log to file, as .trl.tsv typically"`
+ // if true, save trial log to file, as .trl.tsv typically
+ Trial bool `def:"false" nest:"+"`
// if true, save network activation etc data from testing trials, for later viewing in netview
- NetData bool `desc:"if true, save network activation etc data from testing trials, for later viewing in netview"`
+ NetData bool
}
// Config is a standard Sim config -- use as a starting point.
type Config struct {
// specify include files here, and after configuration, it contains list of include files added
- Includes []string `desc:"specify include files here, and after configuration, it contains list of include files added"`
+ Includes []string
- // [def: true] open the GUI -- does not automatically run -- if false, then runs automatically and quits
- GUI bool `def:"true" desc:"open the GUI -- does not automatically run -- if false, then runs automatically and quits"`
+ // open the GUI -- does not automatically run -- if false, then runs automatically and quits
+ GUI bool `def:"true"`
// log debugging information
- Debug bool `desc:"log debugging information"`
+ Debug bool
- // [view: add-fields] environment configuration options
- Env EnvConfig `view:"add-fields" desc:"environment configuration options"`
+ // environment configuration options
+ Env EnvConfig `view:"add-fields"`
- // [view: add-fields] parameter related configuration options
- Params ParamConfig `view:"add-fields" desc:"parameter related configuration options"`
+ // parameter related configuration options
+ Params ParamConfig `view:"add-fields"`
- // [view: add-fields] sim running related configuration options
- Run RunConfig `view:"add-fields" desc:"sim running related configuration options"`
+ // sim running related configuration options
+ Run RunConfig `view:"add-fields"`
- // [view: add-fields] data logging related configuration options
- Log LogConfig `view:"add-fields" desc:"data logging related configuration options"`
+ // data logging related configuration options
+ Log LogConfig `view:"add-fields"`
}
func (cfg *Config) IncludesPtr() *[]string { return &cfg.Includes }
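The pattern in the Config structs above recurs throughout this migration: field documentation moves out of desc:"..." struct tags into ordinary Go doc comments directly above each field, and only machine-read tags (def, min, nest, view, viewif) remain. A minimal sketch of the convention, using a hypothetical Example type that is not part of this PR:

package main

import "fmt"

// Example shows the post-migration field style: the doc comment above each
// field carries the description; the struct tag keeps only structured
// metadata such as def and min.
type Example struct {
	// extra tag to add to file names and logs saved from this run
	Tag string

	// total number of runs to do when running Train
	NRuns int `def:"1" min:"1"`
}

func main() {
	fmt.Printf("%+v\n", Example{Tag: "v1", NRuns: 1})
}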
diff --git a/examples/pvlv/effort_plot.go b/examples/pvlv/effort_plot.go
index 0379e60b0..0cf027eb8 100644
--- a/examples/pvlv/effort_plot.go
+++ b/examples/pvlv/effort_plot.go
@@ -10,24 +10,23 @@ import (
"strconv"
"github.com/emer/axon/axon"
- "github.com/emer/emergent/erand"
- "github.com/emer/etable/eplot"
- "github.com/emer/etable/etable"
- "github.com/emer/etable/etensor"
- _ "github.com/emer/etable/etview" // include to get gui views
- "github.com/emer/etable/minmax"
- "github.com/goki/gi/gi"
- "github.com/goki/gi/giv"
- "github.com/goki/ki/bools"
- "github.com/goki/ki/ki"
- "github.com/goki/mat32"
+ "github.com/emer/emergent/v2/erand"
+ "goki.dev/etable/v2/eplot"
+ "goki.dev/etable/v2/etable"
+ "goki.dev/etable/v2/etensor"
+ _ "goki.dev/etable/v2/etview" // include to get gui views
+ "goki.dev/etable/v2/minmax"
+ "goki.dev/gi/v2/gi"
+ "goki.dev/gi/v2/giv"
+ "goki.dev/glop/num"
+ "goki.dev/icons"
)
func DriveEffortGUI() {
ep := &DrEffPlot{}
ep.Config()
- win := ep.ConfigGui()
- win.StartEventLoop()
+ b := ep.ConfigGUI()
+ b.NewWindow().Run().Wait()
}
// LogPrec is precision for saving float values in logs
@@ -37,40 +36,34 @@ const LogPrec = 4
type DrEffPlot struct {
// context just for plotting
- Context axon.Context `desc:"context just for plotting"`
+ Context axon.Context
// PVLV params
- PVLV axon.PVLV `desc:"PVLV params"`
+ PVLV axon.PVLV
// total number of time steps to simulate
- TimeSteps int `desc:"total number of time steps to simulate"`
+ TimeSteps int
// range for number of time steps between US receipt
- USTime minmax.Int `desc:"range for number of time steps between US receipt"`
+ USTime minmax.Int
// range for random effort per step
- Effort minmax.F32 `desc:"range for random effort per step"`
+ Effort minmax.F32
- // [view: no-inline] table for plot
- Table *etable.Table `view:"no-inline" desc:"table for plot"`
+ // table for plot
+ Table *etable.Table `view:"no-inline"`
- // [view: -] the plot
- Plot *eplot.Plot2D `view:"-" desc:"the plot"`
+ // the plot
+ Plot *eplot.Plot2D `view:"-"`
- // [view: no-inline] table for plot
- TimeTable *etable.Table `view:"no-inline" desc:"table for plot"`
+ // table for plot
+ TimeTable *etable.Table `view:"no-inline"`
- // [view: -] the plot
- TimePlot *eplot.Plot2D `view:"-" desc:"the plot"`
+ // the plot
+ TimePlot *eplot.Plot2D `view:"-"`
- // [view: -] main GUI window
- Win *gi.Window `view:"-" desc:"main GUI window"`
-
- // [view: -] the master toolbar
- ToolBar *gi.ToolBar `view:"-" desc:"the master toolbar"`
-
- // [view: -] random number generator
- Rand erand.SysRand `view:"-" desc:"random number generator"`
+ // random number generator
+ Rand erand.SysRand `view:"-"`
}
// Config configures all the elements using the standard functions
@@ -99,7 +92,7 @@ func (ss *DrEffPlot) Update() {
}
// EffortPlot plots the equation as a function of effort / time
-func (ss *DrEffPlot) EffortPlot() {
+func (ss *DrEffPlot) EffortPlot() { //gti:add
ss.Update()
ctx := &ss.Context
pp := &ss.PVLV
@@ -118,7 +111,7 @@ func (ss *DrEffPlot) EffortPlot() {
}
// UrgencyPlot plots the equation as a function of effort / time
-func (ss *DrEffPlot) UrgencyPlot() {
+func (ss *DrEffPlot) UrgencyPlot() { //gti:add
ctx := &ss.Context
pp := &ss.PVLV
ss.Update()
@@ -161,7 +154,7 @@ func (ss *DrEffPlot) ConfigPlot(plt *eplot.Plot2D, dt *etable.Table) *eplot.Plot
/////////////////////////////////////////////////////////////////
// TimeRun runs the equation over time.
-func (ss *DrEffPlot) TimeRun() {
+func (ss *DrEffPlot) TimeRun() { //gti:add
ss.Update()
dt := ss.TimeTable
pp := &ss.PVLV
@@ -194,7 +187,7 @@ func (ss *DrEffPlot) TimeRun() {
dt.SetCellFloat("Drive", ti, float64(dr))
axon.SetGlbUSposV(ctx, 0, axon.GvUSpos, 1, usv)
- axon.SetGlbV(ctx, 0, axon.GvHadRew, bools.ToFloat32(usv > 0))
+ axon.SetGlbV(ctx, 0, axon.GvHadRew, num.FromBool[float32](usv > 0))
pp.EffortUrgencyUpdt(ctx, 0, 0)
pp.DriveUpdt(ctx, 0)
}
@@ -231,75 +224,29 @@ func (ss *DrEffPlot) ConfigTimePlot(plt *eplot.Plot2D, dt *etable.Table) *eplot.
return plt
}
-// ConfigGui configures the GoGi gui interface for this simulation,
-func (ss *DrEffPlot) ConfigGui() *gi.Window {
- width := 1600
- height := 1200
-
- // gi.WinEventTrace = true
-
- win := gi.NewMainWindow("dreff_plot", "Drive / Effort / Urgency Plotting", width, height)
- ss.Win = win
-
- vp := win.WinViewport2D()
- updt := vp.UpdateStart()
+// ConfigGUI configures the GoGi gui interface for this simulation.
+func (ss *DrEffPlot) ConfigGUI() *gi.Body {
+ b := gi.NewAppBody("dreff_plot").SetTitle("Drive / Effort / Urgency Plotting")
- mfr := win.SetMainFrame()
-
- tbar := gi.AddNewToolBar(mfr, "tbar")
- tbar.SetStretchMaxWidth()
- ss.ToolBar = tbar
-
- split := gi.AddNewSplitView(mfr, "split")
- split.Dim = mat32.X
- split.SetStretchMax()
-
- sv := giv.AddNewStructView(split, "sv")
+ split := gi.NewSplits(b, "split")
+ sv := giv.NewStructView(split, "sv")
sv.SetStruct(ss)
- tv := gi.AddNewTabView(split, "tv")
+ tv := gi.NewTabs(split, "tv")
- plt := tv.AddNewTab(eplot.KiT_Plot2D, "EffortPlot").(*eplot.Plot2D)
- ss.Plot = ss.ConfigPlot(plt, ss.Table)
+ ss.Plot = eplot.NewSubPlot(tv.NewTab("Effort Plot"))
+ ss.ConfigPlot(ss.Plot, ss.Table)
- plt = tv.AddNewTab(eplot.KiT_Plot2D, "TimePlot").(*eplot.Plot2D)
- ss.TimePlot = ss.ConfigTimePlot(plt, ss.TimeTable)
+ ss.TimePlot = eplot.NewSubPlot(tv.NewTab("TimePlot"))
+ ss.ConfigTimePlot(ss.TimePlot, ss.TimeTable)
split.SetSplits(.3, .7)
- tbar.AddAction(gi.ActOpts{Label: "Effort Plot", Icon: "update", Tooltip: "plot effort equation."}, win.This(), func(recv, send ki.Ki, sig int64, data any) {
- ss.EffortPlot()
- vp.SetNeedsFullRender()
+ b.AddAppBar(func(tb *gi.Toolbar) {
+ giv.NewFuncButton(tb, ss.EffortPlot).SetIcon(icons.PlayArrow)
+ giv.NewFuncButton(tb, ss.UrgencyPlot).SetIcon(icons.PlayArrow)
+ giv.NewFuncButton(tb, ss.TimeRun).SetIcon(icons.PlayArrow)
})
- tbar.AddAction(gi.ActOpts{Label: "Urgency Plot", Icon: "update", Tooltip: "plot urgency equation."}, win.This(), func(recv, send ki.Ki, sig int64, data any) {
- ss.UrgencyPlot()
- vp.SetNeedsFullRender()
- })
-
- tbar.AddAction(gi.ActOpts{Label: "Time Run", Icon: "update", Tooltip: "Run a simulated time-evolution and plot results."}, win.This(), func(recv, send ki.Ki, sig int64, data any) {
- ss.TimeRun()
- vp.SetNeedsFullRender()
- })
-
- tbar.AddAction(gi.ActOpts{Label: "README", Icon: "file-markdown", Tooltip: "Opens your browser on the README file that contains instructions for how to run this model."}, win.This(),
- func(recv, send ki.Ki, sig int64, data any) {
- gi.OpenURL("https://github.com/emer/axon/blob/master/examples/pvlv/README.md")
- })
-
- vp.UpdateEndNoSig(updt)
-
- // main menu
- appnm := gi.AppName()
- mmen := win.MainMenu
- mmen.ConfigMenus([]string{appnm, "File", "Edit", "Window"})
-
- amen := win.MainMenu.ChildByName(appnm, 0).(*gi.Action)
- amen.Menu.AddAppMenu(win)
-
- emen := win.MainMenu.ChildByName("Edit", 1).(*gi.Action)
- emen.Menu.AddCopyCutPaste(win)
-
- win.MainMenuUpdated()
- return win
+ return b
}
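The ConfigGUI rewrite above is the core GoGi v1 -> v2 change in this PR: an imperative Window/MainFrame/ToolBar/MainMenu build is replaced by a declarative Body with an app bar of auto-generated function buttons. A condensed sketch using only calls that appear in the new code; the Demo type and its Run method are hypothetical, and gti registration of Run (via goki generate) is assumed:

package main

import (
	"goki.dev/gi/v2/gi"
	"goki.dev/gi/v2/giv"
	"goki.dev/icons"
)

// Demo is a hypothetical struct shown in the StructView pane.
type Demo struct {
	Steps int
}

// Run is a hypothetical action exposed as a toolbar button.
func (d *Demo) Run() { d.Steps++ } //gti:add

func main() {
	d := &Demo{Steps: 10}
	b := gi.NewAppBody("demo").SetTitle("v2 GUI Sketch")
	split := gi.NewSplits(b, "split")
	sv := giv.NewStructView(split, "sv") // property editor, as in ConfigGUI above
	sv.SetStruct(d)
	gi.NewTabs(split, "tv") // right-hand pane placeholder
	split.SetSplits(.3, .7)
	b.AddAppBar(func(tb *gi.Toolbar) {
		// NewFuncButton turns a gti-registered method into a toolbar action.
		giv.NewFuncButton(tb, d.Run).SetIcon(icons.PlayArrow)
	})
	b.NewWindow().Run().Wait() // replaces the v1 win.StartEventLoop()
}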
diff --git a/examples/pvlv/gtigen.go b/examples/pvlv/gtigen.go
new file mode 100644
index 000000000..594037897
--- /dev/null
+++ b/examples/pvlv/gtigen.go
@@ -0,0 +1,152 @@
+// Code generated by "goki generate -add-types"; DO NOT EDIT.
+
+package main
+
+import (
+ "goki.dev/gti"
+ "goki.dev/ordmap"
+)
+
+var _ = gti.AddType(&gti.Type{
+ Name: "main.EnvConfig",
+ ShortName: "main.EnvConfig",
+ IDName: "env-config",
+ Doc: "EnvConfig has config params for environment\nnote: only adding fields for key Env params that matter for both Network and Env\nother params are set via the Env map data mechanism.",
+ Directives: gti.Directives{},
+ Fields: ordmap.Make([]ordmap.KeyVal[string, *gti.Field]{
+ {"Env", >i.Field{Name: "Env", Type: "map[string]any", LocalType: "map[string]any", Doc: "env parameters -- can set any field/subfield on Env struct, using standard TOML formatting", Directives: gti.Directives{}, Tag: ""}},
+ {"RunName", >i.Field{Name: "RunName", Type: "string", LocalType: "string", Doc: "environment run name", Directives: gti.Directives{}, Tag: "def:\"PosAcq_A100B50\""}},
+ {"SetNBlocks", >i.Field{Name: "SetNBlocks", Type: "bool", LocalType: "bool", Doc: "override the default number of blocks to run conditions with NBlocks", Directives: gti.Directives{}, Tag: ""}},
+ {"NBlocks", >i.Field{Name: "NBlocks", Type: "int", LocalType: "int", Doc: "number of blocks to run if SetNBlocks is true", Directives: gti.Directives{}, Tag: "viewif:\"SetNBlocks\""}},
+ }),
+ Embeds: ordmap.Make([]ordmap.KeyVal[string, *gti.Field]{}),
+ Methods: ordmap.Make([]ordmap.KeyVal[string, *gti.Method]{}),
+})
+
+var _ = gti.AddType(&gti.Type{
+ Name: "main.ParamConfig",
+ ShortName: "main.ParamConfig",
+ IDName: "param-config",
+ Doc: "ParamConfig has config parameters related to sim params",
+ Directives: gti.Directives{},
+ Fields: ordmap.Make([]ordmap.KeyVal[string, *gti.Field]{
+ {"PVLV", >i.Field{Name: "PVLV", Type: "map[string]any", LocalType: "map[string]any", Doc: "PVLV parameters -- can set any field/subfield on Net.PVLV params, using standard TOML formatting", Directives: gti.Directives{}, Tag: ""}},
+ {"Network", >i.Field{Name: "Network", Type: "map[string]any", LocalType: "map[string]any", Doc: "network parameters", Directives: gti.Directives{}, Tag: ""}},
+ {"Sheet", >i.Field{Name: "Sheet", Type: "string", LocalType: "string", Doc: "Extra Param Sheet name(s) to use (space separated if multiple) -- must be valid name as listed in compiled-in params or loaded params", Directives: gti.Directives{}, Tag: ""}},
+ {"Tag", >i.Field{Name: "Tag", Type: "string", LocalType: "string", Doc: "extra tag to add to file names and logs saved from this run", Directives: gti.Directives{}, Tag: ""}},
+ {"Note", >i.Field{Name: "Note", Type: "string", LocalType: "string", Doc: "user note -- describe the run params etc -- like a git commit message for the run", Directives: gti.Directives{}, Tag: ""}},
+ {"File", >i.Field{Name: "File", Type: "string", LocalType: "string", Doc: "Name of the JSON file to input saved parameters from.", Directives: gti.Directives{}, Tag: "nest:\"+\""}},
+ {"SaveAll", >i.Field{Name: "SaveAll", Type: "bool", LocalType: "bool", Doc: "Save a snapshot of all current param and config settings in a directory named params_ (or _good if Good is true), then quit -- useful for comparing to later changes and seeing multiple views of current params", Directives: gti.Directives{}, Tag: "nest:\"+\""}},
+ {"Good", >i.Field{Name: "Good", Type: "bool", LocalType: "bool", Doc: "for SaveAll, save to params_good for a known good params state. This can be done prior to making a new release after all tests are passing -- add results to git to provide a full diff record of all params over time.", Directives: gti.Directives{}, Tag: "nest:\"+\""}},
+ }),
+ Embeds: ordmap.Make([]ordmap.KeyVal[string, *gti.Field]{}),
+ Methods: ordmap.Make([]ordmap.KeyVal[string, *gti.Method]{}),
+})
+
+var _ = gti.AddType(&gti.Type{
+ Name: "main.RunConfig",
+ ShortName: "main.RunConfig",
+ IDName: "run-config",
+ Doc: "RunConfig has config parameters related to running the sim",
+ Directives: gti.Directives{},
+ Fields: ordmap.Make([]ordmap.KeyVal[string, *gti.Field]{
+ {"GPU", >i.Field{Name: "GPU", Type: "bool", LocalType: "bool", Doc: "use the GPU for computation -- only for testing in this model -- not faster", Directives: gti.Directives{}, Tag: "def:\"false\""}},
+ {"NThreads", >i.Field{Name: "NThreads", Type: "int", LocalType: "int", Doc: "number of parallel threads for CPU computation -- 0 = use default", Directives: gti.Directives{}, Tag: "def:\"0\""}},
+ {"Run", >i.Field{Name: "Run", Type: "int", LocalType: "int", Doc: "starting run number -- determines the random seed -- runs counts from there -- can do all runs in parallel by launching separate jobs with each run, runs = 1", Directives: gti.Directives{}, Tag: "def:\"0\""}},
+ {"NRuns", >i.Field{Name: "NRuns", Type: "int", LocalType: "int", Doc: "total number of runs to do when running Train", Directives: gti.Directives{}, Tag: "def:\"1\" min:\"1\""}},
+ }),
+ Embeds: ordmap.Make([]ordmap.KeyVal[string, *gti.Field]{}),
+ Methods: ordmap.Make([]ordmap.KeyVal[string, *gti.Method]{}),
+})
+
+var _ = gti.AddType(&gti.Type{
+ Name: "main.LogConfig",
+ ShortName: "main.LogConfig",
+ IDName: "log-config",
+ Doc: "LogConfig has config parameters related to logging data",
+ Directives: gti.Directives{},
+ Fields: ordmap.Make([]ordmap.KeyVal[string, *gti.Field]{
+ {"AggStats", >i.Field{Name: "AggStats", Type: "[]string", LocalType: "[]string", Doc: "] stats to aggregate at higher levels", Directives: gti.Directives{}, Tag: "def:\"['DA','VSPatch']\""}},
+ {"SaveWts", >i.Field{Name: "SaveWts", Type: "bool", LocalType: "bool", Doc: "if true, save final weights after each run", Directives: gti.Directives{}, Tag: ""}},
+ {"Block", >i.Field{Name: "Block", Type: "bool", LocalType: "bool", Doc: "if true, save block log to file, as .blk.tsv typically", Directives: gti.Directives{}, Tag: "def:\"true\" nest:\"+\""}},
+ {"Cond", >i.Field{Name: "Cond", Type: "bool", LocalType: "bool", Doc: "if true, save condition log to file, as .cnd.tsv typically", Directives: gti.Directives{}, Tag: "def:\"true\" nest:\"+\""}},
+ {"Trial", >i.Field{Name: "Trial", Type: "bool", LocalType: "bool", Doc: "if true, save trial log to file, as .trl.tsv typically", Directives: gti.Directives{}, Tag: "def:\"false\" nest:\"+\""}},
+ {"NetData", >i.Field{Name: "NetData", Type: "bool", LocalType: "bool", Doc: "if true, save network activation etc data from testing trials, for later viewing in netview", Directives: gti.Directives{}, Tag: ""}},
+ }),
+ Embeds: ordmap.Make([]ordmap.KeyVal[string, *gti.Field]{}),
+ Methods: ordmap.Make([]ordmap.KeyVal[string, *gti.Method]{}),
+})
+
+var _ = gti.AddType(&gti.Type{
+ Name: "main.Config",
+ ShortName: "main.Config",
+ IDName: "config",
+ Doc: "Config is a standard Sim config -- use as a starting point.",
+ Directives: gti.Directives{},
+ Fields: ordmap.Make([]ordmap.KeyVal[string, *gti.Field]{
+ {"Includes", >i.Field{Name: "Includes", Type: "[]string", LocalType: "[]string", Doc: "specify include files here, and after configuration, it contains list of include files added", Directives: gti.Directives{}, Tag: ""}},
+ {"GUI", >i.Field{Name: "GUI", Type: "bool", LocalType: "bool", Doc: "open the GUI -- does not automatically run -- if false, then runs automatically and quits", Directives: gti.Directives{}, Tag: "def:\"true\""}},
+ {"Debug", >i.Field{Name: "Debug", Type: "bool", LocalType: "bool", Doc: "log debugging information", Directives: gti.Directives{}, Tag: ""}},
+ {"Env", >i.Field{Name: "Env", Type: "github.com/emer/axon/examples/pvlv.EnvConfig", LocalType: "EnvConfig", Doc: "environment configuration options", Directives: gti.Directives{}, Tag: "view:\"add-fields\""}},
+ {"Params", >i.Field{Name: "Params", Type: "github.com/emer/axon/examples/pvlv.ParamConfig", LocalType: "ParamConfig", Doc: "parameter related configuration options", Directives: gti.Directives{}, Tag: "view:\"add-fields\""}},
+ {"Run", >i.Field{Name: "Run", Type: "github.com/emer/axon/examples/pvlv.RunConfig", LocalType: "RunConfig", Doc: "sim running related configuration options", Directives: gti.Directives{}, Tag: "view:\"add-fields\""}},
+ {"Log", >i.Field{Name: "Log", Type: "github.com/emer/axon/examples/pvlv.LogConfig", LocalType: "LogConfig", Doc: "data logging related configuration options", Directives: gti.Directives{}, Tag: "view:\"add-fields\""}},
+ }),
+ Embeds: ordmap.Make([]ordmap.KeyVal[string, *gti.Field]{}),
+ Methods: ordmap.Make([]ordmap.KeyVal[string, *gti.Method]{}),
+})
+
+var _ = gti.AddType(&gti.Type{
+ Name: "main.DrEffPlot",
+ ShortName: "main.DrEffPlot",
+ IDName: "dr-eff-plot",
+ Doc: "DrEffPlot holds the params, table, etc",
+ Directives: gti.Directives{},
+ Fields: ordmap.Make([]ordmap.KeyVal[string, *gti.Field]{
+ {"Context", >i.Field{Name: "Context", Type: "github.com/emer/axon/axon.Context", LocalType: "axon.Context", Doc: "context just for plotting", Directives: gti.Directives{}, Tag: ""}},
+ {"PVLV", >i.Field{Name: "PVLV", Type: "github.com/emer/axon/axon.PVLV", LocalType: "axon.PVLV", Doc: "PVLV params", Directives: gti.Directives{}, Tag: ""}},
+ {"TimeSteps", >i.Field{Name: "TimeSteps", Type: "int", LocalType: "int", Doc: "total number of time steps to simulate", Directives: gti.Directives{}, Tag: ""}},
+ {"USTime", >i.Field{Name: "USTime", Type: "goki.dev/etable/v2/minmax.Int", LocalType: "minmax.Int", Doc: "range for number of time steps between US receipt", Directives: gti.Directives{}, Tag: ""}},
+ {"Effort", >i.Field{Name: "Effort", Type: "goki.dev/etable/v2/minmax.F32", LocalType: "minmax.F32", Doc: "range for random effort per step", Directives: gti.Directives{}, Tag: ""}},
+ {"Table", >i.Field{Name: "Table", Type: "*goki.dev/etable/v2/etable.Table", LocalType: "*etable.Table", Doc: "table for plot", Directives: gti.Directives{}, Tag: "view:\"no-inline\""}},
+ {"Plot", >i.Field{Name: "Plot", Type: "*goki.dev/etable/v2/eplot.Plot2D", LocalType: "*eplot.Plot2D", Doc: "the plot", Directives: gti.Directives{}, Tag: "view:\"-\""}},
+ {"TimeTable", >i.Field{Name: "TimeTable", Type: "*goki.dev/etable/v2/etable.Table", LocalType: "*etable.Table", Doc: "table for plot", Directives: gti.Directives{}, Tag: "view:\"no-inline\""}},
+ {"TimePlot", >i.Field{Name: "TimePlot", Type: "*goki.dev/etable/v2/eplot.Plot2D", LocalType: "*eplot.Plot2D", Doc: "the plot", Directives: gti.Directives{}, Tag: "view:\"-\""}},
+ {"Rand", >i.Field{Name: "Rand", Type: "github.com/emer/emergent/v2/erand.SysRand", LocalType: "erand.SysRand", Doc: "random number generator", Directives: gti.Directives{}, Tag: "view:\"-\""}},
+ }),
+ Embeds: ordmap.Make([]ordmap.KeyVal[string, *gti.Field]{}),
+ Methods: ordmap.Make([]ordmap.KeyVal[string, *gti.Method]{
+ {"EffortPlot", >i.Method{Name: "EffortPlot", Doc: "EffortPlot plots the equation as a function of effort / time", Directives: gti.Directives{
+ >i.Directive{Tool: "gti", Directive: "add", Args: []string{}},
+ }, Args: ordmap.Make([]ordmap.KeyVal[string, *gti.Field]{}), Returns: ordmap.Make([]ordmap.KeyVal[string, *gti.Field]{})}},
+ {"UrgencyPlot", >i.Method{Name: "UrgencyPlot", Doc: "UrgencyPlot plots the equation as a function of effort / time", Directives: gti.Directives{
+ >i.Directive{Tool: "gti", Directive: "add", Args: []string{}},
+ }, Args: ordmap.Make([]ordmap.KeyVal[string, *gti.Field]{}), Returns: ordmap.Make([]ordmap.KeyVal[string, *gti.Field]{})}},
+ {"TimeRun", >i.Method{Name: "TimeRun", Doc: "TimeRun runs the equation over time.", Directives: gti.Directives{
+ >i.Directive{Tool: "gti", Directive: "add", Args: []string{}},
+ }, Args: ordmap.Make([]ordmap.KeyVal[string, *gti.Field]{}), Returns: ordmap.Make([]ordmap.KeyVal[string, *gti.Field]{})}},
+ }),
+})
+
+var _ = gti.AddType(&gti.Type{
+ Name: "main.Sim",
+ ShortName: "main.Sim",
+ IDName: "sim",
+ Doc: "Sim encapsulates the entire simulation model, and we define all the\nfunctionality as methods on this struct. This structure keeps all relevant\nstate information organized and available without having to pass everything around\nas arguments to methods, and provides the core GUI interface (note the view tags\nfor the fields which provide hints to how things should be displayed).",
+ Directives: gti.Directives{},
+ Fields: ordmap.Make([]ordmap.KeyVal[string, *gti.Field]{
+ {"Config", >i.Field{Name: "Config", Type: "github.com/emer/axon/examples/pvlv.Config", LocalType: "Config", Doc: "simulation configuration parameters -- set by .toml config file and / or args", Directives: gti.Directives{}, Tag: ""}},
+ {"Net", >i.Field{Name: "Net", Type: "*github.com/emer/axon/axon.Network", LocalType: "*axon.Network", Doc: "the network -- click to view / edit parameters for layers, prjns, etc", Directives: gti.Directives{}, Tag: "view:\"no-inline\""}},
+ {"Params", >i.Field{Name: "Params", Type: "github.com/emer/emergent/v2/emer.NetParams", LocalType: "emer.NetParams", Doc: "all parameter management", Directives: gti.Directives{}, Tag: "view:\"inline\""}},
+ {"Loops", >i.Field{Name: "Loops", Type: "*github.com/emer/emergent/v2/looper.Manager", LocalType: "*looper.Manager", Doc: "contains looper control loops for running sim", Directives: gti.Directives{}, Tag: "view:\"no-inline\""}},
+ {"Stats", >i.Field{Name: "Stats", Type: "github.com/emer/emergent/v2/estats.Stats", LocalType: "estats.Stats", Doc: "contains computed statistic values", Directives: gti.Directives{}, Tag: ""}},
+ {"Logs", >i.Field{Name: "Logs", Type: "github.com/emer/emergent/v2/elog.Logs", LocalType: "elog.Logs", Doc: "Contains all the logs and information about the logs.'", Directives: gti.Directives{}, Tag: ""}},
+ {"Envs", >i.Field{Name: "Envs", Type: "github.com/emer/emergent/v2/env.Envs", LocalType: "env.Envs", Doc: "Environments", Directives: gti.Directives{}, Tag: "view:\"no-inline\""}},
+ {"Context", >i.Field{Name: "Context", Type: "github.com/emer/axon/axon.Context", LocalType: "axon.Context", Doc: "axon timing parameters and state", Directives: gti.Directives{}, Tag: ""}},
+ {"ViewUpdt", >i.Field{Name: "ViewUpdt", Type: "github.com/emer/emergent/v2/netview.ViewUpdt", LocalType: "netview.ViewUpdt", Doc: "netview update parameters", Directives: gti.Directives{}, Tag: "view:\"inline\""}},
+ {"GUI", >i.Field{Name: "GUI", Type: "github.com/emer/emergent/v2/egui.GUI", LocalType: "egui.GUI", Doc: "manages all the gui elements", Directives: gti.Directives{}, Tag: "view:\"-\""}},
+ {"RndSeeds", >i.Field{Name: "RndSeeds", Type: "github.com/emer/emergent/v2/erand.Seeds", LocalType: "erand.Seeds", Doc: "a list of random seeds to use for each run", Directives: gti.Directives{}, Tag: "view:\"-\""}},
+ }),
+ Embeds: ordmap.Make([]ordmap.KeyVal[string, *gti.Field]{}),
+ Methods: ordmap.Make([]ordmap.KeyVal[string, *gti.Method]{}),
+})
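In effort_plot.go above, bools.ToFloat32(...) becomes num.FromBool[float32](...), trading a float32-only helper for a generic one. A minimal sketch of what a FromBool-style generic looks like; the Number constraint here is illustrative, since goki.dev/glop/num defines its own:

package main

import "fmt"

// Number is an illustrative constraint; the real one lives in goki.dev/glop/num.
type Number interface {
	~int | ~int32 | ~int64 | ~float32 | ~float64
}

// FromBool converts a bool to 1 or 0 in any numeric type.
func FromBool[T Number](b bool) T {
	if b {
		return 1
	}
	return 0
}

func main() {
	usv := float32(0.5)
	fmt.Println(FromBool[float32](usv > 0)) // 1
	fmt.Println(FromBool[int](usv < 0))     // 0
}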
diff --git a/examples/pvlv/params.go b/examples/pvlv/params.go
index b7ea3bf79..bcaf7740b 100644
--- a/examples/pvlv/params.go
+++ b/examples/pvlv/params.go
@@ -5,8 +5,8 @@
package main
import (
- "github.com/emer/emergent/netparams"
- "github.com/emer/emergent/params"
+ "github.com/emer/emergent/v2/netparams"
+ "github.com/emer/emergent/v2/params"
)
// ParamSets is the default set of parameters -- Base is always applied,
diff --git a/examples/pvlv/params_good/config.toml b/examples/pvlv/params_good/config.toml
index 374426efd..986b38b14 100644
--- a/examples/pvlv/params_good/config.toml
+++ b/examples/pvlv/params_good/config.toml
@@ -3,8 +3,8 @@ Debug = false
[Env]
RunName = "PosAcq_A100"
- SetNBlocks = true
- NBlocks = 50
+ SetNBlocks = false
+ NBlocks = 0
[Params]
Sheet = ""
@@ -18,7 +18,7 @@ Debug = false
[Run]
GPU = false
- NThreads = 2
+ NThreads = 0
Run = 0
NRuns = 1
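The config.toml change above returns SetNBlocks and NBlocks to their zero values; per the EnvConfig docs in gtigen.go, NBlocks only overrides the per-condition default when SetNBlocks is true (and the viewif:"SetNBlocks" tag hides the field in the GUI otherwise). A hypothetical sketch of that gating, not code from this repo:

package main

import "fmt"

// effectiveNBlocks applies the SetNBlocks convention described in the
// EnvConfig docs: NBlocks counts only when SetNBlocks is true.
func effectiveNBlocks(setNBlocks bool, nBlocks, condDefault int) int {
	if setNBlocks {
		return nBlocks
	}
	return condDefault
}

func main() {
	fmt.Println(effectiveNBlocks(false, 0, 75)) // 75: condition default
	fmt.Println(effectiveNBlocks(true, 50, 75)) // 50: explicit override
}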
diff --git a/examples/pvlv/params_good/params.toml b/examples/pvlv/params_good/params.toml
index 4223315e7..36d0f3a99 100644
--- a/examples/pvlv/params_good/params.toml
+++ b/examples/pvlv/params_good/params.toml
@@ -26,7 +26,7 @@
"Layer.Learn.TrgAvgAct.GiBaseInit" = "0.5"
"Layer.VSPatch.Gain" = "5"
"Layer.VSPatch.ThrInit" = "0.2"
- "Layer.VSPatch.ThrLRate" = "0.002"
+ "Layer.VSPatch.ThrLRate" = "0.0001"
"Layer.VSPatch.ThrNonRew" = "10"
[[Base]]
@@ -71,6 +71,7 @@
Sel = ".MatrixPrjn"
Desc = ""
[Base.Params]
+ "Prjn.Learn.Trace.LearnThr" = "0.0"
"Prjn.Matrix.NoGateLRate" = "0.0"
[[Base]]
@@ -96,7 +97,7 @@
Sel = ".GPiToBGThal"
Desc = "inhibition from GPi to MD"
[Base.Params]
- "Prjn.PrjnScale.Abs" = "2"
+ "Prjn.PrjnScale.Abs" = "3"
[[Base]]
Sel = ".PTpToBLAExt"
diff --git a/examples/pvlv/params_good/params_all.txt b/examples/pvlv/params_good/params_all.txt
index b53926074..ea1f4c339 100644
--- a/examples/pvlv/params_good/params_all.txt
+++ b/examples/pvlv/params_good/params_all.txt
@@ -11,13 +11,13 @@ Act: {
Clamp: { IsInput: false IsTarget: false Ge: 0.8 Add: false ErrThr: 0.5 }
Noise: { On: false GeHz: 100 Ge: 0.001 GiHz: 200 Gi: 0.001 }
VmRange: { Min: 0.1 Max: 1 }
- Mahp: { Gbar: 0.02 Voff: -30 Vslope: 9 TauMax: 1000 Tadj: 3.2093637 DtMax: 0.001 }
+ Mahp: { Gbar: 0.02 Voff: -30 Vslope: 9 TauMax: 1000 Tadj: 3.209364 DtMax: 0.001 }
Sahp: { Gbar: 0.05 CaTau: 5 Off: 0.8 Slope: 0.02 TauMax: 1 CaDt: 0.2 DtMax: 1 }
KNa: { On: true TrialSlow: false Med: { On: true Rise: 0.02 Max: 0.2 Tau: 200 Dt: 0.005 } Slow: { On: true Rise: 0.001 Max: 0.2 Tau: 1000 Dt: 0.001
} }
NMDA: { Gbar: 0.006 Tau: 100 ITau: 1 MgC: 1.4 Voff: 0 }
MaintNMDA: { Gbar: 0.007 Tau: 200 ITau: 1 MgC: 1.4 Voff: 0 }
- GabaB: { Gbar: 0.015 RiseTau: 45 DecayTau: 50 Gbase: 0.2 GiSpike: 10 MaxTime: 47.41225 TauFact: 2.5811758 RiseDt: 0.022222223 DecayDt: 0.02 }
+ GabaB: { Gbar: 0.015 RiseTau: 45 DecayTau: 50 Gbase: 0.2 GiSpike: 10 MaxTime: 47.41225 TauFact: 2.5811756 RiseDt: 0.022222223 DecayDt: 0.02 }
VGCC: { Gbar: 0.02 Ca: 25 }
AK: { Gbar: 0.1 Hf: 0.076 Mf: 0.075 Voff: 2 Vmax: -37 }
SKCa: { Gbar: 0 C50: 0.5 ActTau: 15 DeTau: 30 KCaR: 0.8 CaRDecayTau: 150 CaInThr: 0.01 CaInTau: 50 }
@@ -37,7 +37,7 @@ Learn: {
TrgAvgAct: { On: true GiBaseInit: 0 ErrLRate: 0.02 SynScaleRate: 0.005 SubMean: 1 Permute: true Pool: true TrgRange: { Min: 0.5 Max: 2
} }
RLRate: { On: true SigmoidMin: 0.05 Diff: true SpkThr: 0.1 DiffThr: 0.02 Min: 0.001 }
- NeuroMod: { DAMod: 0 Valence: 0 DAModGain: 0.5 DALRateSign: false DALRateMod: 0 AChLRateMod: 0 AChDisInhib: 0 BurstGain: 1 DipGain: 1 }
+ NeuroMod: { DAMod: NoDAMod Valence: Positive DAModGain: 0.5 DALRateSign: false DALRateMod: 0 AChLRateMod: 0 AChDisInhib: 0 BurstGain: 1 DipGain: 1 }
}
VTA: {
CeMGain: 0.75 LHbGain: 1.25
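A recurring theme in the params_all.txt diff below: enum-valued fields such as DAMod and Valence now print by name (NoDAMod, D1Mod, D2Mod; Positive, Negative) rather than by number, presumably because the migrated enum types now satisfy fmt.Stringer. A minimal hand-written sketch of the mechanism; the actual generated enumgen code is more elaborate:

package main

import "fmt"

type DAModTypes int32

const (
	NoDAMod DAModTypes = iota
	D1Mod
	D2Mod
)

var daModNames = [...]string{"NoDAMod", "D1Mod", "D2Mod"}

// String makes %v print the name, so param dumps read "DAMod: NoDAMod"
// instead of "DAMod: 0".
func (d DAModTypes) String() string {
	if d >= 0 && int(d) < len(daModNames) {
		return daModNames[d]
	}
	return fmt.Sprintf("DAModTypes(%d)", int32(d))
}

func main() {
	fmt.Printf("DAMod: %v\n", NoDAMod) // DAMod: NoDAMod
}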
@@ -55,13 +55,13 @@ Act: {
Clamp: { IsInput: false IsTarget: false Ge: 0.8 Add: false ErrThr: 0.5 }
Noise: { On: false GeHz: 100 Ge: 0.001 GiHz: 200 Gi: 0.001 }
VmRange: { Min: 0.1 Max: 1 }
- Mahp: { Gbar: 0.02 Voff: -30 Vslope: 9 TauMax: 1000 Tadj: 3.2093637 DtMax: 0.001 }
+ Mahp: { Gbar: 0.02 Voff: -30 Vslope: 9 TauMax: 1000 Tadj: 3.209364 DtMax: 0.001 }
Sahp: { Gbar: 0.05 CaTau: 5 Off: 0.8 Slope: 0.02 TauMax: 1 CaDt: 0.2 DtMax: 1 }
KNa: { On: true TrialSlow: false Med: { On: true Rise: 0.02 Max: 0.2 Tau: 200 Dt: 0.005 } Slow: { On: true Rise: 0.001 Max: 0.2 Tau: 1000 Dt: 0.001
} }
NMDA: { Gbar: 0.006 Tau: 100 ITau: 1 MgC: 1.4 Voff: 0 }
MaintNMDA: { Gbar: 0.007 Tau: 200 ITau: 1 MgC: 1.4 Voff: 0 }
- GabaB: { Gbar: 0.015 RiseTau: 45 DecayTau: 50 Gbase: 0.2 GiSpike: 10 MaxTime: 47.41225 TauFact: 2.5811758 RiseDt: 0.022222223 DecayDt: 0.02 }
+ GabaB: { Gbar: 0.015 RiseTau: 45 DecayTau: 50 Gbase: 0.2 GiSpike: 10 MaxTime: 47.41225 TauFact: 2.5811756 RiseDt: 0.022222223 DecayDt: 0.02 }
VGCC: { Gbar: 0.02 Ca: 25 }
AK: { Gbar: 0.1 Hf: 0.076 Mf: 0.075 Voff: 2 Vmax: -37 }
SKCa: { Gbar: 0 C50: 0.5 ActTau: 15 DeTau: 30 KCaR: 0.8 CaRDecayTau: 150 CaInThr: 0.01 CaInTau: 50 }
@@ -81,7 +81,7 @@ Learn: {
TrgAvgAct: { On: true GiBaseInit: 0 ErrLRate: 0.02 SynScaleRate: 0.005 SubMean: 1 Permute: true Pool: true TrgRange: { Min: 0.5 Max: 2
} }
RLRate: { On: true SigmoidMin: 0.05 Diff: true SpkThr: 0.1 DiffThr: 0.02 Min: 0.001 }
- NeuroMod: { DAMod: 0 Valence: 0 DAModGain: 0.5 DALRateSign: false DALRateMod: 0 AChLRateMod: 0 AChDisInhib: 0 BurstGain: 1 DipGain: 1 }
+ NeuroMod: { DAMod: NoDAMod Valence: Positive DAModGain: 0.5 DALRateSign: false DALRateMod: 0 AChLRateMod: 0 AChDisInhib: 0 BurstGain: 1 DipGain: 1 }
}
/////////////////////////////////////////////////
Layer: LDT
@@ -96,13 +96,13 @@ Act: {
Clamp: { IsInput: false IsTarget: false Ge: 0.8 Add: false ErrThr: 0.5 }
Noise: { On: false GeHz: 100 Ge: 0.001 GiHz: 200 Gi: 0.001 }
VmRange: { Min: 0.1 Max: 1 }
- Mahp: { Gbar: 0.02 Voff: -30 Vslope: 9 TauMax: 1000 Tadj: 3.2093637 DtMax: 0.001 }
+ Mahp: { Gbar: 0.02 Voff: -30 Vslope: 9 TauMax: 1000 Tadj: 3.209364 DtMax: 0.001 }
Sahp: { Gbar: 0.05 CaTau: 5 Off: 0.8 Slope: 0.02 TauMax: 1 CaDt: 0.2 DtMax: 1 }
KNa: { On: true TrialSlow: false Med: { On: true Rise: 0.02 Max: 0.2 Tau: 200 Dt: 0.005 } Slow: { On: true Rise: 0.001 Max: 0.2 Tau: 1000 Dt: 0.001
} }
NMDA: { Gbar: 0.006 Tau: 100 ITau: 1 MgC: 1.4 Voff: 0 }
MaintNMDA: { Gbar: 0.007 Tau: 200 ITau: 1 MgC: 1.4 Voff: 0 }
- GabaB: { Gbar: 0.015 RiseTau: 45 DecayTau: 50 Gbase: 0.2 GiSpike: 10 MaxTime: 47.41225 TauFact: 2.5811758 RiseDt: 0.022222223 DecayDt: 0.02 }
+ GabaB: { Gbar: 0.015 RiseTau: 45 DecayTau: 50 Gbase: 0.2 GiSpike: 10 MaxTime: 47.41225 TauFact: 2.5811756 RiseDt: 0.022222223 DecayDt: 0.02 }
VGCC: { Gbar: 0.02 Ca: 25 }
AK: { Gbar: 0.1 Hf: 0.076 Mf: 0.075 Voff: 2 Vmax: -37 }
SKCa: { Gbar: 0 C50: 0.5 ActTau: 15 DeTau: 30 KCaR: 0.8 CaRDecayTau: 150 CaInThr: 0.01 CaInTau: 50 }
@@ -122,7 +122,7 @@ Learn: {
TrgAvgAct: { On: false GiBaseInit: 0 ErrLRate: 0.02 SynScaleRate: 0.005 SubMean: 1 Permute: true Pool: true TrgRange: { Min: 0.5 Max: 2
} }
RLRate: { On: true SigmoidMin: 0.05 Diff: true SpkThr: 0.1 DiffThr: 0.02 Min: 0.001 }
- NeuroMod: { DAMod: 0 Valence: 0 DAModGain: 0.5 DALRateSign: false DALRateMod: 0 AChLRateMod: 0 AChDisInhib: 0 BurstGain: 1 DipGain: 1 }
+ NeuroMod: { DAMod: NoDAMod Valence: Positive DAModGain: 0.5 DALRateSign: false DALRateMod: 0 AChLRateMod: 0 AChDisInhib: 0 BurstGain: 1 DipGain: 1 }
}
LDT: {
SrcThr: 0.05 Rew: true MaintInhib: 2 NotMaintMax: 0.4 SrcLay1Idx: 24 SrcLay2Idx: -1 SrcLay3Idx: -1 SrcLay4Idx: -1
@@ -140,13 +140,13 @@ Act: {
Clamp: { IsInput: false IsTarget: false Ge: 0.8 Add: false ErrThr: 0.5 }
Noise: { On: false GeHz: 100 Ge: 0.001 GiHz: 200 Gi: 0.001 }
VmRange: { Min: 0.1 Max: 1 }
- Mahp: { Gbar: 0.02 Voff: -30 Vslope: 9 TauMax: 1000 Tadj: 3.2093637 DtMax: 0.001 }
+ Mahp: { Gbar: 0.02 Voff: -30 Vslope: 9 TauMax: 1000 Tadj: 3.209364 DtMax: 0.001 }
Sahp: { Gbar: 0.05 CaTau: 5 Off: 0.8 Slope: 0.02 TauMax: 1 CaDt: 0.2 DtMax: 1 }
KNa: { On: true TrialSlow: false Med: { On: true Rise: 0.02 Max: 0.2 Tau: 200 Dt: 0.005 } Slow: { On: true Rise: 0.001 Max: 0.2 Tau: 1000 Dt: 0.001
} }
NMDA: { Gbar: 0.006 Tau: 100 ITau: 1 MgC: 1.4 Voff: 0 }
MaintNMDA: { Gbar: 0.007 Tau: 200 ITau: 1 MgC: 1.4 Voff: 0 }
- GabaB: { Gbar: 0.015 RiseTau: 45 DecayTau: 50 Gbase: 0.2 GiSpike: 10 MaxTime: 47.41225 TauFact: 2.5811758 RiseDt: 0.022222223 DecayDt: 0.02 }
+ GabaB: { Gbar: 0.015 RiseTau: 45 DecayTau: 50 Gbase: 0.2 GiSpike: 10 MaxTime: 47.41225 TauFact: 2.5811756 RiseDt: 0.022222223 DecayDt: 0.02 }
VGCC: { Gbar: 0.02 Ca: 25 }
AK: { Gbar: 0.1 Hf: 0.076 Mf: 0.075 Voff: 2 Vmax: -37 }
SKCa: { Gbar: 0 C50: 0.5 ActTau: 15 DeTau: 30 KCaR: 0.8 CaRDecayTau: 150 CaInThr: 0.01 CaInTau: 50 }
@@ -166,7 +166,7 @@ Learn: {
TrgAvgAct: { On: false GiBaseInit: 0 ErrLRate: 0.02 SynScaleRate: 0.005 SubMean: 1 Permute: true Pool: true TrgRange: { Min: 0.5 Max: 2
} }
RLRate: { On: true SigmoidMin: 0.05 Diff: true SpkThr: 0.1 DiffThr: 0.02 Min: 0.001 }
- NeuroMod: { DAMod: 1 Valence: 0 DAModGain: 0.5 DALRateSign: false DALRateMod: 0 AChLRateMod: 0 AChDisInhib: 0 BurstGain: 1 DipGain: 1 }
+ NeuroMod: { DAMod: D1Mod Valence: Positive DAModGain: 0.5 DALRateSign: false DALRateMod: 0 AChLRateMod: 0 AChDisInhib: 0 BurstGain: 1 DipGain: 1 }
}
/////////////////////////////////////////////////
Layer: USneg
@@ -181,13 +181,13 @@ Act: {
Clamp: { IsInput: false IsTarget: false Ge: 0.8 Add: false ErrThr: 0.5 }
Noise: { On: false GeHz: 100 Ge: 0.001 GiHz: 200 Gi: 0.001 }
VmRange: { Min: 0.1 Max: 1 }
- Mahp: { Gbar: 0.02 Voff: -30 Vslope: 9 TauMax: 1000 Tadj: 3.2093637 DtMax: 0.001 }
+ Mahp: { Gbar: 0.02 Voff: -30 Vslope: 9 TauMax: 1000 Tadj: 3.209364 DtMax: 0.001 }
Sahp: { Gbar: 0.05 CaTau: 5 Off: 0.8 Slope: 0.02 TauMax: 1 CaDt: 0.2 DtMax: 1 }
KNa: { On: true TrialSlow: false Med: { On: true Rise: 0.02 Max: 0.2 Tau: 200 Dt: 0.005 } Slow: { On: true Rise: 0.001 Max: 0.2 Tau: 1000 Dt: 0.001
} }
NMDA: { Gbar: 0.006 Tau: 100 ITau: 1 MgC: 1.4 Voff: 0 }
MaintNMDA: { Gbar: 0.007 Tau: 200 ITau: 1 MgC: 1.4 Voff: 0 }
- GabaB: { Gbar: 0.015 RiseTau: 45 DecayTau: 50 Gbase: 0.2 GiSpike: 10 MaxTime: 47.41225 TauFact: 2.5811758 RiseDt: 0.022222223 DecayDt: 0.02 }
+ GabaB: { Gbar: 0.015 RiseTau: 45 DecayTau: 50 Gbase: 0.2 GiSpike: 10 MaxTime: 47.41225 TauFact: 2.5811756 RiseDt: 0.022222223 DecayDt: 0.02 }
VGCC: { Gbar: 0.02 Ca: 25 }
AK: { Gbar: 0.1 Hf: 0.076 Mf: 0.075 Voff: 2 Vmax: -37 }
SKCa: { Gbar: 0 C50: 0.5 ActTau: 15 DeTau: 30 KCaR: 0.8 CaRDecayTau: 150 CaInThr: 0.01 CaInTau: 50 }
@@ -207,7 +207,7 @@ Learn: {
TrgAvgAct: { On: false GiBaseInit: 0 ErrLRate: 0.02 SynScaleRate: 0.005 SubMean: 1 Permute: true Pool: true TrgRange: { Min: 0.5 Max: 2
} }
RLRate: { On: true SigmoidMin: 0.05 Diff: true SpkThr: 0.1 DiffThr: 0.02 Min: 0.001 }
- NeuroMod: { DAMod: 2 Valence: 1 DAModGain: 0.5 DALRateSign: false DALRateMod: 0 AChLRateMod: 0 AChDisInhib: 0 BurstGain: 1 DipGain: 1 }
+ NeuroMod: { DAMod: D2Mod Valence: Negative DAModGain: 0.5 DALRateSign: false DALRateMod: 0 AChLRateMod: 0 AChDisInhib: 0 BurstGain: 1 DipGain: 1 }
}
/////////////////////////////////////////////////
Layer: USposP
@@ -222,13 +222,13 @@ Act: {
Clamp: { IsInput: false IsTarget: true Ge: 0.8 Add: false ErrThr: 0.5 }
Noise: { On: false GeHz: 100 Ge: 0.001 GiHz: 200 Gi: 0.001 }
VmRange: { Min: 0.1 Max: 1 }
- Mahp: { Gbar: 0.02 Voff: -30 Vslope: 9 TauMax: 1000 Tadj: 3.2093637 DtMax: 0.001 }
+ Mahp: { Gbar: 0.02 Voff: -30 Vslope: 9 TauMax: 1000 Tadj: 3.209364 DtMax: 0.001 }
Sahp: { Gbar: 0.05 CaTau: 5 Off: 0.8 Slope: 0.02 TauMax: 1 CaDt: 0.2 DtMax: 1 }
KNa: { On: true TrialSlow: false Med: { On: true Rise: 0.02 Max: 0.2 Tau: 200 Dt: 0.005 } Slow: { On: true Rise: 0.001 Max: 0.2 Tau: 1000 Dt: 0.001
} }
NMDA: { Gbar: 0.006 Tau: 100 ITau: 1 MgC: 1.4 Voff: 0 }
MaintNMDA: { Gbar: 0.007 Tau: 200 ITau: 1 MgC: 1.4 Voff: 0 }
- GabaB: { Gbar: 0.015 RiseTau: 45 DecayTau: 50 Gbase: 0.2 GiSpike: 10 MaxTime: 47.41225 TauFact: 2.5811758 RiseDt: 0.022222223 DecayDt: 0.02 }
+ GabaB: { Gbar: 0.015 RiseTau: 45 DecayTau: 50 Gbase: 0.2 GiSpike: 10 MaxTime: 47.41225 TauFact: 2.5811756 RiseDt: 0.022222223 DecayDt: 0.02 }
VGCC: { Gbar: 0.02 Ca: 25 }
AK: { Gbar: 0.1 Hf: 0.076 Mf: 0.075 Voff: 2 Vmax: -37 }
SKCa: { Gbar: 0 C50: 0.5 ActTau: 15 DeTau: 30 KCaR: 0.8 CaRDecayTau: 150 CaInThr: 0.01 CaInTau: 50 }
@@ -248,7 +248,7 @@ Learn: {
TrgAvgAct: { On: true GiBaseInit: 0 ErrLRate: 0.02 SynScaleRate: 0.005 SubMean: 1 Permute: true Pool: true TrgRange: { Min: 0.5 Max: 2
} }
RLRate: { On: true SigmoidMin: 1 Diff: true SpkThr: 0.1 DiffThr: 0.02 Min: 0.001 }
- NeuroMod: { DAMod: 0 Valence: 0 DAModGain: 0.5 DALRateSign: false DALRateMod: 0 AChLRateMod: 0 AChDisInhib: 0 BurstGain: 1 DipGain: 1 }
+ NeuroMod: { DAMod: NoDAMod Valence: Positive DAModGain: 0.5 DALRateSign: false DALRateMod: 0 AChLRateMod: 0 AChDisInhib: 0 BurstGain: 1 DipGain: 1 }
}
Pulv: {
DriveScale: 0.1 FullDriveAct: 0.6 DriveLayIdx: 3
@@ -306,13 +306,13 @@ Act: {
Clamp: { IsInput: false IsTarget: true Ge: 0.8 Add: false ErrThr: 0.5 }
Noise: { On: false GeHz: 100 Ge: 0.001 GiHz: 200 Gi: 0.001 }
VmRange: { Min: 0.1 Max: 1 }
- Mahp: { Gbar: 0.02 Voff: -30 Vslope: 9 TauMax: 1000 Tadj: 3.2093637 DtMax: 0.001 }
+ Mahp: { Gbar: 0.02 Voff: -30 Vslope: 9 TauMax: 1000 Tadj: 3.209364 DtMax: 0.001 }
Sahp: { Gbar: 0.05 CaTau: 5 Off: 0.8 Slope: 0.02 TauMax: 1 CaDt: 0.2 DtMax: 1 }
KNa: { On: true TrialSlow: false Med: { On: true Rise: 0.02 Max: 0.2 Tau: 200 Dt: 0.005 } Slow: { On: true Rise: 0.001 Max: 0.2 Tau: 1000 Dt: 0.001
} }
NMDA: { Gbar: 0.006 Tau: 100 ITau: 1 MgC: 1.4 Voff: 0 }
MaintNMDA: { Gbar: 0.007 Tau: 200 ITau: 1 MgC: 1.4 Voff: 0 }
- GabaB: { Gbar: 0.015 RiseTau: 45 DecayTau: 50 Gbase: 0.2 GiSpike: 10 MaxTime: 47.41225 TauFact: 2.5811758 RiseDt: 0.022222223 DecayDt: 0.02 }
+ GabaB: { Gbar: 0.015 RiseTau: 45 DecayTau: 50 Gbase: 0.2 GiSpike: 10 MaxTime: 47.41225 TauFact: 2.5811756 RiseDt: 0.022222223 DecayDt: 0.02 }
VGCC: { Gbar: 0.02 Ca: 25 }
AK: { Gbar: 0.1 Hf: 0.076 Mf: 0.075 Voff: 2 Vmax: -37 }
SKCa: { Gbar: 0 C50: 0.5 ActTau: 15 DeTau: 30 KCaR: 0.8 CaRDecayTau: 150 CaInThr: 0.01 CaInTau: 50 }
@@ -332,7 +332,7 @@ Learn: {
TrgAvgAct: { On: true GiBaseInit: 0 ErrLRate: 0.02 SynScaleRate: 0.005 SubMean: 1 Permute: true Pool: true TrgRange: { Min: 0.5 Max: 2
} }
RLRate: { On: true SigmoidMin: 1 Diff: true SpkThr: 0.1 DiffThr: 0.02 Min: 0.001 }
- NeuroMod: { DAMod: 0 Valence: 0 DAModGain: 0.5 DALRateSign: false DALRateMod: 0 AChLRateMod: 0 AChDisInhib: 0 BurstGain: 1 DipGain: 1 }
+ NeuroMod: { DAMod: NoDAMod Valence: Positive DAModGain: 0.5 DALRateSign: false DALRateMod: 0 AChLRateMod: 0 AChDisInhib: 0 BurstGain: 1 DipGain: 1 }
}
Pulv: {
DriveScale: 0.1 FullDriveAct: 0.6 DriveLayIdx: 4
@@ -390,13 +390,13 @@ Act: {
Clamp: { IsInput: false IsTarget: false Ge: 0.8 Add: false ErrThr: 0.5 }
Noise: { On: false GeHz: 100 Ge: 0.001 GiHz: 200 Gi: 0.001 }
VmRange: { Min: 0.1 Max: 1 }
- Mahp: { Gbar: 0.02 Voff: -30 Vslope: 9 TauMax: 1000 Tadj: 3.2093637 DtMax: 0.001 }
+ Mahp: { Gbar: 0.02 Voff: -30 Vslope: 9 TauMax: 1000 Tadj: 3.209364 DtMax: 0.001 }
Sahp: { Gbar: 0.05 CaTau: 5 Off: 0.8 Slope: 0.02 TauMax: 1 CaDt: 0.2 DtMax: 1 }
KNa: { On: true TrialSlow: false Med: { On: true Rise: 0.02 Max: 0.2 Tau: 200 Dt: 0.005 } Slow: { On: true Rise: 0.001 Max: 0.2 Tau: 1000 Dt: 0.001
} }
NMDA: { Gbar: 0.006 Tau: 100 ITau: 1 MgC: 1.4 Voff: 0 }
MaintNMDA: { Gbar: 0.007 Tau: 200 ITau: 1 MgC: 1.4 Voff: 0 }
- GabaB: { Gbar: 0.015 RiseTau: 45 DecayTau: 50 Gbase: 0.2 GiSpike: 10 MaxTime: 47.41225 TauFact: 2.5811758 RiseDt: 0.022222223 DecayDt: 0.02 }
+ GabaB: { Gbar: 0.015 RiseTau: 45 DecayTau: 50 Gbase: 0.2 GiSpike: 10 MaxTime: 47.41225 TauFact: 2.5811756 RiseDt: 0.022222223 DecayDt: 0.02 }
VGCC: { Gbar: 0.02 Ca: 25 }
AK: { Gbar: 0.1 Hf: 0.076 Mf: 0.075 Voff: 2 Vmax: -37 }
SKCa: { Gbar: 0 C50: 0.5 ActTau: 15 DeTau: 30 KCaR: 0.8 CaRDecayTau: 150 CaInThr: 0.01 CaInTau: 50 }
@@ -416,7 +416,7 @@ Learn: {
TrgAvgAct: { On: false GiBaseInit: 0 ErrLRate: 0.02 SynScaleRate: 0.005 SubMean: 1 Permute: true Pool: true TrgRange: { Min: 0.5 Max: 2
} }
RLRate: { On: true SigmoidMin: 0.05 Diff: true SpkThr: 0.1 DiffThr: 0.02 Min: 0.001 }
- NeuroMod: { DAMod: 1 Valence: 0 DAModGain: 0.5 DALRateSign: false DALRateMod: 0 AChLRateMod: 0 AChDisInhib: 0 BurstGain: 1 DipGain: 1 }
+ NeuroMod: { DAMod: D1Mod Valence: Positive DAModGain: 0.5 DALRateSign: false DALRateMod: 0 AChLRateMod: 0 AChDisInhib: 0 BurstGain: 1 DipGain: 1 }
}
/////////////////////////////////////////////////
Layer: PVneg
@@ -431,13 +431,13 @@ Act: {
Clamp: { IsInput: false IsTarget: false Ge: 0.8 Add: false ErrThr: 0.5 }
Noise: { On: false GeHz: 100 Ge: 0.001 GiHz: 200 Gi: 0.001 }
VmRange: { Min: 0.1 Max: 1 }
- Mahp: { Gbar: 0.02 Voff: -30 Vslope: 9 TauMax: 1000 Tadj: 3.2093637 DtMax: 0.001 }
+ Mahp: { Gbar: 0.02 Voff: -30 Vslope: 9 TauMax: 1000 Tadj: 3.209364 DtMax: 0.001 }
Sahp: { Gbar: 0.05 CaTau: 5 Off: 0.8 Slope: 0.02 TauMax: 1 CaDt: 0.2 DtMax: 1 }
KNa: { On: true TrialSlow: false Med: { On: true Rise: 0.02 Max: 0.2 Tau: 200 Dt: 0.005 } Slow: { On: true Rise: 0.001 Max: 0.2 Tau: 1000 Dt: 0.001
} }
NMDA: { Gbar: 0.006 Tau: 100 ITau: 1 MgC: 1.4 Voff: 0 }
MaintNMDA: { Gbar: 0.007 Tau: 200 ITau: 1 MgC: 1.4 Voff: 0 }
- GabaB: { Gbar: 0.015 RiseTau: 45 DecayTau: 50 Gbase: 0.2 GiSpike: 10 MaxTime: 47.41225 TauFact: 2.5811758 RiseDt: 0.022222223 DecayDt: 0.02 }
+ GabaB: { Gbar: 0.015 RiseTau: 45 DecayTau: 50 Gbase: 0.2 GiSpike: 10 MaxTime: 47.41225 TauFact: 2.5811756 RiseDt: 0.022222223 DecayDt: 0.02 }
VGCC: { Gbar: 0.02 Ca: 25 }
AK: { Gbar: 0.1 Hf: 0.076 Mf: 0.075 Voff: 2 Vmax: -37 }
SKCa: { Gbar: 0 C50: 0.5 ActTau: 15 DeTau: 30 KCaR: 0.8 CaRDecayTau: 150 CaInThr: 0.01 CaInTau: 50 }
@@ -457,7 +457,7 @@ Learn: {
TrgAvgAct: { On: false GiBaseInit: 0 ErrLRate: 0.02 SynScaleRate: 0.005 SubMean: 1 Permute: true Pool: true TrgRange: { Min: 0.5 Max: 2
} }
RLRate: { On: true SigmoidMin: 0.05 Diff: true SpkThr: 0.1 DiffThr: 0.02 Min: 0.001 }
- NeuroMod: { DAMod: 2 Valence: 1 DAModGain: 0.5 DALRateSign: false DALRateMod: 0 AChLRateMod: 0 AChDisInhib: 0 BurstGain: 1 DipGain: 1 }
+ NeuroMod: { DAMod: D2Mod Valence: Negative DAModGain: 0.5 DALRateSign: false DALRateMod: 0 AChLRateMod: 0 AChDisInhib: 0 BurstGain: 1 DipGain: 1 }
}
/////////////////////////////////////////////////
Layer: PVposP
@@ -472,13 +472,13 @@ Act: {
Clamp: { IsInput: false IsTarget: true Ge: 0.8 Add: false ErrThr: 0.5 }
Noise: { On: false GeHz: 100 Ge: 0.001 GiHz: 200 Gi: 0.001 }
VmRange: { Min: 0.1 Max: 1 }
- Mahp: { Gbar: 0.02 Voff: -30 Vslope: 9 TauMax: 1000 Tadj: 3.2093637 DtMax: 0.001 }
+ Mahp: { Gbar: 0.02 Voff: -30 Vslope: 9 TauMax: 1000 Tadj: 3.209364 DtMax: 0.001 }
Sahp: { Gbar: 0.05 CaTau: 5 Off: 0.8 Slope: 0.02 TauMax: 1 CaDt: 0.2 DtMax: 1 }
KNa: { On: true TrialSlow: false Med: { On: true Rise: 0.02 Max: 0.2 Tau: 200 Dt: 0.005 } Slow: { On: true Rise: 0.001 Max: 0.2 Tau: 1000 Dt: 0.001
} }
NMDA: { Gbar: 0.006 Tau: 100 ITau: 1 MgC: 1.4 Voff: 0 }
MaintNMDA: { Gbar: 0.007 Tau: 200 ITau: 1 MgC: 1.4 Voff: 0 }
- GabaB: { Gbar: 0.015 RiseTau: 45 DecayTau: 50 Gbase: 0.2 GiSpike: 10 MaxTime: 47.41225 TauFact: 2.5811758 RiseDt: 0.022222223 DecayDt: 0.02 }
+ GabaB: { Gbar: 0.015 RiseTau: 45 DecayTau: 50 Gbase: 0.2 GiSpike: 10 MaxTime: 47.41225 TauFact: 2.5811756 RiseDt: 0.022222223 DecayDt: 0.02 }
VGCC: { Gbar: 0.02 Ca: 25 }
AK: { Gbar: 0.1 Hf: 0.076 Mf: 0.075 Voff: 2 Vmax: -37 }
SKCa: { Gbar: 0 C50: 0.5 ActTau: 15 DeTau: 30 KCaR: 0.8 CaRDecayTau: 150 CaInThr: 0.01 CaInTau: 50 }
@@ -498,7 +498,7 @@ Learn: {
TrgAvgAct: { On: true GiBaseInit: 0 ErrLRate: 0.02 SynScaleRate: 0.005 SubMean: 1 Permute: true Pool: true TrgRange: { Min: 0.5 Max: 2
} }
RLRate: { On: true SigmoidMin: 1 Diff: true SpkThr: 0.1 DiffThr: 0.02 Min: 0.001 }
- NeuroMod: { DAMod: 0 Valence: 0 DAModGain: 0.5 DALRateSign: false DALRateMod: 0 AChLRateMod: 0 AChDisInhib: 0 BurstGain: 1 DipGain: 1 }
+ NeuroMod: { DAMod: NoDAMod Valence: Positive DAModGain: 0.5 DALRateSign: false DALRateMod: 0 AChLRateMod: 0 AChDisInhib: 0 BurstGain: 1 DipGain: 1 }
}
Pulv: {
DriveScale: 0.1 FullDriveAct: 0.6 DriveLayIdx: 7
@@ -616,13 +616,13 @@ Act: {
Clamp: { IsInput: false IsTarget: true Ge: 0.8 Add: false ErrThr: 0.5 }
Noise: { On: false GeHz: 100 Ge: 0.001 GiHz: 200 Gi: 0.001 }
VmRange: { Min: 0.1 Max: 1 }
- Mahp: { Gbar: 0.02 Voff: -30 Vslope: 9 TauMax: 1000 Tadj: 3.2093637 DtMax: 0.001 }
+ Mahp: { Gbar: 0.02 Voff: -30 Vslope: 9 TauMax: 1000 Tadj: 3.209364 DtMax: 0.001 }
Sahp: { Gbar: 0.05 CaTau: 5 Off: 0.8 Slope: 0.02 TauMax: 1 CaDt: 0.2 DtMax: 1 }
KNa: { On: true TrialSlow: false Med: { On: true Rise: 0.02 Max: 0.2 Tau: 200 Dt: 0.005 } Slow: { On: true Rise: 0.001 Max: 0.2 Tau: 1000 Dt: 0.001
} }
NMDA: { Gbar: 0.006 Tau: 100 ITau: 1 MgC: 1.4 Voff: 0 }
MaintNMDA: { Gbar: 0.007 Tau: 200 ITau: 1 MgC: 1.4 Voff: 0 }
- GabaB: { Gbar: 0.015 RiseTau: 45 DecayTau: 50 Gbase: 0.2 GiSpike: 10 MaxTime: 47.41225 TauFact: 2.5811758 RiseDt: 0.022222223 DecayDt: 0.02 }
+ GabaB: { Gbar: 0.015 RiseTau: 45 DecayTau: 50 Gbase: 0.2 GiSpike: 10 MaxTime: 47.41225 TauFact: 2.5811756 RiseDt: 0.022222223 DecayDt: 0.02 }
VGCC: { Gbar: 0.02 Ca: 25 }
AK: { Gbar: 0.1 Hf: 0.076 Mf: 0.075 Voff: 2 Vmax: -37 }
SKCa: { Gbar: 0 C50: 0.5 ActTau: 15 DeTau: 30 KCaR: 0.8 CaRDecayTau: 150 CaInThr: 0.01 CaInTau: 50 }
@@ -642,7 +642,7 @@ Learn: {
TrgAvgAct: { On: true GiBaseInit: 0 ErrLRate: 0.02 SynScaleRate: 0.005 SubMean: 1 Permute: true Pool: true TrgRange: { Min: 0.5 Max: 2
} }
RLRate: { On: true SigmoidMin: 1 Diff: true SpkThr: 0.1 DiffThr: 0.02 Min: 0.001 }
- NeuroMod: { DAMod: 0 Valence: 0 DAModGain: 0.5 DALRateSign: false DALRateMod: 0 AChLRateMod: 0 AChDisInhib: 0 BurstGain: 1 DipGain: 1 }
+ NeuroMod: { DAMod: NoDAMod Valence: Positive DAModGain: 0.5 DALRateSign: false DALRateMod: 0 AChLRateMod: 0 AChDisInhib: 0 BurstGain: 1 DipGain: 1 }
}
Pulv: {
DriveScale: 0.1 FullDriveAct: 0.6 DriveLayIdx: 8
@@ -760,13 +760,13 @@ Act: {
Clamp: { IsInput: false IsTarget: false Ge: 0.8 Add: false ErrThr: 0.5 }
Noise: { On: false GeHz: 100 Ge: 0.001 GiHz: 200 Gi: 0.001 }
VmRange: { Min: 0.1 Max: 1 }
- Mahp: { Gbar: 0.02 Voff: -30 Vslope: 9 TauMax: 1000 Tadj: 3.2093637 DtMax: 0.001 }
+ Mahp: { Gbar: 0.02 Voff: -30 Vslope: 9 TauMax: 1000 Tadj: 3.209364 DtMax: 0.001 }
Sahp: { Gbar: 0.05 CaTau: 5 Off: 0.8 Slope: 0.02 TauMax: 1 CaDt: 0.2 DtMax: 1 }
KNa: { On: true TrialSlow: false Med: { On: true Rise: 0.02 Max: 0.2 Tau: 200 Dt: 0.005 } Slow: { On: true Rise: 0.001 Max: 0.2 Tau: 1000 Dt: 0.001
} }
NMDA: { Gbar: 0.006 Tau: 100 ITau: 1 MgC: 1.4 Voff: 0 }
MaintNMDA: { Gbar: 0.007 Tau: 200 ITau: 1 MgC: 1.4 Voff: 0 }
- GabaB: { Gbar: 0.015 RiseTau: 45 DecayTau: 50 Gbase: 0.2 GiSpike: 10 MaxTime: 47.41225 TauFact: 2.5811758 RiseDt: 0.022222223 DecayDt: 0.02 }
+ GabaB: { Gbar: 0.015 RiseTau: 45 DecayTau: 50 Gbase: 0.2 GiSpike: 10 MaxTime: 47.41225 TauFact: 2.5811756 RiseDt: 0.022222223 DecayDt: 0.02 }
VGCC: { Gbar: 0.02 Ca: 25 }
AK: { Gbar: 0.1 Hf: 0.076 Mf: 0.075 Voff: 2 Vmax: -37 }
SKCa: { Gbar: 0 C50: 0.5 ActTau: 15 DeTau: 30 KCaR: 0.8 CaRDecayTau: 150 CaInThr: 0.01 CaInTau: 50 }
@@ -786,7 +786,7 @@ Learn: {
TrgAvgAct: { On: false GiBaseInit: 0 ErrLRate: 0.02 SynScaleRate: 0.005 SubMean: 1 Permute: true Pool: true TrgRange: { Min: 0.5 Max: 2
} }
RLRate: { On: true SigmoidMin: 0.05 Diff: true SpkThr: 0.1 DiffThr: 0.02 Min: 0.001 }
- NeuroMod: { DAMod: 0 Valence: 0 DAModGain: 0.5 DALRateSign: false DALRateMod: 0 AChLRateMod: 0 AChDisInhib: 0 BurstGain: 1 DipGain: 1 }
+ NeuroMod: { DAMod: NoDAMod Valence: Positive DAModGain: 0.5 DALRateSign: false DALRateMod: 0 AChLRateMod: 0 AChDisInhib: 0 BurstGain: 1 DipGain: 1 }
}
/////////////////////////////////////////////////
Layer: DrivesP
@@ -801,13 +801,13 @@ Act: {
Clamp: { IsInput: false IsTarget: true Ge: 0.8 Add: false ErrThr: 0.5 }
Noise: { On: false GeHz: 100 Ge: 0.001 GiHz: 200 Gi: 0.001 }
VmRange: { Min: 0.1 Max: 1 }
- Mahp: { Gbar: 0.02 Voff: -30 Vslope: 9 TauMax: 1000 Tadj: 3.2093637 DtMax: 0.001 }
+ Mahp: { Gbar: 0.02 Voff: -30 Vslope: 9 TauMax: 1000 Tadj: 3.209364 DtMax: 0.001 }
Sahp: { Gbar: 0.05 CaTau: 5 Off: 0.8 Slope: 0.02 TauMax: 1 CaDt: 0.2 DtMax: 1 }
KNa: { On: true TrialSlow: false Med: { On: true Rise: 0.02 Max: 0.2 Tau: 200 Dt: 0.005 } Slow: { On: true Rise: 0.001 Max: 0.2 Tau: 1000 Dt: 0.001
} }
NMDA: { Gbar: 0.006 Tau: 100 ITau: 1 MgC: 1.4 Voff: 0 }
MaintNMDA: { Gbar: 0.007 Tau: 200 ITau: 1 MgC: 1.4 Voff: 0 }
- GabaB: { Gbar: 0.015 RiseTau: 45 DecayTau: 50 Gbase: 0.2 GiSpike: 10 MaxTime: 47.41225 TauFact: 2.5811758 RiseDt: 0.022222223 DecayDt: 0.02 }
+ GabaB: { Gbar: 0.015 RiseTau: 45 DecayTau: 50 Gbase: 0.2 GiSpike: 10 MaxTime: 47.41225 TauFact: 2.5811756 RiseDt: 0.022222223 DecayDt: 0.02 }
VGCC: { Gbar: 0.02 Ca: 25 }
AK: { Gbar: 0.1 Hf: 0.076 Mf: 0.075 Voff: 2 Vmax: -37 }
SKCa: { Gbar: 0 C50: 0.5 ActTau: 15 DeTau: 30 KCaR: 0.8 CaRDecayTau: 150 CaInThr: 0.01 CaInTau: 50 }
@@ -827,7 +827,7 @@ Learn: {
TrgAvgAct: { On: true GiBaseInit: 0 ErrLRate: 0.02 SynScaleRate: 0.005 SubMean: 1 Permute: true Pool: true TrgRange: { Min: 0.5 Max: 2
} }
RLRate: { On: true SigmoidMin: 1 Diff: true SpkThr: 0.1 DiffThr: 0.02 Min: 0.001 }
- NeuroMod: { DAMod: 0 Valence: 0 DAModGain: 0.5 DALRateSign: false DALRateMod: 0 AChLRateMod: 0 AChDisInhib: 0 BurstGain: 1 DipGain: 1 }
+ NeuroMod: { DAMod: NoDAMod Valence: Positive DAModGain: 0.5 DALRateSign: false DALRateMod: 0 AChLRateMod: 0 AChDisInhib: 0 BurstGain: 1 DipGain: 1 }
}
Pulv: {
DriveScale: 0.1 FullDriveAct: 0.6 DriveLayIdx: 11
@@ -885,13 +885,13 @@ Act: {
Clamp: { IsInput: false IsTarget: false Ge: 0.8 Add: false ErrThr: 0.5 }
Noise: { On: false GeHz: 100 Ge: 0.001 GiHz: 200 Gi: 0.001 }
VmRange: { Min: 0.1 Max: 1 }
- Mahp: { Gbar: 0.02 Voff: -30 Vslope: 9 TauMax: 1000 Tadj: 3.2093637 DtMax: 0.001 }
+ Mahp: { Gbar: 0.02 Voff: -30 Vslope: 9 TauMax: 1000 Tadj: 3.209364 DtMax: 0.001 }
Sahp: { Gbar: 0.05 CaTau: 5 Off: 0.8 Slope: 0.02 TauMax: 1 CaDt: 0.2 DtMax: 1 }
KNa: { On: true TrialSlow: false Med: { On: true Rise: 0.02 Max: 0.2 Tau: 200 Dt: 0.005 } Slow: { On: true Rise: 0.001 Max: 0.2 Tau: 1000 Dt: 0.001
} }
NMDA: { Gbar: 0.006 Tau: 100 ITau: 1 MgC: 1.4 Voff: 0 }
MaintNMDA: { Gbar: 0.007 Tau: 200 ITau: 1 MgC: 1.4 Voff: 0 }
- GabaB: { Gbar: 0.015 RiseTau: 45 DecayTau: 50 Gbase: 0.2 GiSpike: 10 MaxTime: 47.41225 TauFact: 2.5811758 RiseDt: 0.022222223 DecayDt: 0.02 }
+ GabaB: { Gbar: 0.015 RiseTau: 45 DecayTau: 50 Gbase: 0.2 GiSpike: 10 MaxTime: 47.41225 TauFact: 2.5811756 RiseDt: 0.022222223 DecayDt: 0.02 }
VGCC: { Gbar: 0.02 Ca: 25 }
AK: { Gbar: 0.1 Hf: 0.076 Mf: 0.075 Voff: 2 Vmax: -37 }
SKCa: { Gbar: 0 C50: 0.5 ActTau: 15 DeTau: 30 KCaR: 0.8 CaRDecayTau: 150 CaInThr: 0.01 CaInTau: 50 }
@@ -911,7 +911,7 @@ Learn: {
TrgAvgAct: { On: false GiBaseInit: 0 ErrLRate: 0.02 SynScaleRate: 0.005 SubMean: 1 Permute: true Pool: true TrgRange: { Min: 0.5 Max: 2
} }
RLRate: { On: true SigmoidMin: 0.05 Diff: true SpkThr: 0.1 DiffThr: 0.02 Min: 0.001 }
- NeuroMod: { DAMod: 0 Valence: 0 DAModGain: 0.5 DALRateSign: false DALRateMod: 0 AChLRateMod: 0 AChDisInhib: 0 BurstGain: 1 DipGain: 1 }
+ NeuroMod: { DAMod: NoDAMod Valence: Positive DAModGain: 0.5 DALRateSign: false DALRateMod: 0 AChLRateMod: 0 AChDisInhib: 0 BurstGain: 1 DipGain: 1 }
}
/////////////////////////////////////////////////
Layer: VsGPi
@@ -926,13 +926,13 @@ Act: {
Clamp: { IsInput: false IsTarget: false Ge: 0.8 Add: false ErrThr: 0.5 }
Noise: { On: false GeHz: 100 Ge: 0.001 GiHz: 200 Gi: 0.001 }
VmRange: { Min: 0.1 Max: 1 }
- Mahp: { Gbar: 0.02 Voff: -30 Vslope: 9 TauMax: 1000 Tadj: 3.2093637 DtMax: 0.001 }
+ Mahp: { Gbar: 0.02 Voff: -30 Vslope: 9 TauMax: 1000 Tadj: 3.209364 DtMax: 0.001 }
Sahp: { Gbar: 0.05 CaTau: 5 Off: 0.8 Slope: 0.02 TauMax: 1 CaDt: 0.2 DtMax: 1 }
KNa: { On: true TrialSlow: false Med: { On: true Rise: 0.02 Max: 0.2 Tau: 200 Dt: 0.005 } Slow: { On: true Rise: 0.001 Max: 0.2 Tau: 1000 Dt: 0.001
} }
NMDA: { Gbar: 0 Tau: 100 ITau: 1 MgC: 1.4 Voff: 0 }
MaintNMDA: { Gbar: 0.007 Tau: 200 ITau: 1 MgC: 1.4 Voff: 0 }
- GabaB: { Gbar: 0 RiseTau: 45 DecayTau: 50 Gbase: 0.2 GiSpike: 10 MaxTime: 47.41225 TauFact: 2.5811758 RiseDt: 0.022222223 DecayDt: 0.02 }
+ GabaB: { Gbar: 0 RiseTau: 45 DecayTau: 50 Gbase: 0.2 GiSpike: 10 MaxTime: 47.41225 TauFact: 2.5811756 RiseDt: 0.022222223 DecayDt: 0.02 }
VGCC: { Gbar: 0.02 Ca: 25 }
AK: { Gbar: 0.1 Hf: 0.076 Mf: 0.075 Voff: 2 Vmax: -37 }
SKCa: { Gbar: 0 C50: 0.5 ActTau: 15 DeTau: 30 KCaR: 0.8 CaRDecayTau: 150 CaInThr: 0.01 CaInTau: 50 }
@@ -952,7 +952,7 @@ Learn: {
TrgAvgAct: { On: true GiBaseInit: 0 ErrLRate: 0.02 SynScaleRate: 0.005 SubMean: 1 Permute: true Pool: true TrgRange: { Min: 0.5 Max: 2
} }
RLRate: { On: true SigmoidMin: 0.05 Diff: true SpkThr: 0.1 DiffThr: 0.02 Min: 0.001 }
- NeuroMod: { DAMod: 0 Valence: 0 DAModGain: 0.5 DALRateSign: false DALRateMod: 0 AChLRateMod: 0 AChDisInhib: 0 BurstGain: 1 DipGain: 1 }
+ NeuroMod: { DAMod: NoDAMod Valence: Positive DAModGain: 0.5 DALRateSign: false DALRateMod: 0 AChLRateMod: 0 AChDisInhib: 0 BurstGain: 1 DipGain: 1 }
}
GP: {
GPType: GPi
@@ -1050,13 +1050,13 @@ Act: {
Clamp: { IsInput: false IsTarget: false Ge: 0.8 Add: false ErrThr: 0.5 }
Noise: { On: false GeHz: 100 Ge: 0.001 GiHz: 200 Gi: 0.001 }
VmRange: { Min: 0.1 Max: 1 }
- Mahp: { Gbar: 0.02 Voff: -30 Vslope: 9 TauMax: 1000 Tadj: 3.2093637 DtMax: 0.001 }
+ Mahp: { Gbar: 0.02 Voff: -30 Vslope: 9 TauMax: 1000 Tadj: 3.209364 DtMax: 0.001 }
Sahp: { Gbar: 0.05 CaTau: 5 Off: 0.8 Slope: 0.02 TauMax: 1 CaDt: 0.2 DtMax: 1 }
KNa: { On: true TrialSlow: false Med: { On: true Rise: 0.02 Max: 0.2 Tau: 200 Dt: 0.005 } Slow: { On: true Rise: 0.001 Max: 0.2 Tau: 1000 Dt: 0.001
} }
NMDA: { Gbar: 0 Tau: 100 ITau: 1 MgC: 1.4 Voff: 0 }
MaintNMDA: { Gbar: 0.007 Tau: 200 ITau: 1 MgC: 1.4 Voff: 0 }
- GabaB: { Gbar: 0 RiseTau: 45 DecayTau: 50 Gbase: 0.2 GiSpike: 10 MaxTime: 47.41225 TauFact: 2.5811758 RiseDt: 0.022222223 DecayDt: 0.02 }
+ GabaB: { Gbar: 0 RiseTau: 45 DecayTau: 50 Gbase: 0.2 GiSpike: 10 MaxTime: 47.41225 TauFact: 2.5811756 RiseDt: 0.022222223 DecayDt: 0.02 }
VGCC: { Gbar: 0.02 Ca: 25 }
AK: { Gbar: 0.1 Hf: 0.076 Mf: 0.075 Voff: 2 Vmax: -37 }
SKCa: { Gbar: 0 C50: 0.5 ActTau: 15 DeTau: 30 KCaR: 0.8 CaRDecayTau: 150 CaInThr: 0.01 CaInTau: 50 }
@@ -1076,7 +1076,7 @@ Learn: {
TrgAvgAct: { On: true GiBaseInit: 0 ErrLRate: 0.02 SynScaleRate: 0.005 SubMean: 1 Permute: true Pool: true TrgRange: { Min: 0.5 Max: 2
} }
RLRate: { On: true SigmoidMin: 0.05 Diff: true SpkThr: 0.1 DiffThr: 0.02 Min: 0.001 }
- NeuroMod: { DAMod: 0 Valence: 0 DAModGain: 0.5 DALRateSign: false DALRateMod: 0 AChLRateMod: 0 AChDisInhib: 0 BurstGain: 1 DipGain: 1 }
+ NeuroMod: { DAMod: NoDAMod Valence: Positive DAModGain: 0.5 DALRateSign: false DALRateMod: 0 AChLRateMod: 0 AChDisInhib: 0 BurstGain: 1 DipGain: 1 }
}
GP: {
GPType: GPeOut
@@ -1134,13 +1134,13 @@ Act: {
Clamp: { IsInput: false IsTarget: false Ge: 0.8 Add: false ErrThr: 0.5 }
Noise: { On: false GeHz: 100 Ge: 0.001 GiHz: 200 Gi: 0.001 }
VmRange: { Min: 0.1 Max: 1 }
- Mahp: { Gbar: 0.02 Voff: -30 Vslope: 9 TauMax: 1000 Tadj: 3.2093637 DtMax: 0.001 }
+ Mahp: { Gbar: 0.02 Voff: -30 Vslope: 9 TauMax: 1000 Tadj: 3.209364 DtMax: 0.001 }
Sahp: { Gbar: 0.05 CaTau: 5 Off: 0.8 Slope: 0.02 TauMax: 1 CaDt: 0.2 DtMax: 1 }
KNa: { On: true TrialSlow: false Med: { On: true Rise: 0.02 Max: 0.2 Tau: 200 Dt: 0.005 } Slow: { On: true Rise: 0.001 Max: 0.2 Tau: 1000 Dt: 0.001
} }
NMDA: { Gbar: 0 Tau: 100 ITau: 1 MgC: 1.4 Voff: 0 }
MaintNMDA: { Gbar: 0.007 Tau: 200 ITau: 1 MgC: 1.4 Voff: 0 }
- GabaB: { Gbar: 0 RiseTau: 45 DecayTau: 50 Gbase: 0.2 GiSpike: 10 MaxTime: 47.41225 TauFact: 2.5811758 RiseDt: 0.022222223 DecayDt: 0.02 }
+ GabaB: { Gbar: 0 RiseTau: 45 DecayTau: 50 Gbase: 0.2 GiSpike: 10 MaxTime: 47.41225 TauFact: 2.5811756 RiseDt: 0.022222223 DecayDt: 0.02 }
VGCC: { Gbar: 0.02 Ca: 25 }
AK: { Gbar: 0.1 Hf: 0.076 Mf: 0.075 Voff: 2 Vmax: -37 }
SKCa: { Gbar: 0 C50: 0.5 ActTau: 15 DeTau: 30 KCaR: 0.8 CaRDecayTau: 150 CaInThr: 0.01 CaInTau: 50 }
@@ -1160,7 +1160,7 @@ Learn: {
TrgAvgAct: { On: true GiBaseInit: 0 ErrLRate: 0.02 SynScaleRate: 0.005 SubMean: 1 Permute: true Pool: true TrgRange: { Min: 0.5 Max: 2
} }
RLRate: { On: true SigmoidMin: 0.05 Diff: true SpkThr: 0.1 DiffThr: 0.02 Min: 0.001 }
- NeuroMod: { DAMod: 0 Valence: 0 DAModGain: 0.5 DALRateSign: false DALRateMod: 0 AChLRateMod: 0 AChDisInhib: 0 BurstGain: 1 DipGain: 1 }
+ NeuroMod: { DAMod: NoDAMod Valence: Positive DAModGain: 0.5 DALRateSign: false DALRateMod: 0 AChLRateMod: 0 AChDisInhib: 0 BurstGain: 1 DipGain: 1 }
}
GP: {
GPType: GPeIn
@@ -1238,13 +1238,13 @@ Act: {
Clamp: { IsInput: false IsTarget: false Ge: 0.8 Add: false ErrThr: 0.5 }
Noise: { On: false GeHz: 100 Ge: 0.001 GiHz: 200 Gi: 0.001 }
VmRange: { Min: 0.1 Max: 1 }
- Mahp: { Gbar: 0.02 Voff: -30 Vslope: 9 TauMax: 1000 Tadj: 3.2093637 DtMax: 0.001 }
+ Mahp: { Gbar: 0.02 Voff: -30 Vslope: 9 TauMax: 1000 Tadj: 3.209364 DtMax: 0.001 }
Sahp: { Gbar: 0.05 CaTau: 5 Off: 0.8 Slope: 0.02 TauMax: 1 CaDt: 0.2 DtMax: 1 }
KNa: { On: true TrialSlow: false Med: { On: true Rise: 0.02 Max: 0.2 Tau: 200 Dt: 0.005 } Slow: { On: true Rise: 0.001 Max: 0.2 Tau: 1000 Dt: 0.001
} }
NMDA: { Gbar: 0 Tau: 100 ITau: 1 MgC: 1.4 Voff: 0 }
MaintNMDA: { Gbar: 0.007 Tau: 200 ITau: 1 MgC: 1.4 Voff: 0 }
- GabaB: { Gbar: 0 RiseTau: 45 DecayTau: 50 Gbase: 0.2 GiSpike: 10 MaxTime: 47.41225 TauFact: 2.5811758 RiseDt: 0.022222223 DecayDt: 0.02 }
+ GabaB: { Gbar: 0 RiseTau: 45 DecayTau: 50 Gbase: 0.2 GiSpike: 10 MaxTime: 47.41225 TauFact: 2.5811756 RiseDt: 0.022222223 DecayDt: 0.02 }
VGCC: { Gbar: 0.02 Ca: 25 }
AK: { Gbar: 0.1 Hf: 0.076 Mf: 0.075 Voff: 2 Vmax: -37 }
SKCa: { Gbar: 0 C50: 0.5 ActTau: 15 DeTau: 30 KCaR: 0.8 CaRDecayTau: 150 CaInThr: 0.01 CaInTau: 50 }
@@ -1264,7 +1264,7 @@ Learn: {
TrgAvgAct: { On: true GiBaseInit: 0 ErrLRate: 0.02 SynScaleRate: 0.005 SubMean: 1 Permute: true Pool: true TrgRange: { Min: 0.5 Max: 2
} }
RLRate: { On: true SigmoidMin: 0.05 Diff: true SpkThr: 0.1 DiffThr: 0.02 Min: 0.001 }
- NeuroMod: { DAMod: 0 Valence: 0 DAModGain: 0.5 DALRateSign: false DALRateMod: 0 AChLRateMod: 0 AChDisInhib: 0 BurstGain: 1 DipGain: 1 }
+ NeuroMod: { DAMod: NoDAMod Valence: Positive DAModGain: 0.5 DALRateSign: false DALRateMod: 0 AChLRateMod: 0 AChDisInhib: 0 BurstGain: 1 DipGain: 1 }
}
GP: {
GPType: GPeTA
@@ -1322,13 +1322,13 @@ Act: {
Clamp: { IsInput: false IsTarget: false Ge: 0.8 Add: false ErrThr: 0.5 }
Noise: { On: false GeHz: 100 Ge: 0.001 GiHz: 200 Gi: 0.001 }
VmRange: { Min: 0.1 Max: 1 }
- Mahp: { Gbar: 0.02 Voff: -30 Vslope: 9 TauMax: 1000 Tadj: 3.2093637 DtMax: 0.001 }
+ Mahp: { Gbar: 0.02 Voff: -30 Vslope: 9 TauMax: 1000 Tadj: 3.209364 DtMax: 0.001 }
Sahp: { Gbar: 0.05 CaTau: 5 Off: 0.8 Slope: 0.02 TauMax: 1 CaDt: 0.2 DtMax: 1 }
KNa: { On: true TrialSlow: false Med: { On: true Rise: 0.02 Max: 0.2 Tau: 200 Dt: 0.005 } Slow: { On: true Rise: 0.001 Max: 0.2 Tau: 1000 Dt: 0.001
} }
NMDA: { Gbar: 0 Tau: 100 ITau: 1 MgC: 1.4 Voff: 0 }
MaintNMDA: { Gbar: 0.007 Tau: 200 ITau: 1 MgC: 1.4 Voff: 0 }
- GabaB: { Gbar: 0 RiseTau: 45 DecayTau: 50 Gbase: 0.2 GiSpike: 10 MaxTime: 47.41225 TauFact: 2.5811758 RiseDt: 0.022222223 DecayDt: 0.02 }
+ GabaB: { Gbar: 0 RiseTau: 45 DecayTau: 50 Gbase: 0.2 GiSpike: 10 MaxTime: 47.41225 TauFact: 2.5811756 RiseDt: 0.022222223 DecayDt: 0.02 }
VGCC: { Gbar: 0.02 Ca: 25 }
AK: { Gbar: 0.1 Hf: 0.076 Mf: 0.075 Voff: 2 Vmax: -37 }
SKCa: { Gbar: 3 C50: 0.5 ActTau: 15 DeTau: 30 KCaR: 0.8 CaRDecayTau: 150 CaInThr: 0.01 CaInTau: 50 }
@@ -1348,7 +1348,7 @@ Learn: {
TrgAvgAct: { On: true GiBaseInit: 0 ErrLRate: 0.02 SynScaleRate: 0.005 SubMean: 1 Permute: true Pool: true TrgRange: { Min: 0.5 Max: 2
} }
RLRate: { On: true SigmoidMin: 0.05 Diff: true SpkThr: 0.1 DiffThr: 0.02 Min: 0.001 }
- NeuroMod: { DAMod: 0 Valence: 0 DAModGain: 0.5 DALRateSign: false DALRateMod: 0 AChLRateMod: 0 AChDisInhib: 2 BurstGain: 1 DipGain: 1 }
+ NeuroMod: { DAMod: NoDAMod Valence: Positive DAModGain: 0.5 DALRateSign: false DALRateMod: 0 AChLRateMod: 0 AChDisInhib: 2 BurstGain: 1 DipGain: 1 }
}
///////////////////////////////////////////////////
Prjn: VsGPeInToVsSTNp
@@ -1423,13 +1423,13 @@ Act: {
Clamp: { IsInput: false IsTarget: false Ge: 0.8 Add: false ErrThr: 0.5 }
Noise: { On: false GeHz: 100 Ge: 0.001 GiHz: 200 Gi: 0.001 }
VmRange: { Min: 0.1 Max: 1 }
- Mahp: { Gbar: 0.02 Voff: -30 Vslope: 9 TauMax: 1000 Tadj: 3.2093637 DtMax: 0.001 }
+ Mahp: { Gbar: 0.02 Voff: -30 Vslope: 9 TauMax: 1000 Tadj: 3.209364 DtMax: 0.001 }
Sahp: { Gbar: 0.05 CaTau: 5 Off: 0.8 Slope: 0.02 TauMax: 1 CaDt: 0.2 DtMax: 1 }
KNa: { On: true TrialSlow: false Med: { On: true Rise: 0.02 Max: 0.2 Tau: 200 Dt: 0.005 } Slow: { On: true Rise: 0.001 Max: 0.2 Tau: 1000 Dt: 0.001
} }
NMDA: { Gbar: 0 Tau: 100 ITau: 1 MgC: 1.4 Voff: 0 }
MaintNMDA: { Gbar: 0.007 Tau: 200 ITau: 1 MgC: 1.4 Voff: 0 }
- GabaB: { Gbar: 0 RiseTau: 45 DecayTau: 50 Gbase: 0.2 GiSpike: 10 MaxTime: 47.41225 TauFact: 2.5811758 RiseDt: 0.022222223 DecayDt: 0.02 }
+ GabaB: { Gbar: 0 RiseTau: 45 DecayTau: 50 Gbase: 0.2 GiSpike: 10 MaxTime: 47.41225 TauFact: 2.5811756 RiseDt: 0.022222223 DecayDt: 0.02 }
VGCC: { Gbar: 0.02 Ca: 25 }
AK: { Gbar: 0.1 Hf: 0.076 Mf: 0.075 Voff: 2 Vmax: -37 }
SKCa: { Gbar: 3 C50: 0.4 ActTau: 15 DeTau: 30 KCaR: 0.4 CaRDecayTau: 200 CaInThr: 0.01 CaInTau: 50 }
@@ -1449,7 +1449,7 @@ Learn: {
TrgAvgAct: { On: true GiBaseInit: 0 ErrLRate: 0.02 SynScaleRate: 0.005 SubMean: 1 Permute: true Pool: true TrgRange: { Min: 0.5 Max: 2
} }
RLRate: { On: true SigmoidMin: 0.05 Diff: true SpkThr: 0.1 DiffThr: 0.02 Min: 0.001 }
- NeuroMod: { DAMod: 0 Valence: 0 DAModGain: 0.5 DALRateSign: false DALRateMod: 0 AChLRateMod: 0 AChDisInhib: 2 BurstGain: 1 DipGain: 1 }
+ NeuroMod: { DAMod: NoDAMod Valence: Positive DAModGain: 0.5 DALRateSign: false DALRateMod: 0 AChLRateMod: 0 AChDisInhib: 2 BurstGain: 1 DipGain: 1 }
}
///////////////////////////////////////////////////
Prjn: BLAPosAcqD1ToVsSTNs
@@ -1504,13 +1504,13 @@ Act: {
Clamp: { IsInput: false IsTarget: false Ge: 0.8 Add: false ErrThr: 0.5 }
Noise: { On: false GeHz: 100 Ge: 0.001 GiHz: 200 Gi: 0.001 }
VmRange: { Min: 0.1 Max: 1 }
- Mahp: { Gbar: 0.02 Voff: -30 Vslope: 9 TauMax: 1000 Tadj: 3.2093637 DtMax: 0.001 }
+ Mahp: { Gbar: 0.02 Voff: -30 Vslope: 9 TauMax: 1000 Tadj: 3.209364 DtMax: 0.001 }
Sahp: { Gbar: 0.05 CaTau: 5 Off: 0.8 Slope: 0.02 TauMax: 1 CaDt: 0.2 DtMax: 1 }
KNa: { On: true TrialSlow: false Med: { On: true Rise: 0.02 Max: 0.2 Tau: 200 Dt: 0.005 } Slow: { On: true Rise: 0.001 Max: 0.2 Tau: 1000 Dt: 0.001
} }
NMDA: { Gbar: 0.006 Tau: 100 ITau: 1 MgC: 1.4 Voff: 0 }
MaintNMDA: { Gbar: 0.007 Tau: 200 ITau: 1 MgC: 1.4 Voff: 0 }
- GabaB: { Gbar: 0.015 RiseTau: 45 DecayTau: 50 Gbase: 0.2 GiSpike: 10 MaxTime: 47.41225 TauFact: 2.5811758 RiseDt: 0.022222223 DecayDt: 0.02 }
+ GabaB: { Gbar: 0.015 RiseTau: 45 DecayTau: 50 Gbase: 0.2 GiSpike: 10 MaxTime: 47.41225 TauFact: 2.5811756 RiseDt: 0.022222223 DecayDt: 0.02 }
VGCC: { Gbar: 0.02 Ca: 25 }
AK: { Gbar: 0.1 Hf: 0.076 Mf: 0.075 Voff: 2 Vmax: -37 }
SKCa: { Gbar: 0 C50: 0.5 ActTau: 15 DeTau: 30 KCaR: 0.8 CaRDecayTau: 150 CaInThr: 0.01 CaInTau: 50 }
@@ -1530,7 +1530,7 @@ Learn: {
TrgAvgAct: { On: true GiBaseInit: 0 ErrLRate: 0.02 SynScaleRate: 0.005 SubMean: 1 Permute: true Pool: true TrgRange: { Min: 0.5 Max: 2
} }
RLRate: { On: false SigmoidMin: 0.05 Diff: true SpkThr: 0.1 DiffThr: 0.02 Min: 0.001 }
- NeuroMod: { DAMod: 1 Valence: 0 DAModGain: 0.5 DALRateSign: true DALRateMod: 1 AChLRateMod: 1 AChDisInhib: 5 BurstGain: 1 DipGain: 1 }
+ NeuroMod: { DAMod: D1Mod Valence: Positive DAModGain: 0.5 DALRateSign: true DALRateMod: 1 AChLRateMod: 1 AChDisInhib: 5 BurstGain: 1 DipGain: 1 }
}
Matrix: {
GateThr: 0.05 IsVS: true OtherMatrixIdx: 21 ThalLay1Idx: 35 ThalLay2Idx: 41 ThalLay3Idx: 46 ThalLay4Idx: 51 ThalLay5Idx: -1 ThalLay6Idx: -1
@@ -1591,7 +1591,7 @@ SWt: {
Learn: {
Learn: true
LRate: { Base: 0.02 Sched: 1 Mod: 1 Eff: 0.02 }
- Trace: { Tau: 1 SubMean: 0 LearnThr: 0.75 }
+ Trace: { Tau: 1 SubMean: 0 LearnThr: 0 }
KinaseCa: { SpikeG: 12 MaxISI: 100 Dt: { MTau: 5 PTau: 39 DTau: 41 ExpAdj: true
} }
}
@@ -1614,7 +1614,7 @@ SWt: {
Learn: {
Learn: true
LRate: { Base: 0.02 Sched: 1 Mod: 1 Eff: 0.02 }
- Trace: { Tau: 1 SubMean: 0 LearnThr: 0.75 }
+ Trace: { Tau: 1 SubMean: 0 LearnThr: 0 }
KinaseCa: { SpikeG: 12 MaxISI: 100 Dt: { MTau: 5 PTau: 39 DTau: 41 ExpAdj: true
} }
}
@@ -1637,7 +1637,7 @@ SWt: {
Learn: {
Learn: false
LRate: { Base: 0.02 Sched: 1 Mod: 1 Eff: 0.02 }
- Trace: { Tau: 1 SubMean: 0 LearnThr: 0.75 }
+ Trace: { Tau: 1 SubMean: 0 LearnThr: 0 }
KinaseCa: { SpikeG: 12 MaxISI: 100 Dt: { MTau: 5 PTau: 39 DTau: 41 ExpAdj: true
} }
}
@@ -1660,7 +1660,7 @@ SWt: {
Learn: {
Learn: true
LRate: { Base: 0.02 Sched: 1 Mod: 1 Eff: 0.02 }
- Trace: { Tau: 1 SubMean: 0 LearnThr: 0.75 }
+ Trace: { Tau: 1 SubMean: 0 LearnThr: 0 }
KinaseCa: { SpikeG: 12 MaxISI: 100 Dt: { MTau: 5 PTau: 39 DTau: 41 ExpAdj: true
} }
}
@@ -1683,7 +1683,7 @@ SWt: {
Learn: {
Learn: true
LRate: { Base: 0.02 Sched: 1 Mod: 1 Eff: 0.02 }
- Trace: { Tau: 1 SubMean: 0 LearnThr: 0.75 }
+ Trace: { Tau: 1 SubMean: 0 LearnThr: 0 }
KinaseCa: { SpikeG: 12 MaxISI: 100 Dt: { MTau: 5 PTau: 39 DTau: 41 ExpAdj: true
} }
}
@@ -1706,7 +1706,7 @@ SWt: {
Learn: {
Learn: false
LRate: { Base: 0.02 Sched: 1 Mod: 1 Eff: 0.02 }
- Trace: { Tau: 1 SubMean: 0 LearnThr: 0.75 }
+ Trace: { Tau: 1 SubMean: 0 LearnThr: 0 }
KinaseCa: { SpikeG: 12 MaxISI: 100 Dt: { MTau: 5 PTau: 39 DTau: 41 ExpAdj: true
} }
}
@@ -1726,13 +1726,13 @@ Act: {
Clamp: { IsInput: false IsTarget: false Ge: 0.8 Add: false ErrThr: 0.5 }
Noise: { On: false GeHz: 100 Ge: 0.001 GiHz: 200 Gi: 0.001 }
VmRange: { Min: 0.1 Max: 1 }
- Mahp: { Gbar: 0.02 Voff: -30 Vslope: 9 TauMax: 1000 Tadj: 3.2093637 DtMax: 0.001 }
+ Mahp: { Gbar: 0.02 Voff: -30 Vslope: 9 TauMax: 1000 Tadj: 3.209364 DtMax: 0.001 }
Sahp: { Gbar: 0.05 CaTau: 5 Off: 0.8 Slope: 0.02 TauMax: 1 CaDt: 0.2 DtMax: 1 }
KNa: { On: true TrialSlow: false Med: { On: true Rise: 0.02 Max: 0.2 Tau: 200 Dt: 0.005 } Slow: { On: true Rise: 0.001 Max: 0.2 Tau: 1000 Dt: 0.001
} }
NMDA: { Gbar: 0.006 Tau: 100 ITau: 1 MgC: 1.4 Voff: 0 }
MaintNMDA: { Gbar: 0.007 Tau: 200 ITau: 1 MgC: 1.4 Voff: 0 }
- GabaB: { Gbar: 0.015 RiseTau: 45 DecayTau: 50 Gbase: 0.2 GiSpike: 10 MaxTime: 47.41225 TauFact: 2.5811758 RiseDt: 0.022222223 DecayDt: 0.02 }
+ GabaB: { Gbar: 0.015 RiseTau: 45 DecayTau: 50 Gbase: 0.2 GiSpike: 10 MaxTime: 47.41225 TauFact: 2.5811756 RiseDt: 0.022222223 DecayDt: 0.02 }
VGCC: { Gbar: 0.02 Ca: 25 }
AK: { Gbar: 0.1 Hf: 0.076 Mf: 0.075 Voff: 2 Vmax: -37 }
SKCa: { Gbar: 0 C50: 0.5 ActTau: 15 DeTau: 30 KCaR: 0.8 CaRDecayTau: 150 CaInThr: 0.01 CaInTau: 50 }
@@ -1752,7 +1752,7 @@ Learn: {
TrgAvgAct: { On: true GiBaseInit: 0 ErrLRate: 0.02 SynScaleRate: 0.005 SubMean: 1 Permute: true Pool: true TrgRange: { Min: 0.5 Max: 2
} }
RLRate: { On: false SigmoidMin: 0.05 Diff: true SpkThr: 0.1 DiffThr: 0.02 Min: 0.001 }
- NeuroMod: { DAMod: 2 Valence: 0 DAModGain: 0.5 DALRateSign: true DALRateMod: 1 AChLRateMod: 1 AChDisInhib: 5 BurstGain: 1 DipGain: 1 }
+ NeuroMod: { DAMod: D2Mod Valence: Positive DAModGain: 0.5 DALRateSign: true DALRateMod: 1 AChLRateMod: 1 AChDisInhib: 5 BurstGain: 1 DipGain: 1 }
}
Matrix: {
GateThr: 0.05 IsVS: true OtherMatrixIdx: 20 ThalLay1Idx: 35 ThalLay2Idx: 41 ThalLay3Idx: 46 ThalLay4Idx: 51 ThalLay5Idx: -1 ThalLay6Idx: -1
@@ -1813,7 +1813,7 @@ SWt: {
Learn: {
Learn: true
LRate: { Base: 0.02 Sched: 1 Mod: 1 Eff: 0.02 }
- Trace: { Tau: 1 SubMean: 0 LearnThr: 0.75 }
+ Trace: { Tau: 1 SubMean: 0 LearnThr: 0 }
KinaseCa: { SpikeG: 12 MaxISI: 100 Dt: { MTau: 5 PTau: 39 DTau: 41 ExpAdj: true
} }
}
@@ -1836,7 +1836,7 @@ SWt: {
Learn: {
Learn: true
LRate: { Base: 0.02 Sched: 1 Mod: 1 Eff: 0.02 }
- Trace: { Tau: 1 SubMean: 0 LearnThr: 0.75 }
+ Trace: { Tau: 1 SubMean: 0 LearnThr: 0 }
KinaseCa: { SpikeG: 12 MaxISI: 100 Dt: { MTau: 5 PTau: 39 DTau: 41 ExpAdj: true
} }
}
@@ -1859,7 +1859,7 @@ SWt: {
Learn: {
Learn: true
LRate: { Base: 0.02 Sched: 1 Mod: 1 Eff: 0.02 }
- Trace: { Tau: 1 SubMean: 0 LearnThr: 0.75 }
+ Trace: { Tau: 1 SubMean: 0 LearnThr: 0 }
KinaseCa: { SpikeG: 12 MaxISI: 100 Dt: { MTau: 5 PTau: 39 DTau: 41 ExpAdj: true
} }
}
@@ -1882,7 +1882,7 @@ SWt: {
Learn: {
Learn: false
LRate: { Base: 0.02 Sched: 1 Mod: 1 Eff: 0.02 }
- Trace: { Tau: 1 SubMean: 0 LearnThr: 0.75 }
+ Trace: { Tau: 1 SubMean: 0 LearnThr: 0 }
KinaseCa: { SpikeG: 12 MaxISI: 100 Dt: { MTau: 5 PTau: 39 DTau: 41 ExpAdj: true
} }
}
@@ -1905,7 +1905,7 @@ SWt: {
Learn: {
Learn: true
LRate: { Base: 0.02 Sched: 1 Mod: 1 Eff: 0.02 }
- Trace: { Tau: 1 SubMean: 0 LearnThr: 0.75 }
+ Trace: { Tau: 1 SubMean: 0 LearnThr: 0 }
KinaseCa: { SpikeG: 12 MaxISI: 100 Dt: { MTau: 5 PTau: 39 DTau: 41 ExpAdj: true
} }
}
@@ -1928,7 +1928,7 @@ SWt: {
Learn: {
Learn: true
LRate: { Base: 0.02 Sched: 1 Mod: 1 Eff: 0.02 }
- Trace: { Tau: 1 SubMean: 0 LearnThr: 0.75 }
+ Trace: { Tau: 1 SubMean: 0 LearnThr: 0 }
KinaseCa: { SpikeG: 12 MaxISI: 100 Dt: { MTau: 5 PTau: 39 DTau: 41 ExpAdj: true
} }
}
@@ -1951,7 +1951,7 @@ SWt: {
Learn: {
Learn: true
LRate: { Base: 0.02 Sched: 1 Mod: 1 Eff: 0.02 }
- Trace: { Tau: 1 SubMean: 0 LearnThr: 0.75 }
+ Trace: { Tau: 1 SubMean: 0 LearnThr: 0 }
KinaseCa: { SpikeG: 12 MaxISI: 100 Dt: { MTau: 5 PTau: 39 DTau: 41 ExpAdj: true
} }
}
@@ -1974,7 +1974,7 @@ SWt: {
Learn: {
Learn: true
LRate: { Base: 0.02 Sched: 1 Mod: 1 Eff: 0.02 }
- Trace: { Tau: 1 SubMean: 0 LearnThr: 0.75 }
+ Trace: { Tau: 1 SubMean: 0 LearnThr: 0 }
KinaseCa: { SpikeG: 12 MaxISI: 100 Dt: { MTau: 5 PTau: 39 DTau: 41 ExpAdj: true
} }
}
@@ -1994,13 +1994,13 @@ Act: {
Clamp: { IsInput: false IsTarget: false Ge: 0.8 Add: false ErrThr: 0.5 }
Noise: { On: false GeHz: 100 Ge: 0.001 GiHz: 200 Gi: 0.001 }
VmRange: { Min: 0.1 Max: 1 }
- Mahp: { Gbar: 0.02 Voff: -30 Vslope: 9 TauMax: 1000 Tadj: 3.2093637 DtMax: 0.001 }
+ Mahp: { Gbar: 0.02 Voff: -30 Vslope: 9 TauMax: 1000 Tadj: 3.209364 DtMax: 0.001 }
Sahp: { Gbar: 0.05 CaTau: 5 Off: 0.8 Slope: 0.02 TauMax: 1 CaDt: 0.2 DtMax: 1 }
KNa: { On: true TrialSlow: false Med: { On: true Rise: 0.02 Max: 0.2 Tau: 200 Dt: 0.005 } Slow: { On: true Rise: 0.001 Max: 0.2 Tau: 1000 Dt: 0.001
} }
NMDA: { Gbar: 0.006 Tau: 100 ITau: 1 MgC: 1.4 Voff: 0 }
MaintNMDA: { Gbar: 0.007 Tau: 200 ITau: 1 MgC: 1.4 Voff: 0 }
- GabaB: { Gbar: 0.015 RiseTau: 45 DecayTau: 50 Gbase: 0.2 GiSpike: 10 MaxTime: 47.41225 TauFact: 2.5811758 RiseDt: 0.022222223 DecayDt: 0.02 }
+ GabaB: { Gbar: 0.015 RiseTau: 45 DecayTau: 50 Gbase: 0.2 GiSpike: 10 MaxTime: 47.41225 TauFact: 2.5811756 RiseDt: 0.022222223 DecayDt: 0.02 }
VGCC: { Gbar: 0.02 Ca: 25 }
AK: { Gbar: 0.1 Hf: 0.076 Mf: 0.075 Voff: 2 Vmax: -37 }
SKCa: { Gbar: 0 C50: 0.5 ActTau: 15 DeTau: 30 KCaR: 0.8 CaRDecayTau: 150 CaInThr: 0.01 CaInTau: 50 }
@@ -2020,7 +2020,7 @@ Learn: {
TrgAvgAct: { On: true GiBaseInit: 0 ErrLRate: 0.02 SynScaleRate: 0.005 SubMean: 1 Permute: true Pool: true TrgRange: { Min: 0.5 Max: 2
} }
RLRate: { On: true SigmoidMin: 0.05 Diff: true SpkThr: 0.1 DiffThr: 0.02 Min: 0.001 }
- NeuroMod: { DAMod: 0 Valence: 0 DAModGain: 0.5 DALRateSign: false DALRateMod: 0 AChLRateMod: 0 AChDisInhib: 0 BurstGain: 1 DipGain: 1 }
+ NeuroMod: { DAMod: NoDAMod Valence: Positive DAModGain: 0.5 DALRateSign: false DALRateMod: 0 AChLRateMod: 0 AChDisInhib: 0 BurstGain: 1 DipGain: 1 }
}
/////////////////////////////////////////////////
Layer: VsPatch
@@ -2035,13 +2035,13 @@ Act: {
Clamp: { IsInput: false IsTarget: false Ge: 0.8 Add: false ErrThr: 0.5 }
Noise: { On: false GeHz: 100 Ge: 0.001 GiHz: 200 Gi: 0.001 }
VmRange: { Min: 0.1 Max: 1 }
- Mahp: { Gbar: 0.02 Voff: -30 Vslope: 9 TauMax: 1000 Tadj: 3.2093637 DtMax: 0.001 }
+ Mahp: { Gbar: 0.02 Voff: -30 Vslope: 9 TauMax: 1000 Tadj: 3.209364 DtMax: 0.001 }
Sahp: { Gbar: 0.05 CaTau: 5 Off: 0.8 Slope: 0.02 TauMax: 1 CaDt: 0.2 DtMax: 1 }
KNa: { On: true TrialSlow: false Med: { On: true Rise: 0.02 Max: 0.2 Tau: 200 Dt: 0.005 } Slow: { On: true Rise: 0.001 Max: 0.2 Tau: 1000 Dt: 0.001
} }
NMDA: { Gbar: 0.006 Tau: 100 ITau: 1 MgC: 1.4 Voff: 0 }
MaintNMDA: { Gbar: 0.007 Tau: 200 ITau: 1 MgC: 1.4 Voff: 0 }
- GabaB: { Gbar: 0.015 RiseTau: 45 DecayTau: 50 Gbase: 0.2 GiSpike: 10 MaxTime: 47.41225 TauFact: 2.5811758 RiseDt: 0.022222223 DecayDt: 0.02 }
+ GabaB: { Gbar: 0.015 RiseTau: 45 DecayTau: 50 Gbase: 0.2 GiSpike: 10 MaxTime: 47.41225 TauFact: 2.5811756 RiseDt: 0.022222223 DecayDt: 0.02 }
VGCC: { Gbar: 0.02 Ca: 25 }
AK: { Gbar: 0.1 Hf: 0.076 Mf: 0.075 Voff: 2 Vmax: -37 }
SKCa: { Gbar: 0 C50: 0.5 ActTau: 15 DeTau: 30 KCaR: 0.8 CaRDecayTau: 150 CaInThr: 0.01 CaInTau: 50 }
@@ -2061,10 +2061,10 @@ Learn: {
TrgAvgAct: { On: false GiBaseInit: 0.5 ErrLRate: 0.02 SynScaleRate: 0.005 SubMean: 1 Permute: true Pool: true TrgRange: { Min: 0.5 Max: 2
} }
RLRate: { On: true SigmoidMin: 0.01 Diff: false SpkThr: 0.1 DiffThr: 0.02 Min: 0.001 }
- NeuroMod: { DAMod: 1 Valence: 0 DAModGain: 0.5 DALRateSign: true DALRateMod: 0 AChLRateMod: 0.8 AChDisInhib: 0 BurstGain: 1 DipGain: 1 }
+ NeuroMod: { DAMod: D1Mod Valence: Positive DAModGain: 0.5 DALRateSign: true DALRateMod: 0 AChLRateMod: 0.8 AChDisInhib: 0 BurstGain: 1 DipGain: 1 }
}
VSPatch: {
- Gain: 5 ThrInit: 0.2 ThrLRate: 0.002 ThrNonRew: 10
+ Gain: 5 ThrInit: 0.2 ThrLRate: 0.0001 ThrNonRew: 10
}
///////////////////////////////////////////////////
Prjn: ACCnegValPTpToVsPatch
@@ -2199,13 +2199,13 @@ Act: {
Clamp: { IsInput: false IsTarget: false Ge: 0.8 Add: false ErrThr: 0.5 }
Noise: { On: false GeHz: 100 Ge: 0.001 GiHz: 200 Gi: 0.001 }
VmRange: { Min: 0.1 Max: 1 }
- Mahp: { Gbar: 0.02 Voff: -30 Vslope: 9 TauMax: 1000 Tadj: 3.2093637 DtMax: 0.001 }
+ Mahp: { Gbar: 0.02 Voff: -30 Vslope: 9 TauMax: 1000 Tadj: 3.209364 DtMax: 0.001 }
Sahp: { Gbar: 0.05 CaTau: 5 Off: 0.8 Slope: 0.02 TauMax: 1 CaDt: 0.2 DtMax: 1 }
KNa: { On: true TrialSlow: true Med: { On: true Rise: 0.02 Max: 0.2 Tau: 200 Dt: 0.005 } Slow: { On: true Rise: 0.001 Max: 0.05 Tau: 1000 Dt: 0.001
} }
NMDA: { Gbar: 0.006 Tau: 100 ITau: 1 MgC: 1.4 Voff: 0 }
MaintNMDA: { Gbar: 0.007 Tau: 200 ITau: 1 MgC: 1.4 Voff: 0 }
- GabaB: { Gbar: 0.015 RiseTau: 45 DecayTau: 50 Gbase: 0.2 GiSpike: 10 MaxTime: 47.41225 TauFact: 2.5811758 RiseDt: 0.022222223 DecayDt: 0.02 }
+ GabaB: { Gbar: 0.015 RiseTau: 45 DecayTau: 50 Gbase: 0.2 GiSpike: 10 MaxTime: 47.41225 TauFact: 2.5811756 RiseDt: 0.022222223 DecayDt: 0.02 }
VGCC: { Gbar: 0.02 Ca: 25 }
AK: { Gbar: 0.1 Hf: 0.076 Mf: 0.075 Voff: 2 Vmax: -37 }
SKCa: { Gbar: 0 C50: 0.5 ActTau: 15 DeTau: 30 KCaR: 0.8 CaRDecayTau: 150 CaInThr: 0.01 CaInTau: 50 }
@@ -2225,7 +2225,7 @@ Learn: {
TrgAvgAct: { On: true GiBaseInit: 0 ErrLRate: 0.02 SynScaleRate: 0.005 SubMean: 1 Permute: true Pool: true TrgRange: { Min: 0.5 Max: 2
} }
RLRate: { On: true SigmoidMin: 0.05 Diff: true SpkThr: 0.1 DiffThr: 0.02 Min: 0.001 }
- NeuroMod: { DAMod: 0 Valence: 0 DAModGain: 0.5 DALRateSign: false DALRateMod: 0 AChLRateMod: 0 AChDisInhib: 0 BurstGain: 1 DipGain: 1 }
+ NeuroMod: { DAMod: NoDAMod Valence: Positive DAModGain: 0.5 DALRateSign: false DALRateMod: 0 AChLRateMod: 0 AChDisInhib: 0 BurstGain: 1 DipGain: 1 }
}
Burst: {
ThrRel: 0.1 ThrAbs: 0.1
@@ -2263,13 +2263,13 @@ Act: {
Clamp: { IsInput: false IsTarget: false Ge: 0.8 Add: false ErrThr: 0.5 }
Noise: { On: false GeHz: 100 Ge: 0.001 GiHz: 200 Gi: 0.001 }
VmRange: { Min: 0.1 Max: 1 }
- Mahp: { Gbar: 0.02 Voff: -30 Vslope: 9 TauMax: 1000 Tadj: 3.2093637 DtMax: 0.001 }
+ Mahp: { Gbar: 0.02 Voff: -30 Vslope: 9 TauMax: 1000 Tadj: 3.209364 DtMax: 0.001 }
Sahp: { Gbar: 0.05 CaTau: 5 Off: 0.8 Slope: 0.02 TauMax: 1 CaDt: 0.2 DtMax: 1 }
KNa: { On: true TrialSlow: false Med: { On: true Rise: 0.02 Max: 0.2 Tau: 200 Dt: 0.005 } Slow: { On: true Rise: 0.001 Max: 0.2 Tau: 1000 Dt: 0.001
} }
NMDA: { Gbar: 0.006 Tau: 100 ITau: 1 MgC: 1.4 Voff: 0 }
MaintNMDA: { Gbar: 0.007 Tau: 200 ITau: 1 MgC: 1.4 Voff: 0 }
- GabaB: { Gbar: 0.015 RiseTau: 45 DecayTau: 50 Gbase: 0.2 GiSpike: 10 MaxTime: 47.41225 TauFact: 2.5811758 RiseDt: 0.022222223 DecayDt: 0.02 }
+ GabaB: { Gbar: 0.015 RiseTau: 45 DecayTau: 50 Gbase: 0.2 GiSpike: 10 MaxTime: 47.41225 TauFact: 2.5811756 RiseDt: 0.022222223 DecayDt: 0.02 }
VGCC: { Gbar: 0.02 Ca: 25 }
AK: { Gbar: 0.1 Hf: 0.076 Mf: 0.075 Voff: 2 Vmax: -37 }
SKCa: { Gbar: 0 C50: 0.5 ActTau: 15 DeTau: 30 KCaR: 0.8 CaRDecayTau: 150 CaInThr: 0.01 CaInTau: 50 }
@@ -2289,7 +2289,7 @@ Learn: {
TrgAvgAct: { On: false GiBaseInit: 0 ErrLRate: 0.02 SynScaleRate: 0.005 SubMean: 1 Permute: true Pool: true TrgRange: { Min: 0.5 Max: 2
} }
RLRate: { On: true SigmoidMin: 1 Diff: true SpkThr: 0.1 DiffThr: 0.01 Min: 0.001 }
- NeuroMod: { DAMod: 1 Valence: 0 DAModGain: 0.5 DALRateSign: false DALRateMod: 0.5 AChLRateMod: 1 AChDisInhib: 0 BurstGain: 0.2 DipGain: 0 }
+ NeuroMod: { DAMod: D1Mod Valence: Positive DAModGain: 0.5 DALRateSign: false DALRateMod: 0.5 AChLRateMod: 1 AChDisInhib: 0 BurstGain: 0.2 DipGain: 0 }
}
CT: {
GeGain: 0.1 DecayTau: 0
@@ -2393,13 +2393,13 @@ Act: {
Clamp: { IsInput: false IsTarget: false Ge: 0.8 Add: false ErrThr: 0.5 }
Noise: { On: false GeHz: 100 Ge: 0.001 GiHz: 200 Gi: 0.001 }
VmRange: { Min: 0.1 Max: 1 }
- Mahp: { Gbar: 0.02 Voff: -30 Vslope: 9 TauMax: 1000 Tadj: 3.2093637 DtMax: 0.001 }
+ Mahp: { Gbar: 0.02 Voff: -30 Vslope: 9 TauMax: 1000 Tadj: 3.209364 DtMax: 0.001 }
Sahp: { Gbar: 0.05 CaTau: 5 Off: 0.8 Slope: 0.02 TauMax: 1 CaDt: 0.2 DtMax: 1 }
KNa: { On: true TrialSlow: false Med: { On: true Rise: 0.02 Max: 0.2 Tau: 200 Dt: 0.005 } Slow: { On: true Rise: 0.001 Max: 0.2 Tau: 1000 Dt: 0.001
} }
NMDA: { Gbar: 0.006 Tau: 100 ITau: 1 MgC: 1.4 Voff: 0 }
MaintNMDA: { Gbar: 0.007 Tau: 200 ITau: 1 MgC: 1.4 Voff: 0 }
- GabaB: { Gbar: 0.015 RiseTau: 45 DecayTau: 50 Gbase: 0.2 GiSpike: 10 MaxTime: 47.41225 TauFact: 2.5811758 RiseDt: 0.022222223 DecayDt: 0.02 }
+ GabaB: { Gbar: 0.015 RiseTau: 45 DecayTau: 50 Gbase: 0.2 GiSpike: 10 MaxTime: 47.41225 TauFact: 2.5811756 RiseDt: 0.022222223 DecayDt: 0.02 }
VGCC: { Gbar: 0.02 Ca: 25 }
AK: { Gbar: 0.1 Hf: 0.076 Mf: 0.075 Voff: 2 Vmax: -37 }
SKCa: { Gbar: 0 C50: 0.5 ActTau: 15 DeTau: 30 KCaR: 0.8 CaRDecayTau: 150 CaInThr: 0.01 CaInTau: 50 }
@@ -2419,7 +2419,7 @@ Learn: {
TrgAvgAct: { On: false GiBaseInit: 0 ErrLRate: 0.02 SynScaleRate: 0.005 SubMean: 1 Permute: true Pool: true TrgRange: { Min: 0.5 Max: 2
} }
RLRate: { On: true SigmoidMin: 1 Diff: true SpkThr: 0.1 DiffThr: 0.01 Min: 0.001 }
- NeuroMod: { DAMod: 2 Valence: 0 DAModGain: 0.5 DALRateSign: false DALRateMod: 0 AChLRateMod: 1 AChDisInhib: 0 BurstGain: 1 DipGain: 1 }
+ NeuroMod: { DAMod: D2Mod Valence: Positive DAModGain: 0.5 DALRateSign: false DALRateMod: 0 AChLRateMod: 1 AChDisInhib: 0 BurstGain: 1 DipGain: 1 }
}
CT: {
GeGain: 0.1 DecayTau: 0
@@ -2523,13 +2523,13 @@ Act: {
Clamp: { IsInput: false IsTarget: false Ge: 0.8 Add: false ErrThr: 0.5 }
Noise: { On: false GeHz: 100 Ge: 0.001 GiHz: 200 Gi: 0.001 }
VmRange: { Min: 0.1 Max: 1 }
- Mahp: { Gbar: 0.02 Voff: -30 Vslope: 9 TauMax: 1000 Tadj: 3.2093637 DtMax: 0.001 }
+ Mahp: { Gbar: 0.02 Voff: -30 Vslope: 9 TauMax: 1000 Tadj: 3.209364 DtMax: 0.001 }
Sahp: { Gbar: 0.05 CaTau: 5 Off: 0.8 Slope: 0.02 TauMax: 1 CaDt: 0.2 DtMax: 1 }
KNa: { On: true TrialSlow: false Med: { On: true Rise: 0.02 Max: 0.2 Tau: 200 Dt: 0.005 } Slow: { On: true Rise: 0.001 Max: 0.2 Tau: 1000 Dt: 0.001
} }
NMDA: { Gbar: 0.006 Tau: 100 ITau: 1 MgC: 1.4 Voff: 0 }
MaintNMDA: { Gbar: 0.007 Tau: 200 ITau: 1 MgC: 1.4 Voff: 0 }
- GabaB: { Gbar: 0.015 RiseTau: 45 DecayTau: 50 Gbase: 0.2 GiSpike: 10 MaxTime: 47.41225 TauFact: 2.5811758 RiseDt: 0.022222223 DecayDt: 0.02 }
+ GabaB: { Gbar: 0.015 RiseTau: 45 DecayTau: 50 Gbase: 0.2 GiSpike: 10 MaxTime: 47.41225 TauFact: 2.5811756 RiseDt: 0.022222223 DecayDt: 0.02 }
VGCC: { Gbar: 0.02 Ca: 25 }
AK: { Gbar: 0.1 Hf: 0.076 Mf: 0.075 Voff: 2 Vmax: -37 }
SKCa: { Gbar: 0 C50: 0.5 ActTau: 15 DeTau: 30 KCaR: 0.8 CaRDecayTau: 150 CaInThr: 0.01 CaInTau: 50 }
@@ -2549,7 +2549,7 @@ Learn: {
TrgAvgAct: { On: false GiBaseInit: 0 ErrLRate: 0.02 SynScaleRate: 0.005 SubMean: 1 Permute: true Pool: true TrgRange: { Min: 0.5 Max: 2
} }
RLRate: { On: true SigmoidMin: 1 Diff: true SpkThr: 0.1 DiffThr: 0.01 Min: 0.001 }
- NeuroMod: { DAMod: 1 Valence: 1 DAModGain: 0.5 DALRateSign: false DALRateMod: 0 AChLRateMod: 1 AChDisInhib: 0 BurstGain: 1 DipGain: 1 }
+ NeuroMod: { DAMod: D1Mod Valence: Negative DAModGain: 0.5 DALRateSign: false DALRateMod: 0 AChLRateMod: 1 AChDisInhib: 0 BurstGain: 1 DipGain: 1 }
}
CT: {
GeGain: 0.1 DecayTau: 0
@@ -2630,13 +2630,13 @@ Act: {
Clamp: { IsInput: false IsTarget: false Ge: 0.8 Add: false ErrThr: 0.5 }
Noise: { On: false GeHz: 100 Ge: 0.001 GiHz: 200 Gi: 0.001 }
VmRange: { Min: 0.1 Max: 1 }
- Mahp: { Gbar: 0.02 Voff: -30 Vslope: 9 TauMax: 1000 Tadj: 3.2093637 DtMax: 0.001 }
+ Mahp: { Gbar: 0.02 Voff: -30 Vslope: 9 TauMax: 1000 Tadj: 3.209364 DtMax: 0.001 }
Sahp: { Gbar: 0.05 CaTau: 5 Off: 0.8 Slope: 0.02 TauMax: 1 CaDt: 0.2 DtMax: 1 }
KNa: { On: true TrialSlow: false Med: { On: true Rise: 0.02 Max: 0.2 Tau: 200 Dt: 0.005 } Slow: { On: true Rise: 0.001 Max: 0.2 Tau: 1000 Dt: 0.001
} }
NMDA: { Gbar: 0.006 Tau: 100 ITau: 1 MgC: 1.4 Voff: 0 }
MaintNMDA: { Gbar: 0.007 Tau: 200 ITau: 1 MgC: 1.4 Voff: 0 }
- GabaB: { Gbar: 0.015 RiseTau: 45 DecayTau: 50 Gbase: 0.2 GiSpike: 10 MaxTime: 47.41225 TauFact: 2.5811758 RiseDt: 0.022222223 DecayDt: 0.02 }
+ GabaB: { Gbar: 0.015 RiseTau: 45 DecayTau: 50 Gbase: 0.2 GiSpike: 10 MaxTime: 47.41225 TauFact: 2.5811756 RiseDt: 0.022222223 DecayDt: 0.02 }
VGCC: { Gbar: 0.02 Ca: 25 }
AK: { Gbar: 0.1 Hf: 0.076 Mf: 0.075 Voff: 2 Vmax: -37 }
SKCa: { Gbar: 0 C50: 0.5 ActTau: 15 DeTau: 30 KCaR: 0.8 CaRDecayTau: 150 CaInThr: 0.01 CaInTau: 50 }
@@ -2656,7 +2656,7 @@ Learn: {
TrgAvgAct: { On: false GiBaseInit: 0 ErrLRate: 0.02 SynScaleRate: 0.005 SubMean: 1 Permute: true Pool: true TrgRange: { Min: 0.5 Max: 2
} }
RLRate: { On: true SigmoidMin: 1 Diff: true SpkThr: 0.1 DiffThr: 0.01 Min: 0.001 }
- NeuroMod: { DAMod: 2 Valence: 1 DAModGain: 0.5 DALRateSign: false DALRateMod: 0.5 AChLRateMod: 1 AChDisInhib: 0 BurstGain: 0.2 DipGain: 0 }
+ NeuroMod: { DAMod: D2Mod Valence: Negative DAModGain: 0.5 DALRateSign: false DALRateMod: 0.5 AChLRateMod: 1 AChDisInhib: 0 BurstGain: 0.2 DipGain: 0 }
}
CT: {
GeGain: 0.1 DecayTau: 0
@@ -2740,13 +2740,13 @@ Act: {
Clamp: { IsInput: false IsTarget: false Ge: 0.8 Add: false ErrThr: 0.5 }
Noise: { On: false GeHz: 100 Ge: 0.001 GiHz: 200 Gi: 0.001 }
VmRange: { Min: 0.1 Max: 1 }
- Mahp: { Gbar: 0.02 Voff: -30 Vslope: 9 TauMax: 1000 Tadj: 3.2093637 DtMax: 0.001 }
+ Mahp: { Gbar: 0.02 Voff: -30 Vslope: 9 TauMax: 1000 Tadj: 3.209364 DtMax: 0.001 }
Sahp: { Gbar: 0.05 CaTau: 5 Off: 0.8 Slope: 0.02 TauMax: 1 CaDt: 0.2 DtMax: 1 }
KNa: { On: true TrialSlow: false Med: { On: true Rise: 0.02 Max: 0.2 Tau: 200 Dt: 0.005 } Slow: { On: true Rise: 0.001 Max: 0.2 Tau: 1000 Dt: 0.001
} }
NMDA: { Gbar: 0.006 Tau: 100 ITau: 1 MgC: 1.4 Voff: 0 }
MaintNMDA: { Gbar: 0.007 Tau: 200 ITau: 1 MgC: 1.4 Voff: 0 }
- GabaB: { Gbar: 0.015 RiseTau: 45 DecayTau: 50 Gbase: 0.2 GiSpike: 10 MaxTime: 47.41225 TauFact: 2.5811758 RiseDt: 0.022222223 DecayDt: 0.02 }
+ GabaB: { Gbar: 0.015 RiseTau: 45 DecayTau: 50 Gbase: 0.2 GiSpike: 10 MaxTime: 47.41225 TauFact: 2.5811756 RiseDt: 0.022222223 DecayDt: 0.02 }
VGCC: { Gbar: 0.02 Ca: 25 }
AK: { Gbar: 0.1 Hf: 0.076 Mf: 0.075 Voff: 2 Vmax: -37 }
SKCa: { Gbar: 0 C50: 0.5 ActTau: 15 DeTau: 30 KCaR: 0.8 CaRDecayTau: 150 CaInThr: 0.01 CaInTau: 50 }
@@ -2766,7 +2766,7 @@ Learn: {
TrgAvgAct: { On: false GiBaseInit: 0 ErrLRate: 0.02 SynScaleRate: 0.005 SubMean: 1 Permute: true Pool: true TrgRange: { Min: 0.5 Max: 2
} }
RLRate: { On: true SigmoidMin: 1 Diff: true SpkThr: 0.1 DiffThr: 0.02 Min: 0.001 }
- NeuroMod: { DAMod: 1 Valence: 0 DAModGain: 0.5 DALRateSign: false DALRateMod: 0 AChLRateMod: 0 AChDisInhib: 0 BurstGain: 1 DipGain: 1 }
+ NeuroMod: { DAMod: D1Mod Valence: Positive DAModGain: 0.5 DALRateSign: false DALRateMod: 0 AChLRateMod: 0 AChDisInhib: 0 BurstGain: 1 DipGain: 1 }
}
///////////////////////////////////////////////////
Prjn: BLAPosAcqD1ToCeMPos
@@ -2821,13 +2821,13 @@ Act: {
Clamp: { IsInput: false IsTarget: false Ge: 0.8 Add: false ErrThr: 0.5 }
Noise: { On: false GeHz: 100 Ge: 0.001 GiHz: 200 Gi: 0.001 }
VmRange: { Min: 0.1 Max: 1 }
- Mahp: { Gbar: 0.02 Voff: -30 Vslope: 9 TauMax: 1000 Tadj: 3.2093637 DtMax: 0.001 }
+ Mahp: { Gbar: 0.02 Voff: -30 Vslope: 9 TauMax: 1000 Tadj: 3.209364 DtMax: 0.001 }
Sahp: { Gbar: 0.05 CaTau: 5 Off: 0.8 Slope: 0.02 TauMax: 1 CaDt: 0.2 DtMax: 1 }
KNa: { On: true TrialSlow: false Med: { On: true Rise: 0.02 Max: 0.2 Tau: 200 Dt: 0.005 } Slow: { On: true Rise: 0.001 Max: 0.2 Tau: 1000 Dt: 0.001
} }
NMDA: { Gbar: 0.006 Tau: 100 ITau: 1 MgC: 1.4 Voff: 0 }
MaintNMDA: { Gbar: 0.007 Tau: 200 ITau: 1 MgC: 1.4 Voff: 0 }
- GabaB: { Gbar: 0.015 RiseTau: 45 DecayTau: 50 Gbase: 0.2 GiSpike: 10 MaxTime: 47.41225 TauFact: 2.5811758 RiseDt: 0.022222223 DecayDt: 0.02 }
+ GabaB: { Gbar: 0.015 RiseTau: 45 DecayTau: 50 Gbase: 0.2 GiSpike: 10 MaxTime: 47.41225 TauFact: 2.5811756 RiseDt: 0.022222223 DecayDt: 0.02 }
VGCC: { Gbar: 0.02 Ca: 25 }
AK: { Gbar: 0.1 Hf: 0.076 Mf: 0.075 Voff: 2 Vmax: -37 }
SKCa: { Gbar: 0 C50: 0.5 ActTau: 15 DeTau: 30 KCaR: 0.8 CaRDecayTau: 150 CaInThr: 0.01 CaInTau: 50 }
@@ -2847,7 +2847,7 @@ Learn: {
TrgAvgAct: { On: false GiBaseInit: 0 ErrLRate: 0.02 SynScaleRate: 0.005 SubMean: 1 Permute: true Pool: true TrgRange: { Min: 0.5 Max: 2
} }
RLRate: { On: true SigmoidMin: 1 Diff: true SpkThr: 0.1 DiffThr: 0.02 Min: 0.001 }
- NeuroMod: { DAMod: 2 Valence: 1 DAModGain: 0.5 DALRateSign: false DALRateMod: 0 AChLRateMod: 0 AChDisInhib: 0 BurstGain: 1 DipGain: 1 }
+ NeuroMod: { DAMod: D2Mod Valence: Negative DAModGain: 0.5 DALRateSign: false DALRateMod: 0 AChLRateMod: 0 AChDisInhib: 0 BurstGain: 1 DipGain: 1 }
}
///////////////////////////////////////////////////
Prjn: BLANegAcqD2ToCeMNeg
@@ -2902,13 +2902,13 @@ Act: {
Clamp: { IsInput: false IsTarget: false Ge: 0.8 Add: false ErrThr: 0.5 }
Noise: { On: false GeHz: 100 Ge: 0.001 GiHz: 200 Gi: 0.001 }
VmRange: { Min: 0.1 Max: 1 }
- Mahp: { Gbar: 0.02 Voff: -30 Vslope: 9 TauMax: 1000 Tadj: 3.2093637 DtMax: 0.001 }
+ Mahp: { Gbar: 0.02 Voff: -30 Vslope: 9 TauMax: 1000 Tadj: 3.209364 DtMax: 0.001 }
Sahp: { Gbar: 0.05 CaTau: 5 Off: 0.8 Slope: 0.02 TauMax: 1 CaDt: 0.2 DtMax: 1 }
KNa: { On: true TrialSlow: false Med: { On: true Rise: 0.02 Max: 0.2 Tau: 200 Dt: 0.005 } Slow: { On: true Rise: 0.001 Max: 0.2 Tau: 1000 Dt: 0.001
} }
NMDA: { Gbar: 0.006 Tau: 100 ITau: 1 MgC: 1.4 Voff: 0 }
MaintNMDA: { Gbar: 0.007 Tau: 200 ITau: 1 MgC: 1.4 Voff: 0 }
- GabaB: { Gbar: 0.015 RiseTau: 45 DecayTau: 50 Gbase: 0.2 GiSpike: 10 MaxTime: 47.41225 TauFact: 2.5811758 RiseDt: 0.022222223 DecayDt: 0.02 }
+ GabaB: { Gbar: 0.015 RiseTau: 45 DecayTau: 50 Gbase: 0.2 GiSpike: 10 MaxTime: 47.41225 TauFact: 2.5811756 RiseDt: 0.022222223 DecayDt: 0.02 }
VGCC: { Gbar: 0.02 Ca: 25 }
AK: { Gbar: 0.1 Hf: 0.076 Mf: 0.075 Voff: 2 Vmax: -37 }
SKCa: { Gbar: 0 C50: 0.5 ActTau: 15 DeTau: 30 KCaR: 0.8 CaRDecayTau: 150 CaInThr: 0.01 CaInTau: 50 }
@@ -2928,7 +2928,7 @@ Learn: {
TrgAvgAct: { On: false GiBaseInit: 0 ErrLRate: 0.02 SynScaleRate: 0.005 SubMean: 1 Permute: true Pool: true TrgRange: { Min: 0.5 Max: 2
} }
RLRate: { On: true SigmoidMin: 1 Diff: true SpkThr: 0.1 DiffThr: 0.01 Min: 0.001 }
- NeuroMod: { DAMod: 1 Valence: 0 DAModGain: 0.5 DALRateSign: false DALRateMod: 0.5 AChLRateMod: 1 AChDisInhib: 0 BurstGain: 0.2 DipGain: 0 }
+ NeuroMod: { DAMod: D1Mod Valence: Positive DAModGain: 0.5 DALRateSign: false DALRateMod: 0.5 AChLRateMod: 1 AChDisInhib: 0 BurstGain: 0.2 DipGain: 0 }
}
CT: {
GeGain: 0.1 DecayTau: 0
@@ -2969,13 +2969,13 @@ Act: {
Clamp: { IsInput: false IsTarget: false Ge: 0.8 Add: false ErrThr: 0.5 }
Noise: { On: false GeHz: 100 Ge: 0.001 GiHz: 200 Gi: 0.001 }
VmRange: { Min: 0.1 Max: 1 }
- Mahp: { Gbar: 0.02 Voff: -30 Vslope: 9 TauMax: 1000 Tadj: 3.2093637 DtMax: 0.001 }
+ Mahp: { Gbar: 0.02 Voff: -30 Vslope: 9 TauMax: 1000 Tadj: 3.209364 DtMax: 0.001 }
Sahp: { Gbar: 0.05 CaTau: 5 Off: 0.8 Slope: 0.02 TauMax: 1 CaDt: 0.2 DtMax: 1 }
KNa: { On: true TrialSlow: false Med: { On: true Rise: 0.02 Max: 0.2 Tau: 200 Dt: 0.005 } Slow: { On: true Rise: 0.001 Max: 0.2 Tau: 1000 Dt: 0.001
} }
NMDA: { Gbar: 0.006 Tau: 100 ITau: 1 MgC: 1.4 Voff: 0 }
MaintNMDA: { Gbar: 0.007 Tau: 200 ITau: 1 MgC: 1.4 Voff: 0 }
- GabaB: { Gbar: 0.015 RiseTau: 45 DecayTau: 50 Gbase: 0.2 GiSpike: 10 MaxTime: 47.41225 TauFact: 2.5811758 RiseDt: 0.022222223 DecayDt: 0.02 }
+ GabaB: { Gbar: 0.015 RiseTau: 45 DecayTau: 50 Gbase: 0.2 GiSpike: 10 MaxTime: 47.41225 TauFact: 2.5811756 RiseDt: 0.022222223 DecayDt: 0.02 }
VGCC: { Gbar: 0.02 Ca: 25 }
AK: { Gbar: 0.1 Hf: 0.076 Mf: 0.075 Voff: 2 Vmax: -37 }
SKCa: { Gbar: 0 C50: 0.5 ActTau: 15 DeTau: 30 KCaR: 0.8 CaRDecayTau: 150 CaInThr: 0.01 CaInTau: 50 }
@@ -2995,7 +2995,7 @@ Learn: {
TrgAvgAct: { On: true GiBaseInit: 0 ErrLRate: 0.02 SynScaleRate: 0.0002 SubMean: 1 Permute: true Pool: true TrgRange: { Min: 0.5 Max: 2
} }
RLRate: { On: true SigmoidMin: 0.05 Diff: true SpkThr: 0.1 DiffThr: 0.02 Min: 0.001 }
- NeuroMod: { DAMod: 0 Valence: 0 DAModGain: 0.5 DALRateSign: false DALRateMod: 0 AChLRateMod: 0 AChDisInhib: 0 BurstGain: 1 DipGain: 1 }
+ NeuroMod: { DAMod: NoDAMod Valence: Positive DAModGain: 0.5 DALRateSign: false DALRateMod: 0 AChLRateMod: 0 AChDisInhib: 0 BurstGain: 1 DipGain: 1 }
}
Burst: {
ThrRel: 0.1 ThrAbs: 0.1
@@ -3253,13 +3253,13 @@ Act: {
Clamp: { IsInput: false IsTarget: false Ge: 0.8 Add: false ErrThr: 0.5 }
Noise: { On: false GeHz: 100 Ge: 0.001 GiHz: 200 Gi: 0.001 }
VmRange: { Min: 0.1 Max: 1 }
- Mahp: { Gbar: 0.02 Voff: -30 Vslope: 9 TauMax: 1000 Tadj: 3.2093637 DtMax: 0.001 }
+ Mahp: { Gbar: 0.02 Voff: -30 Vslope: 9 TauMax: 1000 Tadj: 3.209364 DtMax: 0.001 }
Sahp: { Gbar: 1 CaTau: 5 Off: 0.8 Slope: 0.02 TauMax: 1 CaDt: 0.2 DtMax: 1 }
KNa: { On: true TrialSlow: false Med: { On: true Rise: 0.02 Max: 0.2 Tau: 200 Dt: 0.005 } Slow: { On: true Rise: 0.001 Max: 0.2 Tau: 1000 Dt: 0.001
} }
NMDA: { Gbar: 0.008 Tau: 200 ITau: 1 MgC: 1.4 Voff: 0 }
MaintNMDA: { Gbar: 0.007 Tau: 200 ITau: 1 MgC: 1.4 Voff: 0 }
- GabaB: { Gbar: 0.009 RiseTau: 45 DecayTau: 50 Gbase: 0.2 GiSpike: 10 MaxTime: 47.41225 TauFact: 2.5811758 RiseDt: 0.022222223 DecayDt: 0.02 }
+ GabaB: { Gbar: 0.009 RiseTau: 45 DecayTau: 50 Gbase: 0.2 GiSpike: 10 MaxTime: 47.41225 TauFact: 2.5811756 RiseDt: 0.022222223 DecayDt: 0.02 }
VGCC: { Gbar: 0.02 Ca: 25 }
AK: { Gbar: 0.1 Hf: 0.076 Mf: 0.075 Voff: 2 Vmax: -37 }
SKCa: { Gbar: 0 C50: 0.5 ActTau: 15 DeTau: 30 KCaR: 0.8 CaRDecayTau: 150 CaInThr: 0.01 CaInTau: 50 }
@@ -3279,7 +3279,7 @@ Learn: {
TrgAvgAct: { On: true GiBaseInit: 0 ErrLRate: 0.02 SynScaleRate: 0.0002 SubMean: 1 Permute: true Pool: true TrgRange: { Min: 0.5 Max: 2
} }
RLRate: { On: true SigmoidMin: 0.05 Diff: true SpkThr: 0.1 DiffThr: 0.02 Min: 0.001 }
- NeuroMod: { DAMod: 0 Valence: 0 DAModGain: 0.5 DALRateSign: false DALRateMod: 0 AChLRateMod: 0 AChDisInhib: 0 BurstGain: 1 DipGain: 1 }
+ NeuroMod: { DAMod: NoDAMod Valence: Positive DAModGain: 0.5 DALRateSign: false DALRateMod: 0 AChLRateMod: 0 AChDisInhib: 0 BurstGain: 1 DipGain: 1 }
}
CT: {
GeGain: 2 DecayTau: 50
@@ -3437,13 +3437,13 @@ Act: {
Clamp: { IsInput: false IsTarget: false Ge: 0.8 Add: false ErrThr: 0.5 }
Noise: { On: false GeHz: 100 Ge: 0.001 GiHz: 200 Gi: 0.001 }
VmRange: { Min: 0.1 Max: 1 }
- Mahp: { Gbar: 0.02 Voff: -30 Vslope: 9 TauMax: 1000 Tadj: 3.2093637 DtMax: 0.001 }
+ Mahp: { Gbar: 0.02 Voff: -30 Vslope: 9 TauMax: 1000 Tadj: 3.209364 DtMax: 0.001 }
Sahp: { Gbar: 0.01 CaTau: 5 Off: 0.8 Slope: 0.02 TauMax: 1 CaDt: 0.2 DtMax: 1 }
KNa: { On: true TrialSlow: false Med: { On: true Rise: 0.02 Max: 0.2 Tau: 200 Dt: 0.005 } Slow: { On: true Rise: 0.001 Max: 0.2 Tau: 1000 Dt: 0.001
} }
NMDA: { Gbar: 0.006 Tau: 100 ITau: 1 MgC: 1.4 Voff: 0 }
MaintNMDA: { Gbar: 0.007 Tau: 200 ITau: 1 MgC: 1.4 Voff: 0 }
- GabaB: { Gbar: 0.01 RiseTau: 45 DecayTau: 50 Gbase: 0.2 GiSpike: 10 MaxTime: 47.41225 TauFact: 2.5811758 RiseDt: 0.022222223 DecayDt: 0.02 }
+ GabaB: { Gbar: 0.01 RiseTau: 45 DecayTau: 50 Gbase: 0.2 GiSpike: 10 MaxTime: 47.41225 TauFact: 2.5811756 RiseDt: 0.022222223 DecayDt: 0.02 }
VGCC: { Gbar: 0.02 Ca: 25 }
AK: { Gbar: 0.1 Hf: 0.076 Mf: 0.075 Voff: 2 Vmax: -37 }
SKCa: { Gbar: 0 C50: 0.5 ActTau: 15 DeTau: 30 KCaR: 0.8 CaRDecayTau: 150 CaInThr: 0.01 CaInTau: 50 }
@@ -3463,7 +3463,7 @@ Learn: {
TrgAvgAct: { On: false GiBaseInit: 0 ErrLRate: 0.02 SynScaleRate: 0.0002 SubMean: 1 Permute: true Pool: true TrgRange: { Min: 0.5 Max: 2
} }
RLRate: { On: true SigmoidMin: 0.05 Diff: true SpkThr: 0.1 DiffThr: 0.02 Min: 0.001 }
- NeuroMod: { DAMod: 0 Valence: 0 DAModGain: 0.5 DALRateSign: false DALRateMod: 0 AChLRateMod: 0 AChDisInhib: 0 BurstGain: 1 DipGain: 1 }
+ NeuroMod: { DAMod: NoDAMod Valence: Positive DAModGain: 0.5 DALRateSign: false DALRateMod: 0 AChLRateMod: 0 AChDisInhib: 0 BurstGain: 1 DipGain: 1 }
}
///////////////////////////////////////////////////
Prjn: OFCposUSMDToOFCposUSPT
@@ -3538,13 +3538,13 @@ Act: {
Clamp: { IsInput: false IsTarget: false Ge: 0.8 Add: false ErrThr: 0.5 }
Noise: { On: false GeHz: 100 Ge: 0.001 GiHz: 200 Gi: 0.001 }
VmRange: { Min: 0.1 Max: 1 }
- Mahp: { Gbar: 0.02 Voff: -30 Vslope: 9 TauMax: 1000 Tadj: 3.2093637 DtMax: 0.001 }
+ Mahp: { Gbar: 0.02 Voff: -30 Vslope: 9 TauMax: 1000 Tadj: 3.209364 DtMax: 0.001 }
Sahp: { Gbar: 0.05 CaTau: 5 Off: 0.8 Slope: 0.02 TauMax: 1 CaDt: 0.2 DtMax: 1 }
KNa: { On: true TrialSlow: false Med: { On: true Rise: 0.02 Max: 0.2 Tau: 200 Dt: 0.005 } Slow: { On: true Rise: 0.001 Max: 0.2 Tau: 1000 Dt: 0.001
} }
NMDA: { Gbar: 0.006 Tau: 100 ITau: 1 MgC: 1.4 Voff: 0 }
MaintNMDA: { Gbar: 0.007 Tau: 200 ITau: 1 MgC: 1.4 Voff: 0 }
- GabaB: { Gbar: 0.015 RiseTau: 45 DecayTau: 50 Gbase: 0.2 GiSpike: 10 MaxTime: 47.41225 TauFact: 2.5811758 RiseDt: 0.022222223 DecayDt: 0.02 }
+ GabaB: { Gbar: 0.015 RiseTau: 45 DecayTau: 50 Gbase: 0.2 GiSpike: 10 MaxTime: 47.41225 TauFact: 2.5811756 RiseDt: 0.022222223 DecayDt: 0.02 }
VGCC: { Gbar: 0.02 Ca: 25 }
AK: { Gbar: 0.1 Hf: 0.076 Mf: 0.075 Voff: 2 Vmax: -37 }
SKCa: { Gbar: 0 C50: 0.5 ActTau: 15 DeTau: 30 KCaR: 0.8 CaRDecayTau: 150 CaInThr: 0.01 CaInTau: 50 }
@@ -3564,7 +3564,7 @@ Learn: {
TrgAvgAct: { On: true GiBaseInit: 0 ErrLRate: 0.02 SynScaleRate: 0.0002 SubMean: 1 Permute: true Pool: true TrgRange: { Min: 0.5 Max: 2
} }
RLRate: { On: true SigmoidMin: 0.05 Diff: true SpkThr: 0.1 DiffThr: 0.02 Min: 0.001 }
- NeuroMod: { DAMod: 0 Valence: 0 DAModGain: 0.5 DALRateSign: false DALRateMod: 0 AChLRateMod: 0 AChDisInhib: 1 BurstGain: 1 DipGain: 1 }
+ NeuroMod: { DAMod: NoDAMod Valence: Positive DAModGain: 0.5 DALRateSign: false DALRateMod: 0 AChLRateMod: 0 AChDisInhib: 1 BurstGain: 1 DipGain: 1 }
}
///////////////////////////////////////////////////
Prjn: OFCposUSPTToOFCposUSMD
@@ -3612,7 +3612,7 @@ Com: {
GType: InhibitoryG Delay: 2 MaxDelay: 2 PFail: 0 PFailSWt: false DelLen: 3
}
PrjnScale: {
- Rel: 1 Abs: 2
+ Rel: 1 Abs: 3
}
SWt: {
Init: { SPct: 0 Mean: 0.75 Var: 0 Sym: false }
@@ -3639,13 +3639,13 @@ Act: {
Clamp: { IsInput: false IsTarget: false Ge: 0.8 Add: false ErrThr: 0.5 }
Noise: { On: false GeHz: 100 Ge: 0.001 GiHz: 200 Gi: 0.001 }
VmRange: { Min: 0.1 Max: 1 }
- Mahp: { Gbar: 0.02 Voff: -30 Vslope: 9 TauMax: 1000 Tadj: 3.2093637 DtMax: 0.001 }
+ Mahp: { Gbar: 0.02 Voff: -30 Vslope: 9 TauMax: 1000 Tadj: 3.209364 DtMax: 0.001 }
Sahp: { Gbar: 0.1 CaTau: 5 Off: 0.8 Slope: 0.02 TauMax: 1 CaDt: 0.2 DtMax: 1 }
KNa: { On: true TrialSlow: false Med: { On: true Rise: 0.02 Max: 0.2 Tau: 200 Dt: 0.005 } Slow: { On: true Rise: 0.001 Max: 0.2 Tau: 1000 Dt: 0.001
} }
NMDA: { Gbar: 0.006 Tau: 100 ITau: 1 MgC: 1.4 Voff: 0 }
MaintNMDA: { Gbar: 0.007 Tau: 200 ITau: 1 MgC: 1.4 Voff: 0 }
- GabaB: { Gbar: 0.015 RiseTau: 45 DecayTau: 50 Gbase: 0.2 GiSpike: 10 MaxTime: 47.41225 TauFact: 2.5811758 RiseDt: 0.022222223 DecayDt: 0.02 }
+ GabaB: { Gbar: 0.015 RiseTau: 45 DecayTau: 50 Gbase: 0.2 GiSpike: 10 MaxTime: 47.41225 TauFact: 2.5811756 RiseDt: 0.022222223 DecayDt: 0.02 }
VGCC: { Gbar: 0.02 Ca: 25 }
AK: { Gbar: 0.1 Hf: 0.076 Mf: 0.075 Voff: 2 Vmax: -37 }
SKCa: { Gbar: 0 C50: 0.5 ActTau: 15 DeTau: 30 KCaR: 0.8 CaRDecayTau: 150 CaInThr: 0.01 CaInTau: 50 }
@@ -3665,7 +3665,7 @@ Learn: {
TrgAvgAct: { On: true GiBaseInit: 0 ErrLRate: 0.02 SynScaleRate: 0.0002 SubMean: 1 Permute: true Pool: true TrgRange: { Min: 0.5 Max: 2
} }
RLRate: { On: true SigmoidMin: 0.05 Diff: true SpkThr: 0.1 DiffThr: 0.02 Min: 0.001 }
- NeuroMod: { DAMod: 0 Valence: 0 DAModGain: 0.5 DALRateSign: false DALRateMod: 0 AChLRateMod: 0 AChDisInhib: 0 BurstGain: 1 DipGain: 1 }
+ NeuroMod: { DAMod: NoDAMod Valence: Positive DAModGain: 0.5 DALRateSign: false DALRateMod: 0 AChLRateMod: 0 AChDisInhib: 0 BurstGain: 1 DipGain: 1 }
}
CT: {
GeGain: 0.05 DecayTau: 50
@@ -3863,13 +3863,13 @@ Act: {
Clamp: { IsInput: false IsTarget: false Ge: 0.8 Add: false ErrThr: 0.5 }
Noise: { On: false GeHz: 100 Ge: 0.001 GiHz: 200 Gi: 0.001 }
VmRange: { Min: 0.1 Max: 1 }
- Mahp: { Gbar: 0.02 Voff: -30 Vslope: 9 TauMax: 1000 Tadj: 3.2093637 DtMax: 0.001 }
+ Mahp: { Gbar: 0.02 Voff: -30 Vslope: 9 TauMax: 1000 Tadj: 3.209364 DtMax: 0.001 }
Sahp: { Gbar: 0.05 CaTau: 5 Off: 0.8 Slope: 0.02 TauMax: 1 CaDt: 0.2 DtMax: 1 }
KNa: { On: true TrialSlow: false Med: { On: true Rise: 0.02 Max: 0.2 Tau: 200 Dt: 0.005 } Slow: { On: true Rise: 0.001 Max: 0.2 Tau: 1000 Dt: 0.001
} }
NMDA: { Gbar: 0.006 Tau: 100 ITau: 1 MgC: 1.4 Voff: 0 }
MaintNMDA: { Gbar: 0.007 Tau: 200 ITau: 1 MgC: 1.4 Voff: 0 }
- GabaB: { Gbar: 0.015 RiseTau: 45 DecayTau: 50 Gbase: 0.2 GiSpike: 10 MaxTime: 47.41225 TauFact: 2.5811758 RiseDt: 0.022222223 DecayDt: 0.02 }
+ GabaB: { Gbar: 0.015 RiseTau: 45 DecayTau: 50 Gbase: 0.2 GiSpike: 10 MaxTime: 47.41225 TauFact: 2.5811756 RiseDt: 0.022222223 DecayDt: 0.02 }
VGCC: { Gbar: 0.02 Ca: 25 }
AK: { Gbar: 0.1 Hf: 0.076 Mf: 0.075 Voff: 2 Vmax: -37 }
SKCa: { Gbar: 0 C50: 0.5 ActTau: 15 DeTau: 30 KCaR: 0.8 CaRDecayTau: 150 CaInThr: 0.01 CaInTau: 50 }
@@ -3889,7 +3889,7 @@ Learn: {
TrgAvgAct: { On: false GiBaseInit: 0 ErrLRate: 0.02 SynScaleRate: 0.005 SubMean: 1 Permute: true Pool: true TrgRange: { Min: 0.5 Max: 2
} }
RLRate: { On: true SigmoidMin: 0.05 Diff: true SpkThr: 0.1 DiffThr: 0.02 Min: 0.001 }
- NeuroMod: { DAMod: 0 Valence: 0 DAModGain: 0.5 DALRateSign: false DALRateMod: 0 AChLRateMod: 0 AChDisInhib: 0 BurstGain: 1 DipGain: 1 }
+ NeuroMod: { DAMod: NoDAMod Valence: Positive DAModGain: 0.5 DALRateSign: false DALRateMod: 0 AChLRateMod: 0 AChDisInhib: 0 BurstGain: 1 DipGain: 1 }
}
CT: {
GeGain: 0.4 DecayTau: 0
@@ -3987,13 +3987,13 @@ Act: {
Clamp: { IsInput: false IsTarget: false Ge: 0.8 Add: false ErrThr: 0.5 }
Noise: { On: false GeHz: 100 Ge: 0.001 GiHz: 200 Gi: 0.001 }
VmRange: { Min: 0.1 Max: 1 }
- Mahp: { Gbar: 0.02 Voff: -30 Vslope: 9 TauMax: 1000 Tadj: 3.2093637 DtMax: 0.001 }
+ Mahp: { Gbar: 0.02 Voff: -30 Vslope: 9 TauMax: 1000 Tadj: 3.209364 DtMax: 0.001 }
Sahp: { Gbar: 0.05 CaTau: 5 Off: 0.8 Slope: 0.02 TauMax: 1 CaDt: 0.2 DtMax: 1 }
KNa: { On: true TrialSlow: false Med: { On: true Rise: 0.02 Max: 0.2 Tau: 200 Dt: 0.005 } Slow: { On: true Rise: 0.001 Max: 0.2 Tau: 1000 Dt: 0.001
} }
NMDA: { Gbar: 0.006 Tau: 100 ITau: 1 MgC: 1.4 Voff: 0 }
MaintNMDA: { Gbar: 0.007 Tau: 200 ITau: 1 MgC: 1.4 Voff: 0 }
- GabaB: { Gbar: 0.015 RiseTau: 45 DecayTau: 50 Gbase: 0.2 GiSpike: 10 MaxTime: 47.41225 TauFact: 2.5811758 RiseDt: 0.022222223 DecayDt: 0.02 }
+ GabaB: { Gbar: 0.015 RiseTau: 45 DecayTau: 50 Gbase: 0.2 GiSpike: 10 MaxTime: 47.41225 TauFact: 2.5811756 RiseDt: 0.022222223 DecayDt: 0.02 }
VGCC: { Gbar: 0.02 Ca: 25 }
AK: { Gbar: 0.1 Hf: 0.076 Mf: 0.075 Voff: 2 Vmax: -37 }
SKCa: { Gbar: 0 C50: 0.5 ActTau: 15 DeTau: 30 KCaR: 0.8 CaRDecayTau: 150 CaInThr: 0.01 CaInTau: 50 }
@@ -4013,7 +4013,7 @@ Learn: {
TrgAvgAct: { On: true GiBaseInit: 0 ErrLRate: 0.02 SynScaleRate: 0.0002 SubMean: 1 Permute: true Pool: true TrgRange: { Min: 0.5 Max: 2
} }
RLRate: { On: true SigmoidMin: 0.05 Diff: true SpkThr: 0.1 DiffThr: 0.02 Min: 0.001 }
- NeuroMod: { DAMod: 0 Valence: 0 DAModGain: 0.5 DALRateSign: false DALRateMod: 0 AChLRateMod: 0 AChDisInhib: 0 BurstGain: 1 DipGain: 1 }
+ NeuroMod: { DAMod: NoDAMod Valence: Positive DAModGain: 0.5 DALRateSign: false DALRateMod: 0 AChLRateMod: 0 AChDisInhib: 0 BurstGain: 1 DipGain: 1 }
}
Burst: {
ThrRel: 0.1 ThrAbs: 0.1
@@ -4231,13 +4231,13 @@ Act: {
Clamp: { IsInput: false IsTarget: false Ge: 0.8 Add: false ErrThr: 0.5 }
Noise: { On: false GeHz: 100 Ge: 0.001 GiHz: 200 Gi: 0.001 }
VmRange: { Min: 0.1 Max: 1 }
- Mahp: { Gbar: 0.02 Voff: -30 Vslope: 9 TauMax: 1000 Tadj: 3.2093637 DtMax: 0.001 }
+ Mahp: { Gbar: 0.02 Voff: -30 Vslope: 9 TauMax: 1000 Tadj: 3.209364 DtMax: 0.001 }
Sahp: { Gbar: 1 CaTau: 5 Off: 0.8 Slope: 0.02 TauMax: 1 CaDt: 0.2 DtMax: 1 }
KNa: { On: true TrialSlow: false Med: { On: true Rise: 0.02 Max: 0.2 Tau: 200 Dt: 0.005 } Slow: { On: true Rise: 0.001 Max: 0.2 Tau: 1000 Dt: 0.001
} }
NMDA: { Gbar: 0.008 Tau: 200 ITau: 1 MgC: 1.4 Voff: 0 }
MaintNMDA: { Gbar: 0.007 Tau: 200 ITau: 1 MgC: 1.4 Voff: 0 }
- GabaB: { Gbar: 0.009 RiseTau: 45 DecayTau: 50 Gbase: 0.2 GiSpike: 10 MaxTime: 47.41225 TauFact: 2.5811758 RiseDt: 0.022222223 DecayDt: 0.02 }
+ GabaB: { Gbar: 0.009 RiseTau: 45 DecayTau: 50 Gbase: 0.2 GiSpike: 10 MaxTime: 47.41225 TauFact: 2.5811756 RiseDt: 0.022222223 DecayDt: 0.02 }
VGCC: { Gbar: 0.02 Ca: 25 }
AK: { Gbar: 0.1 Hf: 0.076 Mf: 0.075 Voff: 2 Vmax: -37 }
SKCa: { Gbar: 0 C50: 0.5 ActTau: 15 DeTau: 30 KCaR: 0.8 CaRDecayTau: 150 CaInThr: 0.01 CaInTau: 50 }
@@ -4257,7 +4257,7 @@ Learn: {
TrgAvgAct: { On: true GiBaseInit: 0 ErrLRate: 0.02 SynScaleRate: 0.0002 SubMean: 1 Permute: true Pool: true TrgRange: { Min: 0.5 Max: 2
} }
RLRate: { On: true SigmoidMin: 0.05 Diff: true SpkThr: 0.1 DiffThr: 0.02 Min: 0.001 }
- NeuroMod: { DAMod: 0 Valence: 0 DAModGain: 0.5 DALRateSign: false DALRateMod: 0 AChLRateMod: 0 AChDisInhib: 0 BurstGain: 1 DipGain: 1 }
+ NeuroMod: { DAMod: NoDAMod Valence: Positive DAModGain: 0.5 DALRateSign: false DALRateMod: 0 AChLRateMod: 0 AChDisInhib: 0 BurstGain: 1 DipGain: 1 }
}
CT: {
GeGain: 2 DecayTau: 50
@@ -4395,13 +4395,13 @@ Act: {
Clamp: { IsInput: false IsTarget: false Ge: 0.8 Add: false ErrThr: 0.5 }
Noise: { On: false GeHz: 100 Ge: 0.001 GiHz: 200 Gi: 0.001 }
VmRange: { Min: 0.1 Max: 1 }
- Mahp: { Gbar: 0.02 Voff: -30 Vslope: 9 TauMax: 1000 Tadj: 3.2093637 DtMax: 0.001 }
+ Mahp: { Gbar: 0.02 Voff: -30 Vslope: 9 TauMax: 1000 Tadj: 3.209364 DtMax: 0.001 }
Sahp: { Gbar: 0.01 CaTau: 5 Off: 0.8 Slope: 0.02 TauMax: 1 CaDt: 0.2 DtMax: 1 }
KNa: { On: true TrialSlow: false Med: { On: true Rise: 0.02 Max: 0.2 Tau: 200 Dt: 0.005 } Slow: { On: true Rise: 0.001 Max: 0.2 Tau: 1000 Dt: 0.001
} }
NMDA: { Gbar: 0.006 Tau: 100 ITau: 1 MgC: 1.4 Voff: 0 }
MaintNMDA: { Gbar: 0.007 Tau: 200 ITau: 1 MgC: 1.4 Voff: 0 }
- GabaB: { Gbar: 0.01 RiseTau: 45 DecayTau: 50 Gbase: 0.2 GiSpike: 10 MaxTime: 47.41225 TauFact: 2.5811758 RiseDt: 0.022222223 DecayDt: 0.02 }
+ GabaB: { Gbar: 0.01 RiseTau: 45 DecayTau: 50 Gbase: 0.2 GiSpike: 10 MaxTime: 47.41225 TauFact: 2.5811756 RiseDt: 0.022222223 DecayDt: 0.02 }
VGCC: { Gbar: 0.02 Ca: 25 }
AK: { Gbar: 0.1 Hf: 0.076 Mf: 0.075 Voff: 2 Vmax: -37 }
SKCa: { Gbar: 0 C50: 0.5 ActTau: 15 DeTau: 30 KCaR: 0.8 CaRDecayTau: 150 CaInThr: 0.01 CaInTau: 50 }
@@ -4421,7 +4421,7 @@ Learn: {
TrgAvgAct: { On: false GiBaseInit: 0 ErrLRate: 0.02 SynScaleRate: 0.0002 SubMean: 1 Permute: true Pool: true TrgRange: { Min: 0.5 Max: 2
} }
RLRate: { On: true SigmoidMin: 0.05 Diff: true SpkThr: 0.1 DiffThr: 0.02 Min: 0.001 }
- NeuroMod: { DAMod: 0 Valence: 0 DAModGain: 0.5 DALRateSign: false DALRateMod: 0 AChLRateMod: 0 AChDisInhib: 0 BurstGain: 1 DipGain: 1 }
+ NeuroMod: { DAMod: NoDAMod Valence: Positive DAModGain: 0.5 DALRateSign: false DALRateMod: 0 AChLRateMod: 0 AChDisInhib: 0 BurstGain: 1 DipGain: 1 }
}
///////////////////////////////////////////////////
Prjn: OFCnegUSMDToOFCnegUSPT
@@ -4496,13 +4496,13 @@ Act: {
Clamp: { IsInput: false IsTarget: false Ge: 0.8 Add: false ErrThr: 0.5 }
Noise: { On: false GeHz: 100 Ge: 0.001 GiHz: 200 Gi: 0.001 }
VmRange: { Min: 0.1 Max: 1 }
- Mahp: { Gbar: 0.02 Voff: -30 Vslope: 9 TauMax: 1000 Tadj: 3.2093637 DtMax: 0.001 }
+ Mahp: { Gbar: 0.02 Voff: -30 Vslope: 9 TauMax: 1000 Tadj: 3.209364 DtMax: 0.001 }
Sahp: { Gbar: 0.05 CaTau: 5 Off: 0.8 Slope: 0.02 TauMax: 1 CaDt: 0.2 DtMax: 1 }
KNa: { On: true TrialSlow: false Med: { On: true Rise: 0.02 Max: 0.2 Tau: 200 Dt: 0.005 } Slow: { On: true Rise: 0.001 Max: 0.2 Tau: 1000 Dt: 0.001
} }
NMDA: { Gbar: 0.006 Tau: 100 ITau: 1 MgC: 1.4 Voff: 0 }
MaintNMDA: { Gbar: 0.007 Tau: 200 ITau: 1 MgC: 1.4 Voff: 0 }
- GabaB: { Gbar: 0.015 RiseTau: 45 DecayTau: 50 Gbase: 0.2 GiSpike: 10 MaxTime: 47.41225 TauFact: 2.5811758 RiseDt: 0.022222223 DecayDt: 0.02 }
+ GabaB: { Gbar: 0.015 RiseTau: 45 DecayTau: 50 Gbase: 0.2 GiSpike: 10 MaxTime: 47.41225 TauFact: 2.5811756 RiseDt: 0.022222223 DecayDt: 0.02 }
VGCC: { Gbar: 0.02 Ca: 25 }
AK: { Gbar: 0.1 Hf: 0.076 Mf: 0.075 Voff: 2 Vmax: -37 }
SKCa: { Gbar: 0 C50: 0.5 ActTau: 15 DeTau: 30 KCaR: 0.8 CaRDecayTau: 150 CaInThr: 0.01 CaInTau: 50 }
@@ -4522,7 +4522,7 @@ Learn: {
TrgAvgAct: { On: true GiBaseInit: 0 ErrLRate: 0.02 SynScaleRate: 0.0002 SubMean: 1 Permute: true Pool: true TrgRange: { Min: 0.5 Max: 2
} }
RLRate: { On: true SigmoidMin: 0.05 Diff: true SpkThr: 0.1 DiffThr: 0.02 Min: 0.001 }
- NeuroMod: { DAMod: 0 Valence: 0 DAModGain: 0.5 DALRateSign: false DALRateMod: 0 AChLRateMod: 0 AChDisInhib: 1 BurstGain: 1 DipGain: 1 }
+ NeuroMod: { DAMod: NoDAMod Valence: Positive DAModGain: 0.5 DALRateSign: false DALRateMod: 0 AChLRateMod: 0 AChDisInhib: 1 BurstGain: 1 DipGain: 1 }
}
///////////////////////////////////////////////////
Prjn: OFCnegUSPTToOFCnegUSMD
@@ -4570,7 +4570,7 @@ Com: {
GType: InhibitoryG Delay: 2 MaxDelay: 2 PFail: 0 PFailSWt: false DelLen: 3
}
PrjnScale: {
- Rel: 1 Abs: 2
+ Rel: 1 Abs: 3
}
SWt: {
Init: { SPct: 0 Mean: 0.75 Var: 0 Sym: false }
@@ -4597,13 +4597,13 @@ Act: {
Clamp: { IsInput: false IsTarget: false Ge: 0.8 Add: false ErrThr: 0.5 }
Noise: { On: false GeHz: 100 Ge: 0.001 GiHz: 200 Gi: 0.001 }
VmRange: { Min: 0.1 Max: 1 }
- Mahp: { Gbar: 0.02 Voff: -30 Vslope: 9 TauMax: 1000 Tadj: 3.2093637 DtMax: 0.001 }
+ Mahp: { Gbar: 0.02 Voff: -30 Vslope: 9 TauMax: 1000 Tadj: 3.209364 DtMax: 0.001 }
Sahp: { Gbar: 0.1 CaTau: 5 Off: 0.8 Slope: 0.02 TauMax: 1 CaDt: 0.2 DtMax: 1 }
KNa: { On: true TrialSlow: false Med: { On: true Rise: 0.02 Max: 0.2 Tau: 200 Dt: 0.005 } Slow: { On: true Rise: 0.001 Max: 0.2 Tau: 1000 Dt: 0.001
} }
NMDA: { Gbar: 0.006 Tau: 100 ITau: 1 MgC: 1.4 Voff: 0 }
MaintNMDA: { Gbar: 0.007 Tau: 200 ITau: 1 MgC: 1.4 Voff: 0 }
- GabaB: { Gbar: 0.015 RiseTau: 45 DecayTau: 50 Gbase: 0.2 GiSpike: 10 MaxTime: 47.41225 TauFact: 2.5811758 RiseDt: 0.022222223 DecayDt: 0.02 }
+ GabaB: { Gbar: 0.015 RiseTau: 45 DecayTau: 50 Gbase: 0.2 GiSpike: 10 MaxTime: 47.41225 TauFact: 2.5811756 RiseDt: 0.022222223 DecayDt: 0.02 }
VGCC: { Gbar: 0.02 Ca: 25 }
AK: { Gbar: 0.1 Hf: 0.076 Mf: 0.075 Voff: 2 Vmax: -37 }
SKCa: { Gbar: 0 C50: 0.5 ActTau: 15 DeTau: 30 KCaR: 0.8 CaRDecayTau: 150 CaInThr: 0.01 CaInTau: 50 }
@@ -4623,7 +4623,7 @@ Learn: {
TrgAvgAct: { On: true GiBaseInit: 0 ErrLRate: 0.02 SynScaleRate: 0.0002 SubMean: 1 Permute: true Pool: true TrgRange: { Min: 0.5 Max: 2
} }
RLRate: { On: true SigmoidMin: 0.05 Diff: true SpkThr: 0.1 DiffThr: 0.02 Min: 0.001 }
- NeuroMod: { DAMod: 0 Valence: 0 DAModGain: 0.5 DALRateSign: false DALRateMod: 0 AChLRateMod: 0 AChDisInhib: 0 BurstGain: 1 DipGain: 1 }
+ NeuroMod: { DAMod: NoDAMod Valence: Positive DAModGain: 0.5 DALRateSign: false DALRateMod: 0 AChLRateMod: 0 AChDisInhib: 0 BurstGain: 1 DipGain: 1 }
}
CT: {
GeGain: 0.05 DecayTau: 50
@@ -4801,13 +4801,13 @@ Act: {
Clamp: { IsInput: false IsTarget: false Ge: 0.8 Add: false ErrThr: 0.5 }
Noise: { On: false GeHz: 100 Ge: 0.001 GiHz: 200 Gi: 0.001 }
VmRange: { Min: 0.1 Max: 1 }
- Mahp: { Gbar: 0.02 Voff: -30 Vslope: 9 TauMax: 1000 Tadj: 3.2093637 DtMax: 0.001 }
+ Mahp: { Gbar: 0.02 Voff: -30 Vslope: 9 TauMax: 1000 Tadj: 3.209364 DtMax: 0.001 }
Sahp: { Gbar: 0.05 CaTau: 5 Off: 0.8 Slope: 0.02 TauMax: 1 CaDt: 0.2 DtMax: 1 }
KNa: { On: true TrialSlow: false Med: { On: true Rise: 0.02 Max: 0.2 Tau: 200 Dt: 0.005 } Slow: { On: true Rise: 0.001 Max: 0.2 Tau: 1000 Dt: 0.001
} }
NMDA: { Gbar: 0.006 Tau: 100 ITau: 1 MgC: 1.4 Voff: 0 }
MaintNMDA: { Gbar: 0.007 Tau: 200 ITau: 1 MgC: 1.4 Voff: 0 }
- GabaB: { Gbar: 0.015 RiseTau: 45 DecayTau: 50 Gbase: 0.2 GiSpike: 10 MaxTime: 47.41225 TauFact: 2.5811758 RiseDt: 0.022222223 DecayDt: 0.02 }
+ GabaB: { Gbar: 0.015 RiseTau: 45 DecayTau: 50 Gbase: 0.2 GiSpike: 10 MaxTime: 47.41225 TauFact: 2.5811756 RiseDt: 0.022222223 DecayDt: 0.02 }
VGCC: { Gbar: 0.02 Ca: 25 }
AK: { Gbar: 0.1 Hf: 0.076 Mf: 0.075 Voff: 2 Vmax: -37 }
SKCa: { Gbar: 0 C50: 0.5 ActTau: 15 DeTau: 30 KCaR: 0.8 CaRDecayTau: 150 CaInThr: 0.01 CaInTau: 50 }
@@ -4827,7 +4827,7 @@ Learn: {
TrgAvgAct: { On: true GiBaseInit: 0 ErrLRate: 0.02 SynScaleRate: 0.005 SubMean: 1 Permute: true Pool: true TrgRange: { Min: 0.5 Max: 2
} }
RLRate: { On: true SigmoidMin: 0.05 Diff: true SpkThr: 0.1 DiffThr: 0.02 Min: 0.001 }
- NeuroMod: { DAMod: 0 Valence: 0 DAModGain: 0.5 DALRateSign: false DALRateMod: 0 AChLRateMod: 0 AChDisInhib: 0 BurstGain: 1 DipGain: 1 }
+ NeuroMod: { DAMod: NoDAMod Valence: Positive DAModGain: 0.5 DALRateSign: false DALRateMod: 0 AChLRateMod: 0 AChDisInhib: 0 BurstGain: 1 DipGain: 1 }
}
Burst: {
ThrRel: 0.1 ThrAbs: 0.1
@@ -4945,13 +4945,13 @@ Act: {
Clamp: { IsInput: false IsTarget: false Ge: 0.8 Add: false ErrThr: 0.5 }
Noise: { On: false GeHz: 100 Ge: 0.001 GiHz: 200 Gi: 0.001 }
VmRange: { Min: 0.1 Max: 1 }
- Mahp: { Gbar: 0.02 Voff: -30 Vslope: 9 TauMax: 1000 Tadj: 3.2093637 DtMax: 0.001 }
+ Mahp: { Gbar: 0.02 Voff: -30 Vslope: 9 TauMax: 1000 Tadj: 3.209364 DtMax: 0.001 }
Sahp: { Gbar: 1 CaTau: 5 Off: 0.8 Slope: 0.02 TauMax: 1 CaDt: 0.2 DtMax: 1 }
KNa: { On: true TrialSlow: false Med: { On: true Rise: 0.02 Max: 0.2 Tau: 200 Dt: 0.005 } Slow: { On: true Rise: 0.001 Max: 0.2 Tau: 1000 Dt: 0.001
} }
NMDA: { Gbar: 0.008 Tau: 200 ITau: 1 MgC: 1.4 Voff: 0 }
MaintNMDA: { Gbar: 0.007 Tau: 200 ITau: 1 MgC: 1.4 Voff: 0 }
- GabaB: { Gbar: 0.009 RiseTau: 45 DecayTau: 50 Gbase: 0.2 GiSpike: 10 MaxTime: 47.41225 TauFact: 2.5811758 RiseDt: 0.022222223 DecayDt: 0.02 }
+ GabaB: { Gbar: 0.009 RiseTau: 45 DecayTau: 50 Gbase: 0.2 GiSpike: 10 MaxTime: 47.41225 TauFact: 2.5811756 RiseDt: 0.022222223 DecayDt: 0.02 }
VGCC: { Gbar: 0.02 Ca: 25 }
AK: { Gbar: 0.1 Hf: 0.076 Mf: 0.075 Voff: 2 Vmax: -37 }
SKCa: { Gbar: 0 C50: 0.5 ActTau: 15 DeTau: 30 KCaR: 0.8 CaRDecayTau: 150 CaInThr: 0.01 CaInTau: 50 }
@@ -4971,7 +4971,7 @@ Learn: {
TrgAvgAct: { On: true GiBaseInit: 0 ErrLRate: 0.02 SynScaleRate: 0.005 SubMean: 1 Permute: true Pool: true TrgRange: { Min: 0.5 Max: 2
} }
RLRate: { On: true SigmoidMin: 0.05 Diff: true SpkThr: 0.1 DiffThr: 0.02 Min: 0.001 }
- NeuroMod: { DAMod: 0 Valence: 0 DAModGain: 0.5 DALRateSign: false DALRateMod: 0 AChLRateMod: 0 AChDisInhib: 0 BurstGain: 1 DipGain: 1 }
+ NeuroMod: { DAMod: NoDAMod Valence: Positive DAModGain: 0.5 DALRateSign: false DALRateMod: 0 AChLRateMod: 0 AChDisInhib: 0 BurstGain: 1 DipGain: 1 }
}
CT: {
GeGain: 2 DecayTau: 50
@@ -5069,13 +5069,13 @@ Act: {
Clamp: { IsInput: false IsTarget: false Ge: 0.8 Add: false ErrThr: 0.5 }
Noise: { On: false GeHz: 100 Ge: 0.001 GiHz: 200 Gi: 0.001 }
VmRange: { Min: 0.1 Max: 1 }
- Mahp: { Gbar: 0.02 Voff: -30 Vslope: 9 TauMax: 1000 Tadj: 3.2093637 DtMax: 0.001 }
+ Mahp: { Gbar: 0.02 Voff: -30 Vslope: 9 TauMax: 1000 Tadj: 3.209364 DtMax: 0.001 }
Sahp: { Gbar: 0.01 CaTau: 5 Off: 0.8 Slope: 0.02 TauMax: 1 CaDt: 0.2 DtMax: 1 }
KNa: { On: true TrialSlow: false Med: { On: true Rise: 0.02 Max: 0.2 Tau: 200 Dt: 0.005 } Slow: { On: true Rise: 0.001 Max: 0.2 Tau: 1000 Dt: 0.001
} }
NMDA: { Gbar: 0.006 Tau: 100 ITau: 1 MgC: 1.4 Voff: 0 }
MaintNMDA: { Gbar: 0.007 Tau: 200 ITau: 1 MgC: 1.4 Voff: 0 }
- GabaB: { Gbar: 0.01 RiseTau: 45 DecayTau: 50 Gbase: 0.2 GiSpike: 10 MaxTime: 47.41225 TauFact: 2.5811758 RiseDt: 0.022222223 DecayDt: 0.02 }
+ GabaB: { Gbar: 0.01 RiseTau: 45 DecayTau: 50 Gbase: 0.2 GiSpike: 10 MaxTime: 47.41225 TauFact: 2.5811756 RiseDt: 0.022222223 DecayDt: 0.02 }
VGCC: { Gbar: 0.02 Ca: 25 }
AK: { Gbar: 0.1 Hf: 0.076 Mf: 0.075 Voff: 2 Vmax: -37 }
SKCa: { Gbar: 0 C50: 0.5 ActTau: 15 DeTau: 30 KCaR: 0.8 CaRDecayTau: 150 CaInThr: 0.01 CaInTau: 50 }
@@ -5095,7 +5095,7 @@ Learn: {
TrgAvgAct: { On: false GiBaseInit: 0 ErrLRate: 0.02 SynScaleRate: 0.005 SubMean: 1 Permute: true Pool: true TrgRange: { Min: 0.5 Max: 2
} }
RLRate: { On: true SigmoidMin: 0.05 Diff: true SpkThr: 0.1 DiffThr: 0.02 Min: 0.001 }
- NeuroMod: { DAMod: 0 Valence: 0 DAModGain: 0.5 DALRateSign: false DALRateMod: 0 AChLRateMod: 0 AChDisInhib: 0 BurstGain: 1 DipGain: 1 }
+ NeuroMod: { DAMod: NoDAMod Valence: Positive DAModGain: 0.5 DALRateSign: false DALRateMod: 0 AChLRateMod: 0 AChDisInhib: 0 BurstGain: 1 DipGain: 1 }
}
///////////////////////////////////////////////////
Prjn: OFCposValMDToOFCposValPT
@@ -5170,13 +5170,13 @@ Act: {
Clamp: { IsInput: false IsTarget: false Ge: 0.8 Add: false ErrThr: 0.5 }
Noise: { On: false GeHz: 100 Ge: 0.001 GiHz: 200 Gi: 0.001 }
VmRange: { Min: 0.1 Max: 1 }
- Mahp: { Gbar: 0.02 Voff: -30 Vslope: 9 TauMax: 1000 Tadj: 3.2093637 DtMax: 0.001 }
+ Mahp: { Gbar: 0.02 Voff: -30 Vslope: 9 TauMax: 1000 Tadj: 3.209364 DtMax: 0.001 }
Sahp: { Gbar: 0.05 CaTau: 5 Off: 0.8 Slope: 0.02 TauMax: 1 CaDt: 0.2 DtMax: 1 }
KNa: { On: true TrialSlow: false Med: { On: true Rise: 0.02 Max: 0.2 Tau: 200 Dt: 0.005 } Slow: { On: true Rise: 0.001 Max: 0.2 Tau: 1000 Dt: 0.001
} }
NMDA: { Gbar: 0.006 Tau: 100 ITau: 1 MgC: 1.4 Voff: 0 }
MaintNMDA: { Gbar: 0.007 Tau: 200 ITau: 1 MgC: 1.4 Voff: 0 }
- GabaB: { Gbar: 0.015 RiseTau: 45 DecayTau: 50 Gbase: 0.2 GiSpike: 10 MaxTime: 47.41225 TauFact: 2.5811758 RiseDt: 0.022222223 DecayDt: 0.02 }
+ GabaB: { Gbar: 0.015 RiseTau: 45 DecayTau: 50 Gbase: 0.2 GiSpike: 10 MaxTime: 47.41225 TauFact: 2.5811756 RiseDt: 0.022222223 DecayDt: 0.02 }
VGCC: { Gbar: 0.02 Ca: 25 }
AK: { Gbar: 0.1 Hf: 0.076 Mf: 0.075 Voff: 2 Vmax: -37 }
SKCa: { Gbar: 0 C50: 0.5 ActTau: 15 DeTau: 30 KCaR: 0.8 CaRDecayTau: 150 CaInThr: 0.01 CaInTau: 50 }
@@ -5196,7 +5196,7 @@ Learn: {
TrgAvgAct: { On: true GiBaseInit: 0 ErrLRate: 0.02 SynScaleRate: 0.005 SubMean: 1 Permute: true Pool: true TrgRange: { Min: 0.5 Max: 2
} }
RLRate: { On: true SigmoidMin: 0.05 Diff: true SpkThr: 0.1 DiffThr: 0.02 Min: 0.001 }
- NeuroMod: { DAMod: 0 Valence: 0 DAModGain: 0.5 DALRateSign: false DALRateMod: 0 AChLRateMod: 0 AChDisInhib: 1 BurstGain: 1 DipGain: 1 }
+ NeuroMod: { DAMod: NoDAMod Valence: Positive DAModGain: 0.5 DALRateSign: false DALRateMod: 0 AChLRateMod: 0 AChDisInhib: 1 BurstGain: 1 DipGain: 1 }
}
///////////////////////////////////////////////////
Prjn: OFCposValPTToOFCposValMD
@@ -5244,7 +5244,7 @@ Com: {
GType: InhibitoryG Delay: 2 MaxDelay: 2 PFail: 0 PFailSWt: false DelLen: 3
}
PrjnScale: {
- Rel: 1 Abs: 2
+ Rel: 1 Abs: 3
}
SWt: {
Init: { SPct: 0 Mean: 0.75 Var: 0 Sym: false }
@@ -5271,13 +5271,13 @@ Act: {
Clamp: { IsInput: false IsTarget: false Ge: 0.8 Add: false ErrThr: 0.5 }
Noise: { On: false GeHz: 100 Ge: 0.001 GiHz: 200 Gi: 0.001 }
VmRange: { Min: 0.1 Max: 1 }
- Mahp: { Gbar: 0.02 Voff: -30 Vslope: 9 TauMax: 1000 Tadj: 3.2093637 DtMax: 0.001 }
+ Mahp: { Gbar: 0.02 Voff: -30 Vslope: 9 TauMax: 1000 Tadj: 3.209364 DtMax: 0.001 }
Sahp: { Gbar: 0.1 CaTau: 5 Off: 0.8 Slope: 0.02 TauMax: 1 CaDt: 0.2 DtMax: 1 }
KNa: { On: true TrialSlow: false Med: { On: true Rise: 0.02 Max: 0.2 Tau: 200 Dt: 0.005 } Slow: { On: true Rise: 0.001 Max: 0.2 Tau: 1000 Dt: 0.001
} }
NMDA: { Gbar: 0.006 Tau: 100 ITau: 1 MgC: 1.4 Voff: 0 }
MaintNMDA: { Gbar: 0.007 Tau: 200 ITau: 1 MgC: 1.4 Voff: 0 }
- GabaB: { Gbar: 0.015 RiseTau: 45 DecayTau: 50 Gbase: 0.2 GiSpike: 10 MaxTime: 47.41225 TauFact: 2.5811758 RiseDt: 0.022222223 DecayDt: 0.02 }
+ GabaB: { Gbar: 0.015 RiseTau: 45 DecayTau: 50 Gbase: 0.2 GiSpike: 10 MaxTime: 47.41225 TauFact: 2.5811756 RiseDt: 0.022222223 DecayDt: 0.02 }
VGCC: { Gbar: 0.02 Ca: 25 }
AK: { Gbar: 0.1 Hf: 0.076 Mf: 0.075 Voff: 2 Vmax: -37 }
SKCa: { Gbar: 0 C50: 0.5 ActTau: 15 DeTau: 30 KCaR: 0.8 CaRDecayTau: 150 CaInThr: 0.01 CaInTau: 50 }
@@ -5297,7 +5297,7 @@ Learn: {
TrgAvgAct: { On: true GiBaseInit: 0 ErrLRate: 0.02 SynScaleRate: 0.005 SubMean: 1 Permute: true Pool: true TrgRange: { Min: 0.5 Max: 2
} }
RLRate: { On: true SigmoidMin: 0.05 Diff: true SpkThr: 0.1 DiffThr: 0.02 Min: 0.001 }
- NeuroMod: { DAMod: 0 Valence: 0 DAModGain: 0.5 DALRateSign: false DALRateMod: 0 AChLRateMod: 0 AChDisInhib: 0 BurstGain: 1 DipGain: 1 }
+ NeuroMod: { DAMod: NoDAMod Valence: Positive DAModGain: 0.5 DALRateSign: false DALRateMod: 0 AChLRateMod: 0 AChDisInhib: 0 BurstGain: 1 DipGain: 1 }
}
CT: {
GeGain: 0.05 DecayTau: 50
@@ -5455,13 +5455,13 @@ Act: {
Clamp: { IsInput: false IsTarget: false Ge: 0.8 Add: false ErrThr: 0.5 }
Noise: { On: false GeHz: 100 Ge: 0.001 GiHz: 200 Gi: 0.001 }
VmRange: { Min: 0.1 Max: 1 }
- Mahp: { Gbar: 0.02 Voff: -30 Vslope: 9 TauMax: 1000 Tadj: 3.2093637 DtMax: 0.001 }
+ Mahp: { Gbar: 0.02 Voff: -30 Vslope: 9 TauMax: 1000 Tadj: 3.209364 DtMax: 0.001 }
Sahp: { Gbar: 0.05 CaTau: 5 Off: 0.8 Slope: 0.02 TauMax: 1 CaDt: 0.2 DtMax: 1 }
KNa: { On: true TrialSlow: false Med: { On: true Rise: 0.02 Max: 0.2 Tau: 200 Dt: 0.005 } Slow: { On: true Rise: 0.001 Max: 0.2 Tau: 1000 Dt: 0.001
} }
NMDA: { Gbar: 0.006 Tau: 100 ITau: 1 MgC: 1.4 Voff: 0 }
MaintNMDA: { Gbar: 0.007 Tau: 200 ITau: 1 MgC: 1.4 Voff: 0 }
- GabaB: { Gbar: 0.015 RiseTau: 45 DecayTau: 50 Gbase: 0.2 GiSpike: 10 MaxTime: 47.41225 TauFact: 2.5811758 RiseDt: 0.022222223 DecayDt: 0.02 }
+ GabaB: { Gbar: 0.015 RiseTau: 45 DecayTau: 50 Gbase: 0.2 GiSpike: 10 MaxTime: 47.41225 TauFact: 2.5811756 RiseDt: 0.022222223 DecayDt: 0.02 }
VGCC: { Gbar: 0.02 Ca: 25 }
AK: { Gbar: 0.1 Hf: 0.076 Mf: 0.075 Voff: 2 Vmax: -37 }
SKCa: { Gbar: 0 C50: 0.5 ActTau: 15 DeTau: 30 KCaR: 0.8 CaRDecayTau: 150 CaInThr: 0.01 CaInTau: 50 }
@@ -5481,7 +5481,7 @@ Learn: {
TrgAvgAct: { On: true GiBaseInit: 0 ErrLRate: 0.02 SynScaleRate: 0.005 SubMean: 1 Permute: true Pool: true TrgRange: { Min: 0.5 Max: 2
} }
RLRate: { On: true SigmoidMin: 0.05 Diff: true SpkThr: 0.1 DiffThr: 0.02 Min: 0.001 }
- NeuroMod: { DAMod: 0 Valence: 0 DAModGain: 0.5 DALRateSign: false DALRateMod: 0 AChLRateMod: 0 AChDisInhib: 0 BurstGain: 1 DipGain: 1 }
+ NeuroMod: { DAMod: NoDAMod Valence: Positive DAModGain: 0.5 DALRateSign: false DALRateMod: 0 AChLRateMod: 0 AChDisInhib: 0 BurstGain: 1 DipGain: 1 }
}
Burst: {
ThrRel: 0.1 ThrAbs: 0.1
@@ -5599,13 +5599,13 @@ Act: {
Clamp: { IsInput: false IsTarget: false Ge: 0.8 Add: false ErrThr: 0.5 }
Noise: { On: false GeHz: 100 Ge: 0.001 GiHz: 200 Gi: 0.001 }
VmRange: { Min: 0.1 Max: 1 }
- Mahp: { Gbar: 0.02 Voff: -30 Vslope: 9 TauMax: 1000 Tadj: 3.2093637 DtMax: 0.001 }
+ Mahp: { Gbar: 0.02 Voff: -30 Vslope: 9 TauMax: 1000 Tadj: 3.209364 DtMax: 0.001 }
Sahp: { Gbar: 1 CaTau: 5 Off: 0.8 Slope: 0.02 TauMax: 1 CaDt: 0.2 DtMax: 1 }
KNa: { On: true TrialSlow: false Med: { On: true Rise: 0.02 Max: 0.2 Tau: 200 Dt: 0.005 } Slow: { On: true Rise: 0.001 Max: 0.2 Tau: 1000 Dt: 0.001
} }
NMDA: { Gbar: 0.008 Tau: 200 ITau: 1 MgC: 1.4 Voff: 0 }
MaintNMDA: { Gbar: 0.007 Tau: 200 ITau: 1 MgC: 1.4 Voff: 0 }
- GabaB: { Gbar: 0.009 RiseTau: 45 DecayTau: 50 Gbase: 0.2 GiSpike: 10 MaxTime: 47.41225 TauFact: 2.5811758 RiseDt: 0.022222223 DecayDt: 0.02 }
+ GabaB: { Gbar: 0.009 RiseTau: 45 DecayTau: 50 Gbase: 0.2 GiSpike: 10 MaxTime: 47.41225 TauFact: 2.5811756 RiseDt: 0.022222223 DecayDt: 0.02 }
VGCC: { Gbar: 0.02 Ca: 25 }
AK: { Gbar: 0.1 Hf: 0.076 Mf: 0.075 Voff: 2 Vmax: -37 }
SKCa: { Gbar: 0 C50: 0.5 ActTau: 15 DeTau: 30 KCaR: 0.8 CaRDecayTau: 150 CaInThr: 0.01 CaInTau: 50 }
@@ -5625,7 +5625,7 @@ Learn: {
TrgAvgAct: { On: true GiBaseInit: 0 ErrLRate: 0.02 SynScaleRate: 0.005 SubMean: 1 Permute: true Pool: true TrgRange: { Min: 0.5 Max: 2
} }
RLRate: { On: true SigmoidMin: 0.05 Diff: true SpkThr: 0.1 DiffThr: 0.02 Min: 0.001 }
- NeuroMod: { DAMod: 0 Valence: 0 DAModGain: 0.5 DALRateSign: false DALRateMod: 0 AChLRateMod: 0 AChDisInhib: 0 BurstGain: 1 DipGain: 1 }
+ NeuroMod: { DAMod: NoDAMod Valence: Positive DAModGain: 0.5 DALRateSign: false DALRateMod: 0 AChLRateMod: 0 AChDisInhib: 0 BurstGain: 1 DipGain: 1 }
}
CT: {
GeGain: 2 DecayTau: 50
@@ -5723,13 +5723,13 @@ Act: {
Clamp: { IsInput: false IsTarget: false Ge: 0.8 Add: false ErrThr: 0.5 }
Noise: { On: false GeHz: 100 Ge: 0.001 GiHz: 200 Gi: 0.001 }
VmRange: { Min: 0.1 Max: 1 }
- Mahp: { Gbar: 0.02 Voff: -30 Vslope: 9 TauMax: 1000 Tadj: 3.2093637 DtMax: 0.001 }
+ Mahp: { Gbar: 0.02 Voff: -30 Vslope: 9 TauMax: 1000 Tadj: 3.209364 DtMax: 0.001 }
Sahp: { Gbar: 0.01 CaTau: 5 Off: 0.8 Slope: 0.02 TauMax: 1 CaDt: 0.2 DtMax: 1 }
KNa: { On: true TrialSlow: false Med: { On: true Rise: 0.02 Max: 0.2 Tau: 200 Dt: 0.005 } Slow: { On: true Rise: 0.001 Max: 0.2 Tau: 1000 Dt: 0.001
} }
NMDA: { Gbar: 0.006 Tau: 100 ITau: 1 MgC: 1.4 Voff: 0 }
MaintNMDA: { Gbar: 0.007 Tau: 200 ITau: 1 MgC: 1.4 Voff: 0 }
- GabaB: { Gbar: 0.01 RiseTau: 45 DecayTau: 50 Gbase: 0.2 GiSpike: 10 MaxTime: 47.41225 TauFact: 2.5811758 RiseDt: 0.022222223 DecayDt: 0.02 }
+ GabaB: { Gbar: 0.01 RiseTau: 45 DecayTau: 50 Gbase: 0.2 GiSpike: 10 MaxTime: 47.41225 TauFact: 2.5811756 RiseDt: 0.022222223 DecayDt: 0.02 }
VGCC: { Gbar: 0.02 Ca: 25 }
AK: { Gbar: 0.1 Hf: 0.076 Mf: 0.075 Voff: 2 Vmax: -37 }
SKCa: { Gbar: 0 C50: 0.5 ActTau: 15 DeTau: 30 KCaR: 0.8 CaRDecayTau: 150 CaInThr: 0.01 CaInTau: 50 }
@@ -5749,7 +5749,7 @@ Learn: {
TrgAvgAct: { On: false GiBaseInit: 0 ErrLRate: 0.02 SynScaleRate: 0.005 SubMean: 1 Permute: true Pool: true TrgRange: { Min: 0.5 Max: 2
} }
RLRate: { On: true SigmoidMin: 0.05 Diff: true SpkThr: 0.1 DiffThr: 0.02 Min: 0.001 }
- NeuroMod: { DAMod: 0 Valence: 0 DAModGain: 0.5 DALRateSign: false DALRateMod: 0 AChLRateMod: 0 AChDisInhib: 0 BurstGain: 1 DipGain: 1 }
+ NeuroMod: { DAMod: NoDAMod Valence: Positive DAModGain: 0.5 DALRateSign: false DALRateMod: 0 AChLRateMod: 0 AChDisInhib: 0 BurstGain: 1 DipGain: 1 }
}
///////////////////////////////////////////////////
Prjn: ACCnegValMDToACCnegValPT
@@ -5824,13 +5824,13 @@ Act: {
Clamp: { IsInput: false IsTarget: false Ge: 0.8 Add: false ErrThr: 0.5 }
Noise: { On: false GeHz: 100 Ge: 0.001 GiHz: 200 Gi: 0.001 }
VmRange: { Min: 0.1 Max: 1 }
- Mahp: { Gbar: 0.02 Voff: -30 Vslope: 9 TauMax: 1000 Tadj: 3.2093637 DtMax: 0.001 }
+ Mahp: { Gbar: 0.02 Voff: -30 Vslope: 9 TauMax: 1000 Tadj: 3.209364 DtMax: 0.001 }
Sahp: { Gbar: 0.05 CaTau: 5 Off: 0.8 Slope: 0.02 TauMax: 1 CaDt: 0.2 DtMax: 1 }
KNa: { On: true TrialSlow: false Med: { On: true Rise: 0.02 Max: 0.2 Tau: 200 Dt: 0.005 } Slow: { On: true Rise: 0.001 Max: 0.2 Tau: 1000 Dt: 0.001
} }
NMDA: { Gbar: 0.006 Tau: 100 ITau: 1 MgC: 1.4 Voff: 0 }
MaintNMDA: { Gbar: 0.007 Tau: 200 ITau: 1 MgC: 1.4 Voff: 0 }
- GabaB: { Gbar: 0.015 RiseTau: 45 DecayTau: 50 Gbase: 0.2 GiSpike: 10 MaxTime: 47.41225 TauFact: 2.5811758 RiseDt: 0.022222223 DecayDt: 0.02 }
+ GabaB: { Gbar: 0.015 RiseTau: 45 DecayTau: 50 Gbase: 0.2 GiSpike: 10 MaxTime: 47.41225 TauFact: 2.5811756 RiseDt: 0.022222223 DecayDt: 0.02 }
VGCC: { Gbar: 0.02 Ca: 25 }
AK: { Gbar: 0.1 Hf: 0.076 Mf: 0.075 Voff: 2 Vmax: -37 }
SKCa: { Gbar: 0 C50: 0.5 ActTau: 15 DeTau: 30 KCaR: 0.8 CaRDecayTau: 150 CaInThr: 0.01 CaInTau: 50 }
@@ -5850,7 +5850,7 @@ Learn: {
TrgAvgAct: { On: true GiBaseInit: 0 ErrLRate: 0.02 SynScaleRate: 0.005 SubMean: 1 Permute: true Pool: true TrgRange: { Min: 0.5 Max: 2
} }
RLRate: { On: true SigmoidMin: 0.05 Diff: true SpkThr: 0.1 DiffThr: 0.02 Min: 0.001 }
- NeuroMod: { DAMod: 0 Valence: 0 DAModGain: 0.5 DALRateSign: false DALRateMod: 0 AChLRateMod: 0 AChDisInhib: 1 BurstGain: 1 DipGain: 1 }
+ NeuroMod: { DAMod: NoDAMod Valence: Positive DAModGain: 0.5 DALRateSign: false DALRateMod: 0 AChLRateMod: 0 AChDisInhib: 1 BurstGain: 1 DipGain: 1 }
}
///////////////////////////////////////////////////
Prjn: ACCnegValPTToACCnegValMD
@@ -5898,7 +5898,7 @@ Com: {
GType: InhibitoryG Delay: 2 MaxDelay: 2 PFail: 0 PFailSWt: false DelLen: 3
}
PrjnScale: {
- Rel: 1 Abs: 2
+ Rel: 1 Abs: 3
}
SWt: {
Init: { SPct: 0 Mean: 0.75 Var: 0 Sym: false }
@@ -5925,13 +5925,13 @@ Act: {
Clamp: { IsInput: false IsTarget: false Ge: 0.8 Add: false ErrThr: 0.5 }
Noise: { On: false GeHz: 100 Ge: 0.001 GiHz: 200 Gi: 0.001 }
VmRange: { Min: 0.1 Max: 1 }
- Mahp: { Gbar: 0.02 Voff: -30 Vslope: 9 TauMax: 1000 Tadj: 3.2093637 DtMax: 0.001 }
+ Mahp: { Gbar: 0.02 Voff: -30 Vslope: 9 TauMax: 1000 Tadj: 3.209364 DtMax: 0.001 }
Sahp: { Gbar: 0.1 CaTau: 5 Off: 0.8 Slope: 0.02 TauMax: 1 CaDt: 0.2 DtMax: 1 }
KNa: { On: true TrialSlow: false Med: { On: true Rise: 0.02 Max: 0.2 Tau: 200 Dt: 0.005 } Slow: { On: true Rise: 0.001 Max: 0.2 Tau: 1000 Dt: 0.001
} }
NMDA: { Gbar: 0.006 Tau: 100 ITau: 1 MgC: 1.4 Voff: 0 }
MaintNMDA: { Gbar: 0.007 Tau: 200 ITau: 1 MgC: 1.4 Voff: 0 }
- GabaB: { Gbar: 0.015 RiseTau: 45 DecayTau: 50 Gbase: 0.2 GiSpike: 10 MaxTime: 47.41225 TauFact: 2.5811758 RiseDt: 0.022222223 DecayDt: 0.02 }
+ GabaB: { Gbar: 0.015 RiseTau: 45 DecayTau: 50 Gbase: 0.2 GiSpike: 10 MaxTime: 47.41225 TauFact: 2.5811756 RiseDt: 0.022222223 DecayDt: 0.02 }
VGCC: { Gbar: 0.02 Ca: 25 }
AK: { Gbar: 0.1 Hf: 0.076 Mf: 0.075 Voff: 2 Vmax: -37 }
SKCa: { Gbar: 0 C50: 0.5 ActTau: 15 DeTau: 30 KCaR: 0.8 CaRDecayTau: 150 CaInThr: 0.01 CaInTau: 50 }
@@ -5951,7 +5951,7 @@ Learn: {
TrgAvgAct: { On: true GiBaseInit: 0 ErrLRate: 0.02 SynScaleRate: 0.005 SubMean: 1 Permute: true Pool: true TrgRange: { Min: 0.5 Max: 2
} }
RLRate: { On: true SigmoidMin: 0.05 Diff: true SpkThr: 0.1 DiffThr: 0.02 Min: 0.001 }
- NeuroMod: { DAMod: 0 Valence: 0 DAModGain: 0.5 DALRateSign: false DALRateMod: 0 AChLRateMod: 0 AChDisInhib: 0 BurstGain: 1 DipGain: 1 }
+ NeuroMod: { DAMod: NoDAMod Valence: Positive DAModGain: 0.5 DALRateSign: false DALRateMod: 0 AChLRateMod: 0 AChDisInhib: 0 BurstGain: 1 DipGain: 1 }
}
CT: {
GeGain: 0.05 DecayTau: 50
@@ -6109,13 +6109,13 @@ Act: {
Clamp: { IsInput: true IsTarget: false Ge: 1.5 Add: false ErrThr: 0.5 }
Noise: { On: false GeHz: 100 Ge: 0.001 GiHz: 200 Gi: 0.001 }
VmRange: { Min: 0.1 Max: 1 }
- Mahp: { Gbar: 0.02 Voff: -30 Vslope: 9 TauMax: 1000 Tadj: 3.2093637 DtMax: 0.001 }
+ Mahp: { Gbar: 0.02 Voff: -30 Vslope: 9 TauMax: 1000 Tadj: 3.209364 DtMax: 0.001 }
Sahp: { Gbar: 0.05 CaTau: 5 Off: 0.8 Slope: 0.02 TauMax: 1 CaDt: 0.2 DtMax: 1 }
KNa: { On: true TrialSlow: false Med: { On: true Rise: 0.02 Max: 0.2 Tau: 200 Dt: 0.005 } Slow: { On: true Rise: 0.001 Max: 0.2 Tau: 1000 Dt: 0.001
} }
NMDA: { Gbar: 0.006 Tau: 100 ITau: 1 MgC: 1.4 Voff: 0 }
MaintNMDA: { Gbar: 0.007 Tau: 200 ITau: 1 MgC: 1.4 Voff: 0 }
- GabaB: { Gbar: 0.015 RiseTau: 45 DecayTau: 50 Gbase: 0.2 GiSpike: 10 MaxTime: 47.41225 TauFact: 2.5811758 RiseDt: 0.022222223 DecayDt: 0.02 }
+ GabaB: { Gbar: 0.015 RiseTau: 45 DecayTau: 50 Gbase: 0.2 GiSpike: 10 MaxTime: 47.41225 TauFact: 2.5811756 RiseDt: 0.022222223 DecayDt: 0.02 }
VGCC: { Gbar: 0.02 Ca: 25 }
AK: { Gbar: 0.1 Hf: 0.076 Mf: 0.075 Voff: 2 Vmax: -37 }
SKCa: { Gbar: 0 C50: 0.5 ActTau: 15 DeTau: 30 KCaR: 0.8 CaRDecayTau: 150 CaInThr: 0.01 CaInTau: 50 }
@@ -6135,7 +6135,7 @@ Learn: {
TrgAvgAct: { On: true GiBaseInit: 0 ErrLRate: 0.02 SynScaleRate: 0.005 SubMean: 0 Permute: true Pool: true TrgRange: { Min: 0.5 Max: 2
} }
RLRate: { On: true SigmoidMin: 0.05 Diff: true SpkThr: 0.1 DiffThr: 0.02 Min: 0.001 }
- NeuroMod: { DAMod: 0 Valence: 0 DAModGain: 0.5 DALRateSign: false DALRateMod: 0 AChLRateMod: 0 AChDisInhib: 0 BurstGain: 1 DipGain: 1 }
+ NeuroMod: { DAMod: NoDAMod Valence: Positive DAModGain: 0.5 DALRateSign: false DALRateMod: 0 AChLRateMod: 0 AChDisInhib: 0 BurstGain: 1 DipGain: 1 }
}
/////////////////////////////////////////////////
Layer: TimeP
@@ -6150,13 +6150,13 @@ Act: {
Clamp: { IsInput: false IsTarget: true Ge: 0.8 Add: false ErrThr: 0.5 }
Noise: { On: false GeHz: 100 Ge: 0.001 GiHz: 200 Gi: 0.001 }
VmRange: { Min: 0.1 Max: 1 }
- Mahp: { Gbar: 0.02 Voff: -30 Vslope: 9 TauMax: 1000 Tadj: 3.2093637 DtMax: 0.001 }
+ Mahp: { Gbar: 0.02 Voff: -30 Vslope: 9 TauMax: 1000 Tadj: 3.209364 DtMax: 0.001 }
Sahp: { Gbar: 0.05 CaTau: 5 Off: 0.8 Slope: 0.02 TauMax: 1 CaDt: 0.2 DtMax: 1 }
KNa: { On: true TrialSlow: false Med: { On: true Rise: 0.02 Max: 0.2 Tau: 200 Dt: 0.005 } Slow: { On: true Rise: 0.001 Max: 0.2 Tau: 1000 Dt: 0.001
} }
NMDA: { Gbar: 0.006 Tau: 100 ITau: 1 MgC: 1.4 Voff: 0 }
MaintNMDA: { Gbar: 0.007 Tau: 200 ITau: 1 MgC: 1.4 Voff: 0 }
- GabaB: { Gbar: 0.015 RiseTau: 45 DecayTau: 50 Gbase: 0.2 GiSpike: 10 MaxTime: 47.41225 TauFact: 2.5811758 RiseDt: 0.022222223 DecayDt: 0.02 }
+ GabaB: { Gbar: 0.015 RiseTau: 45 DecayTau: 50 Gbase: 0.2 GiSpike: 10 MaxTime: 47.41225 TauFact: 2.5811756 RiseDt: 0.022222223 DecayDt: 0.02 }
VGCC: { Gbar: 0.02 Ca: 25 }
AK: { Gbar: 0.1 Hf: 0.076 Mf: 0.075 Voff: 2 Vmax: -37 }
SKCa: { Gbar: 0 C50: 0.5 ActTau: 15 DeTau: 30 KCaR: 0.8 CaRDecayTau: 150 CaInThr: 0.01 CaInTau: 50 }
@@ -6176,7 +6176,7 @@ Learn: {
TrgAvgAct: { On: true GiBaseInit: 0 ErrLRate: 0.02 SynScaleRate: 0.005 SubMean: 1 Permute: true Pool: true TrgRange: { Min: 0.5 Max: 2
} }
RLRate: { On: true SigmoidMin: 1 Diff: true SpkThr: 0.1 DiffThr: 0.02 Min: 0.001 }
- NeuroMod: { DAMod: 0 Valence: 0 DAModGain: 0.5 DALRateSign: false DALRateMod: 0 AChLRateMod: 0 AChDisInhib: 0 BurstGain: 1 DipGain: 1 }
+ NeuroMod: { DAMod: NoDAMod Valence: Positive DAModGain: 0.5 DALRateSign: false DALRateMod: 0 AChLRateMod: 0 AChDisInhib: 0 BurstGain: 1 DipGain: 1 }
}
Pulv: {
DriveScale: 0.1 FullDriveAct: 0.6 DriveLayIdx: 53
@@ -6354,13 +6354,13 @@ Act: {
Clamp: { IsInput: true IsTarget: false Ge: 1.5 Add: false ErrThr: 0.5 }
Noise: { On: false GeHz: 100 Ge: 0.001 GiHz: 200 Gi: 0.001 }
VmRange: { Min: 0.1 Max: 1 }
- Mahp: { Gbar: 0.02 Voff: -30 Vslope: 9 TauMax: 1000 Tadj: 3.2093637 DtMax: 0.001 }
+ Mahp: { Gbar: 0.02 Voff: -30 Vslope: 9 TauMax: 1000 Tadj: 3.209364 DtMax: 0.001 }
Sahp: { Gbar: 0.05 CaTau: 5 Off: 0.8 Slope: 0.02 TauMax: 1 CaDt: 0.2 DtMax: 1 }
KNa: { On: true TrialSlow: false Med: { On: true Rise: 0.02 Max: 0.2 Tau: 200 Dt: 0.005 } Slow: { On: true Rise: 0.001 Max: 0.2 Tau: 1000 Dt: 0.001
} }
NMDA: { Gbar: 0.006 Tau: 100 ITau: 1 MgC: 1.4 Voff: 0 }
MaintNMDA: { Gbar: 0.007 Tau: 200 ITau: 1 MgC: 1.4 Voff: 0 }
- GabaB: { Gbar: 0.015 RiseTau: 45 DecayTau: 50 Gbase: 0.2 GiSpike: 10 MaxTime: 47.41225 TauFact: 2.5811758 RiseDt: 0.022222223 DecayDt: 0.02 }
+ GabaB: { Gbar: 0.015 RiseTau: 45 DecayTau: 50 Gbase: 0.2 GiSpike: 10 MaxTime: 47.41225 TauFact: 2.5811756 RiseDt: 0.022222223 DecayDt: 0.02 }
VGCC: { Gbar: 0.02 Ca: 25 }
AK: { Gbar: 0.1 Hf: 0.076 Mf: 0.075 Voff: 2 Vmax: -37 }
SKCa: { Gbar: 0 C50: 0.5 ActTau: 15 DeTau: 30 KCaR: 0.8 CaRDecayTau: 150 CaInThr: 0.01 CaInTau: 50 }
@@ -6380,7 +6380,7 @@ Learn: {
TrgAvgAct: { On: true GiBaseInit: 0 ErrLRate: 0.02 SynScaleRate: 0.005 SubMean: 0 Permute: true Pool: true TrgRange: { Min: 0.5 Max: 2
} }
RLRate: { On: true SigmoidMin: 0.05 Diff: true SpkThr: 0.1 DiffThr: 0.02 Min: 0.001 }
- NeuroMod: { DAMod: 0 Valence: 0 DAModGain: 0.5 DALRateSign: false DALRateMod: 0 AChLRateMod: 0 AChDisInhib: 0 BurstGain: 1 DipGain: 1 }
+ NeuroMod: { DAMod: NoDAMod Valence: Positive DAModGain: 0.5 DALRateSign: false DALRateMod: 0 AChLRateMod: 0 AChDisInhib: 0 BurstGain: 1 DipGain: 1 }
}
/////////////////////////////////////////////////
Layer: CSP
@@ -6395,13 +6395,13 @@ Act: {
Clamp: { IsInput: false IsTarget: true Ge: 0.8 Add: false ErrThr: 0.5 }
Noise: { On: false GeHz: 100 Ge: 0.001 GiHz: 200 Gi: 0.001 }
VmRange: { Min: 0.1 Max: 1 }
- Mahp: { Gbar: 0.02 Voff: -30 Vslope: 9 TauMax: 1000 Tadj: 3.2093637 DtMax: 0.001 }
+ Mahp: { Gbar: 0.02 Voff: -30 Vslope: 9 TauMax: 1000 Tadj: 3.209364 DtMax: 0.001 }
Sahp: { Gbar: 0.05 CaTau: 5 Off: 0.8 Slope: 0.02 TauMax: 1 CaDt: 0.2 DtMax: 1 }
KNa: { On: true TrialSlow: false Med: { On: true Rise: 0.02 Max: 0.2 Tau: 200 Dt: 0.005 } Slow: { On: true Rise: 0.001 Max: 0.2 Tau: 1000 Dt: 0.001
} }
NMDA: { Gbar: 0.006 Tau: 100 ITau: 1 MgC: 1.4 Voff: 0 }
MaintNMDA: { Gbar: 0.007 Tau: 200 ITau: 1 MgC: 1.4 Voff: 0 }
- GabaB: { Gbar: 0.015 RiseTau: 45 DecayTau: 50 Gbase: 0.2 GiSpike: 10 MaxTime: 47.41225 TauFact: 2.5811758 RiseDt: 0.022222223 DecayDt: 0.02 }
+ GabaB: { Gbar: 0.015 RiseTau: 45 DecayTau: 50 Gbase: 0.2 GiSpike: 10 MaxTime: 47.41225 TauFact: 2.5811756 RiseDt: 0.022222223 DecayDt: 0.02 }
VGCC: { Gbar: 0.02 Ca: 25 }
AK: { Gbar: 0.1 Hf: 0.076 Mf: 0.075 Voff: 2 Vmax: -37 }
SKCa: { Gbar: 0 C50: 0.5 ActTau: 15 DeTau: 30 KCaR: 0.8 CaRDecayTau: 150 CaInThr: 0.01 CaInTau: 50 }
@@ -6421,7 +6421,7 @@ Learn: {
TrgAvgAct: { On: true GiBaseInit: 0 ErrLRate: 0.02 SynScaleRate: 0.005 SubMean: 1 Permute: true Pool: true TrgRange: { Min: 0.5 Max: 2
} }
RLRate: { On: true SigmoidMin: 1 Diff: true SpkThr: 0.1 DiffThr: 0.02 Min: 0.001 }
- NeuroMod: { DAMod: 0 Valence: 0 DAModGain: 0.5 DALRateSign: false DALRateMod: 0 AChLRateMod: 0 AChDisInhib: 0 BurstGain: 1 DipGain: 1 }
+ NeuroMod: { DAMod: NoDAMod Valence: Positive DAModGain: 0.5 DALRateSign: false DALRateMod: 0 AChLRateMod: 0 AChDisInhib: 0 BurstGain: 1 DipGain: 1 }
}
Pulv: {
DriveScale: 0.1 FullDriveAct: 0.6 DriveLayIdx: 55
@@ -6519,13 +6519,13 @@ Act: {
Clamp: { IsInput: true IsTarget: false Ge: 1.5 Add: false ErrThr: 0.5 }
Noise: { On: false GeHz: 100 Ge: 0.001 GiHz: 200 Gi: 0.001 }
VmRange: { Min: 0.1 Max: 1 }
- Mahp: { Gbar: 0.02 Voff: -30 Vslope: 9 TauMax: 1000 Tadj: 3.2093637 DtMax: 0.001 }
+ Mahp: { Gbar: 0.02 Voff: -30 Vslope: 9 TauMax: 1000 Tadj: 3.209364 DtMax: 0.001 }
Sahp: { Gbar: 0.05 CaTau: 5 Off: 0.8 Slope: 0.02 TauMax: 1 CaDt: 0.2 DtMax: 1 }
KNa: { On: true TrialSlow: false Med: { On: true Rise: 0.02 Max: 0.2 Tau: 200 Dt: 0.005 } Slow: { On: true Rise: 0.001 Max: 0.2 Tau: 1000 Dt: 0.001
} }
NMDA: { Gbar: 0.006 Tau: 100 ITau: 1 MgC: 1.4 Voff: 0 }
MaintNMDA: { Gbar: 0.007 Tau: 200 ITau: 1 MgC: 1.4 Voff: 0 }
- GabaB: { Gbar: 0.015 RiseTau: 45 DecayTau: 50 Gbase: 0.2 GiSpike: 10 MaxTime: 47.41225 TauFact: 2.5811758 RiseDt: 0.022222223 DecayDt: 0.02 }
+ GabaB: { Gbar: 0.015 RiseTau: 45 DecayTau: 50 Gbase: 0.2 GiSpike: 10 MaxTime: 47.41225 TauFact: 2.5811756 RiseDt: 0.022222223 DecayDt: 0.02 }
VGCC: { Gbar: 0.02 Ca: 25 }
AK: { Gbar: 0.1 Hf: 0.076 Mf: 0.075 Voff: 2 Vmax: -37 }
SKCa: { Gbar: 0 C50: 0.5 ActTau: 15 DeTau: 30 KCaR: 0.8 CaRDecayTau: 150 CaInThr: 0.01 CaInTau: 50 }
@@ -6545,5 +6545,5 @@ Learn: {
TrgAvgAct: { On: true GiBaseInit: 0 ErrLRate: 0.02 SynScaleRate: 0.005 SubMean: 0 Permute: true Pool: true TrgRange: { Min: 0.5 Max: 2
} }
RLRate: { On: true SigmoidMin: 0.05 Diff: true SpkThr: 0.1 DiffThr: 0.02 Min: 0.001 }
- NeuroMod: { DAMod: 0 Valence: 0 DAModGain: 0.5 DALRateSign: false DALRateMod: 0 AChLRateMod: 0 AChDisInhib: 0 BurstGain: 1 DipGain: 1 }
+ NeuroMod: { DAMod: NoDAMod Valence: Positive DAModGain: 0.5 DALRateSign: false DALRateMod: 0 AChLRateMod: 0 AChDisInhib: 0 BurstGain: 1 DipGain: 1 }
}
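Two mechanical changes repeat throughout these params_all.txt hunks: enum-valued fields now print by name (DAMod: NoDAMod, Valence: Positive) rather than by number, and float32-derived constants shift in their last digit (Tadj 3.2093637 → 3.209364; TauFact 2.5811758 → 2.5811756), consistent with shortest round-trip formatting and/or one-ULP differences from the mat32 v2 migration. A minimal sketch of both printing behaviors, with an assumed String method standing in for the enumgen-generated one:

```go
package main

import "fmt"

// DAModTypes is a sketch of the enum printed in the dumps above; the
// constant names follow the diff, but this String method is assumed
// (the real one is generated by enumgen).
type DAModTypes int32

const (
	NoDAMod DAModTypes = iota
	D1Mod   // further values exist in axon; listed here only for shape
)

func (d DAModTypes) String() string {
	return [...]string{"NoDAMod", "D1Mod"}[d]
}

func main() {
	// With a String method, %v prints the name, which is why
	// "DAMod: 0" becomes "DAMod: NoDAMod" in these dumps.
	fmt.Printf("DAMod: %v\n", NoDAMod)

	// float32 values print as the shortest decimal that round-trips,
	// so e.g. Tadj can legitimately shorten from 3.2093637 to 3.209364.
	var tadj float32 = 3.209364
	fmt.Println(tadj)
}
```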
diff --git a/examples/pvlv/params_good/params_layers.txt b/examples/pvlv/params_good/params_layers.txt
index 7a990a950..7bdb70702 100644
--- a/examples/pvlv/params_good/params_layers.txt
+++ b/examples/pvlv/params_good/params_layers.txt
@@ -77,8 +77,8 @@
Pool.Gi: 1.00
BLAPosExtD2 Nominal: 0.03
- Layer.Gi: 1.80 Params: Base:#BLAPosExtD2: 1.8
- Pool.Gi: 1.00 Params: #BLAPosExtD2: 1.0
+ Layer.Gi: 1.80 Params: #BLAPosExtD2: 1.8
+ Pool.Gi: 1.00 Params: Base:#BLAPosExtD2: 1.0
BLANegExtD1 Nominal: 0.03
Layer.Gi: 1.80
diff --git a/examples/pvlv/params_good/params_nondef.txt b/examples/pvlv/params_good/params_nondef.txt
index fdaa387a4..c285b860c 100644
--- a/examples/pvlv/params_good/params_nondef.txt
+++ b/examples/pvlv/params_good/params_nondef.txt
@@ -198,6 +198,7 @@ VsPatch.Inhib.Pool.FB: 0 // [Def: 0.5,1,4]
VsPatch.Learn.RLRate.SigmoidMin: 0.01 // [Def: 0.05,1]
VsPatch.VSPatch.Gain: 5 // [Def: 3]
VsPatch.VSPatch.ThrInit: 0.2 // [Def: 0.15]
+VsPatch.VSPatch.ThrLRate: 0.0001 // [Def: 0,0.002]
VsPatch.ACCnegValPTpToVsPatch.PrjnScale.Abs: 6 // [Def: 1]
VsPatch.ACCnegValPTpToVsPatch.SWts.Init.Mean: 0.1 // [Def: 0.5,0.4]
VsPatch.ACCnegValPTpToVsPatch.SWts.Init.Var: 0.05 // [Def: 0.25]
@@ -374,7 +375,7 @@ OFCposUSMD.OFCposUSToOFCposUSMD.SWts.Init.Mean: 0.75 // [Def: 0.5,0.4]
OFCposUSMD.OFCposUSToOFCposUSMD.SWts.Init.Var: 0 // [Def: 0.25]
OFCposUSMD.OFCposUSToOFCposUSMD.SWts.Init.Sym: false // [Def: true]
OFCposUSMD.OFCposUSToOFCposUSMD.SWts.Adapt.SigGain: 1 // [Def: 6]
-OFCposUSMD.VsGPiToOFCposUSMD.PrjnScale.Abs: 2 // [Def: 1]
+OFCposUSMD.VsGPiToOFCposUSMD.PrjnScale.Abs: 3 // [Def: 1]
OFCposUSMD.VsGPiToOFCposUSMD.SWts.Init.Mean: 0.75 // [Def: 0.5,0.4]
OFCposUSMD.VsGPiToOFCposUSMD.SWts.Init.Var: 0 // [Def: 0.25]
OFCposUSMD.VsGPiToOFCposUSMD.SWts.Init.Sym: false // [Def: true]
@@ -435,7 +436,7 @@ OFCnegUSMD.OFCnegUSToOFCnegUSMD.SWts.Init.Mean: 0.75 // [Def: 0.5,0.4]
OFCnegUSMD.OFCnegUSToOFCnegUSMD.SWts.Init.Var: 0 // [Def: 0.25]
OFCnegUSMD.OFCnegUSToOFCnegUSMD.SWts.Init.Sym: false // [Def: true]
OFCnegUSMD.OFCnegUSToOFCnegUSMD.SWts.Adapt.SigGain: 1 // [Def: 6]
-OFCnegUSMD.VsGPiToOFCnegUSMD.PrjnScale.Abs: 2 // [Def: 1]
+OFCnegUSMD.VsGPiToOFCnegUSMD.PrjnScale.Abs: 3 // [Def: 1]
OFCnegUSMD.VsGPiToOFCnegUSMD.SWts.Init.Mean: 0.75 // [Def: 0.5,0.4]
OFCnegUSMD.VsGPiToOFCnegUSMD.SWts.Init.Var: 0 // [Def: 0.25]
OFCnegUSMD.VsGPiToOFCnegUSMD.SWts.Init.Sym: false // [Def: true]
@@ -475,7 +476,7 @@ OFCposValMD.OFCposValToOFCposValMD.SWts.Init.Mean: 0.75 // [Def: 0.5,0.4]
OFCposValMD.OFCposValToOFCposValMD.SWts.Init.Var: 0 // [Def: 0.25]
OFCposValMD.OFCposValToOFCposValMD.SWts.Init.Sym: false // [Def: true]
OFCposValMD.OFCposValToOFCposValMD.SWts.Adapt.SigGain: 1 // [Def: 6]
-OFCposValMD.VsGPiToOFCposValMD.PrjnScale.Abs: 2 // [Def: 1]
+OFCposValMD.VsGPiToOFCposValMD.PrjnScale.Abs: 3 // [Def: 1]
OFCposValMD.VsGPiToOFCposValMD.SWts.Init.Mean: 0.75 // [Def: 0.5,0.4]
OFCposValMD.VsGPiToOFCposValMD.SWts.Init.Var: 0 // [Def: 0.25]
OFCposValMD.VsGPiToOFCposValMD.SWts.Init.Sym: false // [Def: true]
@@ -515,7 +516,7 @@ ACCnegValMD.ACCnegValToACCnegValMD.SWts.Init.Mean: 0.75 // [Def: 0.5,0.4]
ACCnegValMD.ACCnegValToACCnegValMD.SWts.Init.Var: 0 // [Def: 0.25]
ACCnegValMD.ACCnegValToACCnegValMD.SWts.Init.Sym: false // [Def: true]
ACCnegValMD.ACCnegValToACCnegValMD.SWts.Adapt.SigGain: 1 // [Def: 6]
-ACCnegValMD.VsGPiToACCnegValMD.PrjnScale.Abs: 2 // [Def: 1]
+ACCnegValMD.VsGPiToACCnegValMD.PrjnScale.Abs: 3 // [Def: 1]
ACCnegValMD.VsGPiToACCnegValMD.SWts.Init.Mean: 0.75 // [Def: 0.5,0.4]
ACCnegValMD.VsGPiToACCnegValMD.SWts.Init.Var: 0 // [Def: 0.25]
ACCnegValMD.VsGPiToACCnegValMD.SWts.Init.Sym: false // [Def: true]
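For orientation, params_nondef.txt reports every value that differs from its tagged default, so the new ThrLRate line and the Abs 2 → 3 bumps above should trace back to the param sheets. A sketch, assuming the netparams/params API this diff migrates to, of how such non-default values are typically declared (the .VSPatchLayer class name is hypothetical; .GPiToBGThal appears in the reports):

```go
package main

import (
	"github.com/emer/emergent/v2/netparams"
	"github.com/emer/emergent/v2/params"
)

// ParamSets is a sketch, not the project's actual sheet: it shows the
// declaration style that produces the non-default values reported above.
var ParamSets = netparams.Sets{
	"Base": {
		{Sel: ".VSPatchLayer", Desc: "hypothetical class name for VsPatch",
			Params: params.Params{
				"Layer.VSPatch.ThrLRate": "0.0001", // vs [Def: 0,0.002]
			}},
		{Sel: ".GPiToBGThal", Desc: "GPi inhibition of BG-gated thalamus",
			Params: params.Params{
				"Prjn.PrjnScale.Abs": "3", // raised from 2 in this change
			}},
	},
}

func main() { _ = ParamSets }
```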
diff --git a/examples/pvlv/params_good/params_prjns.txt b/examples/pvlv/params_good/params_prjns.txt
index abbbdebfd..79b69302e 100644
--- a/examples/pvlv/params_good/params_prjns.txt
+++ b/examples/pvlv/params_good/params_prjns.txt
@@ -99,15 +99,15 @@ Layer: VsPatch
ACCnegValPTp VSPatchPrjn Abs: 6.00 Rel: 1.00 GScale: 0.30 Rel: 0.20
Abs Params: Base:.VSPatchPrjn: 6
Drives VSPatchPrjn Abs: 6.00 Rel: 1.00 GScale: 6.00 Rel: 1.00
- Abs Params: Base:.VSPatchPrjn: 6
+ Abs Params: .VSPatchPrjn: 6
OFCposUSPTp VSPatchPrjn Abs: 6.00 Rel: 1.00 GScale: 0.20 Rel: 0.20
Abs Params: Base:.VSPatchPrjn: 6
OFCposValPTp VSPatchPrjn Abs: 6.00 Rel: 1.00 GScale: 0.30 Rel: 0.20
Abs Params: Base:.VSPatchPrjn: 6
OFCnegUSPTp VSPatchPrjn Abs: 6.00 Rel: 1.00 GScale: 0.11 Rel: 0.20
- Abs Params: Base:.VSPatchPrjn: 6
- ACCnegValPTp VSPatchPrjn Abs: 6.00 Rel: 1.00 GScale: 0.30 Rel: 0.20
Abs Params: .VSPatchPrjn: 6
+ ACCnegValPTp VSPatchPrjn Abs: 6.00 Rel: 1.00 GScale: 0.30 Rel: 0.20
+ Abs Params: Base:.VSPatchPrjn: 6
Layer: SC
CS ForwardPrjn Abs: 1.00 Rel: 1.00 GScale: 1.00 Rel: 1.00
@@ -183,8 +183,8 @@ Layer: OFCposUSMD
OFCposUSPT ForwardPrjn Abs: 1.00 Rel: 1.00 GScale: 0.50 Rel: 0.50
OFCposUS ForwardPrjn Abs: 4.00 Rel: 1.00 GScale: 2.00 Rel: 0.50
Abs Params: Base:.SuperToThal: 4.0
- VsGPi InhibPrjn Abs: 2.00 Rel: 1.00 GScale: 0.08 Rel: 1.00
- Abs Params: Base:.GPiToBGThal: 2
+ VsGPi InhibPrjn Abs: 3.00 Rel: 1.00 GScale: 0.12 Rel: 1.00
+ Abs Params: Base:.GPiToBGThal: 3
Layer: OFCposUSPTp
OFCposUSPT CTCtxtPrjn Abs: 6.00 Rel: 1.00 GScale: 0.15 Rel: 0.22
@@ -234,8 +234,8 @@ Layer: OFCnegUSMD
OFCnegUSPT ForwardPrjn Abs: 1.00 Rel: 1.00 GScale: 0.50 Rel: 0.50
OFCnegUS ForwardPrjn Abs: 4.00 Rel: 1.00 GScale: 2.00 Rel: 0.50
Abs Params: Base:.SuperToThal: 4.0
- VsGPi InhibPrjn Abs: 2.00 Rel: 1.00 GScale: 0.08 Rel: 1.00
- Abs Params: Base:.GPiToBGThal: 2
+ VsGPi InhibPrjn Abs: 3.00 Rel: 1.00 GScale: 0.12 Rel: 1.00
+ Abs Params: Base:.GPiToBGThal: 3
Layer: OFCnegUSPTp
OFCnegUSPT CTCtxtPrjn Abs: 6.00 Rel: 1.00 GScale: 0.15 Rel: 0.23
@@ -271,8 +271,8 @@ Layer: OFCposValMD
OFCposValPT ForwardPrjn Abs: 1.00 Rel: 1.00 GScale: 0.50 Rel: 0.50
OFCposVal ForwardPrjn Abs: 4.00 Rel: 1.00 GScale: 2.00 Rel: 0.50
Abs Params: Base:.SuperToThal: 4.0
- VsGPi InhibPrjn Abs: 2.00 Rel: 1.00 GScale: 0.08 Rel: 1.00
- Abs Params: Base:.GPiToBGThal: 2
+ VsGPi InhibPrjn Abs: 3.00 Rel: 1.00 GScale: 0.12 Rel: 1.00
+ Abs Params: Base:.GPiToBGThal: 3
Layer: OFCposValPTp
OFCposValPT CTCtxtPrjn Abs: 6.00 Rel: 1.00 GScale: 0.13 Rel: 0.23
@@ -307,8 +307,8 @@ Layer: ACCnegValMD
ACCnegValPT ForwardPrjn Abs: 1.00 Rel: 1.00 GScale: 0.50 Rel: 0.50
ACCnegVal ForwardPrjn Abs: 4.00 Rel: 1.00 GScale: 2.00 Rel: 0.50
Abs Params: Base:.SuperToThal: 4.0
- VsGPi InhibPrjn Abs: 2.00 Rel: 1.00 GScale: 0.08 Rel: 1.00
- Abs Params: Base:.GPiToBGThal: 2
+ VsGPi InhibPrjn Abs: 3.00 Rel: 1.00 GScale: 0.12 Rel: 1.00
+ Abs Params: Base:.GPiToBGThal: 3
Layer: ACCnegValPTp
ACCnegValPT CTCtxtPrjn Abs: 6.00 Rel: 1.00 GScale: 0.13 Rel: 0.23
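These hunks also offer a consistency check: GScale here is proportional to PrjnScale.Abs, so raising Abs from 2 to 3 moves GScale from 0.08 to 0.12, i.e. a 0.04 base factor from the relative normalization (read off the report, not derived here):

```go
package main

import "fmt"

func main() {
	base := 0.04 // per-unit-Abs factor implied by the report above
	for _, abs := range []float64{2, 3} {
		fmt.Printf("Abs %.0f -> GScale %.2f\n", abs, abs*base)
	}
}
```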
diff --git a/examples/pvlv/pvlv.go b/examples/pvlv/pvlv.go
index e0510b9e0..7a64ce43f 100644
--- a/examples/pvlv/pvlv.go
+++ b/examples/pvlv/pvlv.go
@@ -8,6 +8,8 @@ in the amygdala, ventral striatum and associated areas.
*/
package main
+//go:generate goki generate -add-types
+
import (
"fmt"
"log"
@@ -15,30 +17,30 @@ import (
"github.com/emer/axon/axon"
"github.com/emer/axon/examples/pvlv/cond"
- "github.com/emer/emergent/econfig"
- "github.com/emer/emergent/egui"
- "github.com/emer/emergent/elog"
- "github.com/emer/emergent/emer"
- "github.com/emer/emergent/env"
- "github.com/emer/emergent/erand"
- "github.com/emer/emergent/estats"
- "github.com/emer/emergent/etime"
- "github.com/emer/emergent/looper"
- "github.com/emer/emergent/netview"
- "github.com/emer/emergent/params"
- "github.com/emer/emergent/prjn"
- "github.com/emer/empi/mpi"
- "github.com/emer/etable/agg"
- "github.com/emer/etable/eplot"
- "github.com/emer/etable/etable"
- "github.com/emer/etable/etensor"
- "github.com/emer/etable/minmax"
- "github.com/emer/etable/split"
- "github.com/goki/gi/gi"
- "github.com/goki/gi/gimain"
- "github.com/goki/ki/ki"
+ "github.com/emer/emergent/v2/econfig"
+ "github.com/emer/emergent/v2/egui"
+ "github.com/emer/emergent/v2/elog"
+ "github.com/emer/emergent/v2/emer"
+ "github.com/emer/emergent/v2/env"
+ "github.com/emer/emergent/v2/erand"
+ "github.com/emer/emergent/v2/estats"
+ "github.com/emer/emergent/v2/etime"
+ "github.com/emer/emergent/v2/looper"
+ "github.com/emer/emergent/v2/netview"
+ "github.com/emer/emergent/v2/params"
+ "github.com/emer/emergent/v2/prjn"
+ "github.com/emer/empi/v2/mpi"
"github.com/goki/ki/kit"
- "github.com/goki/mat32"
+ "goki.dev/etable/v2/agg"
+ "goki.dev/etable/v2/eplot"
+ "goki.dev/etable/v2/etable"
+ "goki.dev/etable/v2/etensor"
+ "goki.dev/etable/v2/minmax"
+ "goki.dev/etable/v2/split"
+ "goki.dev/gi/v2/gi"
+ "goki.dev/gi/v2/gimain"
+ "goki.dev/goosi/events"
+ "goki.dev/mat32/v2"
)
func main() {
@@ -46,7 +48,7 @@ func main() {
sim.New()
sim.ConfigAll()
if sim.Config.GUI {
- gimain.Main(sim.RunGUI)
+ gimain.Run(sim.RunGUI)
} else {
sim.RunNoGUI()
}
@@ -62,37 +64,37 @@ func main() {
type Sim struct {
// simulation configuration parameters -- set by .toml config file and / or args
- Config Config `desc:"simulation configuration parameters -- set by .toml config file and / or args"`
+ Config Config
- // [view: no-inline] the network -- click to view / edit parameters for layers, prjns, etc
- Net *axon.Network `view:"no-inline" desc:"the network -- click to view / edit parameters for layers, prjns, etc"`
+ // the network -- click to view / edit parameters for layers, prjns, etc
+ Net *axon.Network `view:"no-inline"`
- // [view: inline] all parameter management
- Params emer.NetParams `view:"inline" desc:"all parameter management"`
+ // all parameter management
+ Params emer.NetParams `view:"inline"`
- // [view: no-inline] contains looper control loops for running sim
- Loops *looper.Manager `view:"no-inline" desc:"contains looper control loops for running sim"`
+ // contains looper control loops for running sim
+ Loops *looper.Manager `view:"no-inline"`
// contains computed statistic values
- Stats estats.Stats `desc:"contains computed statistic values"`
+ Stats estats.Stats
// Contains all the logs and information about the logs.'
- Logs elog.Logs `desc:"Contains all the logs and information about the logs.'"`
+ Logs elog.Logs
- // [view: no-inline] Environments
- Envs env.Envs `view:"no-inline" desc:"Environments"`
+ // Environments
+ Envs env.Envs `view:"no-inline"`
// axon timing parameters and state
- Context axon.Context `desc:"axon timing parameters and state"`
+ Context axon.Context
- // [view: inline] netview update parameters
- ViewUpdt netview.ViewUpdt `view:"inline" desc:"netview update parameters"`
+ // netview update parameters
+ ViewUpdt netview.ViewUpdt `view:"inline"`
- // [view: -] manages all the gui elements
- GUI egui.GUI `view:"-" desc:"manages all the gui elements"`
+ // manages all the gui elements
+ GUI egui.GUI `view:"-"`
- // [view: -] a list of random seeds to use for each run
- RndSeeds erand.Seeds `view:"-" desc:"a list of random seeds to use for each run"`
+ // a list of random seeds to use for each run
+ RndSeeds erand.Seeds `view:"-"`
}
// New creates new blank elements and initializes defaults
@@ -717,17 +719,17 @@ func (ss *Sim) BlockStats() {
if ss.Config.GUI {
plt := ss.GUI.Plots[etime.ScopeKey(stnm)]
plt.SetTable(dt)
- plt.Update()
+ plt.GoUpdatePlot()
}
}
////////////////////////////////////////////////////////////////////////////////////////////
// Gui
-// ConfigGui configures the GoGi gui interface for this simulation,
-func (ss *Sim) ConfigGui() *gi.Window {
+// ConfigGUI configures the GoGi gui interface for this simulation.
+func (ss *Sim) ConfigGUI() {
title := "Axon PVLV"
- ss.GUI.MakeWindow(ss, "pvlv", title, `This is the PVLV test model in Axon. See emergent on GitHub.`)
+ ss.GUI.MakeBody(ss, "pvlv", title, `This is the PVLV test model in Axon. See emergent on GitHub.`)
ss.GUI.CycleUpdateInterval = 10
nv := ss.GUI.AddNetView("NetView")
@@ -737,8 +739,8 @@ func (ss *Sim) ConfigGui() *gi.Window {
ss.ViewUpdt.Config(nv, etime.Phase, etime.Phase)
ss.GUI.ViewUpdt = &ss.ViewUpdt
- nv.Scene().Camera.Pose.Pos.Set(0, 1.4, 2.6)
- nv.Scene().Camera.LookAt(mat32.Vec3{0, 0, 0}, mat32.Vec3{0, 1, 0})
+ nv.SceneXYZ().Camera.Pose.Pos.Set(0, 1.4, 2.6)
+ nv.SceneXYZ().Camera.LookAt(mat32.V3(0, 0, 0), mat32.V3(0, 1, 0))
ss.GUI.AddPlots(title, &ss.Logs)
@@ -746,85 +748,87 @@ func (ss *Sim) ConfigGui() *gi.Window {
stnm := "BlockByType"
dt := ss.Logs.MiscTable(stnm)
- plt := ss.GUI.TabView.AddNewTab(eplot.KiT_Plot2D, stnm+" Plot").(*eplot.Plot2D)
+ plt := eplot.NewPlot2D(ss.GUI.Tabs.NewTab(stnm + " Plot"))
ss.GUI.Plots[etime.ScopeKey(stnm)] = plt
plt.Params.Title = stnm
plt.Params.XAxisCol = "TrialType"
plt.SetTable(dt)
- cb := gi.AddNewComboBox(ss.GUI.ToolBar, "runs")
- cb.ItemsFromStringList(cond.RunNames, false, 50)
- ri := 0
- for i, rn := range cond.RunNames {
- if rn == ss.Config.Env.RunName {
- ri = i
- break
+ ss.GUI.Body.AddAppBar(func(tb *gi.Toolbar) {
+ cb := gi.NewChooser(tb, "runs")
+ cb.SetStrings(cond.RunNames, false, 50)
+ ri := 0
+ for i, rn := range cond.RunNames {
+ if rn == ss.Config.Env.RunName {
+ ri = i
+ break
+ }
}
- }
- cb.SelectItem(ri)
- cb.ComboSig.Connect(ss.GUI.Win.This(), func(recv, send ki.Ki, sig int64, data any) {
- ss.Config.Env.RunName = data.(string)
- ss.InitEnvRun()
- })
+ cb.SelectItem(ri)
+ cb.OnChange(func(e events.Event) {
+ ss.Config.Env.RunName = cb.CurVal.(string)
+ ss.InitEnvRun()
+ })
- ss.GUI.AddToolbarItem(egui.ToolbarItem{Label: "Init", Icon: "update",
- Tooltip: "Initialize everything including network weights, and start over. Also applies current params.",
- Active: egui.ActiveStopped,
- Func: func() {
- ss.Init()
- ss.GUI.UpdateWindow()
- },
- })
+ ss.GUI.AddToolbarItem(tb, egui.ToolbarItem{Label: "Init", Icon: "update",
+ Tooltip: "Initialize everything including network weights, and start over. Also applies current params.",
+ Active: egui.ActiveStopped,
+ Func: func() {
+ ss.Init()
+ ss.GUI.UpdateWindow()
+ },
+ })
- ss.GUI.AddLooperCtrl(ss.Loops, []etime.Modes{etime.Train})
+ ss.GUI.AddLooperCtrl(tb, ss.Loops, []etime.Modes{etime.Train})
- ss.GUI.ToolBar.AddSeparator("wts")
- ss.GUI.AddToolbarItem(egui.ToolbarItem{Label: "Save Wts", Icon: "file-save",
- Tooltip: "Save weights for the current condition name.",
- Active: egui.ActiveStopped,
- Func: func() {
- ss.SaveCondWeights()
- // ss.GUI.UpdateWindow()
- },
- })
+ gi.NewSeparator(tb)
+ ss.GUI.AddToolbarItem(tb, egui.ToolbarItem{Label: "Save Wts", Icon: "file-save",
+ Tooltip: "Save weights for the current condition name.",
+ Active: egui.ActiveStopped,
+ Func: func() {
+ ss.SaveCondWeights()
+ // ss.GUI.UpdateWindow()
+ },
+ })
- ////////////////////////////////////////////////
- ss.GUI.ToolBar.AddSeparator("log")
- ss.GUI.AddToolbarItem(egui.ToolbarItem{Label: "Reset RunLog",
- Icon: "reset",
- Tooltip: "Reset the accumulated log of all Runs, which are tagged with the ParamSet used",
- Active: egui.ActiveAlways,
- Func: func() {
- ss.Logs.ResetLog(etime.Train, etime.Run)
- ss.GUI.UpdatePlot(etime.Train, etime.Run)
- },
- })
- ////////////////////////////////////////////////
- ss.GUI.ToolBar.AddSeparator("misc")
- ss.GUI.AddToolbarItem(egui.ToolbarItem{Label: "New Seed",
- Icon: "new",
- Tooltip: "Generate a new initial random seed to get different results. By default, Init re-establishes the same initial seed every time.",
- Active: egui.ActiveAlways,
- Func: func() {
- ss.RndSeeds.NewSeeds()
- },
- })
- ss.GUI.AddToolbarItem(egui.ToolbarItem{Label: "Plot Drive & Effort",
- Icon: "play",
- Tooltip: "Opens a new window to plot PVLV Drive and Effort dynamics.",
- Active: egui.ActiveAlways,
- Func: func() {
- go DriveEffortGUI()
- },
- })
- ss.GUI.AddToolbarItem(egui.ToolbarItem{Label: "README",
- Icon: "file-markdown",
- Tooltip: "Opens your browser on the README file that contains instructions for how to run this model.",
- Active: egui.ActiveAlways,
- Func: func() {
- gi.OpenURL("https://github.com/emer/axon/blob/master/examples/pvlv/README.md")
- },
+ ////////////////////////////////////////////////
+ gi.NewSeparator(tb)
+ ss.GUI.AddToolbarItem(tb, egui.ToolbarItem{Label: "Reset RunLog",
+ Icon: "reset",
+ Tooltip: "Reset the accumulated log of all Runs, which are tagged with the ParamSet used",
+ Active: egui.ActiveAlways,
+ Func: func() {
+ ss.Logs.ResetLog(etime.Train, etime.Run)
+ ss.GUI.UpdatePlot(etime.Train, etime.Run)
+ },
+ })
+ ////////////////////////////////////////////////
+ gi.NewSeparator(tb)
+ ss.GUI.AddToolbarItem(tb, egui.ToolbarItem{Label: "New Seed",
+ Icon: "new",
+ Tooltip: "Generate a new initial random seed to get different results. By default, Init re-establishes the same initial seed every time.",
+ Active: egui.ActiveAlways,
+ Func: func() {
+ ss.RndSeeds.NewSeeds()
+ },
+ })
+ ss.GUI.AddToolbarItem(tb, egui.ToolbarItem{Label: "Plot Drive & Effort",
+ Icon: "play",
+ Tooltip: "Opens a new window to plot PVLV Drive and Effort dynamics.",
+ Active: egui.ActiveAlways,
+ Func: func() {
+ go DriveEffortGUI()
+ },
+ })
+ ss.GUI.AddToolbarItem(tb, egui.ToolbarItem{Label: "README",
+ Icon: "file-markdown",
+ Tooltip: "Opens your browser on the README file that contains instructions for how to run this model.",
+ Active: egui.ActiveAlways,
+ Func: func() {
+ gi.OpenURL("https://github.com/emer/axon/blob/master/examples/pvlv/README.md")
+ },
+ })
})
ss.GUI.FinalizeGUI(false)
@@ -835,13 +839,12 @@ func (ss *Sim) ConfigGui() *gi.Window {
ss.Net.GPU.Destroy()
})
}
- return ss.GUI.Win
}
func (ss *Sim) RunGUI() {
ss.Init()
- win := ss.ConfigGui()
- win.StartEventLoop()
+ ss.ConfigGUI()
+ ss.GUI.Body.NewWindow().Run().Wait()
}
func (ss *Sim) RunNoGUI() {
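The pvlv.go hunks collect the full GoGi v1 → gi v2 migration in one place: gimain.Main becomes gimain.Run, MakeWindow becomes MakeBody, toolbar items are built inside a Body.AddAppBar closure, the ComboBox/ComboSig pair becomes a Chooser with an OnChange event handler, and win.StartEventLoop() becomes Body.NewWindow().Run().Wait(). A condensed sketch of the v2 pattern, with gi.NewBody assumed as the underlying entry point (the sim itself goes through egui's MakeBody wrapper):

```go
package main

import (
	"goki.dev/gi/v2/gi"
	"goki.dev/gi/v2/gimain"
	"goki.dev/goosi/events"
)

func main() {
	gimain.Run(func() { // v1: gimain.Main
		b := gi.NewBody("pvlv-sketch") // assumed; the sim uses egui's MakeBody
		b.AddAppBar(func(tb *gi.Toolbar) {
			cb := gi.NewChooser(tb, "runs")
			cb.SetStrings([]string{"PosAcq", "PosExt"}, false, 50)
			cb.OnChange(func(e events.Event) {
				// react to selection; v1 used cb.ComboSig.Connect
				// with a ki.Ki receiver instead
			})
		})
		b.NewWindow().Run().Wait() // v1: win.StartEventLoop()
	})
}
```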
diff --git a/examples/pvlv/pvlv_test.go b/examples/pvlv/pvlv_test.go
index ef68e2858..5be21c1c8 100644
--- a/examples/pvlv/pvlv_test.go
+++ b/examples/pvlv/pvlv_test.go
@@ -5,7 +5,7 @@ import (
"os"
"testing"
- "github.com/emer/emergent/etime"
+ "github.com/emer/emergent/v2/etime"
)
// basic pos acq, ext
diff --git a/examples/ra25/gtigen.go b/examples/ra25/gtigen.go
new file mode 100644
index 000000000..54caf92a6
--- /dev/null
+++ b/examples/ra25/gtigen.go
@@ -0,0 +1,123 @@
+// Code generated by "goki generate"; DO NOT EDIT.
+
+package main
+
+import (
+ "goki.dev/gti"
+ "goki.dev/ordmap"
+)
+
+var _ = gti.AddType(&gti.Type{
+ Name: "main.ParamConfig",
+ ShortName: "main.ParamConfig",
+ IDName: "param-config",
+ Doc: "ParamConfig has config parameters related to sim params",
+ Directives: gti.Directives{
+ &gti.Directive{Tool: "gti", Directive: "add", Args: []string{}},
+ },
+ Fields: ordmap.Make([]ordmap.KeyVal[string, *gti.Field]{
+ {"Network", >i.Field{Name: "Network", Type: "map[string]any", LocalType: "map[string]any", Doc: "network parameters", Directives: gti.Directives{}, Tag: ""}},
+ {"Hidden1Size", >i.Field{Name: "Hidden1Size", Type: "github.com/emer/emergent/v2/evec.Vec2i", LocalType: "evec.Vec2i", Doc: "size of hidden layer -- can use emer.LaySize for 4D layers", Directives: gti.Directives{}, Tag: "def:\"{'X':10,'Y':10}\" nest:\"+\""}},
+ {"Hidden2Size", >i.Field{Name: "Hidden2Size", Type: "github.com/emer/emergent/v2/evec.Vec2i", LocalType: "evec.Vec2i", Doc: "size of hidden layer -- can use emer.LaySize for 4D layers", Directives: gti.Directives{}, Tag: "def:\"{'X':10,'Y':10}\" nest:\"+\""}},
+ {"Sheet", >i.Field{Name: "Sheet", Type: "string", LocalType: "string", Doc: "Extra Param Sheet name(s) to use (space separated if multiple) -- must be valid name as listed in compiled-in params or loaded params", Directives: gti.Directives{}, Tag: ""}},
+ {"Tag", >i.Field{Name: "Tag", Type: "string", LocalType: "string", Doc: "extra tag to add to file names and logs saved from this run", Directives: gti.Directives{}, Tag: ""}},
+ {"Note", >i.Field{Name: "Note", Type: "string", LocalType: "string", Doc: "user note -- describe the run params etc -- like a git commit message for the run", Directives: gti.Directives{}, Tag: ""}},
+ {"File", >i.Field{Name: "File", Type: "string", LocalType: "string", Doc: "Name of the JSON file to input saved parameters from.", Directives: gti.Directives{}, Tag: "nest:\"+\""}},
+ {"SaveAll", >i.Field{Name: "SaveAll", Type: "bool", LocalType: "bool", Doc: "Save a snapshot of all current param and config settings in a directory named params_ (or _good if Good is true), then quit -- useful for comparing to later changes and seeing multiple views of current params", Directives: gti.Directives{}, Tag: "nest:\"+\""}},
+ {"Good", >i.Field{Name: "Good", Type: "bool", LocalType: "bool", Doc: "for SaveAll, save to params_good for a known good params state. This can be done prior to making a new release after all tests are passing -- add results to git to provide a full diff record of all params over time.", Directives: gti.Directives{}, Tag: "nest:\"+\""}},
+ }),
+ Embeds: ordmap.Make([]ordmap.KeyVal[string, *gti.Field]{}),
+ Methods: ordmap.Make([]ordmap.KeyVal[string, *gti.Method]{}),
+})
+
+var _ = gti.AddType(&gti.Type{
+ Name: "main.RunConfig",
+ ShortName: "main.RunConfig",
+ IDName: "run-config",
+ Doc: "RunConfig has config parameters related to running the sim",
+ Directives: gti.Directives{
+ &gti.Directive{Tool: "gti", Directive: "add", Args: []string{}},
+ },
+ Fields: ordmap.Make([]ordmap.KeyVal[string, *gti.Field]{
+ {"GPU", >i.Field{Name: "GPU", Type: "bool", LocalType: "bool", Doc: "use the GPU for computation -- generally faster even for small models if NData ~16", Directives: gti.Directives{}, Tag: "def:\"true\""}},
+ {"NData", >i.Field{Name: "NData", Type: "int", LocalType: "int", Doc: "number of data-parallel items to process in parallel per trial -- works (and is significantly faster) for both CPU and GPU. Results in an effective mini-batch of learning.", Directives: gti.Directives{}, Tag: "def:\"16\" min:\"1\""}},
+ {"NThreads", >i.Field{Name: "NThreads", Type: "int", LocalType: "int", Doc: "number of parallel threads for CPU computation -- 0 = use default", Directives: gti.Directives{}, Tag: "def:\"0\""}},
+ {"Run", >i.Field{Name: "Run", Type: "int", LocalType: "int", Doc: "starting run number -- determines the random seed -- runs counts from there -- can do all runs in parallel by launching separate jobs with each run, runs = 1", Directives: gti.Directives{}, Tag: "def:\"0\""}},
+ {"NRuns", >i.Field{Name: "NRuns", Type: "int", LocalType: "int", Doc: "total number of runs to do when running Train", Directives: gti.Directives{}, Tag: "def:\"5\" min:\"1\""}},
+ {"NEpochs", >i.Field{Name: "NEpochs", Type: "int", LocalType: "int", Doc: "total number of epochs per run", Directives: gti.Directives{}, Tag: "def:\"100\""}},
+ {"NZero", >i.Field{Name: "NZero", Type: "int", LocalType: "int", Doc: "stop run after this number of perfect, zero-error epochs", Directives: gti.Directives{}, Tag: "def:\"2\""}},
+ {"NTrials", >i.Field{Name: "NTrials", Type: "int", LocalType: "int", Doc: "total number of trials per epoch. Should be an even multiple of NData.", Directives: gti.Directives{}, Tag: "def:\"32\""}},
+ {"TestInterval", >i.Field{Name: "TestInterval", Type: "int", LocalType: "int", Doc: "how often to run through all the test patterns, in terms of training epochs -- can use 0 or -1 for no testing", Directives: gti.Directives{}, Tag: "def:\"5\""}},
+ {"PCAInterval", >i.Field{Name: "PCAInterval", Type: "int", LocalType: "int", Doc: "how frequently (in epochs) to compute PCA on hidden representations to measure variance?", Directives: gti.Directives{}, Tag: "def:\"5\""}},
+ {"StartWts", >i.Field{Name: "StartWts", Type: "string", LocalType: "string", Doc: "if non-empty, is the name of weights file to load at start of first run -- for testing", Directives: gti.Directives{}, Tag: ""}},
+ }),
+ Embeds: ordmap.Make([]ordmap.KeyVal[string, *gti.Field]{}),
+ Methods: ordmap.Make([]ordmap.KeyVal[string, *gti.Method]{}),
+})
+
+var _ = gti.AddType(&gti.Type{
+ Name: "main.LogConfig",
+ ShortName: "main.LogConfig",
+ IDName: "log-config",
+ Doc: "LogConfig has config parameters related to logging data",
+ Directives: gti.Directives{
+ &gti.Directive{Tool: "gti", Directive: "add", Args: []string{}},
+ },
+ Fields: ordmap.Make([]ordmap.KeyVal[string, *gti.Field]{
+ {"SaveWts", >i.Field{Name: "SaveWts", Type: "bool", LocalType: "bool", Doc: "if true, save final weights after each run", Directives: gti.Directives{}, Tag: ""}},
+ {"Epoch", >i.Field{Name: "Epoch", Type: "bool", LocalType: "bool", Doc: "if true, save train epoch log to file, as .epc.tsv typically", Directives: gti.Directives{}, Tag: "def:\"true\" nest:\"+\""}},
+ {"Run", >i.Field{Name: "Run", Type: "bool", LocalType: "bool", Doc: "if true, save run log to file, as .run.tsv typically", Directives: gti.Directives{}, Tag: "def:\"true\" nest:\"+\""}},
+ {"Trial", >i.Field{Name: "Trial", Type: "bool", LocalType: "bool", Doc: "if true, save train trial log to file, as .trl.tsv typically. May be large.", Directives: gti.Directives{}, Tag: "def:\"false\" nest:\"+\""}},
+ {"TestEpoch", >i.Field{Name: "TestEpoch", Type: "bool", LocalType: "bool", Doc: "if true, save testing epoch log to file, as .tst_epc.tsv typically. In general it is better to copy testing items over to the training epoch log and record there.", Directives: gti.Directives{}, Tag: "def:\"false\" nest:\"+\""}},
+ {"TestTrial", >i.Field{Name: "TestTrial", Type: "bool", LocalType: "bool", Doc: "if true, save testing trial log to file, as .tst_trl.tsv typically. May be large.", Directives: gti.Directives{}, Tag: "def:\"false\" nest:\"+\""}},
+ {"NetData", >i.Field{Name: "NetData", Type: "bool", LocalType: "bool", Doc: "if true, save network activation etc data from testing trials, for later viewing in netview", Directives: gti.Directives{}, Tag: ""}},
+ }),
+ Embeds: ordmap.Make([]ordmap.KeyVal[string, *gti.Field]{}),
+ Methods: ordmap.Make([]ordmap.KeyVal[string, *gti.Method]{}),
+})
+
+var _ = gti.AddType(&gti.Type{
+ Name: "main.Config",
+ ShortName: "main.Config",
+ IDName: "config",
+ Doc: "Config is a standard Sim config -- use as a starting point.",
+ Directives: gti.Directives{
+ &gti.Directive{Tool: "gti", Directive: "add", Args: []string{}},
+ },
+ Fields: ordmap.Make([]ordmap.KeyVal[string, *gti.Field]{
+ {"Includes", >i.Field{Name: "Includes", Type: "[]string", LocalType: "[]string", Doc: "specify include files here, and after configuration, it contains list of include files added", Directives: gti.Directives{}, Tag: ""}},
+ {"GUI", >i.Field{Name: "GUI", Type: "bool", LocalType: "bool", Doc: "open the GUI -- does not automatically run -- if false, then runs automatically and quits", Directives: gti.Directives{}, Tag: "def:\"true\""}},
+ {"Debug", >i.Field{Name: "Debug", Type: "bool", LocalType: "bool", Doc: "log debugging information", Directives: gti.Directives{}, Tag: ""}},
+ {"Params", >i.Field{Name: "Params", Type: "github.com/emer/axon/examples/ra25.ParamConfig", LocalType: "ParamConfig", Doc: "parameter related configuration options", Directives: gti.Directives{}, Tag: "view:\"add-fields\""}},
+ {"Run", >i.Field{Name: "Run", Type: "github.com/emer/axon/examples/ra25.RunConfig", LocalType: "RunConfig", Doc: "sim running related configuration options", Directives: gti.Directives{}, Tag: "view:\"add-fields\""}},
+ {"Log", >i.Field{Name: "Log", Type: "github.com/emer/axon/examples/ra25.LogConfig", LocalType: "LogConfig", Doc: "data logging related configuration options", Directives: gti.Directives{}, Tag: "view:\"add-fields\""}},
+ }),
+ Embeds: ordmap.Make([]ordmap.KeyVal[string, *gti.Field]{}),
+ Methods: ordmap.Make([]ordmap.KeyVal[string, *gti.Method]{}),
+})
+
+var _ = gti.AddType(&gti.Type{
+ Name: "main.Sim",
+ ShortName: "main.Sim",
+ IDName: "sim",
+ Doc: "Sim encapsulates the entire simulation model, and we define all the\nfunctionality as methods on this struct. This structure keeps all relevant\nstate information organized and available without having to pass everything around\nas arguments to methods, and provides the core GUI interface (note the view tags\nfor the fields which provide hints to how things should be displayed).",
+ Directives: gti.Directives{
+ &gti.Directive{Tool: "gti", Directive: "add", Args: []string{}},
+ },
+ Fields: ordmap.Make([]ordmap.KeyVal[string, *gti.Field]{
+ {"Config", >i.Field{Name: "Config", Type: "github.com/emer/axon/examples/ra25.Config", LocalType: "Config", Doc: "simulation configuration parameters -- set by .toml config file and / or args", Directives: gti.Directives{}, Tag: ""}},
+ {"Net", >i.Field{Name: "Net", Type: "*github.com/emer/axon/axon.Network", LocalType: "*axon.Network", Doc: "the network -- click to view / edit parameters for layers, prjns, etc", Directives: gti.Directives{}, Tag: "view:\"no-inline\""}},
+ {"Params", >i.Field{Name: "Params", Type: "github.com/emer/emergent/v2/emer.NetParams", LocalType: "emer.NetParams", Doc: "network parameter management", Directives: gti.Directives{}, Tag: "view:\"inline\""}},
+ {"Loops", >i.Field{Name: "Loops", Type: "*github.com/emer/emergent/v2/looper.Manager", LocalType: "*looper.Manager", Doc: "contains looper control loops for running sim", Directives: gti.Directives{}, Tag: "view:\"no-inline\""}},
+ {"Stats", >i.Field{Name: "Stats", Type: "github.com/emer/emergent/v2/estats.Stats", LocalType: "estats.Stats", Doc: "contains computed statistic values", Directives: gti.Directives{}, Tag: ""}},
+ {"Logs", >i.Field{Name: "Logs", Type: "github.com/emer/emergent/v2/elog.Logs", LocalType: "elog.Logs", Doc: "Contains all the logs and information about the logs.'", Directives: gti.Directives{}, Tag: ""}},
+ {"Pats", >i.Field{Name: "Pats", Type: "*goki.dev/etable/v2/etable.Table", LocalType: "*etable.Table", Doc: "the training patterns to use", Directives: gti.Directives{}, Tag: "view:\"no-inline\""}},
+ {"Envs", >i.Field{Name: "Envs", Type: "github.com/emer/emergent/v2/env.Envs", LocalType: "env.Envs", Doc: "Environments", Directives: gti.Directives{}, Tag: "view:\"no-inline\""}},
+ {"Context", >i.Field{Name: "Context", Type: "github.com/emer/axon/axon.Context", LocalType: "axon.Context", Doc: "axon timing parameters and state", Directives: gti.Directives{}, Tag: ""}},
+ {"ViewUpdt", >i.Field{Name: "ViewUpdt", Type: "github.com/emer/emergent/v2/netview.ViewUpdt", LocalType: "netview.ViewUpdt", Doc: "netview update parameters", Directives: gti.Directives{}, Tag: "view:\"inline\""}},
+ {"GUI", >i.Field{Name: "GUI", Type: "github.com/emer/emergent/v2/egui.GUI", LocalType: "egui.GUI", Doc: "manages all the gui elements", Directives: gti.Directives{}, Tag: "view:\"-\""}},
+ {"RndSeeds", >i.Field{Name: "RndSeeds", Type: "github.com/emer/emergent/v2/erand.Seeds", LocalType: "erand.Seeds", Doc: "a list of random seeds to use for each run", Directives: gti.Directives{}, Tag: "view:\"-\""}},
+ }),
+ Embeds: ordmap.Make([]ordmap.KeyVal[string, *gti.Field]{}),
+ Methods: ordmap.Make([]ordmap.KeyVal[string, *gti.Method]{}),
+})
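The generated gtigen.go registers type metadata with the gti registry, driven by the //go:generate goki generate -add-types directives added to the example mains. For a hypothetical type, a hand-written equivalent looks like the following sketch, using only the gti/ordmap calls that appear in the generated file:

```go
package main

import (
	"goki.dev/gti"
	"goki.dev/ordmap"
)

// Demo is a hypothetical type used only to illustrate registration.
type Demo struct {
	N int
}

// This mirrors the shape of the generated AddType calls above.
var _ = gti.AddType(&gti.Type{
	Name:      "main.Demo",
	ShortName: "main.Demo",
	IDName:    "demo",
	Doc:       "Demo is a hypothetical type used to illustrate registration",
	Directives: gti.Directives{
		&gti.Directive{Tool: "gti", Directive: "add", Args: []string{}},
	},
	Fields: ordmap.Make([]ordmap.KeyVal[string, *gti.Field]{
		{"N", &gti.Field{Name: "N", Type: "int", LocalType: "int", Doc: "a demo field", Directives: gti.Directives{}, Tag: ""}},
	}),
	Embeds:  ordmap.Make([]ordmap.KeyVal[string, *gti.Field]{}),
	Methods: ordmap.Make([]ordmap.KeyVal[string, *gti.Method]{}),
})

func main() {}
```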
diff --git a/examples/ra25/params.go b/examples/ra25/params.go
index 6d80d1043..ce366033a 100644
--- a/examples/ra25/params.go
+++ b/examples/ra25/params.go
@@ -5,8 +5,8 @@
package main
import (
- "github.com/emer/emergent/netparams"
- "github.com/emer/emergent/params"
+ "github.com/emer/emergent/v2/netparams"
+ "github.com/emer/emergent/v2/params"
)
// ParamSets sets the minimal non-default params
diff --git a/examples/ra25/params_good/config.toml b/examples/ra25/params_good/config.toml
index a14d51d4a..5202946ce 100644
--- a/examples/ra25/params_good/config.toml
+++ b/examples/ra25/params_good/config.toml
@@ -16,8 +16,8 @@ Debug = false
Y = 10
[Run]
- GPU = true
- NData = 16
+ GPU = false
+ NData = 1
NThreads = 0
Run = 0
NRuns = 5
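Note that params_good/config.toml is the snapshot of the config used to regenerate the reference params, so flipping GPU and NData here records that the snapshot was produced on CPU with a single data-parallel item; it does not change the example's actual defaults (def:"true" / def:"16" per gtigen.go). A sketch of the standard load path, assuming econfig.Config as used across these examples:

```go
package main

import "github.com/emer/emergent/v2/econfig"

// RunConfig mirrors the [Run] table of config.toml; the def tags match
// the defaults reported in gtigen.go above.
type RunConfig struct {
	GPU      bool `def:"true"`
	NData    int  `def:"16" min:"1"`
	NThreads int  `def:"0"`
}

type Config struct {
	Run RunConfig `view:"add-fields"`
}

func main() {
	cfg := &Config{}
	// econfig fills defaults from the def tags, then overlays the TOML
	// file -- which is how this snapshot records GPU=false, NData=1.
	econfig.Config(cfg, "config.toml")
	_ = cfg
}
```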
diff --git a/examples/ra25/params_good/params_all.txt b/examples/ra25/params_good/params_all.txt
index e0f3795aa..13e396238 100644
--- a/examples/ra25/params_good/params_all.txt
+++ b/examples/ra25/params_good/params_all.txt
@@ -11,13 +11,13 @@ Act: {
Clamp: { IsInput: true IsTarget: false Ge: 1.5 Add: false ErrThr: 0.5 }
Noise: { On: false GeHz: 100 Ge: 0.001 GiHz: 200 Gi: 0.001 }
VmRange: { Min: 0.1 Max: 1 }
- Mahp: { Gbar: 0.02 Voff: -30 Vslope: 9 TauMax: 1000 Tadj: 3.2093637 DtMax: 0.001 }
+ Mahp: { Gbar: 0.02 Voff: -30 Vslope: 9 TauMax: 1000 Tadj: 3.209364 DtMax: 0.001 }
Sahp: { Gbar: 0.05 CaTau: 5 Off: 0.8 Slope: 0.02 TauMax: 1 CaDt: 0.2 DtMax: 1 }
KNa: { On: true TrialSlow: false Med: { On: true Rise: 0.02 Max: 0.2 Tau: 200 Dt: 0.005 } Slow: { On: true Rise: 0.001 Max: 0.2 Tau: 1000 Dt: 0.001
} }
NMDA: { Gbar: 0.006 Tau: 100 ITau: 1 MgC: 1.2 Voff: 0 }
MaintNMDA: { Gbar: 0.007 Tau: 200 ITau: 1 MgC: 1.4 Voff: 0 }
- GabaB: { Gbar: 0.015 RiseTau: 45 DecayTau: 50 Gbase: 0.2 GiSpike: 10 MaxTime: 47.41225 TauFact: 2.5811758 RiseDt: 0.022222223 DecayDt: 0.02 }
+ GabaB: { Gbar: 0.015 RiseTau: 45 DecayTau: 50 Gbase: 0.2 GiSpike: 10 MaxTime: 47.41225 TauFact: 2.5811756 RiseDt: 0.022222223 DecayDt: 0.02 }
VGCC: { Gbar: 0.02 Ca: 25 }
AK: { Gbar: 0.1 Hf: 0.076 Mf: 0.075 Voff: 2 Vmax: -37 }
SKCa: { Gbar: 0 C50: 0.5 ActTau: 15 DeTau: 30 KCaR: 0.8 CaRDecayTau: 150 CaInThr: 0.01 CaInTau: 50 }
@@ -37,7 +37,7 @@ Learn: {
TrgAvgAct: { On: true GiBaseInit: 0 ErrLRate: 0.02 SynScaleRate: 0.005 SubMean: 0 Permute: true Pool: true TrgRange: { Min: 0.5 Max: 2
} }
RLRate: { On: true SigmoidMin: 0.05 Diff: true SpkThr: 0.1 DiffThr: 0.02 Min: 0.001 }
- NeuroMod: { DAMod: 0 Valence: 0 DAModGain: 0.5 DALRateSign: false DALRateMod: 0 AChLRateMod: 0 AChDisInhib: 0 BurstGain: 1 DipGain: 1 }
+ NeuroMod: { DAMod: NoDAMod Valence: Positive DAModGain: 0.5 DALRateSign: false DALRateMod: 0 AChLRateMod: 0 AChDisInhib: 0 BurstGain: 1 DipGain: 1 }
}
/////////////////////////////////////////////////
Layer: Hidden1
@@ -52,13 +52,13 @@ Act: {
Clamp: { IsInput: false IsTarget: false Ge: 0.8 Add: false ErrThr: 0.5 }
Noise: { On: false GeHz: 100 Ge: 0.001 GiHz: 200 Gi: 0.001 }
VmRange: { Min: 0.1 Max: 1 }
- Mahp: { Gbar: 0.02 Voff: -30 Vslope: 9 TauMax: 1000 Tadj: 3.2093637 DtMax: 0.001 }
+ Mahp: { Gbar: 0.02 Voff: -30 Vslope: 9 TauMax: 1000 Tadj: 3.209364 DtMax: 0.001 }
Sahp: { Gbar: 0.05 CaTau: 5 Off: 0.8 Slope: 0.02 TauMax: 1 CaDt: 0.2 DtMax: 1 }
KNa: { On: true TrialSlow: false Med: { On: true Rise: 0.02 Max: 0.2 Tau: 200 Dt: 0.005 } Slow: { On: true Rise: 0.001 Max: 0.2 Tau: 1000 Dt: 0.001
} }
NMDA: { Gbar: 0.006 Tau: 100 ITau: 1 MgC: 1.2 Voff: 0 }
MaintNMDA: { Gbar: 0.007 Tau: 200 ITau: 1 MgC: 1.4 Voff: 0 }
- GabaB: { Gbar: 0.015 RiseTau: 45 DecayTau: 50 Gbase: 0.2 GiSpike: 10 MaxTime: 47.41225 TauFact: 2.5811758 RiseDt: 0.022222223 DecayDt: 0.02 }
+ GabaB: { Gbar: 0.015 RiseTau: 45 DecayTau: 50 Gbase: 0.2 GiSpike: 10 MaxTime: 47.41225 TauFact: 2.5811756 RiseDt: 0.022222223 DecayDt: 0.02 }
VGCC: { Gbar: 0.02 Ca: 25 }
AK: { Gbar: 0.1 Hf: 0.076 Mf: 0.075 Voff: 2 Vmax: -37 }
SKCa: { Gbar: 0 C50: 0.5 ActTau: 15 DeTau: 30 KCaR: 0.8 CaRDecayTau: 150 CaInThr: 0.01 CaInTau: 50 }
@@ -78,7 +78,7 @@ Learn: {
TrgAvgAct: { On: true GiBaseInit: 0 ErrLRate: 0.02 SynScaleRate: 0.005 SubMean: 1 Permute: true Pool: true TrgRange: { Min: 0.5 Max: 2
} }
RLRate: { On: true SigmoidMin: 0.05 Diff: true SpkThr: 0.1 DiffThr: 0.02 Min: 0.001 }
- NeuroMod: { DAMod: 0 Valence: 0 DAModGain: 0.5 DALRateSign: false DALRateMod: 0 AChLRateMod: 0 AChDisInhib: 0 BurstGain: 1 DipGain: 1 }
+ NeuroMod: { DAMod: NoDAMod Valence: Positive DAModGain: 0.5 DALRateSign: false DALRateMod: 0 AChLRateMod: 0 AChDisInhib: 0 BurstGain: 1 DipGain: 1 }
}
Burst: {
ThrRel: 0.1 ThrAbs: 0.1
@@ -136,13 +136,13 @@ Act: {
Clamp: { IsInput: false IsTarget: false Ge: 0.8 Add: false ErrThr: 0.5 }
Noise: { On: false GeHz: 100 Ge: 0.001 GiHz: 200 Gi: 0.001 }
VmRange: { Min: 0.1 Max: 1 }
- Mahp: { Gbar: 0.02 Voff: -30 Vslope: 9 TauMax: 1000 Tadj: 3.2093637 DtMax: 0.001 }
+ Mahp: { Gbar: 0.02 Voff: -30 Vslope: 9 TauMax: 1000 Tadj: 3.209364 DtMax: 0.001 }
Sahp: { Gbar: 0.05 CaTau: 5 Off: 0.8 Slope: 0.02 TauMax: 1 CaDt: 0.2 DtMax: 1 }
KNa: { On: true TrialSlow: false Med: { On: true Rise: 0.02 Max: 0.2 Tau: 200 Dt: 0.005 } Slow: { On: true Rise: 0.001 Max: 0.2 Tau: 1000 Dt: 0.001
} }
NMDA: { Gbar: 0.006 Tau: 100 ITau: 1 MgC: 1.2 Voff: 0 }
MaintNMDA: { Gbar: 0.007 Tau: 200 ITau: 1 MgC: 1.4 Voff: 0 }
- GabaB: { Gbar: 0.015 RiseTau: 45 DecayTau: 50 Gbase: 0.2 GiSpike: 10 MaxTime: 47.41225 TauFact: 2.5811758 RiseDt: 0.022222223 DecayDt: 0.02 }
+ GabaB: { Gbar: 0.015 RiseTau: 45 DecayTau: 50 Gbase: 0.2 GiSpike: 10 MaxTime: 47.41225 TauFact: 2.5811756 RiseDt: 0.022222223 DecayDt: 0.02 }
VGCC: { Gbar: 0.02 Ca: 25 }
AK: { Gbar: 0.1 Hf: 0.076 Mf: 0.075 Voff: 2 Vmax: -37 }
SKCa: { Gbar: 0 C50: 0.5 ActTau: 15 DeTau: 30 KCaR: 0.8 CaRDecayTau: 150 CaInThr: 0.01 CaInTau: 50 }
@@ -162,7 +162,7 @@ Learn: {
TrgAvgAct: { On: true GiBaseInit: 0 ErrLRate: 0.02 SynScaleRate: 0.005 SubMean: 1 Permute: true Pool: true TrgRange: { Min: 0.5 Max: 2
} }
RLRate: { On: true SigmoidMin: 0.05 Diff: true SpkThr: 0.1 DiffThr: 0.02 Min: 0.001 }
- NeuroMod: { DAMod: 0 Valence: 0 DAModGain: 0.5 DALRateSign: false DALRateMod: 0 AChLRateMod: 0 AChDisInhib: 0 BurstGain: 1 DipGain: 1 }
+ NeuroMod: { DAMod: NoDAMod Valence: Positive DAModGain: 0.5 DALRateSign: false DALRateMod: 0 AChLRateMod: 0 AChDisInhib: 0 BurstGain: 1 DipGain: 1 }
}
Burst: {
ThrRel: 0.1 ThrAbs: 0.1
@@ -220,13 +220,13 @@ Act: {
Clamp: { IsInput: false IsTarget: true Ge: 0.8 Add: false ErrThr: 0.5 }
Noise: { On: false GeHz: 100 Ge: 0.001 GiHz: 200 Gi: 0.001 }
VmRange: { Min: 0.1 Max: 1 }
- Mahp: { Gbar: 0.02 Voff: -30 Vslope: 9 TauMax: 1000 Tadj: 3.2093637 DtMax: 0.001 }
+ Mahp: { Gbar: 0.02 Voff: -30 Vslope: 9 TauMax: 1000 Tadj: 3.209364 DtMax: 0.001 }
Sahp: { Gbar: 0.05 CaTau: 5 Off: 0.8 Slope: 0.02 TauMax: 1 CaDt: 0.2 DtMax: 1 }
KNa: { On: true TrialSlow: false Med: { On: true Rise: 0.02 Max: 0.2 Tau: 200 Dt: 0.005 } Slow: { On: true Rise: 0.001 Max: 0.2 Tau: 1000 Dt: 0.001
} }
NMDA: { Gbar: 0.006 Tau: 100 ITau: 1 MgC: 1.2 Voff: 0 }
MaintNMDA: { Gbar: 0.007 Tau: 200 ITau: 1 MgC: 1.4 Voff: 0 }
- GabaB: { Gbar: 0.015 RiseTau: 45 DecayTau: 50 Gbase: 0.2 GiSpike: 10 MaxTime: 47.41225 TauFact: 2.5811758 RiseDt: 0.022222223 DecayDt: 0.02 }
+ GabaB: { Gbar: 0.015 RiseTau: 45 DecayTau: 50 Gbase: 0.2 GiSpike: 10 MaxTime: 47.41225 TauFact: 2.5811756 RiseDt: 0.022222223 DecayDt: 0.02 }
VGCC: { Gbar: 0.02 Ca: 25 }
AK: { Gbar: 0.1 Hf: 0.076 Mf: 0.075 Voff: 2 Vmax: -37 }
SKCa: { Gbar: 0 C50: 0.5 ActTau: 15 DeTau: 30 KCaR: 0.8 CaRDecayTau: 150 CaInThr: 0.01 CaInTau: 50 }
@@ -246,7 +246,7 @@ Learn: {
TrgAvgAct: { On: true GiBaseInit: 0 ErrLRate: 0.02 SynScaleRate: 0.005 SubMean: 0 Permute: true Pool: true TrgRange: { Min: 0.5 Max: 2
} }
RLRate: { On: true SigmoidMin: 0.05 Diff: true SpkThr: 0.1 DiffThr: 0.02 Min: 0.001 }
- NeuroMod: { DAMod: 0 Valence: 0 DAModGain: 0.5 DALRateSign: false DALRateMod: 0 AChLRateMod: 0 AChDisInhib: 0 BurstGain: 1 DipGain: 1 }
+ NeuroMod: { DAMod: NoDAMod Valence: Positive DAModGain: 0.5 DALRateSign: false DALRateMod: 0 AChLRateMod: 0 AChDisInhib: 0 BurstGain: 1 DipGain: 1 }
}
///////////////////////////////////////////////////
Prjn: Hidden2ToOutput
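Note on the NeuroMod lines above: with enumgen, enum-typed fields such as DAMod and Valence now print by name (NoDAMod, Positive) rather than by raw integer (0). A minimal sketch of the mechanism, with a hand-written stand-in for the generated String method (constant names beyond NoDAMod are illustrative assumptions, not the generated code):

package main

import "fmt"

// DAModTypes stands in for the kind of enum that enumgen processes.
type DAModTypes int32

const (
	NoDAMod DAModTypes = iota // zero value: no dopamine modulation
	D1Mod                     // illustrative additional value
	D2Mod                     // illustrative additional value
)

// String is a hand-written stand-in for the enumgen-generated method;
// it is what makes the params output read "DAMod: NoDAMod" above.
func (d DAModTypes) String() string {
	switch d {
	case NoDAMod:
		return "NoDAMod"
	case D1Mod:
		return "D1Mod"
	case D2Mod:
		return "D2Mod"
	}
	return fmt.Sprintf("DAModTypes(%d)", int32(d))
}

func main() {
	var m DAModTypes
	fmt.Println(m) // prints "NoDAMod" rather than 0
}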
diff --git a/examples/ra25/ra25.go b/examples/ra25/ra25.go
index 94f4eee8e..52ede6cbb 100644
--- a/examples/ra25/ra25.go
+++ b/examples/ra25/ra25.go
@@ -8,30 +8,32 @@
// defined over 5x5 input / output layers (i.e., 25 units)
package main
+//go:generate goki generate -add-types
+
import (
"log"
"os"
"github.com/emer/axon/axon"
- "github.com/emer/emergent/econfig"
- "github.com/emer/emergent/egui"
- "github.com/emer/emergent/elog"
- "github.com/emer/emergent/emer"
- "github.com/emer/emergent/env"
- "github.com/emer/emergent/erand"
- "github.com/emer/emergent/estats"
- "github.com/emer/emergent/etime"
- "github.com/emer/emergent/evec"
- "github.com/emer/emergent/looper"
- "github.com/emer/emergent/netview"
- "github.com/emer/emergent/patgen"
- "github.com/emer/emergent/prjn"
- "github.com/emer/empi/mpi"
- "github.com/emer/etable/etable"
- "github.com/emer/etable/etensor"
- "github.com/goki/gi/gi"
- "github.com/goki/gi/gimain"
- "github.com/goki/mat32"
+ "github.com/emer/emergent/v2/econfig"
+ "github.com/emer/emergent/v2/egui"
+ "github.com/emer/emergent/v2/elog"
+ "github.com/emer/emergent/v2/emer"
+ "github.com/emer/emergent/v2/env"
+ "github.com/emer/emergent/v2/erand"
+ "github.com/emer/emergent/v2/estats"
+ "github.com/emer/emergent/v2/etime"
+ "github.com/emer/emergent/v2/evec"
+ "github.com/emer/emergent/v2/looper"
+ "github.com/emer/emergent/v2/netview"
+ "github.com/emer/emergent/v2/patgen"
+ "github.com/emer/emergent/v2/prjn"
+ "github.com/emer/empi/v2/mpi"
+ "goki.dev/etable/v2/etable"
+ "goki.dev/etable/v2/etensor"
+ "goki.dev/gi/v2/gi"
+ "goki.dev/gi/v2/gimain"
+ "goki.dev/mat32/v2"
)
func main() {
@@ -39,7 +41,7 @@ func main() {
sim.New()
sim.ConfigAll()
if sim.Config.GUI {
- gimain.Main(sim.RunGUI)
+ gimain.Run(sim.RunGUI)
} else {
sim.RunNoGUI()
}
@@ -51,115 +53,115 @@ func main() {
type ParamConfig struct {
// network parameters
- Network map[string]any `desc:"network parameters"`
+ Network map[string]any
- // [def: {'X':10,'Y':10}] size of hidden layer -- can use emer.LaySize for 4D layers
- Hidden1Size evec.Vec2i `def:"{'X':10,'Y':10}" nest:"+" desc:"size of hidden layer -- can use emer.LaySize for 4D layers"`
+ // size of hidden layer -- can use emer.LaySize for 4D layers
+ Hidden1Size evec.Vec2i `def:"{'X':10,'Y':10}" nest:"+"`
- // [def: {'X':10,'Y':10}] size of hidden layer -- can use emer.LaySize for 4D layers
- Hidden2Size evec.Vec2i `def:"{'X':10,'Y':10}" nest:"+" desc:"size of hidden layer -- can use emer.LaySize for 4D layers"`
+ // size of hidden layer -- can use emer.LaySize for 4D layers
+ Hidden2Size evec.Vec2i `def:"{'X':10,'Y':10}" nest:"+"`
// Extra Param Sheet name(s) to use (space separated if multiple) -- must be valid name as listed in compiled-in params or loaded params
- Sheet string `desc:"Extra Param Sheet name(s) to use (space separated if multiple) -- must be valid name as listed in compiled-in params or loaded params"`
+ Sheet string
// extra tag to add to file names and logs saved from this run
- Tag string `desc:"extra tag to add to file names and logs saved from this run"`
+ Tag string
// user note -- describe the run params etc -- like a git commit message for the run
- Note string `desc:"user note -- describe the run params etc -- like a git commit message for the run"`
+ Note string
// Name of the JSON file to input saved parameters from.
- File string `nest:"+" desc:"Name of the JSON file to input saved parameters from."`
+ File string `nest:"+"`
// Save a snapshot of all current param and config settings in a directory named params_ (or _good if Good is true), then quit -- useful for comparing to later changes and seeing multiple views of current params
- SaveAll bool `nest:"+" desc:"Save a snapshot of all current param and config settings in a directory named params_ (or _good if Good is true), then quit -- useful for comparing to later changes and seeing multiple views of current params"`
+ SaveAll bool `nest:"+"`
// for SaveAll, save to params_good for a known good params state. This can be done prior to making a new release after all tests are passing -- add results to git to provide a full diff record of all params over time.
- Good bool `nest:"+" desc:"for SaveAll, save to params_good for a known good params state. This can be done prior to making a new release after all tests are passing -- add results to git to provide a full diff record of all params over time."`
+ Good bool `nest:"+"`
}
// RunConfig has config parameters related to running the sim
type RunConfig struct {
- // [def: true] use the GPU for computation -- generally faster even for small models if NData ~16
- GPU bool `def:"true" desc:"use the GPU for computation -- generally faster even for small models if NData ~16"`
+ // use the GPU for computation -- generally faster even for small models if NData ~16
+ GPU bool `def:"true"`
- // [def: 16] [min: 1] number of data-parallel items to process in parallel per trial -- works (and is significantly faster) for both CPU and GPU. Results in an effective mini-batch of learning.
- NData int `def:"16" min:"1" desc:"number of data-parallel items to process in parallel per trial -- works (and is significantly faster) for both CPU and GPU. Results in an effective mini-batch of learning."`
+ // number of data-parallel items to process in parallel per trial -- works (and is significantly faster) for both CPU and GPU. Results in an effective mini-batch of learning.
+ NData int `def:"16" min:"1"`
- // [def: 0] number of parallel threads for CPU computation -- 0 = use default
- NThreads int `def:"0" desc:"number of parallel threads for CPU computation -- 0 = use default"`
+ // number of parallel threads for CPU computation -- 0 = use default
+ NThreads int `def:"0"`
- // [def: 0] starting run number -- determines the random seed -- runs counts from there -- can do all runs in parallel by launching separate jobs with each run, runs = 1
- Run int `def:"0" desc:"starting run number -- determines the random seed -- runs counts from there -- can do all runs in parallel by launching separate jobs with each run, runs = 1"`
+ // starting run number -- determines the random seed -- runs counts from there -- can do all runs in parallel by launching separate jobs with each run, runs = 1
+ Run int `def:"0"`
- // [def: 5] [min: 1] total number of runs to do when running Train
- NRuns int `def:"5" min:"1" desc:"total number of runs to do when running Train"`
+ // total number of runs to do when running Train
+ NRuns int `def:"5" min:"1"`
- // [def: 100] total number of epochs per run
- NEpochs int `def:"100" desc:"total number of epochs per run"`
+ // total number of epochs per run
+ NEpochs int `def:"100"`
- // [def: 2] stop run after this number of perfect, zero-error epochs
- NZero int `def:"2" desc:"stop run after this number of perfect, zero-error epochs"`
+ // stop run after this number of perfect, zero-error epochs
+ NZero int `def:"2"`
- // [def: 32] total number of trials per epoch. Should be an even multiple of NData.
- NTrials int `def:"32" desc:"total number of trials per epoch. Should be an even multiple of NData."`
+ // total number of trials per epoch. Should be an even multiple of NData.
+ NTrials int `def:"32"`
- // [def: 5] how often to run through all the test patterns, in terms of training epochs -- can use 0 or -1 for no testing
- TestInterval int `def:"5" desc:"how often to run through all the test patterns, in terms of training epochs -- can use 0 or -1 for no testing"`
+ // how often to run through all the test patterns, in terms of training epochs -- can use 0 or -1 for no testing
+ TestInterval int `def:"5"`
- // [def: 5] how frequently (in epochs) to compute PCA on hidden representations to measure variance?
- PCAInterval int `def:"5" desc:"how frequently (in epochs) to compute PCA on hidden representations to measure variance?"`
+ // how frequently (in epochs) to compute PCA on hidden representations to measure variance?
+ PCAInterval int `def:"5"`
// if non-empty, is the name of weights file to load at start of first run -- for testing
- StartWts string `desc:"if non-empty, is the name of weights file to load at start of first run -- for testing"`
+ StartWts string
}
// LogConfig has config parameters related to logging data
type LogConfig struct {
// if true, save final weights after each run
- SaveWts bool `desc:"if true, save final weights after each run"`
+ SaveWts bool
- // [def: true] if true, save train epoch log to file, as .epc.tsv typically
- Epoch bool `def:"true" nest:"+" desc:"if true, save train epoch log to file, as .epc.tsv typically"`
+ // if true, save train epoch log to file, as .epc.tsv typically
+ Epoch bool `def:"true" nest:"+"`
- // [def: true] if true, save run log to file, as .run.tsv typically
- Run bool `def:"true" nest:"+" desc:"if true, save run log to file, as .run.tsv typically"`
+ // if true, save run log to file, as .run.tsv typically
+ Run bool `def:"true" nest:"+"`
- // [def: false] if true, save train trial log to file, as .trl.tsv typically. May be large.
- Trial bool `def:"false" nest:"+" desc:"if true, save train trial log to file, as .trl.tsv typically. May be large."`
+ // if true, save train trial log to file, as .trl.tsv typically. May be large.
+ Trial bool `def:"false" nest:"+"`
- // [def: false] if true, save testing epoch log to file, as .tst_epc.tsv typically. In general it is better to copy testing items over to the training epoch log and record there.
- TestEpoch bool `def:"false" nest:"+" desc:"if true, save testing epoch log to file, as .tst_epc.tsv typically. In general it is better to copy testing items over to the training epoch log and record there."`
+ // if true, save testing epoch log to file, as .tst_epc.tsv typically. In general it is better to copy testing items over to the training epoch log and record there.
+ TestEpoch bool `def:"false" nest:"+"`
- // [def: false] if true, save testing trial log to file, as .tst_trl.tsv typically. May be large.
- TestTrial bool `def:"false" nest:"+" desc:"if true, save testing trial log to file, as .tst_trl.tsv typically. May be large."`
+ // if true, save testing trial log to file, as .tst_trl.tsv typically. May be large.
+ TestTrial bool `def:"false" nest:"+"`
// if true, save network activation etc data from testing trials, for later viewing in netview
- NetData bool `desc:"if true, save network activation etc data from testing trials, for later viewing in netview"`
+ NetData bool
}
// Config is a standard Sim config -- use as a starting point.
type Config struct {
// specify include files here, and after configuration, it contains list of include files added
- Includes []string `desc:"specify include files here, and after configuration, it contains list of include files added"`
+ Includes []string
- // [def: true] open the GUI -- does not automatically run -- if false, then runs automatically and quits
- GUI bool `def:"true" desc:"open the GUI -- does not automatically run -- if false, then runs automatically and quits"`
+ // open the GUI -- does not automatically run -- if false, then runs automatically and quits
+ GUI bool `def:"true"`
// log debugging information
- Debug bool `desc:"log debugging information"`
+ Debug bool
- // [view: add-fields] parameter related configuration options
- Params ParamConfig `view:"add-fields" desc:"parameter related configuration options"`
+ // parameter related configuration options
+ Params ParamConfig `view:"add-fields"`
- // [view: add-fields] sim running related configuration options
- Run RunConfig `view:"add-fields" desc:"sim running related configuration options"`
+ // sim running related configuration options
+ Run RunConfig `view:"add-fields"`
- // [view: add-fields] data logging related configuration options
- Log LogConfig `view:"add-fields" desc:"data logging related configuration options"`
+ // data logging related configuration options
+ Log LogConfig `view:"add-fields"`
}
func (cfg *Config) IncludesPtr() *[]string { return &cfg.Includes }
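For context on how this Config struct gets filled: econfig reads the TOML file(s) named in Includes (via IncludesPtr) plus a default file, then applies command-line overrides generated from the field names. A hedged sketch of the loading call, assuming the econfig.Config call shape used by these sims:

package main

import "github.com/emer/emergent/v2/econfig"

// Config is a trimmed stand-in for the struct above.
type Config struct {
	Includes []string
	GUI      bool `def:"true"`
	Debug    bool
}

// IncludesPtr lets econfig chase the Includes list for extra files.
func (cfg *Config) IncludesPtr() *[]string { return &cfg.Includes }

func main() {
	cfg := &Config{GUI: true}
	// loads config.toml if present, then parses command-line args
	econfig.Config(cfg, "config.toml")
}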
@@ -172,40 +174,40 @@ func (cfg *Config) IncludesPtr() *[]string { return &cfg.Includes }
type Sim struct {
// simulation configuration parameters -- set by .toml config file and / or args
- Config Config `desc:"simulation configuration parameters -- set by .toml config file and / or args"`
+ Config Config
- // [view: no-inline] the network -- click to view / edit parameters for layers, prjns, etc
- Net *axon.Network `view:"no-inline" desc:"the network -- click to view / edit parameters for layers, prjns, etc"`
+ // the network -- click to view / edit parameters for layers, prjns, etc
+ Net *axon.Network `view:"no-inline"`
- // [view: inline] network parameter management
- Params emer.NetParams `view:"inline" desc:"network parameter management"`
+ // network parameter management
+ Params emer.NetParams `view:"inline"`
- // [view: no-inline] contains looper control loops for running sim
- Loops *looper.Manager `view:"no-inline" desc:"contains looper control loops for running sim"`
+ // contains looper control loops for running sim
+ Loops *looper.Manager `view:"no-inline"`
// contains computed statistic values
- Stats estats.Stats `desc:"contains computed statistic values"`
+ Stats estats.Stats
// Contains all the logs and information about the logs.
- Logs elog.Logs `desc:"Contains all the logs and information about the logs.'"`
+ Logs elog.Logs
- // [view: no-inline] the training patterns to use
- Pats *etable.Table `view:"no-inline" desc:"the training patterns to use"`
+ // the training patterns to use
+ Pats *etable.Table `view:"no-inline"`
- // [view: no-inline] Environments
- Envs env.Envs `view:"no-inline" desc:"Environments"`
+ // Environments
+ Envs env.Envs `view:"no-inline"`
// axon timing parameters and state
- Context axon.Context `desc:"axon timing parameters and state"`
+ Context axon.Context
- // [view: inline] netview update parameters
- ViewUpdt netview.ViewUpdt `view:"inline" desc:"netview update parameters"`
+ // netview update parameters
+ ViewUpdt netview.ViewUpdt `view:"inline"`
- // [view: -] manages all the gui elements
- GUI egui.GUI `view:"-" desc:"manages all the gui elements"`
+ // manages all the gui elements
+ GUI egui.GUI `view:"-"`
- // [view: -] a list of random seeds to use for each run
- RndSeeds erand.Seeds `view:"-" desc:"a list of random seeds to use for each run"`
+ // a list of random seeds to use for each run
+ RndSeeds erand.Seeds `view:"-"`
}
// New creates new blank elements and initializes defaults
@@ -649,10 +651,10 @@ func (ss *Sim) Log(mode etime.Modes, time etime.Times) {
////////////////////////////////////////////////////////////////////////////////////////////
// Gui
-// ConfigGui configures the GoGi gui interface for this simulation,
-func (ss *Sim) ConfigGui() *gi.Window {
+// ConfigGUI configures the GoGi gui interface for this simulation,
+func (ss *Sim) ConfigGUI() {
title := "Axon Random Associator"
- ss.GUI.MakeWindow(ss, "ra25", title, `This demonstrates a basic Axon model. See emergent on GitHub.`)
+ ss.GUI.MakeBody(ss, "ra25", title, `This demonstrates a basic Axon model. See emergent on GitHub.`)
ss.GUI.CycleUpdateInterval = 10
nv := ss.GUI.AddNetView("NetView")
@@ -661,50 +663,52 @@ func (ss *Sim) ConfigGui() *gi.Window {
ss.ViewUpdt.Config(nv, etime.Phase, etime.Phase)
ss.GUI.ViewUpdt = &ss.ViewUpdt
- nv.Scene().Camera.Pose.Pos.Set(0, 1, 2.75) // more "head on" than default which is more "top down"
- nv.Scene().Camera.LookAt(mat32.Vec3{0, 0, 0}, mat32.Vec3{0, 1, 0})
+ nv.SceneXYZ().Camera.Pose.Pos.Set(0, 1, 2.75) // more "head on" than default which is more "top down"
+ nv.SceneXYZ().Camera.LookAt(mat32.V3(0, 0, 0), mat32.V3(0, 1, 0))
ss.GUI.AddPlots(title, &ss.Logs)
- ss.GUI.AddToolbarItem(egui.ToolbarItem{Label: "Init", Icon: "update",
- Tooltip: "Initialize everything including network weights, and start over. Also applies current params.",
- Active: egui.ActiveStopped,
- Func: func() {
- ss.Init()
- ss.GUI.UpdateWindow()
- },
- })
+ ss.GUI.Body.AddAppBar(func(tb *gi.Toolbar) {
+ ss.GUI.AddToolbarItem(tb, egui.ToolbarItem{Label: "Init", Icon: "update",
+ Tooltip: "Initialize everything including network weights, and start over. Also applies current params.",
+ Active: egui.ActiveStopped,
+ Func: func() {
+ ss.Init()
+ ss.GUI.UpdateWindow()
+ },
+ })
- ss.GUI.AddLooperCtrl(ss.Loops, []etime.Modes{etime.Train, etime.Test})
-
- ////////////////////////////////////////////////
- ss.GUI.ToolBar.AddSeparator("log")
- ss.GUI.AddToolbarItem(egui.ToolbarItem{Label: "Reset RunLog",
- Icon: "reset",
- Tooltip: "Reset the accumulated log of all Runs, which are tagged with the ParamSet used",
- Active: egui.ActiveAlways,
- Func: func() {
- ss.Logs.ResetLog(etime.Train, etime.Run)
- ss.GUI.UpdatePlot(etime.Train, etime.Run)
- },
- })
- ////////////////////////////////////////////////
- ss.GUI.ToolBar.AddSeparator("misc")
- ss.GUI.AddToolbarItem(egui.ToolbarItem{Label: "New Seed",
- Icon: "new",
- Tooltip: "Generate a new initial random seed to get different results. By default, Init re-establishes the same initial seed every time.",
- Active: egui.ActiveAlways,
- Func: func() {
- ss.RndSeeds.NewSeeds()
- },
- })
- ss.GUI.AddToolbarItem(egui.ToolbarItem{Label: "README",
- Icon: "file-markdown",
- Tooltip: "Opens your browser on the README file that contains instructions for how to run this model.",
- Active: egui.ActiveAlways,
- Func: func() {
- gi.OpenURL("https://github.com/emer/axon/blob/master/examples/ra25/README.md")
- },
+ ss.GUI.AddLooperCtrl(tb, ss.Loops, []etime.Modes{etime.Train, etime.Test})
+
+ ////////////////////////////////////////////////
+ gi.NewSeparator(tb)
+ ss.GUI.AddToolbarItem(tb, egui.ToolbarItem{Label: "Reset RunLog",
+ Icon: "reset",
+ Tooltip: "Reset the accumulated log of all Runs, which are tagged with the ParamSet used",
+ Active: egui.ActiveAlways,
+ Func: func() {
+ ss.Logs.ResetLog(etime.Train, etime.Run)
+ ss.GUI.UpdatePlot(etime.Train, etime.Run)
+ },
+ })
+ ////////////////////////////////////////////////
+ gi.NewSeparator(tb)
+ ss.GUI.AddToolbarItem(tb, egui.ToolbarItem{Label: "New Seed",
+ Icon: "new",
+ Tooltip: "Generate a new initial random seed to get different results. By default, Init re-establishes the same initial seed every time.",
+ Active: egui.ActiveAlways,
+ Func: func() {
+ ss.RndSeeds.NewSeeds()
+ },
+ })
+ ss.GUI.AddToolbarItem(tb, egui.ToolbarItem{Label: "README",
+ Icon: "file-markdown",
+ Tooltip: "Opens your browser on the README file that contains instructions for how to run this model.",
+ Active: egui.ActiveAlways,
+ Func: func() {
+ gi.OpenURL("https://github.com/emer/axon/blob/master/examples/ra25/README.md")
+ },
+ })
})
ss.GUI.FinalizeGUI(false)
if ss.Config.Run.GPU {
@@ -714,13 +718,12 @@ func (ss *Sim) ConfigGui() *gi.Window {
ss.Net.GPU.Destroy()
})
}
- return ss.GUI.Win
}
func (ss *Sim) RunGUI() {
ss.Init()
- win := ss.ConfigGui()
- win.StartEventLoop()
+ ss.ConfigGUI()
+ ss.GUI.Body.NewWindow().Run().Wait()
}
func (ss *Sim) RunNoGUI() {
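The startup changes in this file follow one pattern: gimain.Main becomes gimain.Run, ConfigGUI no longer returns a *gi.Window, and the explicit event loop is replaced by Body.NewWindow().Run().Wait(). A minimal standalone sketch of the v2 shape, assuming the goki.dev/gi/v2 API as used above (the widget here is illustrative; the sims do this through egui.GUI):

package main

import (
	"goki.dev/gi/v2/gi"
	"goki.dev/gi/v2/gimain"
)

func runGUI() {
	b := gi.NewBody("ra25") // stand-in for ss.GUI.MakeBody(...)
	gi.NewLabel(b).SetText("Axon Random Associator")
	b.NewWindow().Run().Wait() // replaces win.StartEventLoop()
}

func main() {
	gimain.Run(runGUI) // replaces gimain.Main(runGUI)
}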
diff --git a/examples/ra25/weights_test.go b/examples/ra25/weights_test.go
index e99292b14..774486dd3 100644
--- a/examples/ra25/weights_test.go
+++ b/examples/ra25/weights_test.go
@@ -7,8 +7,8 @@ import (
"os"
"testing"
- "github.com/emer/emergent/etime"
- "github.com/goki/gi/gi"
+ "github.com/emer/emergent/v2/etime"
+ "goki.dev/gi/v2/gi"
)
func TestWeightsSave(t *testing.T) {
diff --git a/examples/ra25x/config.go b/examples/ra25x/config.go
index 6b777b113..a9f774a4f 100644
--- a/examples/ra25x/config.go
+++ b/examples/ra25x/config.go
@@ -4,121 +4,121 @@
package main
-import "github.com/emer/emergent/evec"
+import "github.com/emer/emergent/v2/evec"
// ParamConfig has config parameters related to sim params
type ParamConfig struct {
// network parameters
- Network map[string]any `desc:"network parameters"`
+ Network map[string]any
- // [def: {'X':10,'Y':10}] size of hidden layer -- can use emer.LaySize for 4D layers
- Hidden1Size evec.Vec2i `def:"{'X':10,'Y':10}" nest:"+" desc:"size of hidden layer -- can use emer.LaySize for 4D layers"`
+ // size of hidden layer -- can use emer.LaySize for 4D layers
+ Hidden1Size evec.Vec2i `def:"{'X':10,'Y':10}" nest:"+"`
- // [def: {'X':10,'Y':10}] size of hidden layer -- can use emer.LaySize for 4D layers
- Hidden2Size evec.Vec2i `def:"{'X':10,'Y':10}" nest:"+" desc:"size of hidden layer -- can use emer.LaySize for 4D layers"`
+ // size of hidden layer -- can use emer.LaySize for 4D layers
+ Hidden2Size evec.Vec2i `def:"{'X':10,'Y':10}" nest:"+"`
// Extra Param Sheet name(s) to use (space separated if multiple) -- must be valid name as listed in compiled-in params or loaded params
- Sheet string `desc:"Extra Param Sheet name(s) to use (space separated if multiple) -- must be valid name as listed in compiled-in params or loaded params"`
+ Sheet string
// extra tag to add to file names and logs saved from this run
- Tag string `desc:"extra tag to add to file names and logs saved from this run"`
+ Tag string
// user note -- describe the run params etc -- like a git commit message for the run
- Note string `desc:"user note -- describe the run params etc -- like a git commit message for the run"`
+ Note string
// Name of the JSON file to input saved parameters from.
- File string `nest:"+" desc:"Name of the JSON file to input saved parameters from."`
+ File string `nest:"+"`
// Save a snapshot of all current param and config settings in a directory named params_ (or _good if Good is true), then quit -- useful for comparing to later changes and seeing multiple views of current params
- SaveAll bool `nest:"+" desc:"Save a snapshot of all current param and config settings in a directory named params_ (or _good if Good is true), then quit -- useful for comparing to later changes and seeing multiple views of current params"`
+ SaveAll bool `nest:"+"`
// for SaveAll, save to params_good for a known good params state. This can be done prior to making a new release after all tests are passing -- add results to git to provide a full diff record of all params over time.
- Good bool `nest:"+" desc:"for SaveAll, save to params_good for a known good params state. This can be done prior to making a new release after all tests are passing -- add results to git to provide a full diff record of all params over time."`
+ Good bool `nest:"+"`
}
// RunConfig has config parameters related to running the sim
type RunConfig struct {
- // [def: true] use the GPU for computation -- generally faster even for small models if NData ~16
- GPU bool `def:"true" desc:"use the GPU for computation -- generally faster even for small models if NData ~16"`
+ // use the GPU for computation -- generally faster even for small models if NData ~16
+ GPU bool `def:"true"`
- // [def: 16] [min: 1] number of data-parallel items to process in parallel per trial -- works (and is significantly faster) for both CPU and GPU. Results in an effective mini-batch of learning.
- NData int `def:"16" min:"1" desc:"number of data-parallel items to process in parallel per trial -- works (and is significantly faster) for both CPU and GPU. Results in an effective mini-batch of learning."`
+ // number of data-parallel items to process in parallel per trial -- works (and is significantly faster) for both CPU and GPU. Results in an effective mini-batch of learning.
+ NData int `def:"16" min:"1"`
- // [def: 0] number of parallel threads for CPU computation -- 0 = use default
- NThreads int `def:"0" desc:"number of parallel threads for CPU computation -- 0 = use default"`
+ // number of parallel threads for CPU computation -- 0 = use default
+ NThreads int `def:"0"`
- // [def: 0] starting run number -- determines the random seed -- runs counts from there -- can do all runs in parallel by launching separate jobs with each run, runs = 1
- Run int `def:"0" desc:"starting run number -- determines the random seed -- runs counts from there -- can do all runs in parallel by launching separate jobs with each run, runs = 1"`
+ // starting run number -- determines the random seed -- runs counts from there -- can do all runs in parallel by launching separate jobs with each run, runs = 1
+ Run int `def:"0"`
- // [def: 5] [min: 1] total number of runs to do when running Train
- NRuns int `def:"5" min:"1" desc:"total number of runs to do when running Train"`
+ // total number of runs to do when running Train
+ NRuns int `def:"5" min:"1"`
- // [def: 1000] total number of epochs per run
- NEpochs int `def:"1000" desc:"total number of epochs per run"`
+ // total number of epochs per run
+ NEpochs int `def:"1000"`
- // [def: 2] stop run after this number of perfect, zero-error epochs
- NZero int `def:"2" desc:"stop run after this number of perfect, zero-error epochs"`
+ // stop run after this number of perfect, zero-error epochs
+ NZero int `def:"2"`
- // [def: 32] total number of trials per epoch. Should be an even multiple of NData.
- NTrials int `def:"32" desc:"total number of trials per epoch. Should be an even multiple of NData."`
+ // total number of trials per epoch. Should be an even multiple of NData.
+ NTrials int `def:"32"`
- // [def: 5] how often to run through all the test patterns, in terms of training epochs -- can use 0 or -1 for no testing
- TestInterval int `def:"5" desc:"how often to run through all the test patterns, in terms of training epochs -- can use 0 or -1 for no testing"`
+ // how often to run through all the test patterns, in terms of training epochs -- can use 0 or -1 for no testing
+ TestInterval int `def:"5"`
- // [def: 5] how frequently (in epochs) to compute PCA on hidden representations to measure variance?
- PCAInterval int `def:"5" desc:"how frequently (in epochs) to compute PCA on hidden representations to measure variance?"`
+ // how frequently (in epochs) to compute PCA on hidden representations to measure variance?
+ PCAInterval int `def:"5"`
// if non-empty, is the name of weights file to load at start of first run -- for testing
- StartWts string `desc:"if non-empty, is the name of weights file to load at start of first run -- for testing"`
+ StartWts string
}
// LogConfig has config parameters related to logging data
type LogConfig struct {
// if true, save final weights after each run
- SaveWts bool `desc:"if true, save final weights after each run"`
+ SaveWts bool
- // [def: true] if true, save train epoch log to file, as .epc.tsv typically
- Epoch bool `def:"true" nest:"+" desc:"if true, save train epoch log to file, as .epc.tsv typically"`
+ // if true, save train epoch log to file, as .epc.tsv typically
+ Epoch bool `def:"true" nest:"+"`
- // [def: true] if true, save run log to file, as .run.tsv typically
- Run bool `def:"true" nest:"+" desc:"if true, save run log to file, as .run.tsv typically"`
+ // if true, save run log to file, as .run.tsv typically
+ Run bool `def:"true" nest:"+"`
- // [def: false] if true, save train trial log to file, as .trl.tsv typically. May be large.
- Trial bool `def:"false" nest:"+" desc:"if true, save train trial log to file, as .trl.tsv typically. May be large."`
+ // if true, save train trial log to file, as .trl.tsv typically. May be large.
+ Trial bool `def:"false" nest:"+"`
- // [def: false] if true, save testing epoch log to file, as .tst_epc.tsv typically. In general it is better to copy testing items over to the training epoch log and record there.
- TestEpoch bool `def:"false" nest:"+" desc:"if true, save testing epoch log to file, as .tst_epc.tsv typically. In general it is better to copy testing items over to the training epoch log and record there."`
+ // if true, save testing epoch log to file, as .tst_epc.tsv typically. In general it is better to copy testing items over to the training epoch log and record there.
+ TestEpoch bool `def:"false" nest:"+"`
- // [def: false] if true, save testing trial log to file, as .tst_trl.tsv typically. May be large.
- TestTrial bool `def:"false" nest:"+" desc:"if true, save testing trial log to file, as .tst_trl.tsv typically. May be large."`
+ // if true, save testing trial log to file, as .tst_trl.tsv typically. May be large.
+ TestTrial bool `def:"false" nest:"+"`
// if true, save network activation etc data from testing trials, for later viewing in netview
- NetData bool `desc:"if true, save network activation etc data from testing trials, for later viewing in netview"`
+ NetData bool
}
// Config is a standard Sim config -- use as a starting point.
type Config struct {
// specify include files here, and after configuration, it contains list of include files added
- Includes []string `desc:"specify include files here, and after configuration, it contains list of include files added"`
+ Includes []string
- // [def: true] open the GUI -- does not automatically run -- if false, then runs automatically and quits
- GUI bool `def:"true" desc:"open the GUI -- does not automatically run -- if false, then runs automatically and quits"`
+ // open the GUI -- does not automatically run -- if false, then runs automatically and quits
+ GUI bool `def:"true"`
// log debugging information
- Debug bool `desc:"log debugging information"`
+ Debug bool
- // [view: add-fields] parameter related configuration options
- Params ParamConfig `view:"add-fields" desc:"parameter related configuration options"`
+ // parameter related configuration options
+ Params ParamConfig `view:"add-fields"`
- // [view: add-fields] sim running related configuration options
- Run RunConfig `view:"add-fields" desc:"sim running related configuration options"`
+ // sim running related configuration options
+ Run RunConfig `view:"add-fields"`
- // [view: add-fields] data logging related configuration options
- Log LogConfig `view:"add-fields" desc:"data logging related configuration options"`
+ // data logging related configuration options
+ Log LogConfig `view:"add-fields"`
}
func (cfg *Config) IncludesPtr() *[]string { return &cfg.Includes }
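The config.go changes above are entirely mechanical: each desc struct tag moves into a doc comment on the line above the field, while machine-read tags (def, min, nest, view) stay in place. A minimal before/after sketch of the pattern (field chosen from the struct above):

package config

// OldStyle shows the v1 convention: the description lives in a struct tag.
type OldStyle struct {
	NRuns int `def:"5" min:"1" desc:"total number of runs to do when running Train"`
}

// NewStyle shows the v2 convention: the doc comment is the description,
// and only functional tags remain.
type NewStyle struct {
	// total number of runs to do when running Train
	NRuns int `def:"5" min:"1"`
}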
diff --git a/examples/ra25x/params.go b/examples/ra25x/params.go
index f45cf278b..704c994c1 100644
--- a/examples/ra25x/params.go
+++ b/examples/ra25x/params.go
@@ -5,8 +5,8 @@
package main
import (
- "github.com/emer/emergent/netparams"
- "github.com/emer/emergent/params"
+ "github.com/emer/emergent/v2/netparams"
+ "github.com/emer/emergent/v2/params"
)
// ParamSets sets the minimal non-default params
diff --git a/examples/ra25x/params_good/params_all.txt b/examples/ra25x/params_good/params_all.txt
index 9a6f8faa3..c0e757369 100644
--- a/examples/ra25x/params_good/params_all.txt
+++ b/examples/ra25x/params_good/params_all.txt
@@ -11,13 +11,13 @@ Act: {
Clamp: { IsInput: true IsTarget: false Ge: 1.5 Add: false ErrThr: 0.5 }
Noise: { On: false GeHz: 100 Ge: 0.001 GiHz: 200 Gi: 0.001 }
VmRange: { Min: 0.1 Max: 1 }
- Mahp: { Gbar: 0.02 Voff: -30 Vslope: 9 TauMax: 1000 Tadj: 3.2093637 DtMax: 0.001 }
+ Mahp: { Gbar: 0.02 Voff: -30 Vslope: 9 TauMax: 1000 Tadj: 3.209364 DtMax: 0.001 }
Sahp: { Gbar: 0.05 CaTau: 5 Off: 0.8 Slope: 0.02 TauMax: 1 CaDt: 0.2 DtMax: 1 }
KNa: { On: true TrialSlow: false Med: { On: true Rise: 0.02 Max: 0.2 Tau: 200 Dt: 0.005 } Slow: { On: true Rise: 0.001 Max: 0.2 Tau: 1000 Dt: 0.001
} }
NMDA: { Gbar: 0.006 Tau: 100 ITau: 1 MgC: 1.2 Voff: 0 }
MaintNMDA: { Gbar: 0.007 Tau: 200 ITau: 1 MgC: 1.4 Voff: 0 }
- GabaB: { Gbar: 0.015 RiseTau: 45 DecayTau: 50 Gbase: 0.2 GiSpike: 10 MaxTime: 47.41225 TauFact: 2.5811758 RiseDt: 0.022222223 DecayDt: 0.02 }
+ GabaB: { Gbar: 0.015 RiseTau: 45 DecayTau: 50 Gbase: 0.2 GiSpike: 10 MaxTime: 47.41225 TauFact: 2.5811756 RiseDt: 0.022222223 DecayDt: 0.02 }
VGCC: { Gbar: 0.02 Ca: 1 }
AK: { Gbar: 0.1 Hf: 0.076 Mf: 0.075 Voff: 2 Vmax: -37 }
SKCa: { Gbar: 0 C50: 0.5 ActTau: 15 DeTau: 30 KCaR: 0.8 CaRDecayTau: 150 CaInThr: 0.01 CaInTau: 50 }
@@ -37,7 +37,7 @@ Learn: {
TrgAvgAct: { On: true GiBaseInit: 0 ErrLRate: 0.02 SynScaleRate: 0.0002 SubMean: 1 Permute: true Pool: true TrgRange: { Min: 0.5 Max: 2
} }
RLRate: { On: true SigmoidMin: 0.05 Diff: true SpkThr: 0.1 DiffThr: 0.02 Min: 0.001 }
- NeuroMod: { DAMod: 0 Valence: 0 DAModGain: 0.5 DALRateSign: false DALRateMod: 0 AChLRateMod: 0 AChDisInhib: 0 BurstGain: 1 DipGain: 1 }
+ NeuroMod: { DAMod: NoDAMod Valence: Positive DAModGain: 0.5 DALRateSign: false DALRateMod: 0 AChLRateMod: 0 AChDisInhib: 0 BurstGain: 1 DipGain: 1 }
}
/////////////////////////////////////////////////
Layer: Hidden1
@@ -52,13 +52,13 @@ Act: {
Clamp: { IsInput: false IsTarget: false Ge: 0.8 Add: false ErrThr: 0.5 }
Noise: { On: false GeHz: 100 Ge: 0.001 GiHz: 200 Gi: 0.001 }
VmRange: { Min: 0.1 Max: 1 }
- Mahp: { Gbar: 0.02 Voff: -30 Vslope: 9 TauMax: 1000 Tadj: 3.2093637 DtMax: 0.001 }
+ Mahp: { Gbar: 0.02 Voff: -30 Vslope: 9 TauMax: 1000 Tadj: 3.209364 DtMax: 0.001 }
Sahp: { Gbar: 0.05 CaTau: 5 Off: 0.8 Slope: 0.02 TauMax: 1 CaDt: 0.2 DtMax: 1 }
KNa: { On: true TrialSlow: false Med: { On: true Rise: 0.02 Max: 0.2 Tau: 200 Dt: 0.005 } Slow: { On: true Rise: 0.001 Max: 0.2 Tau: 1000 Dt: 0.001
} }
NMDA: { Gbar: 0.006 Tau: 100 ITau: 1 MgC: 1.2 Voff: 0 }
MaintNMDA: { Gbar: 0.007 Tau: 200 ITau: 1 MgC: 1.4 Voff: 0 }
- GabaB: { Gbar: 0.015 RiseTau: 45 DecayTau: 50 Gbase: 0.2 GiSpike: 10 MaxTime: 47.41225 TauFact: 2.5811758 RiseDt: 0.022222223 DecayDt: 0.02 }
+ GabaB: { Gbar: 0.015 RiseTau: 45 DecayTau: 50 Gbase: 0.2 GiSpike: 10 MaxTime: 47.41225 TauFact: 2.5811756 RiseDt: 0.022222223 DecayDt: 0.02 }
VGCC: { Gbar: 0.02 Ca: 25 }
AK: { Gbar: 0.1 Hf: 0.076 Mf: 0.075 Voff: 2 Vmax: -37 }
SKCa: { Gbar: 0 C50: 0.5 ActTau: 15 DeTau: 30 KCaR: 0.8 CaRDecayTau: 150 CaInThr: 0.01 CaInTau: 50 }
@@ -78,7 +78,7 @@ Learn: {
TrgAvgAct: { On: true GiBaseInit: 0 ErrLRate: 0.02 SynScaleRate: 0.0002 SubMean: 1 Permute: true Pool: true TrgRange: { Min: 0.5 Max: 2
} }
RLRate: { On: true SigmoidMin: 0.05 Diff: true SpkThr: 0.1 DiffThr: 0.02 Min: 0.001 }
- NeuroMod: { DAMod: 0 Valence: 0 DAModGain: 0.5 DALRateSign: false DALRateMod: 0 AChLRateMod: 0 AChDisInhib: 0 BurstGain: 1 DipGain: 1 }
+ NeuroMod: { DAMod: NoDAMod Valence: Positive DAModGain: 0.5 DALRateSign: false DALRateMod: 0 AChLRateMod: 0 AChDisInhib: 0 BurstGain: 1 DipGain: 1 }
}
Burst: {
ThrRel: 0.1 ThrAbs: 0.1
@@ -136,13 +136,13 @@ Act: {
Clamp: { IsInput: false IsTarget: false Ge: 0.8 Add: false ErrThr: 0.5 }
Noise: { On: false GeHz: 100 Ge: 0.001 GiHz: 200 Gi: 0.001 }
VmRange: { Min: 0.1 Max: 1 }
- Mahp: { Gbar: 0.02 Voff: -30 Vslope: 9 TauMax: 1000 Tadj: 3.2093637 DtMax: 0.001 }
+ Mahp: { Gbar: 0.02 Voff: -30 Vslope: 9 TauMax: 1000 Tadj: 3.209364 DtMax: 0.001 }
Sahp: { Gbar: 0.05 CaTau: 5 Off: 0.8 Slope: 0.02 TauMax: 1 CaDt: 0.2 DtMax: 1 }
KNa: { On: true TrialSlow: false Med: { On: true Rise: 0.02 Max: 0.2 Tau: 200 Dt: 0.005 } Slow: { On: true Rise: 0.001 Max: 0.2 Tau: 1000 Dt: 0.001
} }
NMDA: { Gbar: 0.006 Tau: 100 ITau: 1 MgC: 1.2 Voff: 0 }
MaintNMDA: { Gbar: 0.007 Tau: 200 ITau: 1 MgC: 1.4 Voff: 0 }
- GabaB: { Gbar: 0.015 RiseTau: 45 DecayTau: 50 Gbase: 0.2 GiSpike: 10 MaxTime: 47.41225 TauFact: 2.5811758 RiseDt: 0.022222223 DecayDt: 0.02 }
+ GabaB: { Gbar: 0.015 RiseTau: 45 DecayTau: 50 Gbase: 0.2 GiSpike: 10 MaxTime: 47.41225 TauFact: 2.5811756 RiseDt: 0.022222223 DecayDt: 0.02 }
VGCC: { Gbar: 0.02 Ca: 25 }
AK: { Gbar: 0.1 Hf: 0.076 Mf: 0.075 Voff: 2 Vmax: -37 }
SKCa: { Gbar: 0 C50: 0.5 ActTau: 15 DeTau: 30 KCaR: 0.8 CaRDecayTau: 150 CaInThr: 0.01 CaInTau: 50 }
@@ -162,7 +162,7 @@ Learn: {
TrgAvgAct: { On: true GiBaseInit: 0 ErrLRate: 0.02 SynScaleRate: 0.0002 SubMean: 1 Permute: true Pool: true TrgRange: { Min: 0.5 Max: 2
} }
RLRate: { On: true SigmoidMin: 0.05 Diff: true SpkThr: 0.1 DiffThr: 0.02 Min: 0.001 }
- NeuroMod: { DAMod: 0 Valence: 0 DAModGain: 0.5 DALRateSign: false DALRateMod: 0 AChLRateMod: 0 AChDisInhib: 0 BurstGain: 1 DipGain: 1 }
+ NeuroMod: { DAMod: NoDAMod Valence: Positive DAModGain: 0.5 DALRateSign: false DALRateMod: 0 AChLRateMod: 0 AChDisInhib: 0 BurstGain: 1 DipGain: 1 }
}
Burst: {
ThrRel: 0.1 ThrAbs: 0.1
@@ -220,13 +220,13 @@ Act: {
Clamp: { IsInput: false IsTarget: true Ge: 0.8 Add: false ErrThr: 0.5 }
Noise: { On: false GeHz: 100 Ge: 0.001 GiHz: 200 Gi: 0.001 }
VmRange: { Min: 0.1 Max: 1 }
- Mahp: { Gbar: 0.02 Voff: -30 Vslope: 9 TauMax: 1000 Tadj: 3.2093637 DtMax: 0.001 }
+ Mahp: { Gbar: 0.02 Voff: -30 Vslope: 9 TauMax: 1000 Tadj: 3.209364 DtMax: 0.001 }
Sahp: { Gbar: 0.05 CaTau: 5 Off: 0.8 Slope: 0.02 TauMax: 1 CaDt: 0.2 DtMax: 1 }
KNa: { On: true TrialSlow: false Med: { On: true Rise: 0.02 Max: 0.2 Tau: 200 Dt: 0.005 } Slow: { On: true Rise: 0.001 Max: 0.2 Tau: 1000 Dt: 0.001
} }
NMDA: { Gbar: 0.006 Tau: 100 ITau: 1 MgC: 1.2 Voff: 0 }
MaintNMDA: { Gbar: 0.007 Tau: 200 ITau: 1 MgC: 1.4 Voff: 0 }
- GabaB: { Gbar: 0.015 RiseTau: 45 DecayTau: 50 Gbase: 0.2 GiSpike: 10 MaxTime: 47.41225 TauFact: 2.5811758 RiseDt: 0.022222223 DecayDt: 0.02 }
+ GabaB: { Gbar: 0.015 RiseTau: 45 DecayTau: 50 Gbase: 0.2 GiSpike: 10 MaxTime: 47.41225 TauFact: 2.5811756 RiseDt: 0.022222223 DecayDt: 0.02 }
VGCC: { Gbar: 0.02 Ca: 1 }
AK: { Gbar: 0.1 Hf: 0.076 Mf: 0.075 Voff: 2 Vmax: -37 }
SKCa: { Gbar: 0 C50: 0.5 ActTau: 15 DeTau: 30 KCaR: 0.8 CaRDecayTau: 150 CaInThr: 0.01 CaInTau: 50 }
@@ -246,7 +246,7 @@ Learn: {
TrgAvgAct: { On: true GiBaseInit: 0 ErrLRate: 0.02 SynScaleRate: 0.0002 SubMean: 1 Permute: true Pool: true TrgRange: { Min: 0.5 Max: 2
} }
RLRate: { On: true SigmoidMin: 0.05 Diff: true SpkThr: 0.1 DiffThr: 0.02 Min: 0.001 }
- NeuroMod: { DAMod: 0 Valence: 0 DAModGain: 0.5 DALRateSign: false DALRateMod: 0 AChLRateMod: 0 AChDisInhib: 0 BurstGain: 1 DipGain: 1 }
+ NeuroMod: { DAMod: NoDAMod Valence: Positive DAModGain: 0.5 DALRateSign: false DALRateMod: 0 AChLRateMod: 0 AChDisInhib: 0 BurstGain: 1 DipGain: 1 }
}
///////////////////////////////////////////////////
Prjn: Hidden2ToOutput
diff --git a/examples/ra25x/ra25x.go b/examples/ra25x/ra25x.go
index 15eceddde..88c21004f 100644
--- a/examples/ra25x/ra25x.go
+++ b/examples/ra25x/ra25x.go
@@ -8,32 +8,34 @@
// defined over 5x5 input / output layers (i.e., 25 units)
package main
+//go:generate goki generate -add-types
+
import (
"log"
"os"
"github.com/emer/axon/axon"
- "github.com/emer/emergent/econfig"
- "github.com/emer/emergent/egui"
- "github.com/emer/emergent/elog"
- "github.com/emer/emergent/emer"
- "github.com/emer/emergent/env"
- "github.com/emer/emergent/erand"
- "github.com/emer/emergent/estats"
- "github.com/emer/emergent/etime"
- "github.com/emer/emergent/looper"
- "github.com/emer/emergent/netview"
- "github.com/emer/emergent/patgen"
- "github.com/emer/emergent/prjn"
- "github.com/emer/empi/mpi"
- "github.com/emer/etable/agg"
- "github.com/emer/etable/etable"
- "github.com/emer/etable/etensor"
- "github.com/emer/etable/minmax"
- "github.com/emer/etable/tsragg"
- "github.com/goki/gi/gi"
- "github.com/goki/gi/gimain"
- "github.com/goki/mat32"
+ "github.com/emer/emergent/v2/econfig"
+ "github.com/emer/emergent/v2/egui"
+ "github.com/emer/emergent/v2/elog"
+ "github.com/emer/emergent/v2/emer"
+ "github.com/emer/emergent/v2/env"
+ "github.com/emer/emergent/v2/erand"
+ "github.com/emer/emergent/v2/estats"
+ "github.com/emer/emergent/v2/etime"
+ "github.com/emer/emergent/v2/looper"
+ "github.com/emer/emergent/v2/netview"
+ "github.com/emer/emergent/v2/patgen"
+ "github.com/emer/emergent/v2/prjn"
+ "github.com/emer/empi/v2/mpi"
+ "goki.dev/etable/v2/agg"
+ "goki.dev/etable/v2/etable"
+ "goki.dev/etable/v2/etensor"
+ "goki.dev/etable/v2/minmax"
+ "goki.dev/etable/v2/tsragg"
+ "goki.dev/gi/v2/gi"
+ "goki.dev/gi/v2/gimain"
+ "goki.dev/mat32/v2"
)
func main() {
@@ -41,7 +43,7 @@ func main() {
sim.New()
sim.ConfigAll()
if sim.Config.GUI {
- gimain.Main(sim.RunGUI)
+ gimain.Run(sim.RunGUI)
} else {
sim.RunNoGUI()
}
@@ -57,40 +59,40 @@ func main() {
type Sim struct {
// simulation configuration parameters -- set by .toml config file and / or args
- Config Config `desc:"simulation configuration parameters -- set by .toml config file and / or args"`
+ Config Config
- // [view: no-inline] the network -- click to view / edit parameters for layers, prjns, etc
- Net *axon.Network `view:"no-inline" desc:"the network -- click to view / edit parameters for layers, prjns, etc"`
+ // the network -- click to view / edit parameters for layers, prjns, etc
+ Net *axon.Network `view:"no-inline"`
- // [view: inline] network parameter management
- Params emer.NetParams `view:"inline" desc:"network parameter management"`
+ // network parameter management
+ Params emer.NetParams `view:"inline"`
- // [view: no-inline] contains looper control loops for running sim
- Loops *looper.Manager `view:"no-inline" desc:"contains looper control loops for running sim"`
+ // contains looper control loops for running sim
+ Loops *looper.Manager `view:"no-inline"`
// contains computed statistic values
- Stats estats.Stats `desc:"contains computed statistic values"`
+ Stats estats.Stats
// Contains all the logs and information about the logs.
- Logs elog.Logs `desc:"Contains all the logs and information about the logs.'"`
+ Logs elog.Logs
- // [view: no-inline] the training patterns to use
- Pats *etable.Table `view:"no-inline" desc:"the training patterns to use"`
+ // the training patterns to use
+ Pats *etable.Table `view:"no-inline"`
- // [view: no-inline] Environments
- Envs env.Envs `view:"no-inline" desc:"Environments"`
+ // Environments
+ Envs env.Envs `view:"no-inline"`
// axon timing parameters and state
- Context axon.Context `desc:"axon timing parameters and state"`
+ Context axon.Context
- // [view: inline] netview update parameters
- ViewUpdt netview.ViewUpdt `view:"inline" desc:"netview update parameters"`
+ // netview update parameters
+ ViewUpdt netview.ViewUpdt `view:"inline"`
- // [view: -] manages all the gui elements
- GUI egui.GUI `view:"-" desc:"manages all the gui elements"`
+ // manages all the gui elements
+ GUI egui.GUI `view:"-"`
- // [view: -] a list of random seeds to use for each run
- RndSeeds erand.Seeds `view:"-" desc:"a list of random seeds to use for each run"`
+ // a list of random seeds to use for each run
+ RndSeeds erand.Seeds `view:"-"`
}
// New creates new blank elements and initializes defaults
@@ -614,10 +616,10 @@ func (ss *Sim) Log(mode etime.Modes, time etime.Times) {
////////////////////////////////////////////////////////////////////////////////////////////
// Gui
-// ConfigGui configures the GoGi gui interface for this simulation,
-func (ss *Sim) ConfigGui() *gi.Window {
+// ConfigGUI configures the GoGi gui interface for this simulation,
+func (ss *Sim) ConfigGUI() {
title := "Axon Random Associator"
- ss.GUI.MakeWindow(ss, "ra25x", title, `This demonstrates a basic Axon model. See emergent on GitHub.`)
+ ss.GUI.MakeBody(ss, "ra25x", title, `This demonstrates a basic Axon model. See emergent on GitHub.`)
ss.GUI.CycleUpdateInterval = 10
nv := ss.GUI.AddNetView("NetView")
@@ -626,50 +628,52 @@ func (ss *Sim) ConfigGui() *gi.Window {
ss.ViewUpdt.Config(nv, etime.Phase, etime.Phase)
ss.GUI.ViewUpdt = &ss.ViewUpdt
- nv.Scene().Camera.Pose.Pos.Set(0, 1, 2.75) // more "head on" than default which is more "top down"
- nv.Scene().Camera.LookAt(mat32.Vec3{0, 0, 0}, mat32.Vec3{0, 1, 0})
+ nv.SceneXYZ().Camera.Pose.Pos.Set(0, 1, 2.75) // more "head on" than default which is more "top down"
+ nv.SceneXYZ().Camera.LookAt(mat32.V3(0, 0, 0), mat32.V3(0, 1, 0))
ss.GUI.AddPlots(title, &ss.Logs)
- ss.GUI.AddToolbarItem(egui.ToolbarItem{Label: "Init", Icon: "update",
- Tooltip: "Initialize everything including network weights, and start over. Also applies current params.",
- Active: egui.ActiveStopped,
- Func: func() {
- ss.Init()
- ss.GUI.UpdateWindow()
- },
- })
+ ss.GUI.Body.AddAppBar(func(tb *gi.Toolbar) {
+ ss.GUI.AddToolbarItem(tb, egui.ToolbarItem{Label: "Init", Icon: "update",
+ Tooltip: "Initialize everything including network weights, and start over. Also applies current params.",
+ Active: egui.ActiveStopped,
+ Func: func() {
+ ss.Init()
+ ss.GUI.UpdateWindow()
+ },
+ })
- ss.GUI.AddLooperCtrl(ss.Loops, []etime.Modes{etime.Train, etime.Test})
-
- ////////////////////////////////////////////////
- ss.GUI.ToolBar.AddSeparator("log")
- ss.GUI.AddToolbarItem(egui.ToolbarItem{Label: "Reset RunLog",
- Icon: "reset",
- Tooltip: "Reset the accumulated log of all Runs, which are tagged with the ParamSet used",
- Active: egui.ActiveAlways,
- Func: func() {
- ss.Logs.ResetLog(etime.Train, etime.Run)
- ss.GUI.UpdatePlot(etime.Train, etime.Run)
- },
- })
- ////////////////////////////////////////////////
- ss.GUI.ToolBar.AddSeparator("misc")
- ss.GUI.AddToolbarItem(egui.ToolbarItem{Label: "New Seed",
- Icon: "new",
- Tooltip: "Generate a new initial random seed to get different results. By default, Init re-establishes the same initial seed every time.",
- Active: egui.ActiveAlways,
- Func: func() {
- ss.RndSeeds.NewSeeds()
- },
- })
- ss.GUI.AddToolbarItem(egui.ToolbarItem{Label: "README",
- Icon: "file-markdown",
- Tooltip: "Opens your browser on the README file that contains instructions for how to run this model.",
- Active: egui.ActiveAlways,
- Func: func() {
- gi.OpenURL("https://github.com/emer/axon/blob/master/examples/ra25x/README.md")
- },
+ ss.GUI.AddLooperCtrl(tb, ss.Loops, []etime.Modes{etime.Train, etime.Test})
+
+ ////////////////////////////////////////////////
+ gi.NewSeparator(tb)
+ ss.GUI.AddToolbarItem(tb, egui.ToolbarItem{Label: "Reset RunLog",
+ Icon: "reset",
+ Tooltip: "Reset the accumulated log of all Runs, which are tagged with the ParamSet used",
+ Active: egui.ActiveAlways,
+ Func: func() {
+ ss.Logs.ResetLog(etime.Train, etime.Run)
+ ss.GUI.UpdatePlot(etime.Train, etime.Run)
+ },
+ })
+ ////////////////////////////////////////////////
+ gi.NewSeparator(tb)
+ ss.GUI.AddToolbarItem(tb, egui.ToolbarItem{Label: "New Seed",
+ Icon: "new",
+ Tooltip: "Generate a new initial random seed to get different results. By default, Init re-establishes the same initial seed every time.",
+ Active: egui.ActiveAlways,
+ Func: func() {
+ ss.RndSeeds.NewSeeds()
+ },
+ })
+ ss.GUI.AddToolbarItem(tb, egui.ToolbarItem{Label: "README",
+ Icon: "file-markdown",
+ Tooltip: "Opens your browser on the README file that contains instructions for how to run this model.",
+ Active: egui.ActiveAlways,
+ Func: func() {
+ gi.OpenURL("https://github.com/emer/axon/blob/master/examples/ra25x/README.md")
+ },
+ })
})
ss.GUI.FinalizeGUI(false)
if ss.Config.Run.GPU {
@@ -679,13 +683,12 @@ func (ss *Sim) ConfigGui() *gi.Window {
ss.Net.GPU.Destroy()
})
}
- return ss.GUI.Win
}
func (ss *Sim) RunGUI() {
ss.Init()
- win := ss.ConfigGui()
- win.StartEventLoop()
+ ss.ConfigGUI()
+ ss.GUI.Body.NewWindow().Run().Wait()
}
func (ss *Sim) RunNoGUI() {
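The toolbar migration in this file inverts control: instead of adding items to a stored ss.GUI.ToolBar, the code registers a function with Body.AddAppBar and builds items on the *gi.Toolbar it receives, and ToolBar.AddSeparator(name) becomes gi.NewSeparator(tb). A minimal sketch, assuming the gi v2 calls used in the diff (button actions omitted):

package main

import "goki.dev/gi/v2/gi"

func main() {
	b := gi.NewBody("demo")
	b.AddAppBar(func(tb *gi.Toolbar) {
		// items are built on the toolbar passed in, not on a stored field
		gi.NewButton(tb).SetText("Init")
		gi.NewSeparator(tb) // replaces ToolBar.AddSeparator("log")
		gi.NewButton(tb).SetText("Reset RunLog")
	})
	b.NewWindow().Run().Wait()
}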
diff --git a/examples/rl/cond_env.go b/examples/rl/cond_env.go
index 9ce91e6e6..5a22eb2b6 100644
--- a/examples/rl/cond_env.go
+++ b/examples/rl/cond_env.go
@@ -8,37 +8,37 @@ import (
"fmt"
"math/rand"
- "github.com/emer/emergent/env"
- "github.com/emer/emergent/erand"
- "github.com/emer/etable/etensor"
+ "github.com/emer/emergent/v2/env"
+ "github.com/emer/emergent/v2/erand"
+ "goki.dev/etable/v2/etensor"
)
// OnOff represents stimulus On / Off timing
type OnOff struct {
// is this stimulus active -- use it?
- Act bool `desc:"is this stimulus active -- use it?"`
+ Act bool
// when stimulus turns on
- On int `desc:"when stimulus turns on"`
+ On int
// when stimulus turns off
- Off int `desc:"when stimulu turns off"`
+ Off int
// probability of being active on any given trial
- P float32 `desc:"probability of being active on any given trial"`
+ P float32
// variability in onset timing (max number of trials before/after On that it could start)
- OnVar int `desc:"variability in onset timing (max number of trials before/after On that it could start)"`
+ OnVar int
// variability in offset timing (max number of trials before/after Off that it could end)
- OffVar int `desc:"variability in offset timing (max number of trials before/after Off that it could end)"`
+ OffVar int
- // [view: -] current active status based on P probability
- CurAct bool `view:"-" desc:"current active status based on P probability"`
+ // current active status based on P probability
+ CurAct bool `view:"-"`
- // [view: -] current on / off values using Var variability
- CurOn, CurOff int `view:"-" desc:"current on / off values using Var variability"`
+ // current on / off values using Var variability
+ CurOn, CurOff int `view:"-"`
}
func (oo *OnOff) Set(act bool, on, off int) {
@@ -68,52 +68,52 @@ func (oo *OnOff) IsOn(tm int) bool {
type CondEnv struct {
// name of this environment
- Nm string `desc:"name of this environment"`
+ Nm string
// description of this environment
- Dsc string `desc:"description of this environment"`
+ Dsc string
// total time for trial
- TotTime int `desc:"total time for trial"`
+ TotTime int
- // [view: inline] Conditioned stimulus A (e.g., Tone)
- CSA OnOff `view:"inline" desc:"Conditioned stimulus A (e.g., Tone)"`
+ // Conditioned stimulus A (e.g., Tone)
+ CSA OnOff `view:"inline"`
- // [view: inline] Conditioned stimulus B (e.g., Light)
- CSB OnOff `view:"inline" desc:"Conditioned stimulus B (e.g., Light)"`
+ // Conditioned stimulus B (e.g., Light)
+ CSB OnOff `view:"inline"`
- // [view: inline] Conditioned stimulus C
- CSC OnOff `view:"inline" desc:"Conditioned stimulus C"`
+ // Conditioned stimulus C
+ CSC OnOff `view:"inline"`
- // [view: inline] Unconditioned stimulus -- reward
- US OnOff `view:"inline" desc:"Unconditioned stimulus -- reward"`
+ // Unconditioned stimulus -- reward
+ US OnOff `view:"inline"`
// value for reward
- RewVal float32 `desc:"value for reward"`
+ RewVal float32
// value for non-reward
- NoRewVal float32 `desc:"value for non-reward"`
+ NoRewVal float32
// one-hot input representation of current option
- Input etensor.Float64 `desc:"one-hot input representation of current option"`
+ Input etensor.Float64
// single reward value
- Reward etensor.Float64 `desc:"single reward value"`
+ Reward etensor.Float64
// true if a US reward value was set
- HasRew bool `desc:"true if a US reward value was set"`
+ HasRew bool
- // [view: inline] current run of model as provided during Init
- Run env.Ctr `view:"inline" desc:"current run of model as provided during Init"`
+ // current run of model as provided during Init
+ Run env.Ctr `view:"inline"`
- // [view: inline] number of times through Seq.Max number of sequences
- Epoch env.Ctr `view:"inline" desc:"number of times through Seq.Max number of sequences"`
+ // number of times through Seq.Max number of sequences
+ Epoch env.Ctr `view:"inline"`
- // [view: inline] one trial is a pass through all TotTime Events
- Trial env.Ctr `view:"inline" desc:"one trial is a pass through all TotTime Events"`
+ // one trial is a pass through all TotTime Events
+ Trial env.Ctr `view:"inline"`
- // [view: inline] event is one time step within Trial -- e.g., CS turning on, etc
- Event env.Ctr `view:"inline" desc:"event is one time step within Trial -- e.g., CS turning on, etc"`
+ // event is one time step within Trial -- e.g., CS turning on, etc
+ Event env.Ctr `view:"inline"`
}
func (ev *CondEnv) Name() string { return ev.Nm }
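OnOff above packages a stimulus window: Set fixes the nominal on/off times, P and the Var fields add per-trial randomness, and IsOn answers whether the stimulus is present at a given time step. A self-contained sketch of that contract, with the jitter and probability logic simplified away (the real Set/IsOn in this file also handle CurAct, OnVar, and OffVar):

package main

import "fmt"

// OnOff is a trimmed stand-in for the struct above.
type OnOff struct {
	Act           bool
	On, Off       int
	CurOn, CurOff int
}

// Set configures the nominal window; the real version also resamples
// CurAct and the jittered CurOn / CurOff each trial.
func (oo *OnOff) Set(act bool, on, off int) {
	oo.Act, oo.On, oo.Off = act, on, off
	oo.CurOn, oo.CurOff = on, off
}

// IsOn reports whether the stimulus is present at time step tm.
func (oo *OnOff) IsOn(tm int) bool {
	return oo.Act && tm >= oo.CurOn && tm < oo.CurOff
}

func main() {
	var cs OnOff
	cs.Set(true, 2, 5) // CS on from t=2 through t=4
	for t := 0; t < 7; t++ {
		fmt.Printf("t=%d on=%v\n", t, cs.IsOn(t))
	}
}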
diff --git a/examples/rl/config.go b/examples/rl/config.go
index dd6bb5fba..8d321ba68 100644
--- a/examples/rl/config.go
+++ b/examples/rl/config.go
@@ -10,104 +10,104 @@ package main
type EnvConfig struct {
// env parameters -- can set any field/subfield on Env struct, using standard TOML formatting
- Env map[string]any `desc:"env parameters -- can set any field/subfield on Env struct, using standard TOML formatting"`
+ Env map[string]any
}
// ParamConfig has config parameters related to sim params
type ParamConfig struct {
// network parameters
- Network map[string]any `desc:"network parameters"`
+ Network map[string]any
// Extra Param Sheet name(s) to use (space separated if multiple) -- must be valid name as listed in compiled-in params or loaded params
- Sheet string `desc:"Extra Param Sheet name(s) to use (space separated if multiple) -- must be valid name as listed in compiled-in params or loaded params"`
+ Sheet string
// extra tag to add to file names and logs saved from this run
- Tag string `desc:"extra tag to add to file names and logs saved from this run"`
+ Tag string
// user note -- describe the run params etc -- like a git commit message for the run
- Note string `desc:"user note -- describe the run params etc -- like a git commit message for the run"`
+ Note string
// Name of the JSON file to input saved parameters from.
- File string `nest:"+" desc:"Name of the JSON file to input saved parameters from."`
+ File string `nest:"+"`
// Save a snapshot of all current param and config settings in a directory named params_ (or _good if Good is true), then quit -- useful for comparing to later changes and seeing multiple views of current params
- SaveAll bool `nest:"+" desc:"Save a snapshot of all current param and config settings in a directory named params_ (or _good if Good is true), then quit -- useful for comparing to later changes and seeing multiple views of current params"`
+ SaveAll bool `nest:"+"`
// for SaveAll, save to params_good for a known good params state. This can be done prior to making a new release after all tests are passing -- add results to git to provide a full diff record of all params over time.
- Good bool `nest:"+" desc:"for SaveAll, save to params_good for a known good params state. This can be done prior to making a new release after all tests are passing -- add results to git to provide a full diff record of all params over time."`
+ Good bool `nest:"+"`
}
// RunConfig has config parameters related to running the sim
type RunConfig struct {
- // [def: false] use the GPU for computation -- only for testing in this model -- not faster
- GPU bool `def:"false" desc:"use the GPU for computation -- only for testing in this model -- not faster"`
+ // use the GPU for computation -- only for testing in this model -- not faster
+ GPU bool `def:"false"`
- // [def: 2] number of parallel threads for CPU computation -- 0 = use default
- NThreads int `def:"2" desc:"number of parallel threads for CPU computation -- 0 = use default"`
+ // number of parallel threads for CPU computation -- 0 = use default
+ NThreads int `def:"2"`
- // [def: 0] starting run number -- determines the random seed -- runs counts from there -- can do all runs in parallel by launching separate jobs with each run, runs = 1
- Run int `def:"0" desc:"starting run number -- determines the random seed -- runs counts from there -- can do all runs in parallel by launching separate jobs with each run, runs = 1"`
+ // starting run number -- determines the random seed -- runs count up from there -- can do all runs in parallel by launching separate jobs with each run, runs = 1
+ Run int `def:"0"`
- // [def: 1] [min: 1] total number of runs to do when running Train
- NRuns int `def:"1" min:"1" desc:"total number of runs to do when running Train"`
+ // total number of runs to do when running Train
+ NRuns int `def:"1" min:"1"`
- // [def: 100] total number of epochs per run
- NEpochs int `def:"100" desc:"total number of epochs per run"`
+ // total number of epochs per run
+ NEpochs int `def:"100"`
- // [def: 20] total number of trials per epoch -- should be number of ticks in env.
- NTrials int `def:"20" desc:"total number of trials per epoch -- should be number of ticks in env."`
+ // total number of trials per epoch -- should be number of ticks in env.
+ NTrials int `def:"20"`
}
// LogConfig has config parameters related to logging data
type LogConfig struct {
- // [def: ['DA','VSPatch']] stats to aggregate at higher levels
- AggStats []string `def:"['DA','VSPatch']" desc:"stats to aggregate at higher levels"`
+ // stats to aggregate at higher levels
+ AggStats []string `def:"['DA','VSPatch']"`
// if true, save final weights after each run
- SaveWts bool `desc:"if true, save final weights after each run"`
+ SaveWts bool
- // [def: true] if true, save train epoch log to file, as .epc.tsv typically
- Epoch bool `def:"true" nest:"+" desc:"if true, save train epoch log to file, as .epc.tsv typically"`
+ // if true, save train epoch log to file, as .epc.tsv typically
+ Epoch bool `def:"true" nest:"+"`
- // [def: true] if true, save run log to file, as .run.tsv typically
- Run bool `def:"true" nest:"+" desc:"if true, save run log to file, as .run.tsv typically"`
+ // if true, save run log to file, as .run.tsv typically
+ Run bool `def:"true" nest:"+"`
- // [def: false] if true, save train trial log to file, as .trl.tsv typically. May be large.
- Trial bool `def:"false" nest:"+" desc:"if true, save train trial log to file, as .trl.tsv typically. May be large."`
+ // if true, save train trial log to file, as .trl.tsv typically. May be large.
+ Trial bool `def:"false" nest:"+"`
// if true, save network activation etc data from testing trials, for later viewing in netview
- NetData bool `desc:"if true, save network activation etc data from testing trials, for later viewing in netview"`
+ NetData bool
}
// Config is a standard Sim config -- use as a starting point.
type Config struct {
// specify include files here, and after configuration, it contains list of include files added
- Includes []string `desc:"specify include files here, and after configuration, it contains list of include files added"`
+ Includes []string
// if true, use Rescorla-Wagner -- set in code or rebuild network
- RW bool `desc:"if true, use Rescorla-Wagner -- set in code or rebuild network"`
+ RW bool
- // [def: true] open the GUI -- does not automatically run -- if false, then runs automatically and quits
- GUI bool `def:"true" desc:"open the GUI -- does not automatically run -- if false, then runs automatically and quits"`
+ // open the GUI -- does not automatically run -- if false, then runs automatically and quits
+ GUI bool `def:"true"`
// log debugging information
- Debug bool `desc:"log debugging information"`
+ Debug bool
- // [view: add-fields] environment configuration options
- Env EnvConfig `view:"add-fields" desc:"environment configuration options"`
+ // environment configuration options
+ Env EnvConfig `view:"add-fields"`
- // [view: add-fields] parameter related configuration options
- Params ParamConfig `view:"add-fields" desc:"parameter related configuration options"`
+ // parameter related configuration options
+ Params ParamConfig `view:"add-fields"`
- // [view: add-fields] sim running related configuration options
- Run RunConfig `view:"add-fields" desc:"sim running related configuration options"`
+ // sim running related configuration options
+ Run RunConfig `view:"add-fields"`
- // [view: add-fields] data logging related configuration options
- Log LogConfig `view:"add-fields" desc:"data logging related configuration options"`
+ // data logging related configuration options
+ Log LogConfig `view:"add-fields"`
}
func (cfg *Config) IncludesPtr() *[]string { return &cfg.Includes }
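Note on the Config pattern above: the Env map receives any [Env] table entries from the TOML config file, keyed by field name on the Env struct. A minimal sketch of the parsed result (the field name and value here are hypothetical placeholders, not actual Env fields):

	// hypothetical TOML:
	//   [Env]
	//   SomeField = 1.5
	cfg := Config{
		Env: EnvConfig{Env: map[string]any{"SomeField": 1.5}},
	}
	_ = cfg // econfig then applies these entries by name to the Env struct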
diff --git a/examples/rl/params.go b/examples/rl/params.go
index f5c1a58a6..930511125 100644
--- a/examples/rl/params.go
+++ b/examples/rl/params.go
@@ -5,8 +5,8 @@
package main
import (
- "github.com/emer/emergent/netparams"
- "github.com/emer/emergent/params"
+ "github.com/emer/emergent/v2/netparams"
+ "github.com/emer/emergent/v2/params"
)
// ParamSets is the default set of parameters -- Base is always applied, and others can be optionally
diff --git a/examples/rl/params_good/params_all.txt b/examples/rl/params_good/params_all.txt
index 2b5b9a225..7a14f80eb 100644
--- a/examples/rl/params_good/params_all.txt
+++ b/examples/rl/params_good/params_all.txt
@@ -11,13 +11,13 @@ Act: {
Clamp: { IsInput: false IsTarget: false Ge: 0.8 Add: false ErrThr: 0.5 }
Noise: { On: false GeHz: 100 Ge: 0.001 GiHz: 200 Gi: 0.001 }
VmRange: { Min: 0.1 Max: 1 }
- Mahp: { Gbar: 0.02 Voff: -30 Vslope: 9 TauMax: 1000 Tadj: 3.2093637 DtMax: 0.001 }
+ Mahp: { Gbar: 0.02 Voff: -30 Vslope: 9 TauMax: 1000 Tadj: 3.209364 DtMax: 0.001 }
Sahp: { Gbar: 0.05 CaTau: 5 Off: 0.8 Slope: 0.02 TauMax: 1 CaDt: 0.2 DtMax: 1 }
KNa: { On: true TrialSlow: false Med: { On: true Rise: 0.02 Max: 0.2 Tau: 200 Dt: 0.005 } Slow: { On: true Rise: 0.001 Max: 0.2 Tau: 1000 Dt: 0.001
} }
NMDA: { Gbar: 0.006 Tau: 100 ITau: 1 MgC: 1.4 Voff: 0 }
MaintNMDA: { Gbar: 0.007 Tau: 200 ITau: 1 MgC: 1.4 Voff: 0 }
- GabaB: { Gbar: 0.015 RiseTau: 45 DecayTau: 50 Gbase: 0.2 GiSpike: 10 MaxTime: 47.41225 TauFact: 2.5811758 RiseDt: 0.022222223 DecayDt: 0.02 }
+ GabaB: { Gbar: 0.015 RiseTau: 45 DecayTau: 50 Gbase: 0.2 GiSpike: 10 MaxTime: 47.41225 TauFact: 2.5811756 RiseDt: 0.022222223 DecayDt: 0.02 }
VGCC: { Gbar: 0.02 Ca: 25 }
AK: { Gbar: 0.1 Hf: 0.076 Mf: 0.075 Voff: 2 Vmax: -37 }
SKCa: { Gbar: 0 C50: 0.5 ActTau: 15 DeTau: 30 KCaR: 0.8 CaRDecayTau: 150 CaInThr: 0.01 CaInTau: 50 }
@@ -37,7 +37,7 @@ Learn: {
TrgAvgAct: { On: true GiBaseInit: 0 ErrLRate: 0.02 SynScaleRate: 0.005 SubMean: 1 Permute: true Pool: true TrgRange: { Min: 0.5 Max: 2
} }
RLRate: { On: true SigmoidMin: 0.05 Diff: true SpkThr: 0.1 DiffThr: 0.02 Min: 0.001 }
- NeuroMod: { DAMod: 0 Valence: 0 DAModGain: 0.5 DALRateSign: false DALRateMod: 0 AChLRateMod: 0 AChDisInhib: 0 BurstGain: 1 DipGain: 1 }
+ NeuroMod: { DAMod: NoDAMod Valence: Positive DAModGain: 0.5 DALRateSign: false DALRateMod: 0 AChLRateMod: 0 AChDisInhib: 0 BurstGain: 1 DipGain: 1 }
}
/////////////////////////////////////////////////
Layer: RewPred
@@ -52,13 +52,13 @@ Act: {
Clamp: { IsInput: false IsTarget: false Ge: 0.8 Add: false ErrThr: 0.5 }
Noise: { On: false GeHz: 100 Ge: 0.001 GiHz: 200 Gi: 0.001 }
VmRange: { Min: 0.1 Max: 1 }
- Mahp: { Gbar: 0.02 Voff: -30 Vslope: 9 TauMax: 1000 Tadj: 3.2093637 DtMax: 0.001 }
+ Mahp: { Gbar: 0.02 Voff: -30 Vslope: 9 TauMax: 1000 Tadj: 3.209364 DtMax: 0.001 }
Sahp: { Gbar: 0.05 CaTau: 5 Off: 0.8 Slope: 0.02 TauMax: 1 CaDt: 0.2 DtMax: 1 }
KNa: { On: true TrialSlow: false Med: { On: true Rise: 0.02 Max: 0.2 Tau: 200 Dt: 0.005 } Slow: { On: true Rise: 0.001 Max: 0.2 Tau: 1000 Dt: 0.001
} }
NMDA: { Gbar: 0.006 Tau: 100 ITau: 1 MgC: 1.4 Voff: 0 }
MaintNMDA: { Gbar: 0.007 Tau: 200 ITau: 1 MgC: 1.4 Voff: 0 }
- GabaB: { Gbar: 0.015 RiseTau: 45 DecayTau: 50 Gbase: 0.2 GiSpike: 10 MaxTime: 47.41225 TauFact: 2.5811758 RiseDt: 0.022222223 DecayDt: 0.02 }
+ GabaB: { Gbar: 0.015 RiseTau: 45 DecayTau: 50 Gbase: 0.2 GiSpike: 10 MaxTime: 47.41225 TauFact: 2.5811756 RiseDt: 0.022222223 DecayDt: 0.02 }
VGCC: { Gbar: 0.02 Ca: 25 }
AK: { Gbar: 0.1 Hf: 0.076 Mf: 0.075 Voff: 2 Vmax: -37 }
SKCa: { Gbar: 0 C50: 0.5 ActTau: 15 DeTau: 30 KCaR: 0.8 CaRDecayTau: 150 CaInThr: 0.01 CaInTau: 50 }
@@ -78,7 +78,7 @@ Learn: {
TrgAvgAct: { On: true GiBaseInit: 0 ErrLRate: 0.02 SynScaleRate: 0.005 SubMean: 1 Permute: true Pool: true TrgRange: { Min: 0.5 Max: 2
} }
RLRate: { On: true SigmoidMin: 0.05 Diff: true SpkThr: 0.1 DiffThr: 0.02 Min: 0.001 }
- NeuroMod: { DAMod: 0 Valence: 0 DAModGain: 0.5 DALRateSign: false DALRateMod: 0 AChLRateMod: 0 AChDisInhib: 0 BurstGain: 1 DipGain: 1 }
+ NeuroMod: { DAMod: NoDAMod Valence: Positive DAModGain: 0.5 DALRateSign: false DALRateMod: 0 AChLRateMod: 0 AChDisInhib: 0 BurstGain: 1 DipGain: 1 }
}
///////////////////////////////////////////////////
Prjn: InputToRewPred
@@ -116,13 +116,13 @@ Act: {
Clamp: { IsInput: false IsTarget: false Ge: 0.8 Add: false ErrThr: 0.5 }
Noise: { On: false GeHz: 100 Ge: 0.001 GiHz: 200 Gi: 0.001 }
VmRange: { Min: 0.1 Max: 1 }
- Mahp: { Gbar: 0.02 Voff: -30 Vslope: 9 TauMax: 1000 Tadj: 3.2093637 DtMax: 0.001 }
+ Mahp: { Gbar: 0.02 Voff: -30 Vslope: 9 TauMax: 1000 Tadj: 3.209364 DtMax: 0.001 }
Sahp: { Gbar: 0.05 CaTau: 5 Off: 0.8 Slope: 0.02 TauMax: 1 CaDt: 0.2 DtMax: 1 }
KNa: { On: true TrialSlow: false Med: { On: true Rise: 0.02 Max: 0.2 Tau: 200 Dt: 0.005 } Slow: { On: true Rise: 0.001 Max: 0.2 Tau: 1000 Dt: 0.001
} }
NMDA: { Gbar: 0.006 Tau: 100 ITau: 1 MgC: 1.4 Voff: 0 }
MaintNMDA: { Gbar: 0.007 Tau: 200 ITau: 1 MgC: 1.4 Voff: 0 }
- GabaB: { Gbar: 0.015 RiseTau: 45 DecayTau: 50 Gbase: 0.2 GiSpike: 10 MaxTime: 47.41225 TauFact: 2.5811758 RiseDt: 0.022222223 DecayDt: 0.02 }
+ GabaB: { Gbar: 0.015 RiseTau: 45 DecayTau: 50 Gbase: 0.2 GiSpike: 10 MaxTime: 47.41225 TauFact: 2.5811756 RiseDt: 0.022222223 DecayDt: 0.02 }
VGCC: { Gbar: 0.02 Ca: 25 }
AK: { Gbar: 0.1 Hf: 0.076 Mf: 0.075 Voff: 2 Vmax: -37 }
SKCa: { Gbar: 0 C50: 0.5 ActTau: 15 DeTau: 30 KCaR: 0.8 CaRDecayTau: 150 CaInThr: 0.01 CaInTau: 50 }
@@ -142,7 +142,7 @@ Learn: {
TrgAvgAct: { On: true GiBaseInit: 0 ErrLRate: 0.02 SynScaleRate: 0.005 SubMean: 1 Permute: true Pool: true TrgRange: { Min: 0.5 Max: 2
} }
RLRate: { On: true SigmoidMin: 0.05 Diff: true SpkThr: 0.1 DiffThr: 0.02 Min: 0.001 }
- NeuroMod: { DAMod: 0 Valence: 0 DAModGain: 0.5 DALRateSign: false DALRateMod: 0 AChLRateMod: 0 AChDisInhib: 0 BurstGain: 1 DipGain: 1 }
+ NeuroMod: { DAMod: NoDAMod Valence: Positive DAModGain: 0.5 DALRateSign: false DALRateMod: 0 AChLRateMod: 0 AChDisInhib: 0 BurstGain: 1 DipGain: 1 }
}
TDInteg: {
Discount: 0.9 PredGain: 1 TDPredLayIdx: 1
@@ -160,13 +160,13 @@ Act: {
Clamp: { IsInput: false IsTarget: false Ge: 0.8 Add: false ErrThr: 0.5 }
Noise: { On: false GeHz: 100 Ge: 0.001 GiHz: 200 Gi: 0.001 }
VmRange: { Min: 0.1 Max: 1 }
- Mahp: { Gbar: 0.02 Voff: -30 Vslope: 9 TauMax: 1000 Tadj: 3.2093637 DtMax: 0.001 }
+ Mahp: { Gbar: 0.02 Voff: -30 Vslope: 9 TauMax: 1000 Tadj: 3.209364 DtMax: 0.001 }
Sahp: { Gbar: 0.05 CaTau: 5 Off: 0.8 Slope: 0.02 TauMax: 1 CaDt: 0.2 DtMax: 1 }
KNa: { On: true TrialSlow: false Med: { On: true Rise: 0.02 Max: 0.2 Tau: 200 Dt: 0.005 } Slow: { On: true Rise: 0.001 Max: 0.2 Tau: 1000 Dt: 0.001
} }
NMDA: { Gbar: 0.006 Tau: 100 ITau: 1 MgC: 1.4 Voff: 0 }
MaintNMDA: { Gbar: 0.007 Tau: 200 ITau: 1 MgC: 1.4 Voff: 0 }
- GabaB: { Gbar: 0.015 RiseTau: 45 DecayTau: 50 Gbase: 0.2 GiSpike: 10 MaxTime: 47.41225 TauFact: 2.5811758 RiseDt: 0.022222223 DecayDt: 0.02 }
+ GabaB: { Gbar: 0.015 RiseTau: 45 DecayTau: 50 Gbase: 0.2 GiSpike: 10 MaxTime: 47.41225 TauFact: 2.5811756 RiseDt: 0.022222223 DecayDt: 0.02 }
VGCC: { Gbar: 0.02 Ca: 25 }
AK: { Gbar: 0.1 Hf: 0.076 Mf: 0.075 Voff: 2 Vmax: -37 }
SKCa: { Gbar: 0 C50: 0.5 ActTau: 15 DeTau: 30 KCaR: 0.8 CaRDecayTau: 150 CaInThr: 0.01 CaInTau: 50 }
@@ -186,7 +186,7 @@ Learn: {
TrgAvgAct: { On: true GiBaseInit: 0 ErrLRate: 0.02 SynScaleRate: 0.005 SubMean: 1 Permute: true Pool: true TrgRange: { Min: 0.5 Max: 2
} }
RLRate: { On: true SigmoidMin: 0.05 Diff: true SpkThr: 0.1 DiffThr: 0.02 Min: 0.001 }
- NeuroMod: { DAMod: 0 Valence: 0 DAModGain: 0.5 DALRateSign: false DALRateMod: 0 AChLRateMod: 0 AChDisInhib: 0 BurstGain: 1 DipGain: 1 }
+ NeuroMod: { DAMod: NoDAMod Valence: Positive DAModGain: 0.5 DALRateSign: false DALRateMod: 0 AChLRateMod: 0 AChDisInhib: 0 BurstGain: 1 DipGain: 1 }
}
TDDa: {
TonicGe: 0.3 TDIntegLayIdx: 2
@@ -204,13 +204,13 @@ Act: {
Clamp: { IsInput: false IsTarget: false Ge: 0.8 Add: false ErrThr: 0.5 }
Noise: { On: false GeHz: 100 Ge: 0.001 GiHz: 200 Gi: 0.001 }
VmRange: { Min: 0.1 Max: 1 }
- Mahp: { Gbar: 0.02 Voff: -30 Vslope: 9 TauMax: 1000 Tadj: 3.2093637 DtMax: 0.001 }
+ Mahp: { Gbar: 0.02 Voff: -30 Vslope: 9 TauMax: 1000 Tadj: 3.209364 DtMax: 0.001 }
Sahp: { Gbar: 0.05 CaTau: 5 Off: 0.8 Slope: 0.02 TauMax: 1 CaDt: 0.2 DtMax: 1 }
KNa: { On: true TrialSlow: false Med: { On: true Rise: 0.02 Max: 0.2 Tau: 200 Dt: 0.005 } Slow: { On: true Rise: 0.001 Max: 0.2 Tau: 1000 Dt: 0.001
} }
NMDA: { Gbar: 0.006 Tau: 100 ITau: 1 MgC: 1.4 Voff: 0 }
MaintNMDA: { Gbar: 0.007 Tau: 200 ITau: 1 MgC: 1.4 Voff: 0 }
- GabaB: { Gbar: 0.015 RiseTau: 45 DecayTau: 50 Gbase: 0.2 GiSpike: 10 MaxTime: 47.41225 TauFact: 2.5811758 RiseDt: 0.022222223 DecayDt: 0.02 }
+ GabaB: { Gbar: 0.015 RiseTau: 45 DecayTau: 50 Gbase: 0.2 GiSpike: 10 MaxTime: 47.41225 TauFact: 2.5811756 RiseDt: 0.022222223 DecayDt: 0.02 }
VGCC: { Gbar: 0.02 Ca: 25 }
AK: { Gbar: 0.1 Hf: 0.076 Mf: 0.075 Voff: 2 Vmax: -37 }
SKCa: { Gbar: 0 C50: 0.5 ActTau: 15 DeTau: 30 KCaR: 0.8 CaRDecayTau: 150 CaInThr: 0.01 CaInTau: 50 }
@@ -230,7 +230,7 @@ Learn: {
TrgAvgAct: { On: false GiBaseInit: 0 ErrLRate: 0.02 SynScaleRate: 0.005 SubMean: 1 Permute: true Pool: true TrgRange: { Min: 0.5 Max: 2
} }
RLRate: { On: true SigmoidMin: 0.05 Diff: true SpkThr: 0.1 DiffThr: 0.02 Min: 0.001 }
- NeuroMod: { DAMod: 0 Valence: 0 DAModGain: 0.5 DALRateSign: false DALRateMod: 0 AChLRateMod: 0 AChDisInhib: 0 BurstGain: 1 DipGain: 1 }
+ NeuroMod: { DAMod: NoDAMod Valence: Positive DAModGain: 0.5 DALRateSign: false DALRateMod: 0 AChLRateMod: 0 AChDisInhib: 0 BurstGain: 1 DipGain: 1 }
}
LDT: {
SrcThr: 0.05 Rew: true MaintInhib: 2 NotMaintMax: 0.4 SrcLay1Idx: -1 SrcLay2Idx: -1 SrcLay3Idx: -1 SrcLay4Idx: -1
@@ -248,13 +248,13 @@ Act: {
Clamp: { IsInput: true IsTarget: false Ge: 1.5 Add: false ErrThr: 0.5 }
Noise: { On: false GeHz: 100 Ge: 0.001 GiHz: 200 Gi: 0.001 }
VmRange: { Min: 0.1 Max: 1 }
- Mahp: { Gbar: 0.02 Voff: -30 Vslope: 9 TauMax: 1000 Tadj: 3.2093637 DtMax: 0.001 }
+ Mahp: { Gbar: 0.02 Voff: -30 Vslope: 9 TauMax: 1000 Tadj: 3.209364 DtMax: 0.001 }
Sahp: { Gbar: 0.05 CaTau: 5 Off: 0.8 Slope: 0.02 TauMax: 1 CaDt: 0.2 DtMax: 1 }
KNa: { On: true TrialSlow: false Med: { On: true Rise: 0.02 Max: 0.2 Tau: 200 Dt: 0.005 } Slow: { On: true Rise: 0.001 Max: 0.2 Tau: 1000 Dt: 0.001
} }
NMDA: { Gbar: 0.006 Tau: 100 ITau: 1 MgC: 1.4 Voff: 0 }
MaintNMDA: { Gbar: 0.007 Tau: 200 ITau: 1 MgC: 1.4 Voff: 0 }
- GabaB: { Gbar: 0.015 RiseTau: 45 DecayTau: 50 Gbase: 0.2 GiSpike: 10 MaxTime: 47.41225 TauFact: 2.5811758 RiseDt: 0.022222223 DecayDt: 0.02 }
+ GabaB: { Gbar: 0.015 RiseTau: 45 DecayTau: 50 Gbase: 0.2 GiSpike: 10 MaxTime: 47.41225 TauFact: 2.5811756 RiseDt: 0.022222223 DecayDt: 0.02 }
VGCC: { Gbar: 0.02 Ca: 25 }
AK: { Gbar: 0.1 Hf: 0.076 Mf: 0.075 Voff: 2 Vmax: -37 }
SKCa: { Gbar: 0 C50: 0.5 ActTau: 15 DeTau: 30 KCaR: 0.8 CaRDecayTau: 150 CaInThr: 0.01 CaInTau: 50 }
@@ -274,5 +274,5 @@ Learn: {
TrgAvgAct: { On: true GiBaseInit: 0 ErrLRate: 0.02 SynScaleRate: 0.005 SubMean: 0 Permute: true Pool: true TrgRange: { Min: 0.5 Max: 2
} }
RLRate: { On: true SigmoidMin: 0.05 Diff: true SpkThr: 0.1 DiffThr: 0.02 Min: 0.001 }
- NeuroMod: { DAMod: 0 Valence: 0 DAModGain: 0.5 DALRateSign: false DALRateMod: 0 AChLRateMod: 0 AChDisInhib: 0 BurstGain: 1 DipGain: 1 }
+ NeuroMod: { DAMod: NoDAMod Valence: Positive DAModGain: 0.5 DALRateSign: false DALRateMod: 0 AChLRateMod: 0 AChDisInhib: 0 BurstGain: 1 DipGain: 1 }
}
diff --git a/examples/rl/params_good/params_layers.txt b/examples/rl/params_good/params_layers.txt
index 3e6a6d1ae..9e88e65e9 100644
--- a/examples/rl/params_good/params_layers.txt
+++ b/examples/rl/params_good/params_layers.txt
@@ -2,10 +2,10 @@
Layer.Gi: 0.20 Params: Base:#Rew: 0.2
RewPred Nominal: 1.00 Params: .TDPredLayer: 1
- Layer.Gi: 0.20 Params: TD:.TDPredLayer: 0.2
+ Layer.Gi: 0.20 Params: .TDPredLayer: 0.2
RewInteg Nominal: 1.00 Params: .TDIntegLayer: 1
- Layer.Gi: 0.20 Params: .TDIntegLayer: 0.2
+ Layer.Gi: 0.20 Params: TD:.TDIntegLayer: 0.2
TD Nominal: 0.50
Layer.Gi: 1.00
diff --git a/examples/rl/rl.go b/examples/rl/rl.go
index 1e271a8ff..a7a5a9c32 100644
--- a/examples/rl/rl.go
+++ b/examples/rl/rl.go
@@ -7,26 +7,28 @@ rl_cond explores the temporal differences (TD) and Rescorla-Wagner reinforcement
*/
package main
+//go:generate goki generate -add-types
+
import (
"os"
"github.com/emer/axon/axon"
- "github.com/emer/emergent/econfig"
- "github.com/emer/emergent/egui"
- "github.com/emer/emergent/elog"
- "github.com/emer/emergent/emer"
- "github.com/emer/emergent/env"
- "github.com/emer/emergent/erand"
- "github.com/emer/emergent/estats"
- "github.com/emer/emergent/etime"
- "github.com/emer/emergent/looper"
- "github.com/emer/emergent/netview"
- "github.com/emer/emergent/params"
- "github.com/emer/emergent/prjn"
- "github.com/emer/emergent/relpos"
- "github.com/emer/empi/mpi"
- "github.com/goki/gi/gi"
- "github.com/goki/gi/gimain"
+ "github.com/emer/emergent/v2/econfig"
+ "github.com/emer/emergent/v2/egui"
+ "github.com/emer/emergent/v2/elog"
+ "github.com/emer/emergent/v2/emer"
+ "github.com/emer/emergent/v2/env"
+ "github.com/emer/emergent/v2/erand"
+ "github.com/emer/emergent/v2/estats"
+ "github.com/emer/emergent/v2/etime"
+ "github.com/emer/emergent/v2/looper"
+ "github.com/emer/emergent/v2/netview"
+ "github.com/emer/emergent/v2/params"
+ "github.com/emer/emergent/v2/prjn"
+ "github.com/emer/emergent/v2/relpos"
+ "github.com/emer/empi/v2/mpi"
+ "goki.dev/gi/v2/gi"
+ "goki.dev/gi/v2/gimain"
)
func main() {
@@ -34,7 +36,7 @@ func main() {
sim.New()
sim.ConfigAll()
if sim.Config.GUI {
- gimain.Main(sim.RunGUI)
+ gimain.Run(sim.RunGUI)
} else {
sim.RunNoGUI()
}
@@ -50,37 +52,37 @@ func main() {
type Sim struct {
// simulation configuration parameters -- set by .toml config file and / or args
- Config Config `desc:"simulation configuration parameters -- set by .toml config file and / or args"`
+ Config Config
- // [view: no-inline] the network -- click to view / edit parameters for layers, prjns, etc
- Net *axon.Network `view:"no-inline" desc:"the network -- click to view / edit parameters for layers, prjns, etc"`
+ // the network -- click to view / edit parameters for layers, prjns, etc
+ Net *axon.Network `view:"no-inline"`
- // [view: inline] all parameter management
- Params emer.NetParams `view:"inline" desc:"all parameter management"`
+ // all parameter management
+ Params emer.NetParams `view:"inline"`
- // [view: no-inline] contains looper control loops for running sim
- Loops *looper.Manager `view:"no-inline" desc:"contains looper control loops for running sim"`
+ // contains looper control loops for running sim
+ Loops *looper.Manager `view:"no-inline"`
// contains computed statistic values
- Stats estats.Stats `desc:"contains computed statistic values"`
+ Stats estats.Stats
// Contains all the logs and information about the logs.
- Logs elog.Logs `desc:"Contains all the logs and information about the logs.'"`
+ Logs elog.Logs
- // [view: no-inline] Environments
- Envs env.Envs `view:"no-inline" desc:"Environments"`
+ // Environments
+ Envs env.Envs `view:"no-inline"`
// axon timing parameters and state
- Context axon.Context `desc:"axon timing parameters and state"`
+ Context axon.Context
- // [view: inline] netview update parameters
- ViewUpdt netview.ViewUpdt `view:"inline" desc:"netview update parameters"`
+ // netview update parameters
+ ViewUpdt netview.ViewUpdt `view:"inline"`
- // [view: -] manages all the gui elements
- GUI egui.GUI `view:"-" desc:"manages all the gui elements"`
+ // manages all the gui elements
+ GUI egui.GUI `view:"-"`
- // [view: -] a list of random seeds to use for each run
- RndSeeds erand.Seeds `view:"-" desc:"a list of random seeds to use for each run"`
+ // a list of random seeds to use for each run
+ RndSeeds erand.Seeds `view:"-"`
}
// New creates new blank elements and initializes defaults
@@ -401,10 +403,10 @@ func (ss *Sim) Log(mode etime.Modes, time etime.Times) {
////////////////////////////////////////////////////////////////////////////////////////////
// Gui
-// ConfigGui configures the GoGi gui interface for this simulation,
-func (ss *Sim) ConfigGui() *gi.Window {
+// ConfigGUI configures the GoGi gui interface for this simulation.
+func (ss *Sim) ConfigGUI() {
title := "Reinforcement Learning"
- ss.GUI.MakeWindow(ss, "rl", title, `rl_cond explores the temporal differences (TD) reinforcement learning algorithm under some basic Pavlovian conditioning environments. See axon on GitHub.`)
+ ss.GUI.MakeBody(ss, "rl", title, `rl_cond explores the temporal differences (TD) reinforcement learning algorithm under some basic Pavlovian conditioning environments. See axon on GitHub.`)
ss.GUI.CycleUpdateInterval = 10
nv := ss.GUI.AddNetView("NetView")
@@ -414,36 +416,38 @@ func (ss *Sim) ConfigGui() *gi.Window {
ss.GUI.ViewUpdt = &ss.ViewUpdt
// nv.Scene().Camera.Pose.Pos.Set(0, 1, 2.75) // more "head on" than default which is more "top down"
- // nv.Scene().Camera.LookAt(mat32.Vec3{0, 0, 0}, mat32.Vec3{0, 1, 0})
+ // nv.Scene().Camera.LookAt(mat32.V3(0, 0, 0), mat32.V3(0, 1, 0))
ss.GUI.AddPlots(title, &ss.Logs)
- ss.GUI.AddToolbarItem(egui.ToolbarItem{Label: "Init", Icon: "update",
- Tooltip: "Initialize everything including network weights, and start over. Also applies current params.",
- Active: egui.ActiveStopped,
- Func: func() {
- ss.Init()
- ss.GUI.UpdateWindow()
- },
- })
+ ss.GUI.Body.AddAppBar(func(tb *gi.Toolbar) {
+ ss.GUI.AddToolbarItem(tb, egui.ToolbarItem{Label: "Init", Icon: "update",
+ Tooltip: "Initialize everything including network weights, and start over. Also applies current params.",
+ Active: egui.ActiveStopped,
+ Func: func() {
+ ss.Init()
+ ss.GUI.UpdateWindow()
+ },
+ })
- ss.GUI.AddLooperCtrl(ss.Loops, []etime.Modes{etime.Train})
+ ss.GUI.AddLooperCtrl(tb, ss.Loops, []etime.Modes{etime.Train})
- ss.GUI.AddToolbarItem(egui.ToolbarItem{Label: "Reset Trial Log", Icon: "update",
- Tooltip: "reset trial log .",
- Func: func() {
- ss.Logs.ResetLog(etime.Train, etime.Trial)
- ss.GUI.UpdatePlot(etime.Train, etime.Trial)
- },
- })
+ ss.GUI.AddToolbarItem(tb, egui.ToolbarItem{Label: "Reset Trial Log", Icon: "update",
+ Tooltip: "reset trial log .",
+ Func: func() {
+ ss.Logs.ResetLog(etime.Train, etime.Trial)
+ ss.GUI.UpdatePlot(etime.Train, etime.Trial)
+ },
+ })
- ss.GUI.AddToolbarItem(egui.ToolbarItem{Label: "README",
- Icon: "file-markdown",
- Tooltip: "Opens your browser on the README file that contains instructions for how to run this model.",
- Active: egui.ActiveAlways,
- Func: func() {
- gi.OpenURL("https://github.com/emer/axon/blob/master/examples/rl/README.md")
- },
+ ss.GUI.AddToolbarItem(tb, egui.ToolbarItem{Label: "README",
+ Icon: "file-markdown",
+ Tooltip: "Opens your browser on the README file that contains instructions for how to run this model.",
+ Active: egui.ActiveAlways,
+ Func: func() {
+ gi.OpenURL("https://github.com/emer/axon/blob/master/examples/rl/README.md")
+ },
+ })
})
ss.GUI.FinalizeGUI(false)
if ss.Config.Run.GPU {
@@ -452,13 +456,12 @@ func (ss *Sim) ConfigGui() *gi.Window {
ss.Net.GPU.Destroy()
})
}
- return ss.GUI.Win
}
func (ss *Sim) RunGUI() {
ss.Init()
- win := ss.ConfigGui()
- win.StartEventLoop()
+ ss.ConfigGUI()
+ ss.GUI.Body.NewWindow().Run().Wait()
}
func (ss *Sim) RunNoGUI() {
diff --git a/fffb/bg.go b/fffb/bg.go
index 5b5a75a34..56b74df53 100644
--- a/fffb/bg.go
+++ b/fffb/bg.go
@@ -9,16 +9,16 @@ package fffb
type Bg struct {
// enable adaptive layer inhibition gain as stored in layer GiCur value
- On bool `desc:"enable adaptive layer inhibition gain as stored in layer GiCur value"`
+ On bool
- // [def: .1] [viewif: On=true] level of inhibition as proporition of FFFB Gi value -- will need to reduce FFFB level to compensate for this additional source of inhibition
- Gi float32 `def:".1" viewif:"On=true" desc:"level of inhibition as proporition of FFFB Gi value -- will need to reduce FFFB level to compensate for this additional source of inhibition"`
+ // level of inhibition as proportion of FFFB Gi value -- will need to reduce FFFB level to compensate for this additional source of inhibition
+ Gi float32 `def:".1" viewif:"On=true"`
- // [def: 10] [viewif: On=true] time constant for integrating background inhibition (tau is roughly how long it takes for value to change significantly -- 1.4x the half-life)
- Tau float32 `def:"10" viewif:"On=true" desc:"time constant for integrating background inhibition (tau is roughly how long it takes for value to change significantly -- 1.4x the half-life)"`
+ // time constant for integrating background inhibition (tau is roughly how long it takes for value to change significantly -- 1.4x the half-life)
+ Tau float32 `def:"10" viewif:"On=true"`
- // [view: -] rate = 1 / tau
- Dt float32 `inactive:"+" view:"-" json:"-" xml:"-" desc:"rate = 1 / tau"`
+ // rate = 1 / tau
+ Dt float32 `inactive:"+" view:"-" json:"-" xml:"-"`
}
func (bg *Bg) Update() {
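Note on the Tau / Dt convention used by these params: Dt = 1/Tau, applied as exponential integration each cycle. A minimal self-contained sketch (not the library's Update code):

	package main

	import "fmt"

	// integ moves cur toward target at rate dt = 1/tau per cycle,
	// matching the "rate = 1 / tau" Dt fields above.
	func integ(cur, target, tau float32) float32 {
		return cur + (1.0/tau)*(target-cur)
	}

	func main() {
		v := float32(0)
		for cyc := 0; cyc < 10; cyc++ {
			v = integ(v, 1, 10) // Tau = 10, as in the Bg.Tau default
		}
		// after one tau's worth of cycles, v ~= 0.65: tau is roughly
		// 1.4x the half-life, as the comments above state
		fmt.Printf("v after 10 cycles: %.3f\n", v)
	}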
diff --git a/fffb/fffb.go b/fffb/fffb.go
index 79baeac38..200c8adfb 100644
--- a/fffb/fffb.go
+++ b/fffb/fffb.go
@@ -12,39 +12,41 @@ active at any time, where k is typically 10-20 percent of N.
*/
package fffb
+//go:generate goki generate -add-types
+
// Params parameterizes feedforward (FF) and feedback (FB) inhibition (FFFB)
// based on average (or maximum) Ge (FF) and activation (FB)
type Params struct {
// enable this level of inhibition
- On bool `desc:"enable this level of inhibition"`
+ On bool
- // [def: 1.1] [viewif: On] [min: 0] [0.8-1.5 typical, can go lower or higher as needed] overall inhibition gain -- this is main parameter to adjust to change overall activation levels -- it scales both the the ff and fb factors uniformly
- Gi float32 `viewif:"On" min:"0" def:"1.1" desc:"[0.8-1.5 typical, can go lower or higher as needed] overall inhibition gain -- this is main parameter to adjust to change overall activation levels -- it scales both the the ff and fb factors uniformly"`
+ // overall inhibition gain -- this is the main parameter to adjust to change overall activation levels -- it scales both the ff and fb factors uniformly
+ Gi float32 `viewif:"On" min:"0" def:"1.1"`
- // [def: 1] [viewif: On] [min: 0] overall inhibitory contribution from feedforward inhibition -- multiplies average Ge (i.e., synaptic drive into layer) -- this anticipates upcoming changes in excitation, but if set too high, it can make activity slow to emerge -- see also ff0 for a zero-point for this value
- FF float32 `viewif:"On" min:"0" def:"1" desc:"overall inhibitory contribution from feedforward inhibition -- multiplies average Ge (i.e., synaptic drive into layer) -- this anticipates upcoming changes in excitation, but if set too high, it can make activity slow to emerge -- see also ff0 for a zero-point for this value"`
+ // overall inhibitory contribution from feedforward inhibition -- multiplies average Ge (i.e., synaptic drive into layer) -- this anticipates upcoming changes in excitation, but if set too high, it can make activity slow to emerge -- see also ff0 for a zero-point for this value
+ FF float32 `viewif:"On" min:"0" def:"1"`
- // [def: 1] [viewif: On] [min: 0] overall inhibitory contribution from feedback inhibition -- multiplies average activation -- this reacts to layer activation levels and works more like a thermostat (turning up when the 'heat' in the layer is too high)
- FB float32 `viewif:"On" min:"0" def:"1" desc:"overall inhibitory contribution from feedback inhibition -- multiplies average activation -- this reacts to layer activation levels and works more like a thermostat (turning up when the 'heat' in the layer is too high)"`
+ // overall inhibitory contribution from feedback inhibition -- multiplies average activation -- this reacts to layer activation levels and works more like a thermostat (turning up when the 'heat' in the layer is too high)
+ FB float32 `viewif:"On" min:"0" def:"1"`
- // [def: 1.4,3,5] [viewif: On] [min: 0] time constant in cycles, which should be milliseconds typically (tau is roughly how long it takes for value to change significantly -- 1.4x the half-life) for integrating feedback inhibitory values -- prevents oscillations that otherwise occur -- the fast default of 1.4 should be used for most cases but sometimes a slower value (3 or higher) can be more robust, especially when inhibition is strong or inputs are more rapidly changing
- FBTau float32 `viewif:"On" min:"0" def:"1.4,3,5" desc:"time constant in cycles, which should be milliseconds typically (tau is roughly how long it takes for value to change significantly -- 1.4x the half-life) for integrating feedback inhibitory values -- prevents oscillations that otherwise occur -- the fast default of 1.4 should be used for most cases but sometimes a slower value (3 or higher) can be more robust, especially when inhibition is strong or inputs are more rapidly changing"`
+ // time constant in cycles, which should be milliseconds typically (tau is roughly how long it takes for value to change significantly -- 1.4x the half-life) for integrating feedback inhibitory values -- prevents oscillations that otherwise occur -- the fast default of 1.4 should be used for most cases but sometimes a slower value (3 or higher) can be more robust, especially when inhibition is strong or inputs are more rapidly changing
+ FBTau float32 `viewif:"On" min:"0" def:"1.4,3,5"`
- // [def: 0,0.5,1] [viewif: On] what proportion of the maximum vs. average Ge to use in the feedforward inhibition computation -- 0 = all average, 1 = all max, and values in between = proportional mix between average and max (ff_netin = avg + ff_max_vs_avg * (max - avg)) -- including more max can be beneficial especially in situations where the average can vary significantly but the activity should not -- max is more robust in many situations but less flexible and sensitive to the overall distribution -- max is better for cases more closely approximating single or strictly fixed winner-take-all behavior -- 0.5 is a good compromise in many cases and generally requires a reduction of .1 or slightly more (up to .3-.5) from the gi value for 0
- MaxVsAvg float32 `viewif:"On" def:"0,0.5,1" desc:"what proportion of the maximum vs. average Ge to use in the feedforward inhibition computation -- 0 = all average, 1 = all max, and values in between = proportional mix between average and max (ff_netin = avg + ff_max_vs_avg * (max - avg)) -- including more max can be beneficial especially in situations where the average can vary significantly but the activity should not -- max is more robust in many situations but less flexible and sensitive to the overall distribution -- max is better for cases more closely approximating single or strictly fixed winner-take-all behavior -- 0.5 is a good compromise in many cases and generally requires a reduction of .1 or slightly more (up to .3-.5) from the gi value for 0"`
+ // what proportion of the maximum vs. average Ge to use in the feedforward inhibition computation -- 0 = all average, 1 = all max, and values in between = proportional mix between average and max (ff_netin = avg + ff_max_vs_avg * (max - avg)) -- including more max can be beneficial especially in situations where the average can vary significantly but the activity should not -- max is more robust in many situations but less flexible and sensitive to the overall distribution -- max is better for cases more closely approximating single or strictly fixed winner-take-all behavior -- 0.5 is a good compromise in many cases and generally requires a reduction of .1 or slightly more (up to .3-.5) from the gi value for 0
+ MaxVsAvg float32 `viewif:"On" def:"0,0.5,1"`
- // [def: 0.1] [viewif: On] feedforward zero point for average Ge -- below this level, no FF inhibition is computed based on avg Ge, and this value is subtraced from the ff inhib contribution above this value -- the 0.1 default should be good for most cases (and helps FF_FB produce k-winner-take-all dynamics), but if average Ges are lower than typical, you may need to lower it
- FF0 float32 `viewif:"On" def:"0.1" desc:"feedforward zero point for average Ge -- below this level, no FF inhibition is computed based on avg Ge, and this value is subtraced from the ff inhib contribution above this value -- the 0.1 default should be good for most cases (and helps FF_FB produce k-winner-take-all dynamics), but if average Ges are lower than typical, you may need to lower it"`
+ // feedforward zero point for average Ge -- below this level, no FF inhibition is computed based on avg Ge, and this value is subtracted from the ff inhib contribution above this value -- the 0.1 default should be good for most cases (and helps FF_FB produce k-winner-take-all dynamics), but if average Ges are lower than typical, you may need to lower it
+ FF0 float32 `viewif:"On" def:"0.1"`
- // [def: 0,0.05] [viewif: On] extra feedforward inhibition applied when average Ge exceeds a higher threshold -- produces a nonlinear inhibition effect that is consistent with a wide range of neuroscience data, including popout and the Reynolds & Heeger, 2009 attention model
- FFEx float32 `viewif:"On" def:"0,0.05" desc:"extra feedforward inhibition applied when average Ge exceeds a higher threshold -- produces a nonlinear inhibition effect that is consistent with a wide range of neuroscience data, including popout and the Reynolds & Heeger, 2009 attention model"`
+ // extra feedforward inhibition applied when average Ge exceeds a higher threshold -- produces a nonlinear inhibition effect that is consistent with a wide range of neuroscience data, including popout and the Reynolds & Heeger, 2009 attention model
+ FFEx float32 `viewif:"On" def:"0,0.05"`
- // [def: 0.15] [viewif: On] point of average Ge at which extra inhibition based on feedforward level starts
- FFEx0 float32 `viewif:"On" def:"0.15" desc:"point of average Ge at which extra inhibition based on feedforward level starts"`
+ // point of average Ge at which extra inhibition based on feedforward level starts
+ FFEx0 float32 `viewif:"On" def:"0.15"`
- // [view: -] rate = 1 / tau
- FBDt float32 `inactive:"+" view:"-" json:"-" xml:"-" desc:"rate = 1 / tau"`
+ // rate = 1 / tau
+ FBDt float32 `inactive:"+" view:"-" json:"-" xml:"-"`
}
func (fb *Params) Update() {
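Note: the MaxVsAvg and FF0 comments above fully specify the feedforward drive. A minimal sketch of those documented relationships (illustrative only, not the library's actual inhibition code):

	// ffGi computes the documented feedforward terms:
	// ff_netin = avg + MaxVsAvg*(max - avg), then the FF0 zero point
	// is subtracted, and the result is scaled by FF and the overall Gi gain.
	func ffGi(p *Params, geAvg, geMax float32) float32 {
		ffNetin := geAvg + p.MaxVsAvg*(geMax-geAvg)
		if ffNetin < p.FF0 {
			return 0 // below the zero point: no FF inhibition
		}
		return p.Gi * p.FF * (ffNetin - p.FF0)
	}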
diff --git a/fffb/gtigen.go b/fffb/gtigen.go
new file mode 100644
index 000000000..19b5d649c
--- /dev/null
+++ b/fffb/gtigen.go
@@ -0,0 +1,77 @@
+// Code generated by "goki generate -add-types"; DO NOT EDIT.
+
+package fffb
+
+import (
+ "goki.dev/gti"
+ "goki.dev/ordmap"
+)
+
+var _ = gti.AddType(&gti.Type{
+ Name: "github.com/emer/axon/fffb.Bg",
+ ShortName: "fffb.Bg",
+ IDName: "bg",
+ Doc: "Bg has parameters for a slower, low level of background inhibition\nbased on main FFFB computed inhibition.",
+ Directives: gti.Directives{},
+ Fields: ordmap.Make([]ordmap.KeyVal[string, *gti.Field]{
+ {"On", >i.Field{Name: "On", Type: "bool", LocalType: "bool", Doc: "enable adaptive layer inhibition gain as stored in layer GiCur value", Directives: gti.Directives{}, Tag: ""}},
+ {"Gi", >i.Field{Name: "Gi", Type: "float32", LocalType: "float32", Doc: "level of inhibition as proporition of FFFB Gi value -- will need to reduce FFFB level to compensate for this additional source of inhibition", Directives: gti.Directives{}, Tag: "def:\".1\" viewif:\"On=true\""}},
+ {"Tau", >i.Field{Name: "Tau", Type: "float32", LocalType: "float32", Doc: "time constant for integrating background inhibition (tau is roughly how long it takes for value to change significantly -- 1.4x the half-life)", Directives: gti.Directives{}, Tag: "def:\"10\" viewif:\"On=true\""}},
+ {"Dt", >i.Field{Name: "Dt", Type: "float32", LocalType: "float32", Doc: "rate = 1 / tau", Directives: gti.Directives{}, Tag: "inactive:\"+\" view:\"-\" json:\"-\" xml:\"-\""}},
+ }),
+ Embeds: ordmap.Make([]ordmap.KeyVal[string, *gti.Field]{}),
+ Methods: ordmap.Make([]ordmap.KeyVal[string, *gti.Method]{}),
+})
+
+var _ = gti.AddType(&gti.Type{
+ Name: "github.com/emer/axon/fffb.Params",
+ ShortName: "fffb.Params",
+ IDName: "params",
+ Doc: "Params parameterizes feedforward (FF) and feedback (FB) inhibition (FFFB)\nbased on average (or maximum) Ge (FF) and activation (FB)",
+ Directives: gti.Directives{
+ >i.Directive{Tool: "go", Directive: "generate", Args: []string{"goki", "generate", "-add-types"}},
+ },
+ Fields: ordmap.Make([]ordmap.KeyVal[string, *gti.Field]{
+ {"On", >i.Field{Name: "On", Type: "bool", LocalType: "bool", Doc: "enable this level of inhibition", Directives: gti.Directives{}, Tag: ""}},
+ {"Gi", >i.Field{Name: "Gi", Type: "float32", LocalType: "float32", Doc: "overall inhibition gain -- this is main parameter to adjust to change overall activation levels -- it scales both the the ff and fb factors uniformly", Directives: gti.Directives{}, Tag: "viewif:\"On\" min:\"0\" def:\"1.1\""}},
+ {"FF", >i.Field{Name: "FF", Type: "float32", LocalType: "float32", Doc: "overall inhibitory contribution from feedforward inhibition -- multiplies average Ge (i.e., synaptic drive into layer) -- this anticipates upcoming changes in excitation, but if set too high, it can make activity slow to emerge -- see also ff0 for a zero-point for this value", Directives: gti.Directives{}, Tag: "viewif:\"On\" min:\"0\" def:\"1\""}},
+ {"FB", >i.Field{Name: "FB", Type: "float32", LocalType: "float32", Doc: "overall inhibitory contribution from feedback inhibition -- multiplies average activation -- this reacts to layer activation levels and works more like a thermostat (turning up when the 'heat' in the layer is too high)", Directives: gti.Directives{}, Tag: "viewif:\"On\" min:\"0\" def:\"1\""}},
+ {"FBTau", >i.Field{Name: "FBTau", Type: "float32", LocalType: "float32", Doc: "time constant in cycles, which should be milliseconds typically (tau is roughly how long it takes for value to change significantly -- 1.4x the half-life) for integrating feedback inhibitory values -- prevents oscillations that otherwise occur -- the fast default of 1.4 should be used for most cases but sometimes a slower value (3 or higher) can be more robust, especially when inhibition is strong or inputs are more rapidly changing", Directives: gti.Directives{}, Tag: "viewif:\"On\" min:\"0\" def:\"1.4,3,5\""}},
+ {"MaxVsAvg", >i.Field{Name: "MaxVsAvg", Type: "float32", LocalType: "float32", Doc: "what proportion of the maximum vs. average Ge to use in the feedforward inhibition computation -- 0 = all average, 1 = all max, and values in between = proportional mix between average and max (ff_netin = avg + ff_max_vs_avg * (max - avg)) -- including more max can be beneficial especially in situations where the average can vary significantly but the activity should not -- max is more robust in many situations but less flexible and sensitive to the overall distribution -- max is better for cases more closely approximating single or strictly fixed winner-take-all behavior -- 0.5 is a good compromise in many cases and generally requires a reduction of .1 or slightly more (up to .3-.5) from the gi value for 0", Directives: gti.Directives{}, Tag: "viewif:\"On\" def:\"0,0.5,1\""}},
+ {"FF0", >i.Field{Name: "FF0", Type: "float32", LocalType: "float32", Doc: "feedforward zero point for average Ge -- below this level, no FF inhibition is computed based on avg Ge, and this value is subtraced from the ff inhib contribution above this value -- the 0.1 default should be good for most cases (and helps FF_FB produce k-winner-take-all dynamics), but if average Ges are lower than typical, you may need to lower it", Directives: gti.Directives{}, Tag: "viewif:\"On\" def:\"0.1\""}},
+ {"FFEx", >i.Field{Name: "FFEx", Type: "float32", LocalType: "float32", Doc: "extra feedforward inhibition applied when average Ge exceeds a higher threshold -- produces a nonlinear inhibition effect that is consistent with a wide range of neuroscience data, including popout and the Reynolds & Heeger, 2009 attention model", Directives: gti.Directives{}, Tag: "viewif:\"On\" def:\"0,0.05\""}},
+ {"FFEx0", >i.Field{Name: "FFEx0", Type: "float32", LocalType: "float32", Doc: "point of average Ge at which extra inhibition based on feedforward level starts", Directives: gti.Directives{}, Tag: "viewif:\"On\" def:\"0.15\""}},
+ {"FBDt", >i.Field{Name: "FBDt", Type: "float32", LocalType: "float32", Doc: "rate = 1 / tau", Directives: gti.Directives{}, Tag: "inactive:\"+\" view:\"-\" json:\"-\" xml:\"-\""}},
+ }),
+ Embeds: ordmap.Make([]ordmap.KeyVal[string, *gti.Field]{}),
+ Methods: ordmap.Make([]ordmap.KeyVal[string, *gti.Method]{}),
+})
+
+var _ = gti.AddType(&gti.Type{
+ Name: "github.com/emer/axon/fffb.Inhib",
+ ShortName: "fffb.Inhib",
+ IDName: "inhib",
+ Doc: "Inhib contains state values for computed FFFB inhibition",
+ Directives: gti.Directives{},
+ Fields: ordmap.Make([]ordmap.KeyVal[string, *gti.Field]{
+ {"FFi", >i.Field{Name: "FFi", Type: "float32", LocalType: "float32", Doc: "computed feedforward inhibition", Directives: gti.Directives{}, Tag: ""}},
+ {"FBi", >i.Field{Name: "FBi", Type: "float32", LocalType: "float32", Doc: "computed feedback inhibition (total)", Directives: gti.Directives{}, Tag: ""}},
+ {"Gi", >i.Field{Name: "Gi", Type: "float32", LocalType: "float32", Doc: "overall value of the FFFB computed inhibition -- this is what is added into the unit Gi inhibition level (along with GiBg and any synaptic unit-driven inhibition)", Directives: gti.Directives{}, Tag: ""}},
+ {"GiOrig", >i.Field{Name: "GiOrig", Type: "float32", LocalType: "float32", Doc: "original value of the inhibition (before pool or other effects)", Directives: gti.Directives{}, Tag: ""}},
+ {"LayGi", >i.Field{Name: "LayGi", Type: "float32", LocalType: "float32", Doc: "for pools, this is the layer-level inhibition that is MAX'd with the pool-level inhibition to produce the net inhibition", Directives: gti.Directives{}, Tag: ""}},
+ {"Ge", >i.Field{Name: "Ge", Type: "goki.dev/etable/v2/minmax.AvgMax32", LocalType: "minmax.AvgMax32", Doc: "average and max Ge excitatory conductance values, which drive FF inhibition", Directives: gti.Directives{}, Tag: ""}},
+ {"Act", >i.Field{Name: "Act", Type: "goki.dev/etable/v2/minmax.AvgMax32", LocalType: "minmax.AvgMax32", Doc: "average and max Act activation values, which drive FB inhibition", Directives: gti.Directives{}, Tag: ""}},
+ }),
+ Embeds: ordmap.Make([]ordmap.KeyVal[string, *gti.Field]{}),
+ Methods: ordmap.Make([]ordmap.KeyVal[string, *gti.Method]{}),
+})
+
+var _ = gti.AddType(&gti.Type{
+ Name: "github.com/emer/axon/fffb.Inhibs",
+ ShortName: "fffb.Inhibs",
+ IDName: "inhibs",
+ Doc: "Inhibs is a slice of Inhib records",
+ Directives: gti.Directives{},
+
+ Methods: ordmap.Make([]ordmap.KeyVal[string, *gti.Method]{}),
+})
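Note: gtigen.go files like the one above are generated from the //go:generate goki generate -add-types directives added in each package; with the goki tool installed, they are presumably refreshed with the standard:

	go generate ./...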
diff --git a/fffb/inhib.go b/fffb/inhib.go
index e4c65316d..11fdb8c7b 100644
--- a/fffb/inhib.go
+++ b/fffb/inhib.go
@@ -4,31 +4,31 @@
package fffb
-import "github.com/emer/etable/minmax"
+import "goki.dev/etable/v2/minmax"
// Inhib contains state values for computed FFFB inhibition
type Inhib struct {
// computed feedforward inhibition
- FFi float32 `desc:"computed feedforward inhibition"`
+ FFi float32
// computed feedback inhibition (total)
- FBi float32 `desc:"computed feedback inhibition (total)"`
+ FBi float32
// overall value of the FFFB computed inhibition -- this is what is added into the unit Gi inhibition level (along with GiBg and any synaptic unit-driven inhibition)
- Gi float32 `desc:"overall value of the FFFB computed inhibition -- this is what is added into the unit Gi inhibition level (along with GiBg and any synaptic unit-driven inhibition)"`
+ Gi float32
// original value of the inhibition (before pool or other effects)
- GiOrig float32 `desc:"original value of the inhibition (before pool or other effects)"`
+ GiOrig float32
// for pools, this is the layer-level inhibition that is MAX'd with the pool-level inhibition to produce the net inhibition
- LayGi float32 `desc:"for pools, this is the layer-level inhibition that is MAX'd with the pool-level inhibition to produce the net inhibition"`
+ LayGi float32
// average and max Ge excitatory conductance values, which drive FF inhibition
- Ge minmax.AvgMax32 `desc:"average and max Ge excitatory conductance values, which drive FF inhibition"`
+ Ge minmax.AvgMax32
// average and max Act activation values, which drive FB inhibition
- Act minmax.AvgMax32 `desc:"average and max Act activation values, which drive FB inhibition"`
+ Act minmax.AvgMax32
}
func (fi *Inhib) Init() {
diff --git a/fsfffb/fsfffb.go b/fsfffb/fsfffb.go
index 52a9f96c1..0532eebb4 100644
--- a/fsfffb/fsfffb.go
+++ b/fsfffb/fsfffb.go
@@ -13,7 +13,9 @@ active at any time, where k is typically 10-20 percent of N.
*/
package fsfffb
-import "github.com/goki/gosl/slbool"
+//go:generate goki generate -add-types
+
+import "goki.dev/gosl/v2/slbool"
//gosl: start fsfffb
@@ -24,49 +26,49 @@ import "github.com/goki/gosl/slbool"
type GiParams struct {
// enable this level of inhibition
- On slbool.Bool `desc:"enable this level of inhibition"`
+ On slbool.Bool
- // [def: 1,1.1,0.75,0.9] [viewif: On] [min: 0] [0.8-1.5 typical, can go lower or higher as needed] overall inhibition gain -- this is main parameter to adjust to change overall activation levels -- it scales both the the FS and SS factors uniformly
- Gi float32 `viewif:"On" min:"0" def:"1,1.1,0.75,0.9" desc:"[0.8-1.5 typical, can go lower or higher as needed] overall inhibition gain -- this is main parameter to adjust to change overall activation levels -- it scales both the the FS and SS factors uniformly"`
+ // overall inhibition gain -- this is the main parameter to adjust to change overall activation levels -- it scales both the FS and SS factors uniformly
+ Gi float32 `viewif:"On" min:"0" def:"1,1.1,0.75,0.9"`
- // [def: 0.5,1,4] [viewif: On] [min: 0] amount of FB spikes included in FF for driving FS -- for small networks, 0.5 or 1 works best; larger networks and more demanding inhibition requires higher levels.
- FB float32 `viewif:"On" min:"0" def:"0.5,1,4" desc:"amount of FB spikes included in FF for driving FS -- for small networks, 0.5 or 1 works best; larger networks and more demanding inhibition requires higher levels."`
+ // amount of FB spikes included in FF for driving FS -- for small networks, 0.5 or 1 works best; larger networks and more demanding inhibition require higher levels.
+ FB float32 `viewif:"On" min:"0" def:"0.5,1,4"`
- // [def: 6] [viewif: On] [min: 0] fast spiking (PV+) intgration time constant in cycles (msec) -- tau is roughly how long it takes for value to change significantly -- 1.4x the half-life.
- FSTau float32 `viewif:"On" min:"0" def:"6" desc:"fast spiking (PV+) intgration time constant in cycles (msec) -- tau is roughly how long it takes for value to change significantly -- 1.4x the half-life."`
+ // fast spiking (PV+) integration time constant in cycles (msec) -- tau is roughly how long it takes for value to change significantly -- 1.4x the half-life.
+ FSTau float32 `viewif:"On" min:"0" def:"6"`
- // [def: 30] [viewif: On] [min: 0] multiplier on SS slow-spiking (SST+) in contributing to the overall Gi inhibition -- FS contributes at a factor of 1
- SS float32 `viewif:"On" min:"0" def:"30" desc:"multiplier on SS slow-spiking (SST+) in contributing to the overall Gi inhibition -- FS contributes at a factor of 1"`
+ // multiplier on SS slow-spiking (SST+) in contributing to the overall Gi inhibition -- FS contributes at a factor of 1
+ SS float32 `viewif:"On" min:"0" def:"30"`
- // [def: 20] [viewif: On] [min: 0] slow-spiking (SST+) facilitation decay time constant in cycles (msec) -- facilication factor SSf determines impact of FB spikes as a function of spike input-- tau is roughly how long it takes for value to change significantly -- 1.4x the half-life.
- SSfTau float32 `viewif:"On" min:"0" def:"20" desc:"slow-spiking (SST+) facilitation decay time constant in cycles (msec) -- facilication factor SSf determines impact of FB spikes as a function of spike input-- tau is roughly how long it takes for value to change significantly -- 1.4x the half-life."`
+ // slow-spiking (SST+) facilitation decay time constant in cycles (msec) -- facilitation factor SSf determines impact of FB spikes as a function of spike input -- tau is roughly how long it takes for value to change significantly -- 1.4x the half-life.
+ SSfTau float32 `viewif:"On" min:"0" def:"20"`
- // [def: 50] [viewif: On] [min: 0] slow-spiking (SST+) intgration time constant in cycles (msec) cascaded on top of FSTau -- tau is roughly how long it takes for value to change significantly -- 1.4x the half-life.
- SSiTau float32 `viewif:"On" min:"0" def:"50" desc:"slow-spiking (SST+) intgration time constant in cycles (msec) cascaded on top of FSTau -- tau is roughly how long it takes for value to change significantly -- 1.4x the half-life."`
+ // slow-spiking (SST+) integration time constant in cycles (msec) cascaded on top of FSTau -- tau is roughly how long it takes for value to change significantly -- 1.4x the half-life.
+ SSiTau float32 `viewif:"On" min:"0" def:"50"`
- // [def: 0.1] [viewif: On] fast spiking zero point -- below this level, no FS inhibition is computed, and this value is subtracted from the FSi
- FS0 float32 `viewif:"On" def:"0.1" desc:"fast spiking zero point -- below this level, no FS inhibition is computed, and this value is subtracted from the FSi"`
+ // fast spiking zero point -- below this level, no FS inhibition is computed, and this value is subtracted from the FSi
+ FS0 float32 `viewif:"On" def:"0.1"`
- // [def: 50] [viewif: On] time constant for updating a running average of the feedforward inhibition over a longer time scale, for computing FFPrv
- FFAvgTau float32 `viewif:"On" def:"50" desc:"time constant for updating a running average of the feedforward inhibition over a longer time scale, for computing FFPrv"`
+ // time constant for updating a running average of the feedforward inhibition over a longer time scale, for computing FFPrv
+ FFAvgTau float32 `viewif:"On" def:"50"`
- // [def: 0] [viewif: On] proportion of previous average feed-forward inhibition (FFAvgPrv) to add, resulting in an accentuated temporal-derivative dynamic where neurons respond most strongly to increases in excitation that exceeds inhibition from last time.
- FFPrv float32 `viewif:"On" def:"0" desc:"proportion of previous average feed-forward inhibition (FFAvgPrv) to add, resulting in an accentuated temporal-derivative dynamic where neurons respond most strongly to increases in excitation that exceeds inhibition from last time."`
+ // proportion of previous average feed-forward inhibition (FFAvgPrv) to add, resulting in an accentuated temporal-derivative dynamic where neurons respond most strongly to increases in excitation that exceeds inhibition from last time.
+ FFPrv float32 `viewif:"On" def:"0"`
- // [def: 0.05] [viewif: On] minimum GeExt value required to drive external clamping dynamics (if clamp is set), where only GeExt drives inhibition. If GeExt is below this value, then the usual FS-FFFB drivers are used.
- ClampExtMin float32 `viewif:"On" def:"0.05" desc:"minimum GeExt value required to drive external clamping dynamics (if clamp is set), where only GeExt drives inhibition. If GeExt is below this value, then the usual FS-FFFB drivers are used."`
+ // minimum GeExt value required to drive external clamping dynamics (if clamp is set), where only GeExt drives inhibition. If GeExt is below this value, then the usual FS-FFFB drivers are used.
+ ClampExtMin float32 `viewif:"On" def:"0.05"`
- // [view: -] rate = 1 / tau
- FSDt float32 `inactive:"+" view:"-" json:"-" xml:"-" desc:"rate = 1 / tau"`
+ // rate = 1 / tau
+ FSDt float32 `inactive:"+" view:"-" json:"-" xml:"-"`
- // [view: -] rate = 1 / tau
- SSfDt float32 `inactive:"+" view:"-" json:"-" xml:"-" desc:"rate = 1 / tau"`
+ // rate = 1 / tau
+ SSfDt float32 `inactive:"+" view:"-" json:"-" xml:"-"`
- // [view: -] rate = 1 / tau
- SSiDt float32 `inactive:"+" view:"-" json:"-" xml:"-" desc:"rate = 1 / tau"`
+ // rate = 1 / tau
+ SSiDt float32 `inactive:"+" view:"-" json:"-" xml:"-"`
- // [view: -] rate = 1 / tau
- FFAvgDt float32 `inactive:"+" view:"-" json:"-" xml:"-" desc:"rate = 1 / tau"`
+ // rate = 1 / tau
+ FFAvgDt float32 `inactive:"+" view:"-" json:"-" xml:"-"`
pad float32
}
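Note: a rough sketch of the fast-spiking (PV+) step these params describe -- FB spikes folded into the FF drive, integrated with FSDt = 1/FSTau, and the FS0 zero point subtracted (illustrative only, not the actual fsfffb code):

	// fsStep integrates normalized FF/FB spike drive into FSi and
	// returns the updated FSi and its conductance contribution.
	func fsStep(p *GiParams, fsi, ffs, fbs float32) (float32, float32) {
		drive := ffs + p.FB*fbs       // "amount of FB spikes included in FF"
		fsi += p.FSDt * (drive - fsi) // PV+ integration, Tau = FSTau
		fsGi := fsi - p.FS0           // FS0 zero point, subtracted
		if fsGi < 0 {
			fsGi = 0
		}
		return fsi, p.Gi * fsGi
	}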
diff --git a/fsfffb/gtigen.go b/fsfffb/gtigen.go
new file mode 100644
index 000000000..cb9d8a4d7
--- /dev/null
+++ b/fsfffb/gtigen.go
@@ -0,0 +1,86 @@
+// Code generated by "goki generate -add-types"; DO NOT EDIT.
+
+package fsfffb
+
+import (
+ "goki.dev/gti"
+ "goki.dev/ordmap"
+)
+
+var _ = gti.AddType(&gti.Type{
+ Name: "github.com/emer/axon/fsfffb.GiParams",
+ ShortName: "fsfffb.GiParams",
+ IDName: "gi-params",
+ Doc: "GiParams parameterizes feedforward (FF) and feedback (FB) inhibition (FFFB)\nbased on incoming spikes (FF) and outgoing spikes (FB)\nacross Fast (PV+) and Slow (SST+) timescales.\nFF -> PV -> FS fast spikes, FB -> SST -> SS slow spikes (slow to get going)",
+ Directives: gti.Directives{
+ >i.Directive{Tool: "gosl", Directive: "start", Args: []string{"fsfffb"}},
+ },
+ Fields: ordmap.Make([]ordmap.KeyVal[string, *gti.Field]{
+ {"On", >i.Field{Name: "On", Type: "goki.dev/gosl/v2/slbool.Bool", LocalType: "slbool.Bool", Doc: "enable this level of inhibition", Directives: gti.Directives{}, Tag: ""}},
+ {"Gi", >i.Field{Name: "Gi", Type: "float32", LocalType: "float32", Doc: "overall inhibition gain -- this is main parameter to adjust to change overall activation levels -- it scales both the the FS and SS factors uniformly", Directives: gti.Directives{}, Tag: "viewif:\"On\" min:\"0\" def:\"1,1.1,0.75,0.9\""}},
+ {"FB", >i.Field{Name: "FB", Type: "float32", LocalType: "float32", Doc: "amount of FB spikes included in FF for driving FS -- for small networks, 0.5 or 1 works best; larger networks and more demanding inhibition requires higher levels.", Directives: gti.Directives{}, Tag: "viewif:\"On\" min:\"0\" def:\"0.5,1,4\""}},
+ {"FSTau", >i.Field{Name: "FSTau", Type: "float32", LocalType: "float32", Doc: "fast spiking (PV+) intgration time constant in cycles (msec) -- tau is roughly how long it takes for value to change significantly -- 1.4x the half-life.", Directives: gti.Directives{}, Tag: "viewif:\"On\" min:\"0\" def:\"6\""}},
+ {"SS", >i.Field{Name: "SS", Type: "float32", LocalType: "float32", Doc: "multiplier on SS slow-spiking (SST+) in contributing to the overall Gi inhibition -- FS contributes at a factor of 1", Directives: gti.Directives{}, Tag: "viewif:\"On\" min:\"0\" def:\"30\""}},
+ {"SSfTau", >i.Field{Name: "SSfTau", Type: "float32", LocalType: "float32", Doc: "slow-spiking (SST+) facilitation decay time constant in cycles (msec) -- facilication factor SSf determines impact of FB spikes as a function of spike input-- tau is roughly how long it takes for value to change significantly -- 1.4x the half-life.", Directives: gti.Directives{}, Tag: "viewif:\"On\" min:\"0\" def:\"20\""}},
+ {"SSiTau", >i.Field{Name: "SSiTau", Type: "float32", LocalType: "float32", Doc: "slow-spiking (SST+) intgration time constant in cycles (msec) cascaded on top of FSTau -- tau is roughly how long it takes for value to change significantly -- 1.4x the half-life.", Directives: gti.Directives{}, Tag: "viewif:\"On\" min:\"0\" def:\"50\""}},
+ {"FS0", >i.Field{Name: "FS0", Type: "float32", LocalType: "float32", Doc: "fast spiking zero point -- below this level, no FS inhibition is computed, and this value is subtracted from the FSi", Directives: gti.Directives{}, Tag: "viewif:\"On\" def:\"0.1\""}},
+ {"FFAvgTau", >i.Field{Name: "FFAvgTau", Type: "float32", LocalType: "float32", Doc: "time constant for updating a running average of the feedforward inhibition over a longer time scale, for computing FFPrv", Directives: gti.Directives{}, Tag: "viewif:\"On\" def:\"50\""}},
+ {"FFPrv", >i.Field{Name: "FFPrv", Type: "float32", LocalType: "float32", Doc: "proportion of previous average feed-forward inhibition (FFAvgPrv) to add, resulting in an accentuated temporal-derivative dynamic where neurons respond most strongly to increases in excitation that exceeds inhibition from last time.", Directives: gti.Directives{}, Tag: "viewif:\"On\" def:\"0\""}},
+ {"ClampExtMin", >i.Field{Name: "ClampExtMin", Type: "float32", LocalType: "float32", Doc: "minimum GeExt value required to drive external clamping dynamics (if clamp is set), where only GeExt drives inhibition. If GeExt is below this value, then the usual FS-FFFB drivers are used.", Directives: gti.Directives{}, Tag: "viewif:\"On\" def:\"0.05\""}},
+ {"FSDt", >i.Field{Name: "FSDt", Type: "float32", LocalType: "float32", Doc: "rate = 1 / tau", Directives: gti.Directives{}, Tag: "inactive:\"+\" view:\"-\" json:\"-\" xml:\"-\""}},
+ {"SSfDt", >i.Field{Name: "SSfDt", Type: "float32", LocalType: "float32", Doc: "rate = 1 / tau", Directives: gti.Directives{}, Tag: "inactive:\"+\" view:\"-\" json:\"-\" xml:\"-\""}},
+ {"SSiDt", >i.Field{Name: "SSiDt", Type: "float32", LocalType: "float32", Doc: "rate = 1 / tau", Directives: gti.Directives{}, Tag: "inactive:\"+\" view:\"-\" json:\"-\" xml:\"-\""}},
+ {"FFAvgDt", >i.Field{Name: "FFAvgDt", Type: "float32", LocalType: "float32", Doc: "rate = 1 / tau", Directives: gti.Directives{}, Tag: "inactive:\"+\" view:\"-\" json:\"-\" xml:\"-\""}},
+ {"pad", >i.Field{Name: "pad", Type: "float32", LocalType: "float32", Doc: "", Directives: gti.Directives{}, Tag: ""}},
+ }),
+ Embeds: ordmap.Make([]ordmap.KeyVal[string, *gti.Field]{}),
+ Methods: ordmap.Make([]ordmap.KeyVal[string, *gti.Method]{}),
+})
+
+var _ = gti.AddType(&gti.Type{
+ Name: "github.com/emer/axon/fsfffb.Inhib",
+ ShortName: "fsfffb.Inhib",
+ IDName: "inhib",
+ Doc: "Inhib contains state values for computed FFFB inhibition",
+ Directives: gti.Directives{
+ &gti.Directive{Tool: "gosl", Directive: "start", Args: []string{"fsfffb"}},
+ },
+ Fields: ordmap.Make([]ordmap.KeyVal[string, *gti.Field]{
+ {"FFsRaw", >i.Field{Name: "FFsRaw", Type: "float32", LocalType: "float32", Doc: "all feedforward incoming spikes into neurons in this pool -- raw aggregation", Directives: gti.Directives{}, Tag: ""}},
+ {"FBsRaw", >i.Field{Name: "FBsRaw", Type: "float32", LocalType: "float32", Doc: "all feedback outgoing spikes generated from neurons in this pool -- raw aggregation", Directives: gti.Directives{}, Tag: ""}},
+ {"GeExtRaw", >i.Field{Name: "GeExtRaw", Type: "float32", LocalType: "float32", Doc: "all extra GeExt conductances added to neurons", Directives: gti.Directives{}, Tag: ""}},
+ {"FFs", >i.Field{Name: "FFs", Type: "float32", LocalType: "float32", Doc: "all feedforward incoming spikes into neurons in this pool, normalized by pool size", Directives: gti.Directives{}, Tag: ""}},
+ {"FBs", >i.Field{Name: "FBs", Type: "float32", LocalType: "float32", Doc: "all feedback outgoing spikes generated from neurons in this pool, normalized by pool size", Directives: gti.Directives{}, Tag: ""}},
+ {"GeExts", >i.Field{Name: "GeExts", Type: "float32", LocalType: "float32", Doc: "all extra GeExt conductances added to neurons, normalized by pool size", Directives: gti.Directives{}, Tag: ""}},
+ {"Clamped", >i.Field{Name: "Clamped", Type: "goki.dev/gosl/v2/slbool.Bool", LocalType: "slbool.Bool", Doc: "if true, this layer is hard-clamped and should use GeExts exclusively for PV", Directives: gti.Directives{}, Tag: ""}},
+ {"FSi", >i.Field{Name: "FSi", Type: "float32", LocalType: "float32", Doc: "fast spiking PV+ fast integration of FFs feedforward spikes", Directives: gti.Directives{}, Tag: ""}},
+ {"SSi", >i.Field{Name: "SSi", Type: "float32", LocalType: "float32", Doc: "slow spiking SST+ integration of FBs feedback spikes", Directives: gti.Directives{}, Tag: ""}},
+ {"SSf", >i.Field{Name: "SSf", Type: "float32", LocalType: "float32", Doc: "slow spiking facilitation factor, representing facilitating effects of recent activity", Directives: gti.Directives{}, Tag: ""}},
+ {"FSGi", >i.Field{Name: "FSGi", Type: "float32", LocalType: "float32", Doc: "overall fast-spiking inhibitory conductance", Directives: gti.Directives{}, Tag: ""}},
+ {"SSGi", >i.Field{Name: "SSGi", Type: "float32", LocalType: "float32", Doc: "overall slow-spiking inhibitory conductance", Directives: gti.Directives{}, Tag: ""}},
+ {"Gi", >i.Field{Name: "Gi", Type: "float32", LocalType: "float32", Doc: "overall inhibitory conductance = FSGi + SSGi", Directives: gti.Directives{}, Tag: ""}},
+ {"GiOrig", >i.Field{Name: "GiOrig", Type: "float32", LocalType: "float32", Doc: "original value of the inhibition (before pool or other effects)", Directives: gti.Directives{}, Tag: ""}},
+ {"LayGi", >i.Field{Name: "LayGi", Type: "float32", LocalType: "float32", Doc: "for pools, this is the layer-level inhibition that is MAX'd with the pool-level inhibition to produce the net inhibition", Directives: gti.Directives{}, Tag: ""}},
+ {"FFAvg", >i.Field{Name: "FFAvg", Type: "float32", LocalType: "float32", Doc: "longer time scale running average FF drive -- used for FFAvgPrv", Directives: gti.Directives{}, Tag: ""}},
+ {"FFAvgPrv", >i.Field{Name: "FFAvgPrv", Type: "float32", LocalType: "float32", Doc: "previous theta cycle FFAvg value -- for FFPrv factor -- updated in Decay function that is called at start of new ThetaCycle", Directives: gti.Directives{}, Tag: ""}},
+ {"FFsRawInt", >i.Field{Name: "FFsRawInt", Type: "int32", LocalType: "int32", Doc: "int32 atomic add compatible integration of FFsRaw", Directives: gti.Directives{}, Tag: ""}},
+ {"FBsRawInt", >i.Field{Name: "FBsRawInt", Type: "int32", LocalType: "int32", Doc: "int32 atomic add compatible integration of FBsRaw", Directives: gti.Directives{}, Tag: ""}},
+ {"GeExtRawInt", >i.Field{Name: "GeExtRawInt", Type: "int32", LocalType: "int32", Doc: "int32 atomic add compatible integration of GeExtRaw", Directives: gti.Directives{}, Tag: ""}},
+ }),
+ Embeds: ordmap.Make([]ordmap.KeyVal[string, *gti.Field]{}),
+ Methods: ordmap.Make([]ordmap.KeyVal[string, *gti.Method]{}),
+})
+
+var _ = gti.AddType(&gti.Type{
+ Name: "github.com/emer/axon/fsfffb.Inhibs",
+ ShortName: "fsfffb.Inhibs",
+ IDName: "inhibs",
+ Doc: "Inhibs is a slice of Inhib records",
+ Directives: gti.Directives{
+ &gti.Directive{Tool: "gosl", Directive: "end", Args: []string{"fsfffb"}},
+ &gti.Directive{Tool: "gosl", Directive: "hlsl", Args: []string{"fsfffb"}},
+ &gti.Directive{Tool: "gosl", Directive: "end", Args: []string{"fsfffb"}},
+ },
+
+ Methods: ordmap.Make([]ordmap.KeyVal[string, *gti.Method]{}),
+})
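
Note on the registered fields above: the inactive FSDt, SSfDt, SSiDt, and FFAvgDt fields are each documented as "rate = 1 / tau". A minimal sketch of how such rates are typically precomputed from their time constants -- the GiParams receiver name and Update method are assumptions for illustration, not taken from this diff:

```Go
// Update precomputes integration rate constants from their
// time constants, per the "rate = 1 / tau" field docs above.
// GiParams here stands in for the params type registered above.
func (fb *GiParams) Update() {
	fb.FSDt = 1 / fb.FSTau
	fb.SSfDt = 1 / fb.SSfTau
	fb.SSiDt = 1 / fb.SSiTau
	fb.FFAvgDt = 1 / fb.FFAvgTau
}
```
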
diff --git a/fsfffb/inhib.go b/fsfffb/inhib.go
index e7e689744..f52c05379 100644
--- a/fsfffb/inhib.go
+++ b/fsfffb/inhib.go
@@ -7,8 +7,8 @@ package fsfffb
import (
"log"
- "github.com/goki/gosl/slbool"
- "github.com/goki/mat32"
+ "goki.dev/gosl/v2/slbool"
+ "goki.dev/mat32/v2"
)
//gosl: start fsfffb
@@ -17,64 +17,64 @@ import (
type Inhib struct {
// all feedforward incoming spikes into neurons in this pool -- raw aggregation
- FFsRaw float32 `desc:"all feedforward incoming spikes into neurons in this pool -- raw aggregation"`
+ FFsRaw float32
// all feedback outgoing spikes generated from neurons in this pool -- raw aggregation
- FBsRaw float32 `desc:"all feedback outgoing spikes generated from neurons in this pool -- raw aggregation"`
+ FBsRaw float32
// all extra GeExt conductances added to neurons
- GeExtRaw float32 `desc:"all extra GeExt conductances added to neurons"`
+ GeExtRaw float32
// all feedforward incoming spikes into neurons in this pool, normalized by pool size
- FFs float32 `desc:"all feedforward incoming spikes into neurons in this pool, normalized by pool size"`
+ FFs float32
// all feedback outgoing spikes generated from neurons in this pool, normalized by pool size
- FBs float32 `desc:"all feedback outgoing spikes generated from neurons in this pool, normalized by pool size"`
+ FBs float32
// all extra GeExt conductances added to neurons, normalized by pool size
- GeExts float32 `desc:"all extra GeExt conductances added to neurons, normalized by pool size"`
+ GeExts float32
// if true, this layer is hard-clamped and should use GeExts exclusively for PV
- Clamped slbool.Bool `desc:"if true, this layer is hard-clamped and should use GeExts exclusively for PV"`
+ Clamped slbool.Bool
// fast spiking PV+ fast integration of FFs feedforward spikes
- FSi float32 `desc:"fast spiking PV+ fast integration of FFs feedforward spikes"`
+ FSi float32
// slow spiking SST+ integration of FBs feedback spikes
- SSi float32 `desc:"slow spiking SST+ integration of FBs feedback spikes"`
+ SSi float32
// slow spiking facilitation factor, representing facilitating effects of recent activity
- SSf float32 `desc:"slow spiking facilitation factor, representing facilitating effects of recent activity"`
+ SSf float32
// overall fast-spiking inhibitory conductance
- FSGi float32 `desc:"overall fast-spiking inhibitory conductance"`
+ FSGi float32
// overall slow-spiking inhibitory conductance
- SSGi float32 `desc:"overall slow-spiking inhibitory conductance"`
+ SSGi float32
// overall inhibitory conductance = FSGi + SSGi
- Gi float32 `desc:"overall inhibitory conductance = FSGi + SSGi"`
+ Gi float32
// original value of the inhibition (before pool or other effects)
- GiOrig float32 `desc:"original value of the inhibition (before pool or other effects)"`
+ GiOrig float32
// for pools, this is the layer-level inhibition that is MAX'd with the pool-level inhibition to produce the net inhibition
- LayGi float32 `desc:"for pools, this is the layer-level inhibition that is MAX'd with the pool-level inhibition to produce the net inhibition"`
+ LayGi float32
// longer time scale running average FF drive -- used for FFAvgPrv
- FFAvg float32 `desc:"longer time scale running average FF drive -- used for FFAvgPrv"`
+ FFAvg float32
// previous theta cycle FFAvg value -- for FFPrv factor -- updated in Decay function that is called at start of new ThetaCycle
- FFAvgPrv float32 `desc:"previous theta cycle FFAvg value -- for FFPrv factor -- updated in Decay function that is called at start of new ThetaCycle"`
+ FFAvgPrv float32
// int32 atomic add compatible integration of FFsRaw
- FFsRawInt int32 `desc:"int32 atomic add compatible integration of FFsRaw"`
+ FFsRawInt int32
// int32 atomic add compatible integration of FBsRaw
- FBsRawInt int32 `desc:"int32 atomic add compatible integration of FBsRaw"`
+ FBsRawInt int32
// int32 atomic add compatible integration of GeExtRaw
- GeExtRawInt int32 `desc:"int32 atomic add compatible integration of GeExtRaw"`
+ GeExtRawInt int32
}
func (fi *Inhib) Init() {
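
Note: the FFsRawInt, FBsRawInt, and GeExtRawInt fields exist so that spike aggregation can use int32 atomic adds, which (unlike float32 atomics) are portable to the GPU shader code generated via the gosl directives above. A hedged sketch of the usual fixed-point pattern -- the scale constant and helper names here are assumptions for illustration, not this package's actual API:

```Go
import "sync/atomic"

// rawScale is a hypothetical fixed-point scale for converting
// float32 spike aggregates to int32 for atomic accumulation.
const rawScale = 1 << 12

// AtomicAddFFs accumulates a feedforward spike value atomically.
func AtomicAddFFs(fi *Inhib, ffs float32) {
	atomic.AddInt32(&fi.FFsRawInt, int32(ffs*rawScale))
}

// IntToRaw converts the int32 accumulator back to float32
// and resets it for the next aggregation pass.
func (fi *Inhib) IntToRaw() {
	fi.FFsRaw = float32(fi.FFsRawInt) / rawScale
	fi.FFsRawInt = 0
}
```
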
diff --git a/go.mod b/go.mod
index b085674d5..834ecf541 100644
--- a/go.mod
+++ b/go.mod
@@ -1,31 +1,44 @@
module github.com/emer/axon
-go 1.20
+go 1.21
require (
- github.com/alecthomas/assert/v2 v2.3.0
+ github.com/alecthomas/assert/v2 v2.2.1
github.com/anthonynsimon/bild v0.13.0
- github.com/c2h5oh/datasize v0.0.0-20220606134207-859f65c6625b
- github.com/emer/emergent v1.4.31
- github.com/emer/empi v1.0.22
- github.com/emer/etable v1.1.24
- github.com/emer/eve v0.9.5
- github.com/emer/leabra v1.2.7
- github.com/emer/vision v1.1.18
- github.com/goki/gi v1.3.25
- github.com/goki/gosl v1.0.17
+ github.com/c2h5oh/datasize v0.0.0-20231215233829-aa82cc1e6500
+ github.com/emer/emergent v1.3.49
+ github.com/emer/emergent/v2 v2.0.0-dev0.0.4
+ github.com/emer/empi/v2 v2.0.0-dev0.0.2
+ github.com/emer/etable v1.1.19
+ github.com/emer/eve/v2 v2.0.0-dev0.0.2
+ github.com/emer/vision/v2 v2.0.0-dev0.0.4
github.com/goki/ki v1.1.17
- github.com/goki/kigen v1.0.2
github.com/goki/mat32 v1.0.18
github.com/goki/vgpu v1.0.34
github.com/goki/vulkan v1.0.7
github.com/stretchr/testify v1.8.4
gitlab.com/gomidi/midi/v2 v2.0.30
- golang.org/x/exp v0.0.0-20231006140011-7918f672742d
+ goki.dev/colors v0.8.43
+ goki.dev/enums v0.9.55
+ goki.dev/etable/v2 v2.0.0-dev0.0.11
+ goki.dev/gi/v2 v2.0.0-dev0.0.27
+ goki.dev/girl v0.0.35
+ goki.dev/glop v0.1.9
+ goki.dev/goosi v0.0.24
+ goki.dev/gosl/v2 v2.0.0-dev0.0.5
+ goki.dev/grr v0.0.11
+ goki.dev/gti v0.1.31
+ goki.dev/icons v0.0.16
+ goki.dev/laser v0.1.33
+ goki.dev/mat32/v2 v2.0.0-dev0.0.27
+ goki.dev/ordmap v0.5.9
+ goki.dev/vgpu/v2 v2.0.0-dev0.0.29
+ goki.dev/xyz v0.5.19
+ golang.org/x/exp v0.0.0-20231219180239-dc181d75b848
)
require (
- git.sr.ht/~sbinet/gg v0.3.1 // indirect
+ git.sr.ht/~sbinet/gg v0.5.0 // indirect
github.com/BurntSushi/freetype-go v0.0.0-20160129220410-b763ddbfe298 // indirect
github.com/BurntSushi/graphics-go v0.0.0-20160129215708-b43f31a4a966 // indirect
github.com/BurntSushi/toml v1.3.2 // indirect
@@ -34,44 +47,68 @@ require (
github.com/Masterminds/vcs v1.13.3 // indirect
github.com/ajstarks/svgo v0.0.0-20211024235047-1546f124cd8b // indirect
github.com/akutz/sortfold v0.2.1 // indirect
- github.com/alecthomas/chroma/v2 v2.9.1 // indirect
+ github.com/alecthomas/chroma/v2 v2.12.0 // indirect
github.com/alecthomas/repr v0.2.0 // indirect
- github.com/antonmedv/expr v1.15.3 // indirect
+ github.com/antonmedv/expr v1.15.5 // indirect
github.com/apache/arrow/go/arrow v0.0.0-20211112161151-bc219186db40 // indirect
+ github.com/aymanbagabas/go-osc52/v2 v2.0.1 // indirect
github.com/aymerick/douceur v0.2.0 // indirect
+ github.com/campoy/embedmd v1.0.0 // indirect
+ github.com/chewxy/math32 v1.10.1 // indirect
github.com/davecgh/go-spew v1.1.1 // indirect
github.com/dlclark/regexp2 v1.10.0 // indirect
+ github.com/emer/empi v1.0.17 // indirect
+ github.com/expr-lang/expr v1.15.7 // indirect
github.com/fatih/camelcase v1.0.0 // indirect
github.com/fsnotify/fsnotify v1.7.0 // indirect
github.com/gabriel-vasile/mimetype v1.4.3 // indirect
- github.com/go-fonts/liberation v0.3.0 // indirect
- github.com/go-gl/glfw/v3.3/glfw v0.0.0-20221017161538-93cebf72946b // indirect
+ github.com/go-fonts/liberation v0.3.2 // indirect
+ github.com/go-gl/glfw/v3.3/glfw v0.0.0-20231124074035-2de0cf0c80af // indirect
github.com/go-gl/mathgl v1.1.0 // indirect
- github.com/go-latex/latex v0.0.0-20230307184459-12ec69307ad9 // indirect
- github.com/go-pdf/fpdf v0.7.0 // indirect
+ github.com/go-latex/latex v0.0.0-20231108140139-5c1ce85aa4ea // indirect
+ github.com/go-pdf/fpdf v0.9.0 // indirect
github.com/goki/freetype v1.0.1 // indirect
+ github.com/goki/gi v1.3.25 // indirect
github.com/goki/go-difflib v1.2.1 // indirect
+ github.com/goki/gosl v1.0.17 // indirect
+ github.com/goki/kigen v1.0.2 // indirect
github.com/goki/pi v1.0.28 // indirect
github.com/goki/prof v1.0.1 // indirect
github.com/goki/vci v1.0.2 // indirect
github.com/golang/freetype v0.0.0-20170609003504-e2365dfdc4a0 // indirect
- github.com/gorilla/css v1.0.0 // indirect
+ github.com/gorilla/css v1.0.1 // indirect
github.com/h2non/filetype v1.1.3 // indirect
github.com/hexops/gotextdiff v1.0.3 // indirect
github.com/iancoleman/strcase v0.3.0 // indirect
github.com/jinzhu/copier v0.4.0 // indirect
+ github.com/lucasb-eyer/go-colorful v1.2.0 // indirect
+ github.com/mattn/go-isatty v0.0.20 // indirect
+ github.com/mattn/go-runewidth v0.0.15 // indirect
github.com/mitchellh/go-homedir v1.1.0 // indirect
+ github.com/muesli/termenv v0.15.2 // indirect
github.com/pmezard/go-difflib v1.0.0 // indirect
+ github.com/rivo/uniseg v0.4.4 // indirect
github.com/srwiley/rasterx v0.0.0-20220730225603-2ab79fcdd4ef // indirect
github.com/srwiley/scanx v0.0.0-20190309010443-e94503791388 // indirect
- golang.org/x/image v0.13.0 // indirect
- golang.org/x/mod v0.13.0 // indirect
- golang.org/x/net v0.17.0 // indirect
- golang.org/x/sys v0.13.0 // indirect
- golang.org/x/text v0.13.0 // indirect
- golang.org/x/tools v0.14.0 // indirect
- golang.org/x/xerrors v0.0.0-20220907171357-04be3eba64a2 // indirect
- gonum.org/v1/gonum v0.12.0 // indirect
- gonum.org/v1/plot v0.12.0 // indirect
+ goki.dev/cam v0.9.48 // indirect
+ goki.dev/cursors v0.0.31 // indirect
+ goki.dev/fi v0.1.18 // indirect
+ goki.dev/grog v0.0.26 // indirect
+ goki.dev/grows v0.3.30 // indirect
+ goki.dev/ki/v2 v2.0.0-dev0.0.32 // indirect
+ goki.dev/pi/v2 v2.0.0-dev0.0.25 // indirect
+ goki.dev/prof/v2 v2.0.0-dev0.0.4 // indirect
+ goki.dev/spell v0.1.14 // indirect
+ goki.dev/svg v0.1.27 // indirect
+ goki.dev/vci/v2 v2.0.0-dev0.0.31 // indirect
+ golang.org/x/image v0.14.0 // indirect
+ golang.org/x/mod v0.14.0 // indirect
+ golang.org/x/net v0.19.0 // indirect
+ golang.org/x/sys v0.15.0 // indirect
+ golang.org/x/text v0.14.0 // indirect
+ golang.org/x/tools v0.16.1 // indirect
+ golang.org/x/xerrors v0.0.0-20231012003039-104605ab7028 // indirect
+ gonum.org/v1/gonum v0.14.0 // indirect
+ gonum.org/v1/plot v0.14.0 // indirect
gopkg.in/yaml.v3 v3.0.1 // indirect
)
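
Note: the module changes above migrate most goki dependencies from github.com/goki/* v1 paths to goki.dev/*/v2 module paths, with some v1 packages (e.g. github.com/goki/gi, github.com/goki/gosl) retained only as indirect dependencies during the transition. In client code this corresponds to import rewrites of the form:

```Go
import (
	// before: "github.com/goki/mat32"
	"goki.dev/mat32/v2"
)
```
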
diff --git a/go.sum b/go.sum
index 56ae83869..e72b7ed1c 100644
--- a/go.sum
+++ b/go.sum
@@ -2,8 +2,10 @@ cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMT
cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU=
gioui.org v0.0.0-20210308172011-57750fc8a0a6/go.mod h1:RSH6KIUZ0p2xy5zHDxgAM4zumjgTw83q2ge/PI+yyw8=
-git.sr.ht/~sbinet/gg v0.3.1 h1:LNhjNn8DerC8f9DHLz6lS0YYul/b602DUxDgGkd/Aik=
-git.sr.ht/~sbinet/gg v0.3.1/go.mod h1:KGYtlADtqsqANL9ueOFkWymvzUvLMQllU5Ixo+8v3pc=
+git.sr.ht/~sbinet/cmpimg v0.1.0 h1:E0zPRk2muWuCqSKSVZIWsgtU9pjsw3eKHi8VmQeScxo=
+git.sr.ht/~sbinet/cmpimg v0.1.0/go.mod h1:FU12psLbF4TfNXkKH2ZZQ29crIqoiqTZmeQ7dkp/pxE=
+git.sr.ht/~sbinet/gg v0.5.0 h1:6V43j30HM623V329xA9Ntq+WJrMjDxRjuAB1LFWF5m8=
+git.sr.ht/~sbinet/gg v0.5.0/go.mod h1:G2C0eRESqlKhS7ErsNey6HHrqU1PwsnCQlekFi9Q2Oo=
github.com/BurntSushi/freetype-go v0.0.0-20160129220410-b763ddbfe298 h1:1qlsVAQJXZHsaM8b6OLVo6muQUQd4CwkH/D3fnnbHXA=
github.com/BurntSushi/freetype-go v0.0.0-20160129220410-b763ddbfe298/go.mod h1:D+QujdIlUNfa0igpNMk6UIvlb6C252URs4yupRUV4lQ=
github.com/BurntSushi/graphics-go v0.0.0-20160129215708-b43f31a4a966 h1:lTG4HQym5oPKjL7nGs+csTgiDna685ZXjxijkne828g=
@@ -25,27 +27,37 @@ github.com/ajstarks/svgo v0.0.0-20211024235047-1546f124cd8b h1:slYM766cy2nI3BwyR
github.com/ajstarks/svgo v0.0.0-20211024235047-1546f124cd8b/go.mod h1:1KcenG0jGWcpt8ov532z81sp/kMMUG485J2InIOyADM=
github.com/akutz/sortfold v0.2.1 h1:u9x3FC6oM+6gZKEVNRnmVafJgappwrv9YqpELQCYViI=
github.com/akutz/sortfold v0.2.1/go.mod h1:m1NArmessx+/3z2N8MiiTjq79A3WwZwDDiZ7eeD4jHA=
-github.com/alecthomas/assert/v2 v2.3.0 h1:mAsH2wmvjsuvyBvAmCtm7zFsBlb8mIHx5ySLVdDZXL0=
-github.com/alecthomas/assert/v2 v2.3.0/go.mod h1:pXcQ2Asjp247dahGEmsZ6ru0UVwnkhktn7S0bBDLxvQ=
-github.com/alecthomas/chroma/v2 v2.9.1 h1:0O3lTQh9FxazJ4BYE/MOi/vDGuHn7B+6Bu902N2UZvU=
-github.com/alecthomas/chroma/v2 v2.9.1/go.mod h1:4TQu7gdfuPjSh76j78ietmqh9LiurGF0EpseFXdKMBw=
+github.com/alecthomas/assert/v2 v2.2.1 h1:XivOgYcduV98QCahG8T5XTezV5bylXe+lBxLG2K2ink=
+github.com/alecthomas/assert/v2 v2.2.1/go.mod h1:pXcQ2Asjp247dahGEmsZ6ru0UVwnkhktn7S0bBDLxvQ=
+github.com/alecthomas/chroma v0.10.0 h1:7XDcGkCQopCNKjZHfYrNLraA+M7e0fMiJ/Mfikbfjek=
+github.com/alecthomas/chroma/v2 v2.12.0 h1:Wh8qLEgMMsN7mgyG8/qIpegky2Hvzr4By6gEF7cmWgw=
+github.com/alecthomas/chroma/v2 v2.12.0/go.mod h1:4TQu7gdfuPjSh76j78ietmqh9LiurGF0EpseFXdKMBw=
github.com/alecthomas/repr v0.2.0 h1:HAzS41CIzNW5syS8Mf9UwXhNH1J9aix/BvDRf1Ml2Yk=
github.com/alecthomas/repr v0.2.0/go.mod h1:Fr0507jx4eOXV7AlPV6AVZLYrLIuIeSOWtW57eE/O/4=
github.com/andreyvit/diff v0.0.0-20170406064948-c7f18ee00883 h1:bvNMNQO63//z+xNgfBlViaCIJKLlCJ6/fmUseuG0wVQ=
+github.com/andreyvit/diff v0.0.0-20170406064948-c7f18ee00883/go.mod h1:rCTlJbsFo29Kk6CurOXKm700vrz8f0KW0JNfpkRJY/8=
github.com/anthonynsimon/bild v0.13.0 h1:mN3tMaNds1wBWi1BrJq0ipDBhpkooYfu7ZFSMhXt1C8=
github.com/anthonynsimon/bild v0.13.0/go.mod h1:tpzzp0aYkAsMi1zmfhimaDyX1xjn2OUc1AJZK/TF0AE=
github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY=
-github.com/antonmedv/expr v1.15.3 h1:q3hOJZNvLvhqE8OHBs1cFRdbXFNKuA+bHmRaI+AmRmI=
-github.com/antonmedv/expr v1.15.3/go.mod h1:0E/6TxnOlRNp81GMzX9QfDPAmHo2Phg00y4JUv1ihsE=
+github.com/antonmedv/expr v1.15.5 h1:y0Iz3cEwmpRz5/r3w4qQR0MfIqJGdGM1zbhD/v0G5Vg=
+github.com/antonmedv/expr v1.15.5/go.mod h1:0E/6TxnOlRNp81GMzX9QfDPAmHo2Phg00y4JUv1ihsE=
github.com/apache/arrow/go/arrow v0.0.0-20211112161151-bc219186db40 h1:q4dksr6ICHXqG5hm0ZW5IHyeEJXoIJSOZeBLmWPNeIQ=
github.com/apache/arrow/go/arrow v0.0.0-20211112161151-bc219186db40/go.mod h1:Q7yQnSMnLvcXlZ8RV+jwz/6y1rQTqbX6C82SndT52Zs=
github.com/armon/consul-api v0.0.0-20180202201655-eb2c6b5be1b6/go.mod h1:grANhF5doyWs3UAsr3K4I6qtAmlQcZDesFNEHPZAzj8=
+github.com/aymanbagabas/go-osc52/v2 v2.0.1 h1:HwpRHbFMcZLEVr42D4p7XBqjyuxQH5SMiErDT4WkJ2k=
+github.com/aymanbagabas/go-osc52/v2 v2.0.1/go.mod h1:uYgXzlJ7ZpABp8OJ+exZzJJhRNQ2ASbcXHWsFqH8hp8=
github.com/aymerick/douceur v0.2.0 h1:Mv+mAeH1Q+n9Fr+oyamOlAkUNPWPlA8PPGR0QAaYuPk=
github.com/aymerick/douceur v0.2.0/go.mod h1:wlT5vV2O3h55X9m7iVYN0TBM0NH/MmbLnd30/FjWUq4=
github.com/boombuler/barcode v1.0.0/go.mod h1:paBWMcWSl3LHKBqUq+rly7CNSldXjb2rDl3JlRe0mD8=
github.com/c2h5oh/datasize v0.0.0-20220606134207-859f65c6625b h1:6+ZFm0flnudZzdSE0JxlhR2hKnGPcNB35BjQf4RYQDY=
github.com/c2h5oh/datasize v0.0.0-20220606134207-859f65c6625b/go.mod h1:S/7n9copUssQ56c7aAgHqftWO4LTf4xY6CGWt8Bc+3M=
+github.com/c2h5oh/datasize v0.0.0-20231215233829-aa82cc1e6500 h1:6lhrsTEnloDPXyeZBvSYvQf8u86jbKehZPVDDlkgDl4=
+github.com/c2h5oh/datasize v0.0.0-20231215233829-aa82cc1e6500/go.mod h1:S/7n9copUssQ56c7aAgHqftWO4LTf4xY6CGWt8Bc+3M=
+github.com/campoy/embedmd v1.0.0 h1:V4kI2qTJJLf4J29RzI/MAt2c3Bl4dQSYPuflzwFH2hY=
+github.com/campoy/embedmd v1.0.0/go.mod h1:oxyr9RCiSXg0M3VJ3ks0UGfp98BpSSGr0kpiX3MzVl8=
github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=
+github.com/chewxy/math32 v1.10.1 h1:LFpeY0SLJXeaiej/eIp2L40VYfscTvKh/FSEZ68uMkU=
+github.com/chewxy/math32 v1.10.1/go.mod h1:dOB2rcuFrCn6UHrze36WSLVPKtzPMRAQvBvUwkSsLqs=
github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw=
github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc=
github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk=
@@ -59,18 +71,26 @@ github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/dlclark/regexp2 v1.10.0 h1:+/GIL799phkJqYW+3YbOd8LCcbHzT0Pbo8zl70MHsq0=
github.com/dlclark/regexp2 v1.10.0/go.mod h1:DHkYz0B9wPfa6wondMfaivmHpzrQ3v9q8cnmRbL6yW8=
-github.com/emer/emergent v1.4.31 h1:CK27lI3/jTH8Sq7ySZ1hlkFX8wgjeHrsE/rIdvWHd+Q=
-github.com/emer/emergent v1.4.31/go.mod h1:ILxelktdMeqzAHj4wVf7jLc1j91hPuEK72XZCDPxPS8=
-github.com/emer/empi v1.0.22 h1:FPrM68LNQYydODqgs6ZddVEGRBOdFOrbUrLhIsn3eL8=
-github.com/emer/empi v1.0.22/go.mod h1:wr0eFV/S2/sAyowA7/8jGmK5AjiWW4saT5QMQuw87GY=
-github.com/emer/etable v1.1.24 h1:lxcZFalEERTQJpbAR0nNfyM4+K/ku2H+Nl6y4cmDHyE=
-github.com/emer/etable v1.1.24/go.mod h1:FBZBONSUL1EjWCoXckKpiGIBAoTFIBdif7tgPj+HjNs=
-github.com/emer/eve v0.9.5 h1:RYmF05WeDi6PGrtbAE5E6qABljkELzP//Bk8XSWLWlM=
-github.com/emer/eve v0.9.5/go.mod h1:qjzX/p6U1UZQCcBxUPY1mTwC0uyAbmGHZ/aLPwF5J2M=
-github.com/emer/leabra v1.2.7 h1:J7v2E/ldkT0zb8m3n5AkZW9eRPC06ieFZr5pJd2deUM=
-github.com/emer/leabra v1.2.7/go.mod h1:cIp9pbADkm5Wl0/y8AhvotIkpAZqXivuEYEGQzxOZJc=
-github.com/emer/vision v1.1.18 h1:EVvd2OqZc5KgY53xbwukzdtO7dvW4ivCRUFQTO7Pz9w=
-github.com/emer/vision v1.1.18/go.mod h1:L80mrcLudVB74up8+NMP4QchHcWap6Z2E0b61h6dP7E=
+github.com/emer/emergent v1.3.49 h1:/22zYCquxfzSP7KVt+LAML5TlGZF86s4/nzSoWvHSRI=
+github.com/emer/emergent v1.3.49/go.mod h1:9xv+lxvdOouAYDzQJQfE5civN3QkrYEifiiGhrtAut4=
+github.com/emer/emergent/v2 v2.0.0-dev0.0.2 h1:9y8LR0nYj8fc9Boqwwesx3RayuSZPfLa23tkIazg5kw=
+github.com/emer/emergent/v2 v2.0.0-dev0.0.2/go.mod h1:wHmouskl+1L/aU9/cqyR2hr/hQ5m2gkHwAe2aSvBL0E=
+github.com/emer/emergent/v2 v2.0.0-dev0.0.4 h1:tNoe+LAWHCdCmlwBrlCniGMdo35C6J9lALAQd2zAi7g=
+github.com/emer/emergent/v2 v2.0.0-dev0.0.4/go.mod h1:3rtJ0T21SOmj12Rb04mZvN9NVdkJYonx9ZsemQp/Vz0=
+github.com/emer/empi v1.0.17 h1:1arFWAzdDR9emrKbz2sTGMhfAa+Kk0y3UTeBW1j9ltw=
+github.com/emer/empi v1.0.17/go.mod h1:cwXlhwSb91QvfadOlVRrXvcpLGa1ld3GZme5ygb4kt8=
+github.com/emer/empi/v2 v2.0.0-dev0.0.2 h1:emqufj0F0vS2Qm7dXDR5cmuEHzVTLdud9/kihMi4UiU=
+github.com/emer/empi/v2 v2.0.0-dev0.0.2/go.mod h1:DYO/HZgXwm90zbH+N1ZY4lclsBNcqWC4DpsCW8qW9fk=
+github.com/emer/etable v1.1.19 h1:nk3ThZBPs5PjYwuM2CCq7MxvyoxBeUFEfn67ewC/IHQ=
+github.com/emer/etable v1.1.19/go.mod h1:c5Uvo5NQl5VhV8Q3VB6CexDd2lRl2PJ04Aa8y65AYUU=
+github.com/emer/eve/v2 v2.0.0-dev0.0.1.0.20231216082813-f792ee727590 h1:xKp6GbG2FUuhLD+eDAon/YU3qavPC143krXgqloi+Og=
+github.com/emer/eve/v2 v2.0.0-dev0.0.1.0.20231216082813-f792ee727590/go.mod h1:HRXq/fOlFnY/8SgvY4XgzGrevdO/7rLcqL1VwgWMg9s=
+github.com/emer/eve/v2 v2.0.0-dev0.0.2 h1:gGplnXkp9WNQAPzMPXGZW0k8SiDx7QfmK0bX4TeQDPU=
+github.com/emer/eve/v2 v2.0.0-dev0.0.2/go.mod h1:tEeI1vsnjNxDmdT8x+RX0PqNAam7dt2hZuuQpT7+MgI=
+github.com/emer/vision/v2 v2.0.0-dev0.0.2 h1:78BoCd7PLErcLH/qyBTUscvkDw3PkfskQicQ1F+uuNs=
+github.com/emer/vision/v2 v2.0.0-dev0.0.2/go.mod h1:K0J+Q8ufL6zrW/M+Cn3vSdL1jDiNgIIPd8APbv8sJeY=
+github.com/emer/vision/v2 v2.0.0-dev0.0.4 h1:AdnJflxTlXhsm7FFiY9rAC2e2GmJ033fjhnftBtMxwo=
+github.com/emer/vision/v2 v2.0.0-dev0.0.4/go.mod h1:+Lqx0BE+02jx4k0dRaq5vKaEpqCUy21VlaARzb4no54=
github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98=
@@ -78,6 +98,8 @@ github.com/envoyproxy/go-control-plane v0.9.9-0.20201210154907-fd9021fe5dad/go.m
github.com/envoyproxy/go-control-plane v0.9.9-0.20210217033140-668b12f5399d/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk=
github.com/envoyproxy/go-control-plane v0.9.9-0.20210512163311-63b5d3c536b0/go.mod h1:hliV/p42l8fGbc6Y9bQ70uLwIvmJyVE5k4iMKlh8wCQ=
github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c=
+github.com/expr-lang/expr v1.15.7 h1:BK0JcWUkoW6nrbLBo6xCKhz4BvH5DSOOu1Gx5lucyZo=
+github.com/expr-lang/expr v1.15.7/go.mod h1:uCkhfG+x7fcZ5A5sXHKuQ07jGZRl6J0FCAaf2k4PtVQ=
github.com/fatih/camelcase v1.0.0 h1:hxNvNX/xYBp0ovncs8WyWZrOrpBNub/JfaMvbURyft8=
github.com/fatih/camelcase v1.0.0/go.mod h1:yN2Sb0lFhZJUdVvtELVWefmrXpuZESvPmqwoZc+/fpc=
github.com/fogleman/gg v1.2.1-0.20190220221249-0403632d5b90/go.mod h1:R/bRT+9gY/C5z7JzPU0zXsXHKM4/ayA+zqcVNZzPa1k=
@@ -88,24 +110,26 @@ github.com/fsnotify/fsnotify v1.7.0/go.mod h1:40Bi/Hjc2AVfZrqy+aj+yEI+/bRxZnMJyT
github.com/gabriel-vasile/mimetype v1.4.3 h1:in2uUcidCuFcDKtdcBxlR0rJ1+fsokWf+uqxgUFjbI0=
github.com/gabriel-vasile/mimetype v1.4.3/go.mod h1:d8uq/6HKRL6CGdk+aubisF/M5GcPfT7nKyLpA0lbSSk=
github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04=
-github.com/go-fonts/dejavu v0.1.0 h1:JSajPXURYqpr+Cu8U9bt8K+XcACIHWqWrvWCKyeFmVQ=
github.com/go-fonts/dejavu v0.1.0/go.mod h1:4Wt4I4OU2Nq9asgDCteaAaWZOV24E+0/Pwo0gppep4g=
+github.com/go-fonts/dejavu v0.3.2 h1:3XlHi0JBYX+Cp8n98c6qSoHrxPa4AUKDMKdrh/0sUdk=
+github.com/go-fonts/dejavu v0.3.2/go.mod h1:m+TzKY7ZEl09/a17t1593E4VYW8L1VaBXHzFZOIjGEY=
github.com/go-fonts/latin-modern v0.2.0/go.mod h1:rQVLdDMK+mK1xscDwsqM5J8U2jrRa3T0ecnM9pNujks=
-github.com/go-fonts/latin-modern v0.3.0 h1:CIDlMm0djMO3XIKHVz2na9lFKt3kdC/YCy7k7lLpyjE=
+github.com/go-fonts/latin-modern v0.3.2 h1:M+Sq24Dp0ZRPf3TctPnG1MZxRblqyWC/cRUL9WmdaFc=
+github.com/go-fonts/latin-modern v0.3.2/go.mod h1:9odJt4NbRrbdj4UAMuLVd4zEukf6aAEKnDaQga0whqQ=
github.com/go-fonts/liberation v0.1.1/go.mod h1:K6qoJYypsmfVjWg8KOVDQhLc8UDgIK2HYqyqAO9z7GY=
-github.com/go-fonts/liberation v0.3.0 h1:3BI2iaE7R/s6uUUtzNCjo3QijJu3aS4wmrMgfSpYQ+8=
-github.com/go-fonts/liberation v0.3.0/go.mod h1:jdJ+cqF+F4SUL2V+qxBth8fvBpBDS7yloUL5Fi8GTGY=
+github.com/go-fonts/liberation v0.3.2 h1:XuwG0vGHFBPRRI8Qwbi5tIvR3cku9LUfZGq/Ar16wlQ=
+github.com/go-fonts/liberation v0.3.2/go.mod h1:N0QsDLVUQPy3UYg9XAc3Uh3UDMp2Z7M1o4+X98dXkmI=
github.com/go-fonts/stix v0.1.0/go.mod h1:w/c1f0ldAUlJmLBvlbkvVXLAD+tAMqobIIQpmnUIzUY=
github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU=
-github.com/go-gl/glfw/v3.3/glfw v0.0.0-20221017161538-93cebf72946b h1:GgabKamyOYguHqHjSkDACcgoPIz3w0Dis/zJ1wyHHHU=
-github.com/go-gl/glfw/v3.3/glfw v0.0.0-20221017161538-93cebf72946b/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8=
+github.com/go-gl/glfw/v3.3/glfw v0.0.0-20231124074035-2de0cf0c80af h1:zclgNFqP+NXDgGX2BiDvIonxKIom8j65wQlOyFtyujc=
+github.com/go-gl/glfw/v3.3/glfw v0.0.0-20231124074035-2de0cf0c80af/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8=
github.com/go-gl/mathgl v1.1.0 h1:0lzZ+rntPX3/oGrDzYGdowSLC2ky8Osirvf5uAwfIEA=
github.com/go-gl/mathgl v1.1.0/go.mod h1:yhpkQzEiH9yPyxDUGzkmgScbaBVlhC06qodikEM0ZwQ=
github.com/go-latex/latex v0.0.0-20210118124228-b3d85cf34e07/go.mod h1:CO1AlKB2CSIqUrmQPqA0gdRIlnLEY0gK5JGjh37zN5U=
-github.com/go-latex/latex v0.0.0-20230307184459-12ec69307ad9 h1:NxXI5pTAtpEaU49bpLpQoDsu1zrteW/vxzTz8Cd2UAs=
-github.com/go-latex/latex v0.0.0-20230307184459-12ec69307ad9/go.mod h1:gWuR/CrFDDeVRFQwHPvsv9soJVB/iqymhuZQuJ3a9OM=
-github.com/go-pdf/fpdf v0.7.0 h1:Kgf56ewNyhYcv6LIbhDWGRF91+e4aGMjpQlabnZnz9Q=
-github.com/go-pdf/fpdf v0.7.0/go.mod h1:gfqhcNwXrsd3XYKte9a7vM3smvU/jB4ZRDrmWSxpfdc=
+github.com/go-latex/latex v0.0.0-20231108140139-5c1ce85aa4ea h1:DfZQkvEbdmOe+JK2TMtBM+0I9GSdzE2y/L1/AmD8xKc=
+github.com/go-latex/latex v0.0.0-20231108140139-5c1ce85aa4ea/go.mod h1:Y7Vld91/HRbTBm7JwoI7HejdDB0u+e9AUBO9MB7yuZk=
+github.com/go-pdf/fpdf v0.9.0 h1:PPvSaUuo1iMi9KkaAn90NuKi+P4gwMedWPHhj8YlJQw=
+github.com/go-pdf/fpdf v0.9.0/go.mod h1:oO8N111TkmKb9D7VvWGLvLJlaZUQVPM+6V42pp3iV4Y=
github.com/goki/freetype v1.0.1 h1:10DgpEu+QEh/hpvAxgx//RT8ayWwHJI+nZj3QNcn8uk=
github.com/goki/freetype v1.0.1/go.mod h1:ni9Dgz8vA6o+13u1Ke0q3kJcCJ9GuXb1dtlfKho98vs=
github.com/goki/gi v1.3.25 h1:ujr3BIGRx0EWo9b2MmPuNj5AunHrTUXoWnDZRv9jy6k=
@@ -160,8 +184,8 @@ github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/
github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
-github.com/gorilla/css v1.0.0 h1:BQqNyPTi50JCFMTw/b67hByjMVXZRwGha6wxVGkeihY=
-github.com/gorilla/css v1.0.0/go.mod h1:Dn721qIggHpt4+EFCcTLTU/vk5ySda2ReITrtgBl60c=
+github.com/gorilla/css v1.0.1 h1:ntNaBIghp6JmvWnxbZKANoLyuXTPZ4cAMlo6RyhlbO8=
+github.com/gorilla/css v1.0.1/go.mod h1:BvnYkspnSzMmwRK+b8/xgNPLiIuNZr6vbZBTPQ2A3b0=
github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw=
github.com/h2non/filetype v1.1.3 h1:FKkx9QbD7HR/zjK1Ia5XiBsq9zdLi5Kf3zGyFTAFkGg=
github.com/h2non/filetype v1.1.3/go.mod h1:319b3zT68BvV+WRj7cwy856M2ehB3HqNOt6sy1HndBY=
@@ -179,10 +203,18 @@ github.com/jung-kurt/gofpdf v1.0.3-0.20190309125859-24315acbbda5/go.mod h1:7Id9E
github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
github.com/klauspost/compress v1.13.1 h1:wXr2uRxZTJXHLly6qhJabee5JqIhTRoLBhDOA74hDEQ=
github.com/klauspost/compress v1.13.1/go.mod h1:8dP1Hq4DHOhN9w426knH3Rhby4rFm6D8eO+e+Dq5Gzg=
+github.com/lucasb-eyer/go-colorful v1.2.0 h1:1nnpGOrhyZZuNyfu1QjKiUICQ74+3FNCN69Aj6K7nkY=
+github.com/lucasb-eyer/go-colorful v1.2.0/go.mod h1:R4dSotOR9KMtayYi1e77YzuveK+i7ruzyGqttikkLy0=
github.com/magiconair/properties v1.8.0/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ=
+github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWEY=
+github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y=
+github.com/mattn/go-runewidth v0.0.15 h1:UNAjwbU9l54TA3KzvqLGxwWjHmMgBUVhBiTjelZgg3U=
+github.com/mattn/go-runewidth v0.0.15/go.mod h1:Jdepj2loyihRzMpdS35Xk/zdY8IAYHsh153qUoGf23w=
github.com/mitchellh/go-homedir v1.1.0 h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG+4E0Y=
github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0=
github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y=
+github.com/muesli/termenv v0.15.2 h1:GohcuySI0QmI3wN8Ok9PtKGkgkFIk7y6Vpb5PvrY+Wo=
+github.com/muesli/termenv v0.15.2/go.mod h1:Epx+iuz8sNs7mNKhxzH4fWXGNpZwUaJKRS1noLXviQ8=
github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic=
github.com/phpdave11/gofpdf v1.4.2/go.mod h1:zpO6xFn9yxo3YLyMvW8HcKWVdbNqgIfOOp2dXMnm1mY=
github.com/phpdave11/gofpdi v1.0.12/go.mod h1:vBmVV0Do6hSBHC8uKUQ71JGW+ZGQq74llk/7bXwjDoI=
@@ -193,10 +225,14 @@ github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINE
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
+github.com/rivo/uniseg v0.2.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJtxc=
+github.com/rivo/uniseg v0.4.4 h1:8TfxU8dW6PdqD27gjM8MVNuicgxIjxpm4K7x4jp8sis=
+github.com/rivo/uniseg v0.4.4/go.mod h1:FN3SvrM+Zdj16jyLfmOkMNblXMcoc8DfTHruCPUcx88=
github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ=
github.com/russross/blackfriday v1.5.2/go.mod h1:JO/DiYxRf+HjHt06OyowR9PTA263kcR/rfWxYHBV53g=
github.com/ruudk/golang-pdf417 v0.0.0-20181029194003-1af4ab5afa58/go.mod h1:6lfFZQK844Gfx8o5WFuvpxWRwnSoipWe/p622j1v06w=
-github.com/sergi/go-diff v1.2.0 h1:XU+rvMAioB0UC3q1MFrIQy4Vo5/4VsRDQQXHsEya6xQ=
+github.com/sergi/go-diff v1.3.1 h1:xkr+Oxo4BOQKmkn/B9eMK0g5Kg/983T9DqqPHwYqD+8=
+github.com/sergi/go-diff v1.3.1/go.mod h1:aMJSSKb2lpPvRNec0+w3fl7LP9IOFzdc9Pa4NFbPK1I=
github.com/spf13/afero v1.1.2/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B0CQ=
github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE=
github.com/spf13/cobra v0.0.5/go.mod h1:3K3wKZymM7VvHMDS9+Akkh4K60UwM26emMESw8tLCHU=
@@ -204,9 +240,11 @@ github.com/spf13/jwalterweatherman v1.0.0/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb6
github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4=
github.com/spf13/viper v1.3.2/go.mod h1:ZiWeW+zYFKm7srdB9IoDzzZXaJaI5eL9QjNiN/DMA2s=
github.com/srwiley/oksvg v0.0.0-20220128195007-1f435e4c2b44 h1:XPYXKIuH/n5zpUoEWk2jWV/SjEMNYmqDYmTgbjmhtaI=
+github.com/srwiley/oksvg v0.0.0-20220128195007-1f435e4c2b44/go.mod h1:cNQ3dwVJtS5Hmnjxy6AgTPd0Inb3pW05ftPSX7NZO7Q=
github.com/srwiley/rasterx v0.0.0-20220730225603-2ab79fcdd4ef h1:Ch6Q+AZUxDBCVqdkI8FSpFyZDtCVBc2VmejdNrm5rRQ=
github.com/srwiley/rasterx v0.0.0-20220730225603-2ab79fcdd4ef/go.mod h1:nXTWP6+gD5+LUJ8krVhhoeHjvHTutPxMYl5SvkcnJNE=
github.com/srwiley/scanFT v0.0.0-20220128184157-0d1ee492111f h1:uLR2GaV0kWYZ3Ns3l3sjtiN+mOWAQadvrL8HXcyKjl0=
+github.com/srwiley/scanFT v0.0.0-20220128184157-0d1ee492111f/go.mod h1:LZwgIPG9X6nH6j5Ef+xMFspl6Hru4b5EJxzMfeqHYJY=
github.com/srwiley/scanx v0.0.0-20190309010443-e94503791388 h1:ZdkidVdpLW13BQ9a+/3uerT2ezy9J7KQWH18JCfhDmI=
github.com/srwiley/scanx v0.0.0-20190309010443-e94503791388/go.mod h1:C/WY5lmWfMtPFYYBTd3Lzdn4FTLr+RxlIeiBNye+/os=
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
@@ -222,6 +260,112 @@ github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1
gitlab.com/gomidi/midi/v2 v2.0.30 h1:RgRYbQeQSab5ZaP1lqRcCTnTSBQroE3CE6V9HgMmOAc=
gitlab.com/gomidi/midi/v2 v2.0.30/go.mod h1:Y6IFFyABN415AYsFMPJb0/43TRIuVYDpGKp2gDYLTLI=
go.opentelemetry.io/proto/otlp v0.7.0/go.mod h1:PqfVotwruBrMGOCsRd/89rSnXhoiJIqeYNgFYFoEGnI=
+goki.dev/cam v0.9.46 h1:FwqL0SjwR5mBESzOi4FHN9/HRhbiDPmvD75EcGVPmL0=
+goki.dev/cam v0.9.46/go.mod h1:CI53lzIdarlFDHwvCzGipC3SAAWnBGe+ktKMAB40kMI=
+goki.dev/cam v0.9.48 h1:KlQxA9YURsvt/iZORFIRPA/P54k3HZBNx9Y0TPr/6jA=
+goki.dev/cam v0.9.48/go.mod h1:NIrTbYYtuFtJIU1YJJiAWW+gpswGd4iqu7AO/9GbOsI=
+goki.dev/colors v0.8.41 h1:siXB9CBy0M2mB6JsLo3/w8FyAul+O0Wv50LLOA6vc7Y=
+goki.dev/colors v0.8.41/go.mod h1:uAV51GLJjVBiLCOViOmC0N8rrTP+WaYyg/uTO5rdltw=
+goki.dev/colors v0.8.43 h1:MJRx+bbshIa+QZc4J5SgmdqbJ8M7era9eeuLcw/F2RA=
+goki.dev/colors v0.8.43/go.mod h1:B8j9nGf6MT0B05GVDFpjmPx3/Dy3MRxUV4dmVvah1d0=
+goki.dev/cursors v0.0.29 h1:452aO/m5ND7NLkT9oYoH/uzsv2gyGQX0qnPKwcOLIs4=
+goki.dev/cursors v0.0.29/go.mod h1:4Jg2YDs+MWpgeH/KmTTmgwOVqszI29FJ2IiajXIvq1g=
+goki.dev/cursors v0.0.31 h1:7L8tg3AL+OeV3os1htsv9OqzIn+IodnzO39T//gZEq0=
+goki.dev/cursors v0.0.31/go.mod h1:RGypRo+PIksWnx0YBXAptUCZ0Pn32F/SYt20mnfT7D8=
+goki.dev/enums v0.9.52 h1:q5BYXyJoYZqrLbBdP3NQSzcmedy2yQ331cWu4Xs4zr4=
+goki.dev/enums v0.9.52/go.mod h1:T8vLSgJQAkTP0WdPa0aZSS/yMYJHrqPKafqYwQTfWAU=
+goki.dev/enums v0.9.55 h1:tYPS9Y0nXqlU6banoVGpi//xg7FyHlSPjQzyh2amqTQ=
+goki.dev/enums v0.9.55/go.mod h1:BrbSAGyPGrJZYb3sqr1db2+MPLl1boH0zDByDbtjKek=
+goki.dev/etable/v2 v2.0.0-dev0.0.9 h1:41MgbuML3eGkyxaExnfNPGuJeObedVDqwRbwL6W4VrM=
+goki.dev/etable/v2 v2.0.0-dev0.0.9/go.mod h1:/R5LT9kON5pNUFqEXovNpKMFtPSpMceLatzizRIm2kI=
+goki.dev/etable/v2 v2.0.0-dev0.0.11 h1:f4DncIjMLfSknH3xNeUMjG8Kf9c/h1Kz5MZr8J6EQjI=
+goki.dev/etable/v2 v2.0.0-dev0.0.11/go.mod h1:MSqMMym9dwi+CfS2BwQDEtBMWiVCWXLNDTMED9mHfuI=
+goki.dev/fi v0.1.16 h1:GwXqm+IHk9JcyEfheAypN59ozcwgHAm/LYpFsqJDeHM=
+goki.dev/fi v0.1.16/go.mod h1:uuhpDb+jK89otuPHDUQJ38VefApaA/yIE70sKCED+Wo=
+goki.dev/fi v0.1.18 h1:3hPdJPOkJfsWTAgyraNrSda6Ckfe+Z2FaUUn4Q6kyUY=
+goki.dev/fi v0.1.18/go.mod h1:yDHvfIHCOK+SMxKcVHfyoyI4TuLVdR8hohGOk/9nN9A=
+goki.dev/gi/v2 v2.0.0-dev0.0.25 h1:Tixd5JG0Odwt5mJhyRUUA5C7sc3hlKvld91BTbOCnCc=
+goki.dev/gi/v2 v2.0.0-dev0.0.25/go.mod h1:uci6WbkLPdRm1GZfrpNPP48bl5rkoOfQmvZ5+0a+8Qw=
+goki.dev/gi/v2 v2.0.0-dev0.0.27 h1:XhpXzNcRX61YtdqXG/ffLPtjz5beddSXWl/IVOHtTNE=
+goki.dev/gi/v2 v2.0.0-dev0.0.27/go.mod h1:J9e3fm5mbfxsg4z64GKGU9VHSRCsH3n3Z6+9y8uM9KU=
+goki.dev/girl v0.0.32 h1:K31garFp3wTipvsfSs0YWwOiyU0F2vHIqw9QTJHEMpg=
+goki.dev/girl v0.0.32/go.mod h1:EZDS9vgd5+vYPYZxhUmcxABEsFPFJ2dzRgr2Kr4lyp8=
+goki.dev/girl v0.0.35 h1:xeiWwG0jgOrbilf6XuzMgPuHJ/X3AXe5/owHKT0DgfQ=
+goki.dev/girl v0.0.35/go.mod h1:l3SrITAwAtcfDUgcI4rJgcalHNh1voalFTGJgX4eMhI=
+goki.dev/glop v0.1.9-0.20231215195749-0ece7c1642b7 h1:6MldAHhNa69x7X1Bq/+Mo+5Bw/U5kA2bxcVwupAyAi0=
+goki.dev/glop v0.1.9-0.20231215195749-0ece7c1642b7/go.mod h1:Q9TPQIlJ5LL38UITJonRUEmEbPPTHWOJED+xD63VmIg=
+goki.dev/glop v0.1.9 h1:s6FFcCXqNy2FLEG5k+ULvxEvbYtHnW7KoqvEw9uPsP0=
+goki.dev/glop v0.1.9/go.mod h1:Q9TPQIlJ5LL38UITJonRUEmEbPPTHWOJED+xD63VmIg=
+goki.dev/goosi v0.0.22 h1:G9J3A02DOlNaXwiRo+5ox46kzpmAEuj87KVQhhuXutg=
+goki.dev/goosi v0.0.22/go.mod h1:+/7oPw1dgpssVMlQPbnqK3OEUg972eiAnDF7NIWNEbU=
+goki.dev/goosi v0.0.24 h1:ihObJPh07enlfpznMLM77JWAOAz6iaA1o/2JYbiAvhA=
+goki.dev/goosi v0.0.24/go.mod h1:saV+W9tD1p+WRpfdgc4FGGnzHGoi0nPi1+0EMEFCPt8=
+goki.dev/gosl/v2 v2.0.0-dev0.0.3.0.20231216082948-11455e26c72b h1:Hhnc8fhrq2mU2KQcaQq6xb8ifrDQ/Rk/b3kx8byE8ig=
+goki.dev/gosl/v2 v2.0.0-dev0.0.3.0.20231216082948-11455e26c72b/go.mod h1:pPHH35o3EMMfNVrbQJBH60f1SjHhm6VSg53vx+H07nw=
+goki.dev/gosl/v2 v2.0.0-dev0.0.5 h1:8XVM50+CV6rD8MxRTyKS77NUbhP+mWUfutZfUz2dcIw=
+goki.dev/gosl/v2 v2.0.0-dev0.0.5/go.mod h1:dZCtITJoTRKmzBVqs7S1M9Ml2ieLXkARvSRNl0JoXnE=
+goki.dev/grog v0.0.24 h1:tc1BrbcXrjJj2qEj01A9mNGdsRIEGJOT7KHJBpu1H78=
+goki.dev/grog v0.0.24/go.mod h1:syaDqOz6EyYmt1uxt6vZHobH0rnQmQptxyM0R8qPTaw=
+goki.dev/grog v0.0.26 h1:x5a7gJViBqsLGJZIM1lUmk76c/bvi/OmUkYIPZEB2CY=
+goki.dev/grog v0.0.26/go.mod h1:ia+zG+6owggSFju6HbQFOT+/R8I0ankfcBow6g/UKd4=
+goki.dev/grows v0.3.27 h1:Q9HFyZVZYxL8X/nOtNopXqQxfzzHMBA9UNvLTo3bc1U=
+goki.dev/grows v0.3.27/go.mod h1:4uZ4GFrqVYIvAPlATTQtDsJcUnyojOwwrO+OQXsmVtA=
+goki.dev/grows v0.3.30 h1:X/fy/Oh5r+m96WeDEjuZ3w9bqrkrdmcNxU0wmnxNEDg=
+goki.dev/grows v0.3.30/go.mod h1:9WydssNK8Vjyu24SVXnCoCiG3SRm+RSFbf+mzXkp+2A=
+goki.dev/grr v0.0.9 h1:iFkelH1Lf17Z7hlRUsID/td0ZJj1oImzgG4RBpDh5OA=
+goki.dev/grr v0.0.9/go.mod h1:i8HvzZY1VPU3u/rNCw4No6/cdwVw0iOUUz4Yo/o/e4c=
+goki.dev/grr v0.0.11 h1:BXDUzW8y9F+KXymOFvbxoPWxEl95LSwNapoooQ+D0ug=
+goki.dev/grr v0.0.11/go.mod h1:i8HvzZY1VPU3u/rNCw4No6/cdwVw0iOUUz4Yo/o/e4c=
+goki.dev/gti v0.1.28 h1:gt4iEQms3AnRg/WWOXXLFzTvcH98wZJHepfWUVA+cT8=
+goki.dev/gti v0.1.28/go.mod h1:6fFahcbZqEe0nA/AcOaiq/hER/MAPnL/BGEbtxK6164=
+goki.dev/gti v0.1.31 h1:3DZYNumyYy3Dx4D3lbj7TEtzkp538p4KYt4wEl6cvUg=
+goki.dev/gti v0.1.31/go.mod h1:IOZilIh0ngDaf4faSNj9oFkGLRIFFOyg6w8JFGcB+6w=
+goki.dev/icons v0.0.14 h1:VLpSO1sBAQpPUBQHK9Cm21EtMaGYCp/HR/z20OqJUS8=
+goki.dev/icons v0.0.14/go.mod h1:19ubuG+G5p+wM/N7AU7Tgnr5/ygPDd1EivY1ipjiOZw=
+goki.dev/icons v0.0.16 h1:p8kkJcQZYmZ4iEm2VwoIndUtPtUk8EZFWLagP9QBl6U=
+goki.dev/icons v0.0.16/go.mod h1:W8v1n9qpjeVylA9SqW2XxAYI+m9vbj2NxTJcxcmPmrs=
+goki.dev/ki/v2 v2.0.0-dev0.0.30 h1:/eQsAPcrXTOMSJWlUZWH1AcsunmzlMy++Lhppje6oMc=
+goki.dev/ki/v2 v2.0.0-dev0.0.30/go.mod h1:XLvLdqNSZ0aFASKI+/IP5qdc9y5z87ics8DDw7NNAKo=
+goki.dev/ki/v2 v2.0.0-dev0.0.32 h1:7QOhSGWrOuIEdk8XZTJxBeyamFRz8coFfHhPlJ0tdA4=
+goki.dev/ki/v2 v2.0.0-dev0.0.32/go.mod h1:uGGvB+CCEySB8Vz/eZ/xqSXPllRsOVGu5+6v2RuofpA=
+goki.dev/laser v0.1.30 h1:SzvpQom50asqca9nJFasA5EMg9s96oPDaFT5t5fxlWA=
+goki.dev/laser v0.1.30/go.mod h1:mnnpZvmB23N+lS4x5jQIKGRJFgvebc+qNNd7eK0hwuk=
+goki.dev/laser v0.1.33 h1:Ho6A7JhfcZOBArMWPeeFPYXVX3QXWqkp+H/pxelWTyo=
+goki.dev/laser v0.1.33/go.mod h1:SBAKzsIXQdBqQIwnEm913xj9604q82wjZ7soDMiCWH8=
+goki.dev/mat32/v2 v2.0.0-dev0.0.24 h1:RY+aG3OmbRiNls8X7kGD+kkLWRukmEZRgCE8CDjpNYQ=
+goki.dev/mat32/v2 v2.0.0-dev0.0.24/go.mod h1:C76M7oLT638VCRfgqaTJ8M4fhz+TeahxvODHEXdYDzw=
+goki.dev/mat32/v2 v2.0.0-dev0.0.27 h1:6z1XdcKu0BHx4fJvVcNy5OlbLFrVyPRIbFG1wDBPubw=
+goki.dev/mat32/v2 v2.0.0-dev0.0.27/go.mod h1:q0M0c8sjbEKS+2aLOhiPx7RLEiWn6WPzLVRdRjDOAzE=
+goki.dev/ordmap v0.5.8 h1:eFmEyqsmOLa015CHcnBjQ/uxUhJ6y18GyJl3RVQn6Wk=
+goki.dev/ordmap v0.5.8/go.mod h1:m3CYoDJcio+Z9aXipUdg3yLUjKspxnVc8es6GWrDAwQ=
+goki.dev/ordmap v0.5.9 h1:IKUmvSqkNHP2Ryde0tOlqEVG8jlCPC3JtrkV00aO0fc=
+goki.dev/ordmap v0.5.9/go.mod h1:m3CYoDJcio+Z9aXipUdg3yLUjKspxnVc8es6GWrDAwQ=
+goki.dev/pi/v2 v2.0.0-dev0.0.23 h1:HB94HnYz2KNlbeknozv0MVgNmhWYoF4peW3dNqmULMY=
+goki.dev/pi/v2 v2.0.0-dev0.0.23/go.mod h1:A8rDydPszmAW8Zpgagg0Odk2xBaL+Iv/2TTSQUVqdTE=
+goki.dev/pi/v2 v2.0.0-dev0.0.25 h1:QsN66E3R7+7n/Sc43UWtXCyU0ZX+8v9zDSkxWOMZFKU=
+goki.dev/pi/v2 v2.0.0-dev0.0.25/go.mod h1:gAwJsGFEnY5wPqvCw5j0pgkcmbYjB/kGY2/lOxzrzYs=
+goki.dev/prof/v2 v2.0.0-dev0.0.4 h1:paf9YO++bwGBa/ig7J+HbWLNuIJqAxeyJJiFu2d8YHk=
+goki.dev/prof/v2 v2.0.0-dev0.0.4/go.mod h1:gMwjHzdBy06YspRuxfTEJ65QMQ1gMafmib75mNC7vnA=
+goki.dev/spell v0.1.12 h1:L9TBvhRsCkmsuCSwKiFULttrQGc9bcp6BZoUe9i/pZE=
+goki.dev/spell v0.1.12/go.mod h1:q6aA4M2b1Qa+S71mGICz0E6Cy7o1kAnnzUonQjllIUk=
+goki.dev/spell v0.1.14 h1:Z+bc+z5cqGMvcRN54yl/p/Qb7k7Xnlw3rxNo2r5/fbo=
+goki.dev/spell v0.1.14/go.mod h1:RLKi/VAs2Kmz+2an/DlkBRzWCnCxUTKJrBrcYF96gV8=
+goki.dev/svg v0.1.25 h1:VZMzyV08J+GneK7DsioUpyQGYeK00OW7jz9E3MkszNo=
+goki.dev/svg v0.1.25/go.mod h1:ehdC9aQ7r8DfcicsFKyxfG2lUu1iUnZEmC7t9rZ70v8=
+goki.dev/svg v0.1.27 h1:Wtt62rjr3PEDy7/qbjqiYv0Q0AsPSYag8KRV3EXeewc=
+goki.dev/svg v0.1.27/go.mod h1:sXzrje+R6VxGf9grQhpOrS/+rzlot1YV5sM4hvjeXNo=
+goki.dev/vci/v2 v2.0.0-dev0.0.28 h1:VzJN2GhXydiTyoLezWUPR0sQdF+deHnsRNUwbBYy6WA=
+goki.dev/vci/v2 v2.0.0-dev0.0.28/go.mod h1:RsHaOxju5el9a+shpHM2vMk3EXfHjbF8vvlyzsUWQrg=
+goki.dev/vci/v2 v2.0.0-dev0.0.31 h1:wYHbPTI924EsJ0iQeEuGfTUqsjEH1YCP8q9mOCUN2Ns=
+goki.dev/vci/v2 v2.0.0-dev0.0.31/go.mod h1:T2BsLNmCnakJHnwkMCS+E0C+/QL+akQipdMW5y6bG6s=
+goki.dev/vgpu/v2 v2.0.0-dev0.0.26 h1:+tU5uzSiKIFmf9LcfdBlEPD2Rm06wpOOfeOVIShcWhw=
+goki.dev/vgpu/v2 v2.0.0-dev0.0.26/go.mod h1:svSzADm3NSRwUqIctzyn5JmWctB5FsIelBAbyW0UNK4=
+goki.dev/vgpu/v2 v2.0.0-dev0.0.29 h1:RyF4LyXOll49EoOZy9IgBAddc32qp9dxXKkf3bHqk5w=
+goki.dev/vgpu/v2 v2.0.0-dev0.0.29/go.mod h1:w7sRFPuBp9r0+jO/yfsUvh1rGv/Ico1hXO+TOBPhIhI=
+goki.dev/xyz v0.5.17 h1:RGHmG97XQmwO+RpH7PG+xnwL39zFD33/k/a5HiKkh9U=
+goki.dev/xyz v0.5.17/go.mod h1:aCLTLi0USpF3PgfohK/8UyyQ8Rzn9Qk8ckcXcksXfCU=
+goki.dev/xyz v0.5.19 h1:r9AF2u4mqLGKI614QSzmo99qkvfa5OzOtTIGK5GEMaQ=
+goki.dev/xyz v0.5.19/go.mod h1:Gjp56XcY6pBEmam+HszFFTaEpktG5DeuSbBvbAA7NN0=
golang.org/x/crypto v0.0.0-20181203042331-505ab145d0a9/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
@@ -233,8 +377,10 @@ golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL
golang.org/x/exp v0.0.0-20190125153040-c74c464bbbf2/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
golang.org/x/exp v0.0.0-20191002040644-a1355ae1e2c3/go.mod h1:NOZ3BPKG0ec/BKJQgnvsSFpcKLM5xXVWnvZS97DWHgE=
-golang.org/x/exp v0.0.0-20231006140011-7918f672742d h1:jtJma62tbqLibJ5sFQz8bKtEM8rJBtfilJ2qTU199MI=
-golang.org/x/exp v0.0.0-20231006140011-7918f672742d/go.mod h1:ldy0pHrwJyGW56pPQzzkH36rKxoZW1tw7ZJpeKx+hdo=
+golang.org/x/exp v0.0.0-20231206192017-f3f8817b8deb h1:c0vyKkb6yr3KR7jEfJaOSv4lG7xPkbN6r52aJz1d8a8=
+golang.org/x/exp v0.0.0-20231206192017-f3f8817b8deb/go.mod h1:iRJReGqOEeBhDZGkGbynYwcHlctCvnjTYIamk7uXpHI=
+golang.org/x/exp v0.0.0-20231219180239-dc181d75b848 h1:+iq7lrkxmFNBM7xx+Rae2W6uyPfhPeDWD+n+JgppptE=
+golang.org/x/exp v0.0.0-20231219180239-dc181d75b848/go.mod h1:iRJReGqOEeBhDZGkGbynYwcHlctCvnjTYIamk7uXpHI=
golang.org/x/image v0.0.0-20180708004352-c73c2afc3b81/go.mod h1:ux5Hcp/YLpHSI86hEcLt0YII63i6oz57MZXIpbrjZUs=
golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js=
golang.org/x/image v0.0.0-20190321063152-3fc05d484e9f/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js=
@@ -246,9 +392,8 @@ golang.org/x/image v0.0.0-20200430140353-33d19683fad8/go.mod h1:FeLwcggjj3mMvU+o
golang.org/x/image v0.0.0-20200618115811-c13761719519/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0=
golang.org/x/image v0.0.0-20201208152932-35266b937fa6/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0=
golang.org/x/image v0.0.0-20210216034530-4410531fe030/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0=
-golang.org/x/image v0.0.0-20211028202545-6944b10bf410/go.mod h1:023OzeP/+EPmXeapQh35lcL3II3LrY8Ic+EFFKVhULM=
-golang.org/x/image v0.13.0 h1:3cge/F/QTkNLauhf2QoE9zp+7sr+ZcL4HnoZmdwg9sg=
-golang.org/x/image v0.13.0/go.mod h1:6mmbMOeV28HuMTgA6OSRkdXKYw/t5W9Uwn2Yv1r3Yxk=
+golang.org/x/image v0.14.0 h1:tNgSxAFe3jC4uYqvZdTr84SZoM1KfwdC9SKIFrLjFn4=
+golang.org/x/image v0.14.0/go.mod h1:HUYqC05R2ZcZ3ejNQsIHQDQiwWM4JBqmm6MKANTp4LE=
golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU=
golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
@@ -258,8 +403,8 @@ golang.org/x/mod v0.1.0/go.mod h1:0QHyrYULN0/3qlju5TqG8bIK38QM8yzMo5ekMj3DlcY=
golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg=
golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
-golang.org/x/mod v0.13.0 h1:I/DsJXRlw/8l/0c24sM9yb0T4z9liZTduXvdAWYiysY=
-golang.org/x/mod v0.13.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c=
+golang.org/x/mod v0.14.0 h1:dGoOF9QVLYng8IHTm7BAyWqCqSheQ5pYWGhzW00YJr0=
+golang.org/x/mod v0.14.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c=
golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
@@ -271,8 +416,8 @@ golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81R
golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM=
golang.org/x/net v0.0.0-20210614182718-04defd469f4e/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
-golang.org/x/net v0.17.0 h1:pVaXccu2ozPjCXewfr1S7xza/zcXTity9cCdXQYSjIM=
-golang.org/x/net v0.17.0/go.mod h1:NxSsAGuq816PNPmqtQdLE42eU2Fs7NoRIZrHJAlaCOE=
+golang.org/x/net v0.19.0 h1:zTwKpTd2XuCqf8huc7Fo2iSy+4RHPd10s4KzeTnVr1c=
+golang.org/x/net v0.19.0/go.mod h1:CfAk/cbD4CthTvqiEl8NpboMuiuOYsAr/7NOjZJtv1U=
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
@@ -281,7 +426,8 @@ golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJ
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
-golang.org/x/sync v0.4.0 h1:zxkM55ReGkDlKSM+Fu41A+zmbZuaPVbGMzvvdUPznYQ=
+golang.org/x/sync v0.5.0 h1:60k92dhOjHxJkrqnwsfl8KuaHbn/5dl0lUPUklKo3qE=
+golang.org/x/sync v0.5.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20181205085412-a5c9d58dba9a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
@@ -296,15 +442,16 @@ golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7w
golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/sys v0.13.0 h1:Af8nKPmuFypiUBjVoU9V20FiaFXOcuZI21p0ycVYYGE=
-golang.org/x/sys v0.13.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.15.0 h1:h48lPFYpsTvQJZF4EKyI4aLHaev3CxivZmv7yZig9pc=
+golang.org/x/sys v0.15.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
-golang.org/x/text v0.13.0 h1:ablQoSUd0tRdKxZewP80B+BaqeKJuVhuRxj/dkrun3k=
-golang.org/x/text v0.13.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE=
+golang.org/x/text v0.14.0 h1:ScX5w1eTa3QqT8oi6+ziP7dTV1S2+ALU0bI+0zXKWiQ=
+golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU=
golang.org/x/tools v0.0.0-20180525024113-a5b4c53f6e8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
@@ -317,24 +464,24 @@ golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtn
golang.org/x/tools v0.0.0-20200130002326-2f3ba24bd6e7/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
golang.org/x/tools v0.1.0/go.mod h1:xkSsbof2nBLbhDlRMhhhyNLN/zl3eTqcnHD5viDpcZ0=
golang.org/x/tools v0.1.4/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
-golang.org/x/tools v0.14.0 h1:jvNa2pY0M4r62jkRQ6RwEZZyPcymeL9XZMLBbV7U2nc=
-golang.org/x/tools v0.14.0/go.mod h1:uYBEerGOWcJyEORxN+Ek8+TT266gXkNlHdJBwexUsBg=
+golang.org/x/tools v0.16.1 h1:TLyB3WofjdOEepBHAU20JdNC1Zbg87elYofWYAY5oZA=
+golang.org/x/tools v0.16.1/go.mod h1:kYVVN6I1mBNoB1OX+noeBjbRk4IUEPa7JJ+TJMEooJ0=
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
-golang.org/x/xerrors v0.0.0-20220907171357-04be3eba64a2 h1:H2TDz8ibqkAF6YGhCdN3jS9O0/s90v0rJh3X/OLHEUk=
-golang.org/x/xerrors v0.0.0-20220907171357-04be3eba64a2/go.mod h1:K8+ghG5WaK9qNqU5K3HdILfMLy1f3aNYFI/wnl100a8=
+golang.org/x/xerrors v0.0.0-20231012003039-104605ab7028 h1:+cNy6SZtPcJQH3LJVLOSmiC7MMxXNOb3PU/VUEz+EhU=
+golang.org/x/xerrors v0.0.0-20231012003039-104605ab7028/go.mod h1:NDW/Ps6MPRej6fsCIbMTohpP40sJ/P/vI1MoTEGwX90=
gonum.org/v1/gonum v0.0.0-20180816165407-929014505bf4/go.mod h1:Y+Yx5eoAFn32cQvJDxZx5Dpnq+c3wtXuadVZAcxbbBo=
gonum.org/v1/gonum v0.8.2/go.mod h1:oe/vMfY3deqTw+1EZJhuvEW2iwGF1bW9wwu7XCu0+v0=
gonum.org/v1/gonum v0.9.3/go.mod h1:TZumC3NeyVQskjXqmyWt4S3bINhy7B4eYwW69EbyX+0=
-gonum.org/v1/gonum v0.12.0 h1:xKuo6hzt+gMav00meVPUlXwSdoEJP46BR+wdxQEFK2o=
-gonum.org/v1/gonum v0.12.0/go.mod h1:73TDxJfAAHeA8Mk9mf8NlIppyhQNo5GLTcYeqgo2lvY=
+gonum.org/v1/gonum v0.14.0 h1:2NiG67LD1tEH0D7kM+ps2V+fXmsAnpUeec7n8tcr4S0=
+gonum.org/v1/gonum v0.14.0/go.mod h1:AoWeoz0becf9QMWtE8iWXNXc27fK4fNeHNf/oMejGfU=
gonum.org/v1/netlib v0.0.0-20190313105609-8cb42192e0e0/go.mod h1:wa6Ws7BG/ESfp6dHfk7C6KdzKA7wR7u/rKwOGE66zvw=
gonum.org/v1/plot v0.0.0-20190515093506-e2840ee46a6b/go.mod h1:Wt8AAjI+ypCyYX3nZBvf6cAIx93T+c/OS2HFAYskSZc=
gonum.org/v1/plot v0.9.0/go.mod h1:3Pcqqmp6RHvJI72kgb8fThyUnav364FOsdDo2aGW5lY=
-gonum.org/v1/plot v0.12.0 h1:y1ZNmfz/xHuHvtgFe8USZVyykQo5ERXPnspQNVK15Og=
-gonum.org/v1/plot v0.12.0/go.mod h1:PgiMf9+3A3PnZdJIciIXmyN1FwdAA6rXELSN761oQkw=
+gonum.org/v1/plot v0.14.0 h1:+LBDVFYwFe4LHhdP8coW6296MBEY4nQ+Y4vuUpJopcE=
+gonum.org/v1/plot v0.14.0/go.mod h1:MLdR9424SJed+5VqC6MsouEpig9pZX2VZ57H9ko2bXU=
google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM=
google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc=
diff --git a/interinhib/README.md b/interinhib/README.md
deleted file mode 100644
index 15aa15810..000000000
--- a/interinhib/README.md
+++ /dev/null
@@ -1,19 +0,0 @@
-Package `interinhib` provides inter-layer inhibition params, which can be added to Layer types.
-
-Note: it is better to use direct inhibitory projections -- try that first before using this!
-
-Note: the following has not been updated from Leabra version to axon:
-
-Call at the start of the Layer InhibFmGeAct method like this:
-
-```Go
-// InhibFmGeAct computes inhibition Gi from Ge and Act averages within relevant Pools
-func (ly *Layer) InhibFmGeAct(lctxt *Context) {
- lpl := &ly.Pools[0]
- ly.Params.Inhib.Layer.Inhib(&lpl.Inhib)
- ly.InterInhib.Inhib(&ly.Layer) // does inter-layer inhibition
- ly.PoolInhibFmGeAct(ltime)
-}
-```
-
-
diff --git a/interinhib/interinhib.go b/interinhib/interinhib.go
deleted file mode 100644
index ac11e41ad..000000000
--- a/interinhib/interinhib.go
+++ /dev/null
@@ -1,74 +0,0 @@
-// Copyright (c) 2020, The Emergent Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-/*
-Package interinhib provides inter-layer inhibition params,
-which can be added to Layer types. Call at the start of the
-Layer InhibFmGeAct method like this:
-// InhibFmGeAct computes inhibition Gi from Ge and Act averages within relevant Pools
-
- func (ly *Layer) InhibFmGeAct(lctxt *Context) {
- lpl := &ly.Pools[0]
- ly.Params.Inhib.Layer.Inhib(&lpl.Inhib)
- ly.InterInhib.Inhib(&ly.Layer) // does inter-layer inhibition
- ly.PoolInhibFmGeAct(ltime)
- }
-*/
-package interinhib
-
-import (
- "github.com/emer/axon/axon"
- "github.com/emer/emergent/emer"
- "github.com/goki/mat32"
-)
-
-// InterInhib specifies inhibition between layers, where
-// the receiving layer either does a Max or Add of portion of
-// inhibition from other layer(s).
-type InterInhib struct {
-
- // layers to receive inhibition from
- Lays emer.LayNames `desc:"layers to receive inhibition from"`
-
- // multiplier on Gi from other layers
- Gi float32 `desc:"multiplier on Gi from other layers"`
-
- // add inhibition -- otherwise Max
- Add bool `desc:"add inhibition -- otherwise Max"`
-}
-
-func (il *InterInhib) Defaults() {
- il.Gi = 0.5
-}
-
-// Inhib updates layer inhibition based on other layer inhibition
-func (il *InterInhib) Inhib(ly *axon.Layer) {
- ogi := il.Gi * il.OtherGi(ly.Network)
- lpl := &ly.Pools[0]
- if il.Add {
- lpl.Inhib.Gi += ogi
- } else {
- lpl.Inhib.Gi = mat32.Max(ogi, lpl.Inhib.Gi)
- }
-}
-
-// OtherGi returns either the Sum (for Add) or Max of other layer Gi values.
-// These are the raw values, not multiplied by Gi factor.
-func (il *InterInhib) OtherGi(net emer.Network) float32 {
- gi := float32(0)
- for _, lnm := range il.Lays {
- oli := net.LayerByName(lnm)
- if oli == nil {
- continue
- }
- ol := oli.(axon.AxonLayer).AsAxon()
- ogi := ol.Pools[0].Inhib.GiOrig
- if il.Add {
- gi += ogi
- } else {
- gi = mat32.Max(gi, ogi)
- }
- }
- return gi
-}
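
For reference, the rule that the removed interinhib package implemented reduces to combining the other layers' raw Gi values by either Sum (Add) or Max, scaled by the Gi multiplier (0.5 by default). A minimal standalone sketch of that combination, with illustrative names that are not part of any current axon API:

```Go
package main

import "fmt"

// otherGi combines raw Gi values from other layers, summing them when
// add is true, otherwise taking the max -- mirroring the logic of the
// removed InterInhib.OtherGi method.
func otherGi(gis []float32, add bool) float32 {
	gi := float32(0)
	for _, ogi := range gis {
		if add {
			gi += ogi
		} else if ogi > gi {
			gi = ogi
		}
	}
	return gi
}

func main() {
	// with the default Gi multiplier of 0.5:
	ogi := 0.5 * otherGi([]float32{0.8, 1.2}, false)
	fmt.Println(ogi) // 0.6 -- would then be added to or max'd with the layer's own Gi
}
```

As the deleted README notes, direct inhibitory projections are the preferred mechanism, which is why the package is dropped rather than ported.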
diff --git a/kinase/enumgen.go b/kinase/enumgen.go
new file mode 100644
index 000000000..4f4eea767
--- /dev/null
+++ b/kinase/enumgen.go
@@ -0,0 +1,131 @@
+// Code generated by "goki generate -add-types"; DO NOT EDIT.
+
+package kinase
+
+import (
+ "errors"
+ "log"
+ "strconv"
+ "strings"
+
+ "goki.dev/enums"
+)
+
+var _RulesValues = []Rules{0, 1, 2, 3}
+
+// RulesN is the highest valid value
+// for type Rules, plus one.
+const RulesN Rules = 4
+
+// An "invalid array index" compiler error signifies that the constant values have changed.
+// Re-run the enumgen command to generate them again.
+func _RulesNoOp() {
+ var x [1]struct{}
+ _ = x[SynSpkCont-(0)]
+ _ = x[SynNMDACont-(1)]
+ _ = x[SynSpkTheta-(2)]
+ _ = x[NeurSpkTheta-(3)]
+}
+
+var _RulesNameToValueMap = map[string]Rules{
+ `SynSpkCont`: 0,
+ `synspkcont`: 0,
+ `SynNMDACont`: 1,
+ `synnmdacont`: 1,
+ `SynSpkTheta`: 2,
+ `synspktheta`: 2,
+ `NeurSpkTheta`: 3,
+ `neurspktheta`: 3,
+}
+
+var _RulesDescMap = map[Rules]string{
+ 0: `SynSpkCont implements synaptic-level Ca signals at an abstract level, purely driven by spikes, not NMDA channel Ca, as a product of sender and recv CaSyn values that capture the decaying Ca trace from spiking, qualitatively as in the NMDA dynamics. These spike-driven Ca signals are integrated in a cascaded manner via CaM, then CaP (reflecting CaMKII) and finally CaD (reflecting DAPK1). It uses continuous learning based on temporary DWt (TDWt) values based on the TWindow around spikes, which convert into DWt after a pause in synaptic activity (no arbitrary ThetaCycle boundaries). There is an option to compare with SynSpkTheta by only doing DWt updates at the theta cycle level, in which case the key difference is the use of TDWt, which can remove some variability associated with the arbitrary timing of the end of trials.`,
+	1: `SynNMDACont is the same as SynSpkCont with NMDA-driven calcium signals computed according to the very close approximation to the Urakubo et al (2008) allosteric NMDA dynamics, then integrated at P vs. D time scales. This is the most biologically realistic yet computationally tractable version of the Kinase learning algorithm.`,
+ 2: `SynSpkTheta abstracts the SynSpkCont algorithm by only computing the DWt change at the end of the ThetaCycle, instead of continuous updating. This allows an optimized implementation that is roughly 1/3 slower than the fastest NeurSpkTheta version, while still capturing much of the learning dynamics by virtue of synaptic-level integration.`,
+ 3: `NeurSpkTheta uses neuron-level spike-driven calcium signals integrated at P vs. D time scales -- this is the original Leabra and Axon XCAL / CHL learning rule. It exhibits strong sensitivity to final spikes and thus high levels of variance.`,
+}
+
+var _RulesMap = map[Rules]string{
+ 0: `SynSpkCont`,
+ 1: `SynNMDACont`,
+ 2: `SynSpkTheta`,
+ 3: `NeurSpkTheta`,
+}
+
+// String returns the string representation
+// of this Rules value.
+func (i Rules) String() string {
+ if str, ok := _RulesMap[i]; ok {
+ return str
+ }
+ return strconv.FormatInt(int64(i), 10)
+}
+
+// SetString sets the Rules value from its
+// string representation, and returns an
+// error if the string is invalid.
+func (i *Rules) SetString(s string) error {
+ if val, ok := _RulesNameToValueMap[s]; ok {
+ *i = val
+ return nil
+ }
+ if val, ok := _RulesNameToValueMap[strings.ToLower(s)]; ok {
+ *i = val
+ return nil
+ }
+ return errors.New(s + " is not a valid value for type Rules")
+}
+
+// Int64 returns the Rules value as an int64.
+func (i Rules) Int64() int64 {
+ return int64(i)
+}
+
+// SetInt64 sets the Rules value from an int64.
+func (i *Rules) SetInt64(in int64) {
+ *i = Rules(in)
+}
+
+// Desc returns the description of the Rules value.
+func (i Rules) Desc() string {
+ if str, ok := _RulesDescMap[i]; ok {
+ return str
+ }
+ return i.String()
+}
+
+// RulesValues returns all possible values
+// for the type Rules.
+func RulesValues() []Rules {
+ return _RulesValues
+}
+
+// Values returns all possible values
+// for the type Rules.
+func (i Rules) Values() []enums.Enum {
+ res := make([]enums.Enum, len(_RulesValues))
+ for i, d := range _RulesValues {
+ res[i] = d
+ }
+ return res
+}
+
+// IsValid returns whether the value is a
+// valid option for type Rules.
+func (i Rules) IsValid() bool {
+ _, ok := _RulesMap[i]
+ return ok
+}
+
+// MarshalText implements the [encoding.TextMarshaler] interface.
+func (i Rules) MarshalText() ([]byte, error) {
+ return []byte(i.String()), nil
+}
+
+// UnmarshalText implements the [encoding.TextUnmarshaler] interface.
+func (i *Rules) UnmarshalText(text []byte) error {
+ if err := i.SetString(string(text)); err != nil {
+ log.Println(err)
+ }
+ return nil
+}
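
The generated code above gives Rules a complete string and text-marshaling API. A short usage sketch, assuming the github.com/emer/axon/kinase import path used throughout this repository:

```Go
package main

import (
	"fmt"

	"github.com/emer/axon/kinase"
)

func main() {
	var r kinase.Rules
	// SetString accepts exact names and, via the lowercase entries, case-insensitive ones:
	if err := r.SetString("synspktheta"); err != nil {
		panic(err)
	}
	fmt.Println(r.String(), r.IsValid()) // SynSpkTheta true

	// values round-trip through encoding.TextMarshaler / TextUnmarshaler:
	b, _ := r.MarshalText()
	var r2 kinase.Rules
	_ = r2.UnmarshalText(b)
	fmt.Println(r2 == r) // true
}
```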
diff --git a/kinase/gtigen.go b/kinase/gtigen.go
new file mode 100644
index 000000000..a008dcbad
--- /dev/null
+++ b/kinase/gtigen.go
@@ -0,0 +1,65 @@
+// Code generated by "goki generate -add-types"; DO NOT EDIT.
+
+package kinase
+
+import (
+ "goki.dev/gti"
+ "goki.dev/ordmap"
+)
+
+var _ = gti.AddType(&gti.Type{
+ Name: "github.com/emer/axon/kinase.CaDtParams",
+ ShortName: "kinase.CaDtParams",
+ IDName: "ca-dt-params",
+ Doc: "CaDtParams has rate constants for integrating Ca calcium\nat different time scales, including final CaP = CaMKII and CaD = DAPK1\ntimescales for LTP potentiation vs. LTD depression factors.",
+ Directives: gti.Directives{
+		&gti.Directive{Tool: "gosl", Directive: "start", Args: []string{"kinase"}},
+		&gti.Directive{Tool: "gti", Directive: "add", Args: []string{}},
+ },
+ Fields: ordmap.Make([]ordmap.KeyVal[string, *gti.Field]{
+		{"MTau", &gti.Field{Name: "MTau", Type: "float32", LocalType: "float32", Doc: "CaM (calmodulin) time constant in cycles (msec) -- for synaptic-level integration this integrates on top of Ca signal from send->CaSyn * recv->CaSyn, each of which are typically integrated with a 30 msec Tau.", Directives: gti.Directives{}, Tag: "def:\"2,5\" min:\"1\""}},
+		{"PTau", &gti.Field{Name: "PTau", Type: "float32", LocalType: "float32", Doc: "LTP spike-driven Ca factor (CaP) time constant in cycles (msec), simulating CaMKII in the Kinase framework, with 40 on top of MTau roughly tracking the biophysical rise time. Computationally, CaP represents the plus phase learning signal that reflects the most recent past information.", Directives: gti.Directives{}, Tag: "def:\"39\" min:\"1\""}},
+		{"DTau", &gti.Field{Name: "DTau", Type: "float32", LocalType: "float32", Doc: "LTD spike-driven Ca factor (CaD) time constant in cycles (msec), simulating DAPK1 in Kinase framework. Computationally, CaD represents the minus phase learning signal that reflects the expectation representation prior to experiencing the outcome (in addition to the outcome). For integration equations, this cannot be identical to PTau.", Directives: gti.Directives{}, Tag: "def:\"41\" min:\"1\""}},
+		{"ExpAdj", &gti.Field{Name: "ExpAdj", Type: "goki.dev/gosl/v2/slbool.Bool", LocalType: "slbool.Bool", Doc: "if true, adjust dt time constants when using exponential integration equations to compensate for difference between discrete and continuous integration", Directives: gti.Directives{}, Tag: ""}},
+		{"MDt", &gti.Field{Name: "MDt", Type: "float32", LocalType: "float32", Doc: "rate = 1 / tau", Directives: gti.Directives{}, Tag: "view:\"-\" json:\"-\" xml:\"-\" inactive:\"+\""}},
+		{"PDt", &gti.Field{Name: "PDt", Type: "float32", LocalType: "float32", Doc: "rate = 1 / tau", Directives: gti.Directives{}, Tag: "view:\"-\" json:\"-\" xml:\"-\" inactive:\"+\""}},
+		{"DDt", &gti.Field{Name: "DDt", Type: "float32", LocalType: "float32", Doc: "rate = 1 / tau", Directives: gti.Directives{}, Tag: "view:\"-\" json:\"-\" xml:\"-\" inactive:\"+\""}},
+		{"M4Dt", &gti.Field{Name: "M4Dt", Type: "float32", LocalType: "float32", Doc: "4 * rate = 1 / tau", Directives: gti.Directives{}, Tag: "view:\"-\" json:\"-\" xml:\"-\" inactive:\"+\""}},
+		{"P4Dt", &gti.Field{Name: "P4Dt", Type: "float32", LocalType: "float32", Doc: "4 * rate = 1 / tau", Directives: gti.Directives{}, Tag: "view:\"-\" json:\"-\" xml:\"-\" inactive:\"+\""}},
+		{"D4Dt", &gti.Field{Name: "D4Dt", Type: "float32", LocalType: "float32", Doc: "4 * rate = 1 / tau", Directives: gti.Directives{}, Tag: "view:\"-\" json:\"-\" xml:\"-\" inactive:\"+\""}},
+		{"pad", &gti.Field{Name: "pad", Type: "int32", LocalType: "int32", Doc: "", Directives: gti.Directives{}, Tag: ""}},
+ }),
+ Embeds: ordmap.Make([]ordmap.KeyVal[string, *gti.Field]{}),
+ Methods: ordmap.Make([]ordmap.KeyVal[string, *gti.Method]{}),
+})
+
+var _ = gti.AddType(&gti.Type{
+ Name: "github.com/emer/axon/kinase.CaParams",
+ ShortName: "kinase.CaParams",
+ IDName: "ca-params",
+ Doc: "CaParams has rate constants for integrating spike-driven Ca calcium\nat different time scales, including final CaP = CaMKII and CaD = DAPK1\ntimescales for LTP potentiation vs. LTD depression factors.",
+ Directives: gti.Directives{
+		&gti.Directive{Tool: "gti", Directive: "add", Args: []string{}},
+ },
+ Fields: ordmap.Make([]ordmap.KeyVal[string, *gti.Field]{
+		{"SpikeG", &gti.Field{Name: "SpikeG", Type: "float32", LocalType: "float32", Doc: "spiking gain factor for SynSpk learning rule variants. This alters the overall range of values, keeping them in roughly the unit scale, and affects effective learning rate.", Directives: gti.Directives{}, Tag: "def:\"12\""}},
+		{"MaxISI", &gti.Field{Name: "MaxISI", Type: "int32", LocalType: "int32", Doc: "maximum ISI for integrating in Opt mode -- above that just set to 0", Directives: gti.Directives{}, Tag: "def:\"100\""}},
+		{"pad", &gti.Field{Name: "pad", Type: "int32", LocalType: "int32", Doc: "", Directives: gti.Directives{}, Tag: ""}},
+		{"Dt", &gti.Field{Name: "Dt", Type: "github.com/emer/axon/kinase.CaDtParams", LocalType: "CaDtParams", Doc: "time constants for integrating at M, P, and D cascading levels", Directives: gti.Directives{}, Tag: "view:\"inline\""}},
+ }),
+ Embeds: ordmap.Make([]ordmap.KeyVal[string, *gti.Field]{}),
+ Methods: ordmap.Make([]ordmap.KeyVal[string, *gti.Method]{}),
+})
+
+var _ = gti.AddType(&gti.Type{
+ Name: "github.com/emer/axon/kinase.Rules",
+ ShortName: "kinase.Rules",
+ IDName: "rules",
+ Doc: "Rules are different options for Kinase-based learning rules\nThese are now implemented using separate Prjn types in kinasex",
+ Directives: gti.Directives{
+		&gti.Directive{Tool: "go", Directive: "generate", Args: []string{"goki", "generate", "-add-types"}},
+		&gti.Directive{Tool: "enums", Directive: "enum", Args: []string{}},
+ },
+
+ Methods: ordmap.Make([]ordmap.KeyVal[string, *gti.Method]{}),
+})
diff --git a/kinase/params.go b/kinase/params.go
index 6b986b33e..36e5d012a 100644
--- a/kinase/params.go
+++ b/kinase/params.go
@@ -5,8 +5,8 @@
package kinase
import (
- "github.com/goki/gosl/slbool"
- "github.com/goki/mat32"
+ "goki.dev/gosl/v2/slbool"
+ "goki.dev/mat32/v2"
)
//gosl: start kinase
@@ -14,37 +14,37 @@ import (
// CaDtParams has rate constants for integrating Ca calcium
// at different time scales, including final CaP = CaMKII and CaD = DAPK1
// timescales for LTP potentiation vs. LTD depression factors.
-type CaDtParams struct {
+type CaDtParams struct { //gti:add
- // [def: 2,5] [min: 1] CaM (calmodulin) time constant in cycles (msec) -- for synaptic-level integration this integrates on top of Ca signal from send->CaSyn * recv->CaSyn, each of which are typically integrated with a 30 msec Tau.
- MTau float32 `def:"2,5" min:"1" desc:"CaM (calmodulin) time constant in cycles (msec) -- for synaptic-level integration this integrates on top of Ca signal from send->CaSyn * recv->CaSyn, each of which are typically integrated with a 30 msec Tau."`
+ // CaM (calmodulin) time constant in cycles (msec) -- for synaptic-level integration this integrates on top of Ca signal from send->CaSyn * recv->CaSyn, each of which are typically integrated with a 30 msec Tau.
+ MTau float32 `def:"2,5" min:"1"`
- // [def: 39] [min: 1] LTP spike-driven Ca factor (CaP) time constant in cycles (msec), simulating CaMKII in the Kinase framework, with 40 on top of MTau roughly tracking the biophysical rise time. Computationally, CaP represents the plus phase learning signal that reflects the most recent past information.
- PTau float32 `def:"39" min:"1" desc:"LTP spike-driven Ca factor (CaP) time constant in cycles (msec), simulating CaMKII in the Kinase framework, with 40 on top of MTau roughly tracking the biophysical rise time. Computationally, CaP represents the plus phase learning signal that reflects the most recent past information."`
+ // LTP spike-driven Ca factor (CaP) time constant in cycles (msec), simulating CaMKII in the Kinase framework, with 40 on top of MTau roughly tracking the biophysical rise time. Computationally, CaP represents the plus phase learning signal that reflects the most recent past information.
+ PTau float32 `def:"39" min:"1"`
- // [def: 41] [min: 1] LTD spike-driven Ca factor (CaD) time constant in cycles (msec), simulating DAPK1 in Kinase framework. Computationally, CaD represents the minus phase learning signal that reflects the expectation representation prior to experiencing the outcome (in addition to the outcome). For integration equations, this cannot be identical to PTau.
- DTau float32 `def:"41" min:"1" desc:"LTD spike-driven Ca factor (CaD) time constant in cycles (msec), simulating DAPK1 in Kinase framework. Computationally, CaD represents the minus phase learning signal that reflects the expectation representation prior to experiencing the outcome (in addition to the outcome). For integration equations, this cannot be identical to PTau."`
+ // LTD spike-driven Ca factor (CaD) time constant in cycles (msec), simulating DAPK1 in Kinase framework. Computationally, CaD represents the minus phase learning signal that reflects the expectation representation prior to experiencing the outcome (in addition to the outcome). For integration equations, this cannot be identical to PTau.
+ DTau float32 `def:"41" min:"1"`
// if true, adjust dt time constants when using exponential integration equations to compensate for difference between discrete and continuous integration
- ExpAdj slbool.Bool `desc:"if true, adjust dt time constants when using exponential integration equations to compensate for difference between discrete and continuous integration"`
+ ExpAdj slbool.Bool
- // [view: -] rate = 1 / tau
- MDt float32 `view:"-" json:"-" xml:"-" inactive:"+" desc:"rate = 1 / tau"`
+ // rate = 1 / tau
+ MDt float32 `view:"-" json:"-" xml:"-" inactive:"+"`
- // [view: -] rate = 1 / tau
- PDt float32 `view:"-" json:"-" xml:"-" inactive:"+" desc:"rate = 1 / tau"`
+ // rate = 1 / tau
+ PDt float32 `view:"-" json:"-" xml:"-" inactive:"+"`
- // [view: -] rate = 1 / tau
- DDt float32 `view:"-" json:"-" xml:"-" inactive:"+" desc:"rate = 1 / tau"`
+ // rate = 1 / tau
+ DDt float32 `view:"-" json:"-" xml:"-" inactive:"+"`
- // [view: -] 4 * rate = 1 / tau
- M4Dt float32 `view:"-" json:"-" xml:"-" inactive:"+" desc:"4 * rate = 1 / tau"`
+ // 4 * rate = 1 / tau
+ M4Dt float32 `view:"-" json:"-" xml:"-" inactive:"+"`
- // [view: -] 4 * rate = 1 / tau
- P4Dt float32 `view:"-" json:"-" xml:"-" inactive:"+" desc:"4 * rate = 1 / tau"`
+ // 4 * rate = 1 / tau
+ P4Dt float32 `view:"-" json:"-" xml:"-" inactive:"+"`
- // [view: -] 4 * rate = 1 / tau
- D4Dt float32 `view:"-" json:"-" xml:"-" inactive:"+" desc:"4 * rate = 1 / tau"`
+ // 4 * rate = 1 / tau
+ D4Dt float32 `view:"-" json:"-" xml:"-" inactive:"+"`
pad, pad1 int32
}
@@ -107,18 +107,18 @@ func (kp *CaDtParams) CaAtT(ti int32, caM, caP, caD *float32) {
// CaParams has rate constants for integrating spike-driven Ca calcium
// at different time scales, including final CaP = CaMKII and CaD = DAPK1
// timescales for LTP potentiation vs. LTD depression factors.
-type CaParams struct {
+type CaParams struct { //gti:add
- // [def: 12] spiking gain factor for SynSpk learning rule variants. This alters the overall range of values, keeping them in roughly the unit scale, and affects effective learning rate.
- SpikeG float32 `def:"12" desc:"spiking gain factor for SynSpk learning rule variants. This alters the overall range of values, keeping them in roughly the unit scale, and affects effective learning rate."`
+ // spiking gain factor for SynSpk learning rule variants. This alters the overall range of values, keeping them in roughly the unit scale, and affects effective learning rate.
+ SpikeG float32 `def:"12"`
- // [def: 100] maximum ISI for integrating in Opt mode -- above that just set to 0
- MaxISI int32 `def:"100" desc:"maximum ISI for integrating in Opt mode -- above that just set to 0"`
+ // maximum ISI for integrating in Opt mode -- above that just set to 0
+ MaxISI int32 `def:"100"`
pad, pad1 int32
- // [view: inline] time constants for integrating at M, P, and D cascading levels
- Dt CaDtParams `view:"inline" desc:"time constants for integrating at M, P, and D cascading levels"`
+ // time constants for integrating at M, P, and D cascading levels
+ Dt CaDtParams `view:"inline"`
}
func (kp *CaParams) Defaults() {
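
The MDt/PDt/DDt rate constants above (rate = 1 / tau) drive a cascade in which CaM integrates the spike signal, CaP integrates CaM, and CaD integrates CaP. A minimal sketch of that cascade under the default time constants, for illustration only (the actual package code additionally handles the ExpAdj correction and the 4x-rate variants):

```Go
package main

import "fmt"

func main() {
	mTau, pTau, dTau := float32(5), float32(39), float32(41)
	mDt, pDt, dDt := 1/mTau, 1/pTau, 1/dTau // rate = 1 / tau

	var caM, caP, caD float32
	for t := 0; t < 200; t++ {
		drive := float32(0)
		if t%50 == 0 {
			drive = 12 // SpikeG-scaled spike impulse (def: 12)
		}
		caM += mDt * (drive - caM) // CaM integrates the spike drive
		caP += pDt * (caM - caP)   // CaP (CaMKII, plus phase) integrates CaM
		caD += dDt * (caP - caD)   // CaD (DAPK1, minus phase) integrates CaP
	}
	fmt.Printf("CaM=%.3f CaP=%.3f CaD=%.3f\n", caM, caP, caD)
}
```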
diff --git a/kinase/plot/gtigen.go b/kinase/plot/gtigen.go
new file mode 100644
index 000000000..2a78aed5a
--- /dev/null
+++ b/kinase/plot/gtigen.go
@@ -0,0 +1,39 @@
+// Code generated by "goki generate -add-types"; DO NOT EDIT.
+
+package main
+
+import (
+ "goki.dev/gti"
+ "goki.dev/ordmap"
+)
+
+var _ = gti.AddType(&gti.Type{
+ Name: "main.Sim",
+ ShortName: "main.Sim",
+ IDName: "sim",
+ Doc: "Sim holds the params, table, etc",
+ Directives: gti.Directives{},
+ Fields: ordmap.Make([]ordmap.KeyVal[string, *gti.Field]{
+		{"CaDt", &gti.Field{Name: "CaDt", Type: "github.com/emer/axon/kinase.CaParams", LocalType: "kinase.CaParams", Doc: "Ca time constants", Directives: gti.Directives{}, Tag: "view:\"inline\""}},
+		{"Minit", &gti.Field{Name: "Minit", Type: "float64", LocalType: "float64", Doc: "", Directives: gti.Directives{}, Tag: ""}},
+		{"Pinit", &gti.Field{Name: "Pinit", Type: "float64", LocalType: "float64", Doc: "", Directives: gti.Directives{}, Tag: ""}},
+		{"Dinit", &gti.Field{Name: "Dinit", Type: "float64", LocalType: "float64", Doc: "", Directives: gti.Directives{}, Tag: ""}},
+		{"MdtAdj", &gti.Field{Name: "MdtAdj", Type: "float64", LocalType: "float64", Doc: "adjustment to dt to account for discrete time updating", Directives: gti.Directives{}, Tag: "def:\"0,0.11\""}},
+		{"PdtAdj", &gti.Field{Name: "PdtAdj", Type: "float64", LocalType: "float64", Doc: "adjustment to dt to account for discrete time updating", Directives: gti.Directives{}, Tag: "def:\"0,0.03\""}},
+		{"DdtAdj", &gti.Field{Name: "DdtAdj", Type: "float64", LocalType: "float64", Doc: "adjustment to dt to account for discrete time updating", Directives: gti.Directives{}, Tag: "def:\"0,0.03\""}},
+		{"TimeSteps", &gti.Field{Name: "TimeSteps", Type: "int", LocalType: "int", Doc: "number of time steps", Directives: gti.Directives{}, Tag: ""}},
+		{"Table", &gti.Field{Name: "Table", Type: "*goki.dev/etable/v2/etable.Table", LocalType: "*etable.Table", Doc: "table for plot", Directives: gti.Directives{}, Tag: "view:\"no-inline\""}},
+		{"Plot", &gti.Field{Name: "Plot", Type: "*goki.dev/etable/v2/eplot.Plot2D", LocalType: "*eplot.Plot2D", Doc: "the plot", Directives: gti.Directives{}, Tag: "view:\"-\""}},
+		{"TimeTable", &gti.Field{Name: "TimeTable", Type: "*goki.dev/etable/v2/etable.Table", LocalType: "*etable.Table", Doc: "table for plot", Directives: gti.Directives{}, Tag: "view:\"no-inline\""}},
+		{"TimePlot", &gti.Field{Name: "TimePlot", Type: "*goki.dev/etable/v2/eplot.Plot2D", LocalType: "*eplot.Plot2D", Doc: "the plot", Directives: gti.Directives{}, Tag: "view:\"-\""}},
+ }),
+ Embeds: ordmap.Make([]ordmap.KeyVal[string, *gti.Field]{}),
+ Methods: ordmap.Make([]ordmap.KeyVal[string, *gti.Method]{
+		{"Run", &gti.Method{Name: "Run", Doc: "Run runs the equation.", Directives: gti.Directives{
+			&gti.Directive{Tool: "gti", Directive: "add", Args: []string{}},
+		}, Args: ordmap.Make([]ordmap.KeyVal[string, *gti.Field]{}), Returns: ordmap.Make([]ordmap.KeyVal[string, *gti.Field]{})}},
+		{"TimeRun", &gti.Method{Name: "TimeRun", Doc: "TimeRun runs the equation over time.", Directives: gti.Directives{
+			&gti.Directive{Tool: "gti", Directive: "add", Args: []string{}},
+ }, Args: ordmap.Make([]ordmap.KeyVal[string, *gti.Field]{}), Returns: ordmap.Make([]ordmap.KeyVal[string, *gti.Field]{})}},
+ }),
+})
diff --git a/kinase/plot/synca_plot.go b/kinase/plot/synca_plot.go
index 033021ac3..5889db84a 100644
--- a/kinase/plot/synca_plot.go
+++ b/kinase/plot/synca_plot.go
@@ -5,33 +5,32 @@
// synca_plot plots kinase SynCa update equations
package main
+//go:generate goki generate -add-types
+
import (
"math"
"strconv"
"github.com/emer/axon/kinase"
- "github.com/emer/etable/eplot"
- "github.com/emer/etable/etable"
- "github.com/emer/etable/etensor"
- _ "github.com/emer/etable/etview" // include to get gui views
- "github.com/goki/gi/gi"
- "github.com/goki/gi/gimain"
- "github.com/goki/gi/giv"
- _ "github.com/goki/gosl/slboolview" // ditto
- "github.com/goki/ki/ki"
- "github.com/goki/mat32"
+ "goki.dev/etable/v2/eplot"
+ "goki.dev/etable/v2/etable"
+ "goki.dev/etable/v2/etensor"
+ _ "goki.dev/etable/v2/etview" // include to get gui views
+ "goki.dev/gi/v2/gi"
+ "goki.dev/gi/v2/gimain"
+ "goki.dev/gi/v2/giv"
+ _ "goki.dev/gosl/v2/slboolview" // ditto
+ "goki.dev/icons"
)
-func main() {
- TheSim.Config()
- gimain.Main(func() { // this starts gui -- requires valid OpenGL display connection (e.g., X11)
- guirun()
- })
-}
+func main() { gimain.Run(app) }
-func guirun() {
- win := TheSim.ConfigGui()
- win.StartEventLoop()
+func app() {
+ sim := &Sim{}
+ sim.Config()
+ sim.Run()
+ b := sim.ConfigGUI()
+ b.NewWindow().Run().Wait()
}
// LogPrec is precision for saving float values in logs
@@ -40,46 +39,37 @@ const LogPrec = 4
// Sim holds the params, table, etc
type Sim struct {
- // [view: inline] Ca time constants
- CaDt kinase.CaParams `view:"inline" desc:"Ca time constants"`
+ // Ca time constants
+ CaDt kinase.CaParams `view:"inline"`
Minit float64
Pinit float64
Dinit float64
- // [def: 0,0.11] adjustment to dt to account for discrete time updating
- MdtAdj float64 `def:"0,0.11" desc:"adjustment to dt to account for discrete time updating"`
+ // adjustment to dt to account for discrete time updating
+ MdtAdj float64 `def:"0,0.11"`
- // [def: 0.0.03] adjustment to dt to account for discrete time updating
- PdtAdj float64 `def:"0.0.03" desc:"adjustment to dt to account for discrete time updating"`
+ // adjustment to dt to account for discrete time updating
+	PdtAdj float64 `def:"0,0.03"`
- // [def: 0.0.03] adjustment to dt to account for discrete time updating
- DdtAdj float64 `def:"0.0.03" desc:"adjustment to dt to account for discrete time updating"`
+ // adjustment to dt to account for discrete time updating
+	DdtAdj float64 `def:"0,0.03"`
// number of time steps
- TimeSteps int `desc:"number of time steps"`
+ TimeSteps int
- // [view: no-inline] table for plot
- Table *etable.Table `view:"no-inline" desc:"table for plot"`
+ // table for plot
+ Table *etable.Table `view:"no-inline"`
- // [view: -] the plot
- Plot *eplot.Plot2D `view:"-" desc:"the plot"`
+ // the plot
+ Plot *eplot.Plot2D `view:"-"`
- // [view: no-inline] table for plot
- TimeTable *etable.Table `view:"no-inline" desc:"table for plot"`
+ // table for plot
+ TimeTable *etable.Table `view:"no-inline"`
- // [view: -] the plot
- TimePlot *eplot.Plot2D `view:"-" desc:"the plot"`
-
- // [view: -] main GUI window
- Win *gi.Window `view:"-" desc:"main GUI window"`
-
- // [view: -] the master toolbar
- ToolBar *gi.ToolBar `view:"-" desc:"the master toolbar"`
+ // the plot
+ TimePlot *eplot.Plot2D `view:"-"`
}
-// TheSim is the overall state for this simulation
-var TheSim Sim
-
// Config configures all the elements using the standard functions
func (ss *Sim) Config() {
ss.CaDt.Defaults()
@@ -102,7 +92,7 @@ func (ss *Sim) Update() {
}
// Run runs the equation.
-func (ss *Sim) Run() {
+func (ss *Sim) Run() { //gti:add
ss.Update()
dt := ss.Table
@@ -161,7 +151,9 @@ func (ss *Sim) Run() {
di += float64(ss.CaDt.Dt.DDt) * (pi - di)
}
- ss.Plot.Update()
+ if ss.Plot != nil {
+ ss.Plot.UpdatePlot()
+ }
}
func (ss *Sim) ConfigTable(dt *etable.Table) {
@@ -205,7 +197,7 @@ func (ss *Sim) ConfigPlot(plt *eplot.Plot2D, dt *etable.Table) *eplot.Plot2D {
/////////////////////////////////////////////////////////////////
// TimeRun runs the equation over time.
-func (ss *Sim) TimeRun() {
+func (ss *Sim) TimeRun() { //gti:add
ss.Update()
/*
dt := ss.TimeTable
@@ -256,73 +248,28 @@ func (ss *Sim) ConfigTimePlot(plt *eplot.Plot2D, dt *etable.Table) *eplot.Plot2D
return plt
}
-// ConfigGui configures the GoGi gui interface for this simulation,
-func (ss *Sim) ConfigGui() *gi.Window {
- width := 1600
- height := 1200
-
- // gi.WinEventTrace = true
-
- gi.SetAppName("synca_plot")
- gi.SetAppAbout(`This plots an equation. See emergent on GitHub.`)
-
- win := gi.NewMainWindow("syncaplot", "Plotting Equations", width, height)
- ss.Win = win
-
- vp := win.WinViewport2D()
- updt := vp.UpdateStart()
+// ConfigGUI configures the GoGi gui interface for this simulation.
+func (ss *Sim) ConfigGUI() *gi.Body {
+ b := gi.NewAppBody("synca_plot").SetTitle("Plotting Equations")
- mfr := win.SetMainFrame()
-
- tbar := gi.AddNewToolBar(mfr, "tbar")
- tbar.SetStretchMaxWidth()
- ss.ToolBar = tbar
-
- split := gi.AddNewSplitView(mfr, "split")
- split.Dim = mat32.X
- split.SetStretchMax()
-
- sv := giv.AddNewStructView(split, "sv")
+ split := gi.NewSplits(b, "split")
+ sv := giv.NewStructView(split, "sv")
sv.SetStruct(ss)
- tv := gi.AddNewTabView(split, "tv")
+ tv := gi.NewTabs(split, "tv")
- plt := tv.AddNewTab(eplot.KiT_Plot2D, "T Exp Plot").(*eplot.Plot2D)
- ss.Plot = ss.ConfigPlot(plt, ss.Table)
+ ss.Plot = eplot.NewSubPlot(tv.NewTab("T Exp Plot"))
+ ss.ConfigPlot(ss.Plot, ss.Table)
- plt = tv.AddNewTab(eplot.KiT_Plot2D, "TimePlot").(*eplot.Plot2D)
- ss.TimePlot = ss.ConfigTimePlot(plt, ss.TimeTable)
+ ss.TimePlot = eplot.NewSubPlot(tv.NewTab("TimePlot"))
+ ss.ConfigTimePlot(ss.TimePlot, ss.TimeTable)
split.SetSplits(.3, .7)
- tbar.AddAction(gi.ActOpts{Label: "T Exp Run", Icon: "update", Tooltip: "Run the equations and plot results."}, win.This(), func(recv, send ki.Ki, sig int64, data interface{}) {
- ss.Run()
- vp.SetNeedsFullRender()
- })
-
- tbar.AddAction(gi.ActOpts{Label: "Time Run", Icon: "update", Tooltip: "Run the equations and plot results."}, win.This(), func(recv, send ki.Ki, sig int64, data interface{}) {
- ss.TimeRun()
- vp.SetNeedsFullRender()
+ b.AddAppBar(func(tb *gi.Toolbar) {
+ giv.NewFuncButton(tb, ss.Run).SetIcon(icons.PlayArrow)
+ giv.NewFuncButton(tb, ss.TimeRun).SetIcon(icons.PlayArrow)
})
- tbar.AddAction(gi.ActOpts{Label: "README", Icon: "file-markdown", Tooltip: "Opens your browser on the README file that contains instructions for how to run this model."}, win.This(),
- func(recv, send ki.Ki, sig int64, data interface{}) {
- gi.OpenURL("https://github.com/emer/axon/blob/master/chans/synca_plot/README.md")
- })
-
- vp.UpdateEndNoSig(updt)
-
- // main menu
- appnm := gi.AppName()
- mmen := win.MainMenu
- mmen.ConfigMenus([]string{appnm, "File", "Edit", "Window"})
-
- amen := win.MainMenu.ChildByName(appnm, 0).(*gi.Action)
- amen.Menu.AddAppMenu(win)
-
- emen := win.MainMenu.ChildByName("Edit", 1).(*gi.Action)
- emen.Menu.AddCopyCutPaste(win)
-
- win.MainMenuUpdated()
- return win
+ return b
}
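
The rewrite above follows the goki v2 pattern: a Body with an AppBar of FuncButtons replaces the v1 window, toolbar, and main-menu boilerplate. A minimal skeleton assembled only from calls that appear in this file's new code; treat it as an illustrative sketch rather than canonical goki documentation:

```Go
package main

import (
	"goki.dev/gi/v2/gi"
	"goki.dev/gi/v2/gimain"
	"goki.dev/gi/v2/giv"
	"goki.dev/icons"
)

// Sim is a stand-in for the simulation state.
type Sim struct{}

// Run would run the simulation; a FuncButton exposes it in the toolbar.
func (ss *Sim) Run() {} //gti:add

func app() {
	ss := &Sim{}
	b := gi.NewAppBody("example_plot").SetTitle("Plotting Equations")
	b.AddAppBar(func(tb *gi.Toolbar) {
		giv.NewFuncButton(tb, ss.Run).SetIcon(icons.PlayArrow)
	})
	b.NewWindow().Run().Wait()
}

func main() { gimain.Run(app) }
```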
diff --git a/kinase/rules.go b/kinase/rules.go
index ed73b2438..ce2d414b8 100644
--- a/kinase/rules.go
+++ b/kinase/rules.go
@@ -4,18 +4,11 @@
package kinase
-import "github.com/goki/ki/kit"
+//go:generate goki generate -add-types
// Rules are different options for Kinase-based learning rules
// These are now implemented using separate Prjn types in kinasex
-type Rules int32
-
-//go:generate stringer -type=Rules
-
-var KiT_Rules = kit.Enums.AddEnum(RulesN, kit.NotBitFlag, nil)
-
-func (ev Rules) MarshalJSON() ([]byte, error) { return kit.EnumMarshalJSON(ev) }
-func (ev *Rules) UnmarshalJSON(b []byte) error { return kit.EnumUnmarshalJSON(ev, b) }
+type Rules int32 //enums:enum
// The different versions of Kinase learning rules
const (
@@ -54,6 +47,4 @@ const (
// It exhibits strong sensitivity to final spikes and thus
// high levels of variance.
NeurSpkTheta
-
- RulesN
)
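
With this change, all of the enum support code comes from two directives: the package-level go:generate line and the //enums:enum comment on the type. Running `goki generate -add-types` (or `go generate`) emits the enumgen.go shown earlier, superseding both the stringer output deleted below and the old kit registration. The resulting declaration pattern, sketched compactly (constant comments condensed from the full doc comments):

```Go
package kinase

//go:generate goki generate -add-types

// Rules are different options for Kinase-based learning rules.
type Rules int32 //enums:enum

const (
	SynSpkCont   Rules = iota // continuous synaptic-level, spike-driven Ca
	SynNMDACont               // continuous, with NMDA-driven Ca
	SynSpkTheta               // synaptic-level, DWt at theta-cycle boundaries
	NeurSpkTheta              // neuron-level spike-driven Ca (original XCAL / CHL)
)
```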
diff --git a/kinase/rules_string.go b/kinase/rules_string.go
deleted file mode 100644
index 5af5b4d6b..000000000
--- a/kinase/rules_string.go
+++ /dev/null
@@ -1,42 +0,0 @@
-// Code generated by "stringer -type=Rules"; DO NOT EDIT.
-
-package kinase
-
-import (
- "errors"
- "strconv"
-)
-
-var _ = errors.New("dummy error")
-
-func _() {
- // An "invalid array index" compiler error signifies that the constant values have changed.
- // Re-run the stringer command to generate them again.
- var x [1]struct{}
- _ = x[SynSpkCont-0]
- _ = x[SynNMDACont-1]
- _ = x[SynSpkTheta-2]
- _ = x[NeurSpkTheta-3]
- _ = x[RulesN-4]
-}
-
-const _Rules_name = "SynSpkContSynNMDAContSynSpkThetaNeurSpkThetaRulesN"
-
-var _Rules_index = [...]uint8{0, 10, 21, 32, 44, 50}
-
-func (i Rules) String() string {
- if i < 0 || i >= Rules(len(_Rules_index)-1) {
- return "Rules(" + strconv.FormatInt(int64(i), 10) + ")"
- }
- return _Rules_name[_Rules_index[i]:_Rules_index[i+1]]
-}
-
-func (i *Rules) FromString(s string) error {
- for j := 0; j < len(_Rules_index)-1; j++ {
- if s == _Rules_name[_Rules_index[j]:_Rules_index[j+1]] {
- *i = Rules(j)
- return nil
- }
- }
- return errors.New("String: " + s + " is not a valid option for type: Rules")
-}
diff --git a/kinasex/contprjn.go b/kinasex/contprjn.go
index 270a5acbb..227810acb 100644
--- a/kinasex/contprjn.go
+++ b/kinasex/contprjn.go
@@ -15,19 +15,19 @@ import (
type KinContParams struct {
// which learning rule to use -- can select std SynSpkTheta or Cont variants that are only supported in this specialized Prjn
- Rule kinase.Rules `desc:"which learning rule to use -- can select std SynSpkTheta or Cont variants that are only supported in this specialized Prjn"`
+ Rule kinase.Rules
- // [def: 0.8] gain factor for SynNMDACont learning rule variant. This factor is set to generally equate calcium levels and learning rate with SynSpk variants. In some models, 2 is the best, while others require higher values.
- NMDAG float32 `def:"0.8" desc:"gain factor for SynNMDACont learning rule variant. This factor is set to generally equate calcium levels and learning rate with SynSpk variants. In some models, 2 is the best, while others require higher values."`
+ // gain factor for SynNMDACont learning rule variant. This factor is set to generally equate calcium levels and learning rate with SynSpk variants. In some models, 2 is the best, while others require higher values.
+ NMDAG float32 `def:"0.8"`
// number of msec (cycles) after either a pre or postsynaptic spike, when the competitive binding of CaMKII vs. DAPK1 to NMDA N2B takes place, generating the provisional weight change value that can then turn into the actual weight change DWt
- TWindow int `desc:"number of msec (cycles) after either a pre or postsynaptic spike, when the competitive binding of CaMKII vs. DAPK1 to NMDA N2B takes place, generating the provisional weight change value that can then turn into the actual weight change DWt"`
+ TWindow int
- // [def: 0.5] proportion of CaDMax below which DWt is updated -- when CaD (DAPK1) decreases this much off of its recent peak level, then the residual CaMKII relative balance (represented by TDWt) drives AMPAR trafficking and longer timescale synaptic plasticity changes
- DMaxPct float32 `def:"0.5" desc:"proportion of CaDMax below which DWt is updated -- when CaD (DAPK1) decreases this much off of its recent peak level, then the residual CaMKII relative balance (represented by TDWt) drives AMPAR trafficking and longer timescale synaptic plasticity changes"`
+ // proportion of CaDMax below which DWt is updated -- when CaD (DAPK1) decreases this much off of its recent peak level, then the residual CaMKII relative balance (represented by TDWt) drives AMPAR trafficking and longer timescale synaptic plasticity changes
+ DMaxPct float32 `def:"0.5"`
- // [def: 1,0.93,1.05] scaling factor on CaD as it enters into the learning rule, to compensate for systematic differences in CaD vs. CaP levels (only potentially needed for SynNMDACa)
- DScale float32 `def:"1,0.93,1.05" desc:"scaling factor on CaD as it enters into the learning rule, to compensate for systematic differences in CaD vs. CaP levels (only potentially needed for SynNMDACa)"`
+ // scaling factor on CaD as it enters into the learning rule, to compensate for systematic differences in CaD vs. CaP levels (only potentially needed for SynNMDACa)
+ DScale float32 `def:"1,0.93,1.05"`
}
func (kp *KinContParams) Defaults() {
@@ -87,11 +87,11 @@ func (kp *KinContParams) DWtFmTDWt(sy *Synapse, lr float32) bool {
type ContPrjn struct {
axon.Prjn // access as .Prjn
- // [view: inline] kinase continuous learning rule params
- Cont KinContParams `view:"inline" desc:"kinase continuous learning rule params"`
+ // kinase continuous learning rule params
+ Cont KinContParams `view:"inline"`
// continuous synaptic state values, ordered by the sending layer units which owns them -- one-to-one with SendConIdx array
- ContSyns []ContSyn `desc:"continuous synaptic state values, ordered by the sending layer units which owns them -- one-to-one with SendConIdx array"`
+ ContSyns []ContSyn
}
func (pj *ContPrjn) Defaults() {
diff --git a/kinasex/contsyn.go b/kinasex/contsyn.go
index 2681ab789..beb1a7864 100644
--- a/kinasex/contsyn.go
+++ b/kinasex/contsyn.go
@@ -4,16 +4,16 @@
package kinasex
-import "github.com/goki/mat32"
+import "goki.dev/mat32/v2"
// ContSyn holds extra synaptic state for continuous learning
type ContSyn struct {
// transitional, temporary DWt value, which is updated in a window after synaptic activity when Ca levels are still elevated, and added to the DWt value after a longer break of spiking where there is enough time for CaMKII driven AMPA receptor trafficking to take place
- TDWt float32 `desc:"transitional, temporary DWt value, which is updated in a window after synaptic activity when Ca levels are still elevated, and added to the DWt value after a longer break of spiking where there is enough time for CaMKII driven AMPA receptor trafficking to take place"`
+ TDWt float32
// maximum CaD value since last DWt change -- DWt occurs when current CaD has decreased by a given proportion from this recent peak
- CaDMax float32 `desc:"maximum CaD value since last DWt change -- DWt occurs when current CaD has decreased by a given proportion from this recent peak"`
+ CaDMax float32
}
// VarByName returns synapse variable by name
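
The fields above define a concrete gating rule for continuous learning: TDWt accumulates while Ca is elevated, CaDMax tracks the running peak of CaD, and the committed DWt update fires once CaD has dropped below DMaxPct of that peak. A simplified, hypothetical sketch of that gating (not the package's actual DWtFmTDWt code):

```Go
package main

import "fmt"

type contSyn struct {
	TDWt   float32 // transitional, temporary DWt
	CaDMax float32 // max CaD since last DWt change
	DWt    float32 // committed weight change
}

// update tracks the CaD peak and commits TDWt into DWt once CaD has
// decayed below the dMaxPct proportion of that recent peak.
func (sy *contSyn) update(caD, dMaxPct float32) {
	if caD > sy.CaDMax {
		sy.CaDMax = caD
	}
	if sy.CaDMax > 0 && caD < dMaxPct*sy.CaDMax {
		sy.DWt += sy.TDWt
		sy.TDWt = 0
		sy.CaDMax = 0 // start tracking a new peak
	}
}

func main() {
	sy := &contSyn{TDWt: 0.1}
	for _, caD := range []float32{0.2, 0.5, 0.4, 0.3, 0.2} {
		sy.update(caD, 0.5) // DMaxPct def: 0.5
	}
	fmt.Println(sy.DWt) // TDWt committed once CaD fell below half its peak
}
```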
diff --git a/nxx1/gtigen.go b/nxx1/gtigen.go
new file mode 100644
index 000000000..318885a8b
--- /dev/null
+++ b/nxx1/gtigen.go
@@ -0,0 +1,34 @@
+// Code generated by "goki generate -add-types"; DO NOT EDIT.
+
+package nxx1
+
+import (
+ "goki.dev/gti"
+ "goki.dev/ordmap"
+)
+
+var _ = gti.AddType(&gti.Type{
+ Name: "github.com/emer/axon/nxx1.Params",
+ ShortName: "nxx1.Params",
+ IDName: "params",
+	Doc:       "Params are the Noisy X/(X+1) rate-coded activation function parameters.\nThis function well-characterizes the neural response function empirically,\nas a saturating sigmoid-like nonlinear response with an initial largely-linear regime.\nThe basic x/(x+1) sigmoid function is convolved with a gaussian noise kernel to produce\na better approximation of the effects of noise on neural firing -- the main effect is\nto create a continuous graded early level of firing even slightly below threshold, softening\nthe otherwise hard transition to firing at threshold.\nA hand-optimized piece-wise function approximation is used to generate the NXX1 function\ninstead of requiring a lookup table of the gaussian convolution. This is much easier\nto use across a range of computational platforms including GPUs, and produces very similar\noverall values.",
+ Directives: gti.Directives{},
+ Fields: ordmap.Make([]ordmap.KeyVal[string, *gti.Field]{
+		{"Thr", &gti.Field{Name: "Thr", Type: "float32", LocalType: "float32", Doc: "threshold value Theta (Q) for firing output activation (.5 is the more accurate value based on AdEx biological parameters and normalization)", Directives: gti.Directives{}, Tag: "def:\"0.5\""}},
+		{"Gain", &gti.Field{Name: "Gain", Type: "float32", LocalType: "float32", Doc: "gain (gamma) of the rate-coded activation functions -- 100 is default, 80 works better for larger models, and 20 is closer to the actual spiking behavior of the AdEx model -- use lower values for more graded signals, generally in lower input/sensory layers of the network", Directives: gti.Directives{}, Tag: "def:\"80,100,40,20\" min:\"0\""}},
+		{"NVar", &gti.Field{Name: "NVar", Type: "float32", LocalType: "float32", Doc: "variance of the Gaussian noise kernel for convolving with XX1 in NOISY_XX1 and NOISY_LINEAR -- determines the level of curvature of the activation function near the threshold -- increase for more graded responding there -- note that this is not actual stochastic noise, just constant convolved gaussian smoothness to the activation function", Directives: gti.Directives{}, Tag: "def:\"0.005,0.01\" min:\"0\""}},
+		{"VmActThr", &gti.Field{Name: "VmActThr", Type: "float32", LocalType: "float32", Doc: "threshold on activation below which the direct vm - act.thr is used -- this should be low -- once it gets active should use net - g_e_thr ge-linear dynamics (gelin)", Directives: gti.Directives{}, Tag: "def:\"0.01\""}},
+		{"SigMult", &gti.Field{Name: "SigMult", Type: "float32", LocalType: "float32", Doc: "multiplier on sigmoid used for computing values for net < thr", Directives: gti.Directives{}, Tag: "def:\"0.33\" view:\"-\" json:\"-\" xml:\"-\""}},
+		{"SigMultPow", &gti.Field{Name: "SigMultPow", Type: "float32", LocalType: "float32", Doc: "power for computing sig_mult_eff as function of gain * nvar", Directives: gti.Directives{}, Tag: "def:\"0.8\" view:\"-\" json:\"-\" xml:\"-\""}},
+		{"SigGain", &gti.Field{Name: "SigGain", Type: "float32", LocalType: "float32", Doc: "gain multiplier on (net - thr) for sigmoid used for computing values for net < thr", Directives: gti.Directives{}, Tag: "def:\"3\" view:\"-\" json:\"-\" xml:\"-\""}},
+		{"InterpRange", &gti.Field{Name: "InterpRange", Type: "float32", LocalType: "float32", Doc: "interpolation range above zero to use interpolation", Directives: gti.Directives{}, Tag: "def:\"0.01\" view:\"-\" json:\"-\" xml:\"-\""}},
+		{"GainCorRange", &gti.Field{Name: "GainCorRange", Type: "float32", LocalType: "float32", Doc: "range in units of nvar over which to apply gain correction to compensate for convolution", Directives: gti.Directives{}, Tag: "def:\"10\" view:\"-\" json:\"-\" xml:\"-\""}},
+		{"GainCor", &gti.Field{Name: "GainCor", Type: "float32", LocalType: "float32", Doc: "gain correction multiplier -- how much to correct gains", Directives: gti.Directives{}, Tag: "def:\"0.1\" view:\"-\" json:\"-\" xml:\"-\""}},
+		{"SigGainNVar", &gti.Field{Name: "SigGainNVar", Type: "float32", LocalType: "float32", Doc: "sig_gain / nvar", Directives: gti.Directives{}, Tag: "view:\"-\" json:\"-\" xml:\"-\""}},
+		{"SigMultEff", &gti.Field{Name: "SigMultEff", Type: "float32", LocalType: "float32", Doc: "overall multiplier on sigmoidal component for values below threshold = sig_mult * pow(gain * nvar, sig_mult_pow)", Directives: gti.Directives{}, Tag: "view:\"-\" json:\"-\" xml:\"-\""}},
+		{"SigValAt0", &gti.Field{Name: "SigValAt0", Type: "float32", LocalType: "float32", Doc: "0.5 * sig_mult_eff -- used for interpolation portion", Directives: gti.Directives{}, Tag: "view:\"-\" json:\"-\" xml:\"-\""}},
+		{"InterpVal", &gti.Field{Name: "InterpVal", Type: "float32", LocalType: "float32", Doc: "function value at interp_range - sig_val_at_0 -- for interpolation", Directives: gti.Directives{}, Tag: "view:\"-\" json:\"-\" xml:\"-\""}},
+ }),
+ Embeds: ordmap.Make([]ordmap.KeyVal[string, *gti.Field]{}),
+ Methods: ordmap.Make([]ordmap.KeyVal[string, *gti.Method]{}),
+})
diff --git a/nxx1/nxx1.go b/nxx1/nxx1.go
index bea1f93ed..2e9d6e0ab 100644
--- a/nxx1/nxx1.go
+++ b/nxx1/nxx1.go
@@ -19,8 +19,10 @@ overall values.
*/
package nxx1
+//go:generate goki generate -add-types
+
import (
- "github.com/goki/mat32"
+ "goki.dev/mat32/v2"
)
// Params are the Noisy X/(X+1) rate-coded activation function parameters.
@@ -36,47 +38,47 @@ import (
// overall values.
type Params struct {
- // [def: 0.5] threshold value Theta (Q) for firing output activation (.5 is more accurate value based on AdEx biological parameters and normalization
- Thr float32 `def:"0.5" desc:"threshold value Theta (Q) for firing output activation (.5 is more accurate value based on AdEx biological parameters and normalization"`
+	// threshold value Theta (Q) for firing output activation (.5 is the more accurate value based on AdEx biological parameters and normalization)
+ Thr float32 `def:"0.5"`
- // [def: 80,100,40,20] [min: 0] gain (gamma) of the rate-coded activation functions -- 100 is default, 80 works better for larger models, and 20 is closer to the actual spiking behavior of the AdEx model -- use lower values for more graded signals, generally in lower input/sensory layers of the network
- Gain float32 `def:"80,100,40,20" min:"0" desc:"gain (gamma) of the rate-coded activation functions -- 100 is default, 80 works better for larger models, and 20 is closer to the actual spiking behavior of the AdEx model -- use lower values for more graded signals, generally in lower input/sensory layers of the network"`
+ // gain (gamma) of the rate-coded activation functions -- 100 is default, 80 works better for larger models, and 20 is closer to the actual spiking behavior of the AdEx model -- use lower values for more graded signals, generally in lower input/sensory layers of the network
+ Gain float32 `def:"80,100,40,20" min:"0"`
- // [def: 0.005,0.01] [min: 0] variance of the Gaussian noise kernel for convolving with XX1 in NOISY_XX1 and NOISY_LINEAR -- determines the level of curvature of the activation function near the threshold -- increase for more graded responding there -- note that this is not actual stochastic noise, just constant convolved gaussian smoothness to the activation function
- NVar float32 `def:"0.005,0.01" min:"0" desc:"variance of the Gaussian noise kernel for convolving with XX1 in NOISY_XX1 and NOISY_LINEAR -- determines the level of curvature of the activation function near the threshold -- increase for more graded responding there -- note that this is not actual stochastic noise, just constant convolved gaussian smoothness to the activation function"`
+ // variance of the Gaussian noise kernel for convolving with XX1 in NOISY_XX1 and NOISY_LINEAR -- determines the level of curvature of the activation function near the threshold -- increase for more graded responding there -- note that this is not actual stochastic noise, just constant convolved gaussian smoothness to the activation function
+ NVar float32 `def:"0.005,0.01" min:"0"`
- // [def: 0.01] threshold on activation below which the direct vm - act.thr is used -- this should be low -- once it gets active should use net - g_e_thr ge-linear dynamics (gelin)
- VmActThr float32 `def:"0.01" desc:"threshold on activation below which the direct vm - act.thr is used -- this should be low -- once it gets active should use net - g_e_thr ge-linear dynamics (gelin)"`
+ // threshold on activation below which the direct vm - act.thr is used -- this should be low -- once it gets active should use net - g_e_thr ge-linear dynamics (gelin)
+ VmActThr float32 `def:"0.01"`
- // [def: 0.33] [view: -] multiplier on sigmoid used for computing values for net < thr
- SigMult float32 `def:"0.33" view:"-" json:"-" xml:"-" desc:"multiplier on sigmoid used for computing values for net < thr"`
+ // multiplier on sigmoid used for computing values for net < thr
+ SigMult float32 `def:"0.33" view:"-" json:"-" xml:"-"`
- // [def: 0.8] [view: -] power for computing sig_mult_eff as function of gain * nvar
- SigMultPow float32 `def:"0.8" view:"-" json:"-" xml:"-" desc:"power for computing sig_mult_eff as function of gain * nvar"`
+ // power for computing sig_mult_eff as function of gain * nvar
+ SigMultPow float32 `def:"0.8" view:"-" json:"-" xml:"-"`
- // [def: 3] [view: -] gain multipler on (net - thr) for sigmoid used for computing values for net < thr
- SigGain float32 `def:"3" view:"-" json:"-" xml:"-" desc:"gain multipler on (net - thr) for sigmoid used for computing values for net < thr"`
+	// gain multiplier on (net - thr) for sigmoid used for computing values for net < thr
+ SigGain float32 `def:"3" view:"-" json:"-" xml:"-"`
- // [def: 0.01] [view: -] interpolation range above zero to use interpolation
- InterpRange float32 `def:"0.01" view:"-" json:"-" xml:"-" desc:"interpolation range above zero to use interpolation"`
+ // interpolation range above zero to use interpolation
+ InterpRange float32 `def:"0.01" view:"-" json:"-" xml:"-"`
- // [def: 10] [view: -] range in units of nvar over which to apply gain correction to compensate for convolution
- GainCorRange float32 `def:"10" view:"-" json:"-" xml:"-" desc:"range in units of nvar over which to apply gain correction to compensate for convolution"`
+ // range in units of nvar over which to apply gain correction to compensate for convolution
+ GainCorRange float32 `def:"10" view:"-" json:"-" xml:"-"`
- // [def: 0.1] [view: -] gain correction multiplier -- how much to correct gains
- GainCor float32 `def:"0.1" view:"-" json:"-" xml:"-" desc:"gain correction multiplier -- how much to correct gains"`
+ // gain correction multiplier -- how much to correct gains
+ GainCor float32 `def:"0.1" view:"-" json:"-" xml:"-"`
- // [view: -] sig_gain / nvar
- SigGainNVar float32 `view:"-" json:"-" xml:"-" desc:"sig_gain / nvar"`
+ // sig_gain / nvar
+ SigGainNVar float32 `view:"-" json:"-" xml:"-"`
- // [view: -] overall multiplier on sigmoidal component for values below threshold = sig_mult * pow(gain * nvar, sig_mult_pow)
- SigMultEff float32 `view:"-" json:"-" xml:"-" desc:"overall multiplier on sigmoidal component for values below threshold = sig_mult * pow(gain * nvar, sig_mult_pow)"`
+ // overall multiplier on sigmoidal component for values below threshold = sig_mult * pow(gain * nvar, sig_mult_pow)
+ SigMultEff float32 `view:"-" json:"-" xml:"-"`
- // [view: -] 0.5 * sig_mult_eff -- used for interpolation portion
- SigValAt0 float32 `view:"-" json:"-" xml:"-" desc:"0.5 * sig_mult_eff -- used for interpolation portion"`
+ // 0.5 * sig_mult_eff -- used for interpolation portion
+ SigValAt0 float32 `view:"-" json:"-" xml:"-"`
- // [view: -] function value at interp_range - sig_val_at_0 -- for interpolation
- InterpVal float32 `view:"-" json:"-" xml:"-" desc:"function value at interp_range - sig_val_at_0 -- for interpolation"`
+ // function value at interp_range - sig_val_at_0 -- for interpolation
+ InterpVal float32 `view:"-" json:"-" xml:"-"`
}
func (xp *Params) Update() {
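
At its core, the function these Params describe is a gain-scaled x/(x+1) saturating nonlinearity applied to the drive in excess of the threshold Thr; the noisy variant smooths the region around threshold via the gaussian-convolved piecewise approximation governed by the SigMult*/Interp* fields. A sketch of just the hard-threshold core, for orientation (the package's noisy version additionally handles the sub-threshold regime):

```Go
package main

import "fmt"

// xx1 returns gain*x / (gain*x + 1) for drive net above threshold thr,
// and 0 below it -- the noiseless core of the NXX1 function.
func xx1(gain, thr, net float32) float32 {
	x := net - thr
	if x <= 0 {
		return 0
	}
	gx := gain * x
	return gx / (gx + 1)
}

func main() {
	for _, net := range []float32{0.4, 0.5, 0.51, 0.6, 1.0} {
		fmt.Printf("net=%.2f act=%.3f\n", net, xx1(100, 0.5, net))
	}
}
```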
diff --git a/nxx1/nxx1_test.go b/nxx1/nxx1_test.go
index 914d3f7a1..b736c5afc 100644
--- a/nxx1/nxx1_test.go
+++ b/nxx1/nxx1_test.go
@@ -9,7 +9,7 @@ package nxx1
import (
"testing"
- "github.com/goki/mat32"
+ "goki.dev/mat32/v2"
)
// TOLERANCE is the numerical difference tolerance for comparing vs. target values
diff --git a/python/LICENSE b/python/LICENSE
deleted file mode 100644
index abdfa70c2..000000000
--- a/python/LICENSE
+++ /dev/null
@@ -1,29 +0,0 @@
-BSD 3-Clause License
-
-Copyright (c) 2018, The emergent Authors
-All rights reserved.
-
-Redistribution and use in source and binary forms, with or without
-modification, are permitted provided that the following conditions are met:
-
-* Redistributions of source code must retain the above copyright notice, this
- list of conditions and the following disclaimer.
-
-* Redistributions in binary form must reproduce the above copyright notice,
- this list of conditions and the following disclaimer in the documentation
- and/or other materials provided with the distribution.
-
-* Neither the name of the copyright holder nor the names of its
- contributors may be used to endorse or promote products derived from
- this software without specific prior written permission.
-
-THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
-AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
-DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
-FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
-DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
-SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
-CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
-OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/python/MANIFEST.in b/python/MANIFEST.in
deleted file mode 100644
index a5ebd87eb..000000000
--- a/python/MANIFEST.in
+++ /dev/null
@@ -1,2 +0,0 @@
-global-include *.so *.py
-global-exclude build.py
diff --git a/python/Makefile b/python/Makefile
deleted file mode 100644
index 220a44d44..000000000
--- a/python/Makefile
+++ /dev/null
@@ -1,50 +0,0 @@
-# Makefile for gopy pkg generation of python bindings to emergent
-# File is generated by gopy (will not be overwritten though)
-# gopy exe -name=leabra -vm=python3 -no-warn -exclude=driver,oswin -main="runtime.LockOSThread(); gimain.Main(func() { GoPyMainRun() })" math/rand github.com/goki/ki/ki github.com/goki/mat32 github.com/goki/gi/units github.com/goki/gi/gi github.com/goki/gi/svg github.com/goki/gi/giv github.com/goki/gi/gi3d github.com/goki/gi/gimain github.com/emer/etable github.com/emer/emergent github.com/emer/leabra/chans github.com/emer/leabra/fffb github.com/emer/leabra/knadapt github.com/emer/leabra/nxx1 github.com/emer/leabra/leabra github.com/emer/leabra/spike github.com/emer/leabra/deep github.com/emer/leabra/hip github.com/emer/leabra/rl github.com/emer/leabra/pbwm github.com/emer/leabra/glong github.com/emer/leabra/pcore github.com/emer/leabra/agate github.com/emer/vision
-
-PYTHON=python3
-PIP=$(PYTHON) -m pip
-
-PBGV=`$(PIP) list | grep PyBindGen`
-
-all: prereq gen
-
-.PHONY: prereq gen all build install install-pkg install-exe clean
-
-prereq:
- @echo "Installing go prerequisites:"
- - go get golang.org/x/tools/cmd/goimports # this installs into ~/go/bin
- - go get github.com/go-python/gopy
- @echo "Installing python prerequisites -- ignore err if already installed:"
- - $(PIP) install -r requirements.txt
- @echo
- @echo "if this fails, you may see errors like this:"
- @echo " Undefined symbols for architecture x86_64:"
- @echo " _PyInit__gi, referenced from:..."
- @echo
-
-install: install-pkg install-exe
-
-# note: it is important that leabra come before deep otherwise deep captures all the common types
-# unfortunately this means that all sub-packages need to be explicitly listed.
-gen:
- gopy exe -name=leabra -vm=python3 -no-warn -exclude=driver,oswin,draw,example,examples,gif,jpeg,png,draw -main="runtime.LockOSThread(); gimain.Main(func() { GoPyMainRun() })" math/rand image github.com/anthonynsimon/bild/transform github.com/goki/ki/ki github.com/goki/ki/kit github.com/goki/mat32 github.com/goki/gi/units github.com/goki/gi/gist github.com/goki/gi/girl github.com/goki/gi/gi github.com/goki/gi/svg github.com/goki/gi/giv github.com/goki/gi/gi3d github.com/goki/gi/gimain github.com/emer/etable github.com/emer/emergent github.com/emer/leabra/chans github.com/emer/leabra/fffb github.com/emer/leabra/knadapt github.com/emer/leabra/nxx1 github.com/emer/leabra/leabra github.com/emer/leabra/spike github.com/emer/leabra/deep github.com/emer/leabra/hip github.com/emer/leabra/rl github.com/emer/leabra/pbwm github.com/emer/leabra/glong github.com/emer/leabra/pcore github.com/emer/leabra/agate github.com/emer/vision github.com/emer/etorch
-
-build:
- $(MAKE) -C leabra build
-
-install-pkg:
- # this does a local install of the package, building the sdist and then directly installing it
- # copy pyside/*.py etc to leabra so these libs will be installed along with rest
- cp pyside/*.py leabra/
- rm -rf dist build */*.egg-info *.egg-info
- $(PYTHON) setup.py sdist
- $(PIP) install dist/*.tar.gz
-
-install-exe:
- # install executable into /usr/local/bin
- cp leabra/pyleabra /usr/local/bin/
-
-clean:
- rm -rf leabra dist build */*.egg-info *.egg-info
-
diff --git a/python/README.md b/python/README.md
deleted file mode 100644
index 30f0f3583..000000000
--- a/python/README.md
+++ /dev/null
@@ -1,43 +0,0 @@
-# Python interface to emergent / Leabra
-
-**These Python interfaces are currently unused**
-
-You can run the Go version of *emergent* via Python, using the [gopy](https://github.com/go-python/gopy) tool that automatically creates Python bindings for Go packages.
-
-See the [GoGi Python README](https://github.com/goki/gi/blob/master/python/README.md) for more details on how the python wrapper works and how to use it for GUI-level functionality. **If you encounter any difficulties with this install, then try doing the install in GoGi first**, and read more about the different install issues there.
-
-See the `.py` versions of various projects in `examples`, and especially in the [Comp Cog Neuro sims](https://github.com/CompCogNeuro/sims), for many examples of Python versions.
-
-See [etable pyet](https://github.com/emer/etable/tree/master/examples/pyet) for example code for converting between the Go `etable.Table` and `numpy`, `torch`, and `pandas` table structures. All of the converted projects rely on `etable` because it provides a complete GUI interface for viewing and manipulating the data, but it is easy to convert any of these tables into Python-native formats (and copy back-and-forth). The `pyet` python library (in `pyside` and auto-installed with this package) has the necessary routines.
-
-# Installation
-
-First, you have to install the Go version of emergent: [Wiki Install](https://github.com/emer/emergent/wiki/Install).
-
-Python version 3 (3.6, 3.8 have been well tested) is recommended.
-
-This assumes that you are using go modules, as discussed in the wiki install page, and *that you are in the `leabra` directory where you installed leabra* (e.g., `git clone https://github.com/emer/leabra` and then `cd leabra`)
-
-```sh
-$ cd python # should be in leabra/python now -- i.e., the dir where this README.md is..
-$ make
-$ make install # may need to do: sudo make install -- installs into /usr/local/bin and python site-packages
-$ cd ../examples/ra25
-$ ./ra25.py # runs using magic code on first line of file -- alternatively:
-$ pyleabra -i ra25.py # pyleabra was installed during make install into /usr/local/bin
-```
-
-The `pyleabra` executable combines standard python and the full Go emergent and GoGi gui packages -- see the information in the GoGi python readme for more technical information about this.
-
-# Sharing install
-
-To make a compiled version available to others, you just need the `dist/leabra-1.1.15.tar.gz` file and the `pyleabra` executable:
-
-```sh
-$ ./pyleabra -m pip install leabra-1.1.15.tar.gz
-$ ./pyleabra -m pip install numpy # numpy is needed
-$ cp pyleabra /usr/local/bin/
-```
-
-These steps might require `sudo` permissions.
-
diff --git a/python/go.mod b/python/go.mod
deleted file mode 100644
index 70d9312d9..000000000
--- a/python/go.mod
+++ /dev/null
@@ -1,30 +0,0 @@
-module github.com/emer/leabra/python
-
-go 1.15
-
-require (
- github.com/alecthomas/chroma v0.8.2
- github.com/anthonynsimon/bild v0.13.0
- github.com/apache/arrow/go/arrow v0.0.0-20201121231650-0e8be3caa4a6
- github.com/aymerick/douceur v0.2.0
- github.com/emer/emergent v1.1.27
- github.com/emer/etable v1.0.27
- github.com/emer/etorch v1.0.6
- github.com/emer/axon v1.2.17
- github.com/emer/vision v1.1.6
- github.com/go-gl/mathgl v1.0.0
- github.com/go-python/gopy v0.3.4
- github.com/goki/gi v1.2.7
- github.com/goki/ki v1.1.3
- github.com/goki/mat32 v1.0.9
- github.com/goki/pi v1.0.14
- github.com/goki/vci v1.0.0
- github.com/golang/freetype v0.0.0-20170609003504-e2365dfdc4a0
- github.com/ianbruene/go-difflib v1.2.0
- github.com/pkg/errors v0.9.1 // indirect
- github.com/srwiley/rasterx v0.0.0-20200120212402-85cb7272f5e9
- github.com/srwiley/scanx v0.0.0-20190309010443-e94503791388
- golang.org/x/image v0.0.0-20200927104501-e162460cd6b5
- gonum.org/v1/gonum v0.9.1
- gonum.org/v1/plot v0.9.0
-)
diff --git a/python/go.sum b/python/go.sum
deleted file mode 100644
index 88a88719e..000000000
--- a/python/go.sum
+++ /dev/null
@@ -1,367 +0,0 @@
-cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
-dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU=
-gioui.org v0.0.0-20200628203458-851255f7a67b/go.mod h1:jiUwifN9cRl/zmco43aAqh0aV+s9GbhG13KcD+gEpkU=
-github.com/BurntSushi/freetype-go v0.0.0-20160129220410-b763ddbfe298 h1:1qlsVAQJXZHsaM8b6OLVo6muQUQd4CwkH/D3fnnbHXA=
-github.com/BurntSushi/freetype-go v0.0.0-20160129220410-b763ddbfe298/go.mod h1:D+QujdIlUNfa0igpNMk6UIvlb6C252URs4yupRUV4lQ=
-github.com/BurntSushi/graphics-go v0.0.0-20160129215708-b43f31a4a966 h1:lTG4HQym5oPKjL7nGs+csTgiDna685ZXjxijkne828g=
-github.com/BurntSushi/graphics-go v0.0.0-20160129215708-b43f31a4a966/go.mod h1:Mid70uvE93zn9wgF92A/r5ixgnvX8Lh68fxp9KQBaI0=
-github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
-github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo=
-github.com/BurntSushi/xgb v0.0.0-20200324125942-20f126ea2843 h1:3iF31c7rp7nGZVDv7YQ+VxOgpipVfPKotLXykjZmwM8=
-github.com/BurntSushi/xgb v0.0.0-20200324125942-20f126ea2843/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo=
-github.com/BurntSushi/xgb v0.0.0-20201008132610-5f9e7b3c49cd h1:u7K2oMFMd8APDV3fM1j2rO3U/XJf1g1qC3DDTKou8iM=
-github.com/BurntSushi/xgb v0.0.0-20201008132610-5f9e7b3c49cd/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo=
-github.com/BurntSushi/xgbutil v0.0.0-20190907113008-ad855c713046 h1:O/r2Sj+8QcMF7V5IcmiE2sMFV2q3J47BEirxbXJAdzA=
-github.com/BurntSushi/xgbutil v0.0.0-20190907113008-ad855c713046/go.mod h1:uw9h2sd4WWHOPdJ13MQpwK5qYWKYDumDqxWWIknEQ+k=
-github.com/Knetic/govaluate v3.0.0+incompatible/go.mod h1:r7JcOSlj0wfOMncg0iLm8Leh48TZaKVeNIfJntJ2wa0=
-github.com/Masterminds/vcs v1.13.1 h1:NL3G1X7/7xduQtA2sJLpVpfHTNBALVNSjob6KEjPXNQ=
-github.com/Masterminds/vcs v1.13.1/go.mod h1:N09YCmOQr6RLxC6UNHzuVwAdodYbbnycGHSmwVJjcKA=
-github.com/ajstarks/svgo v0.0.0-20180226025133-644b8db467af h1:wVe6/Ea46ZMeNkQjjBW6xcqyQA/j5e0D6GytH95g0gQ=
-github.com/ajstarks/svgo v0.0.0-20180226025133-644b8db467af/go.mod h1:K08gAheRH3/J6wwsYMMT4xOr94bZjxIelGM0+d/wbFw=
-github.com/ajstarks/svgo v0.0.0-20200725142600-7a3c8b57fecb h1:EVl3FJLQCzSbgBezKo/1A4ADnJ4mtJZ0RvnNzDJ44nY=
-github.com/ajstarks/svgo v0.0.0-20200725142600-7a3c8b57fecb/go.mod h1:K08gAheRH3/J6wwsYMMT4xOr94bZjxIelGM0+d/wbFw=
-github.com/akutz/sortfold v0.2.1 h1:u9x3FC6oM+6gZKEVNRnmVafJgappwrv9YqpELQCYViI=
-github.com/akutz/sortfold v0.2.1/go.mod h1:m1NArmessx+/3z2N8MiiTjq79A3WwZwDDiZ7eeD4jHA=
-github.com/alecthomas/assert v0.0.0-20170929043011-405dbfeb8e38/go.mod h1:r7bzyVFMNntcxPZXK3/+KdruV1H5KSlyVY0gc+NgInI=
-github.com/alecthomas/chroma v0.7.3/go.mod h1:sko8vR34/90zvl5QdcUdvzL3J8NKjAUx9va9jPuFNoM=
-github.com/alecthomas/chroma v0.8.0 h1:HS+HE97sgcqjQGu5uVr8jIE55Mmh5UeQ7kckAhHg2pY=
-github.com/alecthomas/chroma v0.8.0/go.mod h1:sko8vR34/90zvl5QdcUdvzL3J8NKjAUx9va9jPuFNoM=
-github.com/alecthomas/chroma v0.8.2 h1:x3zkuE2lUk/RIekyAJ3XRqSCP4zwWDfcw/YJCuCAACg=
-github.com/alecthomas/chroma v0.8.2/go.mod h1:sko8vR34/90zvl5QdcUdvzL3J8NKjAUx9va9jPuFNoM=
-github.com/alecthomas/colour v0.0.0-20160524082231-60882d9e2721/go.mod h1:QO9JBoKquHd+jz9nshCh40fOfO+JzsoXy8qTHF68zU0=
-github.com/alecthomas/kong v0.2.4/go.mod h1:kQOmtJgV+Lb4aj+I2LEn40cbtawdWJ9Y8QLq+lElKxE=
-github.com/alecthomas/repr v0.0.0-20180818092828-117648cd9897/go.mod h1:xTS7Pm1pD1mvyM075QCDSRqH6qRLXylzS24ZTpRiSzQ=
-github.com/andreyvit/diff v0.0.0-20170406064948-c7f18ee00883/go.mod h1:rCTlJbsFo29Kk6CurOXKm700vrz8f0KW0JNfpkRJY/8=
-github.com/anthonynsimon/bild v0.12.0/go.mod h1:tpzzp0aYkAsMi1zmfhimaDyX1xjn2OUc1AJZK/TF0AE=
-github.com/anthonynsimon/bild v0.13.0 h1:mN3tMaNds1wBWi1BrJq0ipDBhpkooYfu7ZFSMhXt1C8=
-github.com/anthonynsimon/bild v0.13.0/go.mod h1:tpzzp0aYkAsMi1zmfhimaDyX1xjn2OUc1AJZK/TF0AE=
-github.com/apache/arrow/go/arrow v0.0.0-20200628183233-0b9720463eec h1:n+QIIV5z1XRDz9+Ul5Za0uof/0n6Xr80PH+5wz2IoEY=
-github.com/apache/arrow/go/arrow v0.0.0-20200628183233-0b9720463eec/go.mod h1:QNYViu/X0HXDHw7m3KXzWSVXIbfUvJqBFe6Gj8/pYA0=
-github.com/apache/arrow/go/arrow v0.0.0-20201121231650-0e8be3caa4a6 h1:I6DhrNsGbs+MV5BlunXOJ3Q+aSMaM2t0yiYWzfY+ySU=
-github.com/apache/arrow/go/arrow v0.0.0-20201121231650-0e8be3caa4a6/go.mod h1:c9sxoIT3YgLxH4UhLOCKaBlEojuMhVYpk4Ntv3opUTQ=
-github.com/armon/consul-api v0.0.0-20180202201655-eb2c6b5be1b6/go.mod h1:grANhF5doyWs3UAsr3K4I6qtAmlQcZDesFNEHPZAzj8=
-github.com/aymerick/douceur v0.2.0 h1:Mv+mAeH1Q+n9Fr+oyamOlAkUNPWPlA8PPGR0QAaYuPk=
-github.com/aymerick/douceur v0.2.0/go.mod h1:wlT5vV2O3h55X9m7iVYN0TBM0NH/MmbLnd30/FjWUq4=
-github.com/boombuler/barcode v1.0.0/go.mod h1:paBWMcWSl3LHKBqUq+rly7CNSldXjb2rDl3JlRe0mD8=
-github.com/c2h5oh/datasize v0.0.0-20200112174442-28bbd4740fee/go.mod h1:S/7n9copUssQ56c7aAgHqftWO4LTf4xY6CGWt8Bc+3M=
-github.com/c2h5oh/datasize v0.0.0-20200825124411-48ed595a09d2 h1:t8KYCwSKsOEZBFELI4Pn/phbp38iJ1RRAkDFNin1aak=
-github.com/c2h5oh/datasize v0.0.0-20200825124411-48ed595a09d2/go.mod h1:S/7n9copUssQ56c7aAgHqftWO4LTf4xY6CGWt8Bc+3M=
-github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=
-github.com/chewxy/math32 v1.0.6 h1:JWZYUNl2rtgVVui6z8JBsDgkOG2DYmfSODyo95yKfx4=
-github.com/chewxy/math32 v1.0.6/go.mod h1:dOB2rcuFrCn6UHrze36WSLVPKtzPMRAQvBvUwkSsLqs=
-github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw=
-github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc=
-github.com/coreos/etcd v3.3.10+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE=
-github.com/coreos/go-etcd v2.0.0+incompatible/go.mod h1:Jez6KQU2B/sWsbdaef3ED8NzMklzPG4d5KIOhIy30Tk=
-github.com/coreos/go-semver v0.2.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk=
-github.com/cpuguy83/go-md2man v1.0.10/go.mod h1:SmD6nW6nTyfqj6ABTjUi3V3JVMnlJmwcJI5acqYI6dE=
-github.com/danwakefield/fnmatch v0.0.0-20160403171240-cbb64ac3d964 h1:y5HC9v93H5EPKqaS1UYVg1uYah5Xf51mBfIoWehClUQ=
-github.com/danwakefield/fnmatch v0.0.0-20160403171240-cbb64ac3d964/go.mod h1:Xd9hchkHSWYkEqJwUGisez3G1QY8Ryz0sdWrLPMGjLk=
-github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
-github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
-github.com/dlclark/regexp2 v1.2.0/go.mod h1:2pZnwuY/m+8K6iRw6wQdMtk+rH5tNGR1i55kozfMjCc=
-github.com/dlclark/regexp2 v1.2.1 h1:Ff/S0snjr1oZHUNOkvA/gP6KUaMg5vDDl3Qnhjnwgm8=
-github.com/dlclark/regexp2 v1.2.1/go.mod h1:2pZnwuY/m+8K6iRw6wQdMtk+rH5tNGR1i55kozfMjCc=
-github.com/dlclark/regexp2 v1.4.0 h1:F1rxgk7p4uKjwIQxBs9oAXe5CqrXlCduYEJvrF4u93E=
-github.com/dlclark/regexp2 v1.4.0/go.mod h1:2pZnwuY/m+8K6iRw6wQdMtk+rH5tNGR1i55kozfMjCc=
-github.com/emer/emergent v1.1.10/go.mod h1:YscDt9Cl7zEl9gNbjHJ5jzoFUyLNHilSfCO9wHJWly4=
-github.com/emer/emergent v1.1.11 h1:iJxKpJdF2D1GN7veCmU6TMrmvfCXcJgbfh10wybhi3k=
-github.com/emer/emergent v1.1.11/go.mod h1:9Y+EJNcAjPMdiKn/HxsF3QF3Oc0fvvNMFWEpG8M9yrQ=
-github.com/emer/emergent v1.1.12/go.mod h1:XkJOn5TpDnC8uY7hVvmyoRRo76VwV6fXXdZGeg5Tgpk=
-github.com/emer/emergent v1.1.13 h1:/A+K39oyrhtDklMfNdld5do2fyBKdMPffaBVy752x1g=
-github.com/emer/emergent v1.1.13/go.mod h1:VUAhHTyRN7nhaOVAKnYQ8YHr1J7Ha+MhMCQOG+Ttdec=
-github.com/emer/empi v1.0.8/go.mod h1:/hKEKGnnOVHn4ePadz6Ik/gnucbDq+KyLrVmTtLu+KI=
-github.com/emer/empi v1.0.9/go.mod h1:T+NJAeHlVy9ETmQjaL+9pUV/k1TwOc6rKQ0tPWJrvWs=
-github.com/emer/etable v1.0.14/go.mod h1:BKPO7uhj3wbgY3TlBFCqmFGLsozJYydE27dSa3hVi48=
-github.com/emer/etable v1.0.16 h1:xv1vNYaOMZQijmhWH0vNXN1uVp0ZZAKXEA+JLHVDpuA=
-github.com/emer/etable v1.0.16/go.mod h1:eP3PjPnmQjX4iXF/gRSMgFV8loepGgDbdweRa6t/wo0=
-github.com/emer/etable v1.0.17/go.mod h1:eP3PjPnmQjX4iXF/gRSMgFV8loepGgDbdweRa6t/wo0=
-github.com/emer/etable v1.0.18 h1:o2DFfVt+ujb8ks5v0LfL5zPfLardf+XrMShnudlBA30=
-github.com/emer/etable v1.0.18/go.mod h1:/qOVnq0jL+JWu3HBepfX92ktUMlWiog7a3Uxsmk5Y7Q=
-github.com/emer/etorch v1.0.0 h1:9fpoaitYObEGr0Ogp4BBOt1P7e/HlsyUv88azaIw1Tw=
-github.com/emer/etorch v1.0.0/go.mod h1:ZG/F0YXe4/gUBcbTWQ9G3nXX99jklsqDJE2GBXKDZSU=
-github.com/emer/leabra v1.1.10/go.mod h1:yghPPPY6ExxgBQTe2WTLjwdy3ylKWEMb7xxD1uwVpro=
-github.com/emer/leabra v1.1.11 h1:oUrVvyjwTQzEAGcU21lRLVAd5QxrNsFuSn4eBR5kdGc=
-github.com/emer/leabra v1.1.11/go.mod h1:hZZ+mueFUs2mYZW10BIzPjyTS73h9qi2zMwTE6HK4xw=
-github.com/emer/leabra v1.1.14 h1:kiztdfbQ6KYDuro9xh96aP08VYiie8Y4vQFh26L1DdM=
-github.com/emer/leabra v1.1.14/go.mod h1:tlgKKOszSXjuzxOdEQTpxOdkEMmF5coSkVmZF8rKR7g=
-github.com/emer/leabra v1.1.15 h1:r7rcgLkT73C5hzZVNdvjpNcHyf9ELfMUrUblzGsAmGQ=
-github.com/emer/leabra v1.1.15/go.mod h1:ekWEnoNYCQ88t45GPPQGrT38rfbKgtQvEVTLNvewDTk=
-github.com/emer/vision v1.1.4 h1:5fQw3JS6CM6z/DUQ5xG6F7Bkof/5JkC5pBeb1oFEQiU=
-github.com/emer/vision v1.1.4/go.mod h1:5Mw0cVXMMnM/7MR0doyQb+ESAKBUEWgxjsyPmyOWl/c=
-github.com/emer/vision v1.1.6 h1:R+a5ugzB0o556gxw1S20oKbJOxtyJtx+B6vsN9U87TQ=
-github.com/emer/vision v1.1.6/go.mod h1:AuFuhLKVG5YAVA588eM0K68tNzpP5pnDlxcYiVUUXtI=
-github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
-github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
-github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98=
-github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c=
-github.com/fatih/camelcase v1.0.0 h1:hxNvNX/xYBp0ovncs8WyWZrOrpBNub/JfaMvbURyft8=
-github.com/fatih/camelcase v1.0.0/go.mod h1:yN2Sb0lFhZJUdVvtELVWefmrXpuZESvPmqwoZc+/fpc=
-github.com/fogleman/gg v1.2.1-0.20190220221249-0403632d5b90/go.mod h1:R/bRT+9gY/C5z7JzPU0zXsXHKM4/ayA+zqcVNZzPa1k=
-github.com/fogleman/gg v1.3.0 h1:/7zJX8F6AaYQc57WQCyN9cAIz+4bCJGO9B+dyW29am8=
-github.com/fogleman/gg v1.3.0/go.mod h1:R/bRT+9gY/C5z7JzPU0zXsXHKM4/ayA+zqcVNZzPa1k=
-github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo=
-github.com/gabriel-vasile/mimetype v1.1.1 h1:qbN9MPuRf3bstHu9zkI9jDWNfH//9+9kHxr9oRBBBOA=
-github.com/gabriel-vasile/mimetype v1.1.1/go.mod h1:6CDPel/o/3/s4+bp6kIbsWATq8pmgOisOPG40CJa6To=
-github.com/gabriel-vasile/mimetype v1.1.2 h1:gaPnPcNor5aZSVCJVSGipcpbgMWiAAj9z182ocSGbHU=
-github.com/gabriel-vasile/mimetype v1.1.2/go.mod h1:6CDPel/o/3/s4+bp6kIbsWATq8pmgOisOPG40CJa6To=
-github.com/go-fonts/dejavu v0.1.0/go.mod h1:4Wt4I4OU2Nq9asgDCteaAaWZOV24E+0/Pwo0gppep4g=
-github.com/go-gl/gl v0.0.0-20190320180904-bf2b1f2f34d7 h1:SCYMcCJ89LjRGwEa0tRluNRiMjZHalQZrVrvTbPh+qw=
-github.com/go-gl/gl v0.0.0-20190320180904-bf2b1f2f34d7/go.mod h1:482civXOzJJCPzJ4ZOX/pwvXBWSnzD4OKMdH4ClKGbk=
-github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1 h1:QbL/5oDUmRBzO9/Z7Seo6zf912W/a6Sr4Eu0G/3Jho0=
-github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU=
-github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200420212212-258d9bec320e/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8=
-github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200707082815-5321531c36a2 h1:Ac1OEHHkbAZ6EUnJahF0GKcU0FjPc/V8F1DvjhKngFE=
-github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200707082815-5321531c36a2/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8=
-github.com/go-gl/glfw/v3.3/glfw v0.0.0-20201108214237-06ea97f0c265 h1:BcbKYUZo/TKPsiSh7LymK3p+TNAJJW3OfGO/21sBbiA=
-github.com/go-gl/glfw/v3.3/glfw v0.0.0-20201108214237-06ea97f0c265/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8=
-github.com/go-gl/mathgl v0.0.0-20190713194549-592312d8590a h1:yoAEv7yeWqfL/l9A/J5QOndXIJCldv+uuQB1DSNQbS0=
-github.com/go-gl/mathgl v0.0.0-20190713194549-592312d8590a/go.mod h1:yhpkQzEiH9yPyxDUGzkmgScbaBVlhC06qodikEM0ZwQ=
-github.com/go-gl/mathgl v1.0.0 h1:t9DznWJlXxxjeeKLIdovCOVJQk/GzDEL7h/h+Ro2B68=
-github.com/go-gl/mathgl v1.0.0/go.mod h1:yhpkQzEiH9yPyxDUGzkmgScbaBVlhC06qodikEM0ZwQ=
-github.com/go-latex/latex v0.0.0-20200518072620-0806b477ea35 h1:uroDDLmuCK5Pz5J/Ef5vCL6F0sJmAtZFTm0/cF027F4=
-github.com/go-latex/latex v0.0.0-20200518072620-0806b477ea35/go.mod h1:PNI+CcWytn/2Z/9f1SGOOYn0eILruVyp0v2/iAs8asQ=
-github.com/go-python/gopy v0.3.1/go.mod h1:gQ2Itc84itA1AjrVqnMnv7HLkfmNObRXlR1co7CXpbk=
-github.com/go-python/gopy v0.3.2-0.20200916100237-6d8fa23dbe8a h1:8nA6hyfrKNHl0EjaoYEl5Yqw8COcHjwAgY/wdkAavqg=
-github.com/go-python/gopy v0.3.2-0.20200916100237-6d8fa23dbe8a/go.mod h1:IcdfJ6FULCVq1E11uXTaa4XWORT77SLHZrO4DVTqB78=
-github.com/go-python/gopy v0.3.2 h1:nB5nK4JThMkmsev0JADnD7bsK3SFMJWK+8z0kw9a1kg=
-github.com/go-python/gopy v0.3.2/go.mod h1:IcdfJ6FULCVq1E11uXTaa4XWORT77SLHZrO4DVTqB78=
-github.com/goki/freetype v0.0.0-20181231101311-fa8a33aabaff h1:W71vTCKoxtdXgnm1ECDFkfQnpdqAO00zzGXLA5yaEX8=
-github.com/goki/freetype v0.0.0-20181231101311-fa8a33aabaff/go.mod h1:wfqRWLHRBsRgkp5dmbG56SA0DmVtwrF5N3oPdI8t+Aw=
-github.com/goki/gi v1.0.14/go.mod h1:Ig7a4paXz+H2k1L9Yd4ZRnVw/BIh2XAhdTiQhKGkK7c=
-github.com/goki/gi v1.0.16 h1:O1t6huv/GPHmkZC+peftO+2XG/aijKaljcSb42PCP2s=
-github.com/goki/gi v1.0.16/go.mod h1:E1XS7yoU8sD2XtpnI1RznuKzB8lwYYMqo3J/5lt/9vM=
-github.com/goki/gi v1.0.17/go.mod h1:E1XS7yoU8sD2XtpnI1RznuKzB8lwYYMqo3J/5lt/9vM=
-github.com/goki/gi v1.1.0 h1:boO48CeCdiMtitplfDniCOJB5zj1hJlmJLVgq2ERNc0=
-github.com/goki/gi v1.1.0/go.mod h1:ssRCcYuLBqqm2M4kY/Xj5WwZ2D/eaXmdUoXALIIbfDs=
-github.com/goki/ki v1.0.0/go.mod h1:X+gmVeAym3JDSbbiA7iF1qkgAlTVWl1JV9sRsGDzxOA=
-github.com/goki/ki v1.0.2/go.mod h1:X+gmVeAym3JDSbbiA7iF1qkgAlTVWl1JV9sRsGDzxOA=
-github.com/goki/ki v1.0.4 h1:F6W6vEMfI583A4pvyqOlvPXVjzRNzj3EWaGD1vyBbgc=
-github.com/goki/ki v1.0.4/go.mod h1:X+gmVeAym3JDSbbiA7iF1qkgAlTVWl1JV9sRsGDzxOA=
-github.com/goki/ki v1.0.5 h1:XsCtIyiu/PysmAuRbfY+l+q4Zk1/W7jS3d6GqxO+f9U=
-github.com/goki/ki v1.0.5/go.mod h1:X+gmVeAym3JDSbbiA7iF1qkgAlTVWl1JV9sRsGDzxOA=
-github.com/goki/mat32 v1.0.2 h1:pWwejsySL7TXcuE+axGVhF6I4U9YBK6RCDyYwgPmucA=
-github.com/goki/mat32 v1.0.2/go.mod h1:SCqUsgLhG48i7wu/Kce9OF62abYJnxCX7YL5TOxZ0K4=
-github.com/goki/pi v1.0.7 h1:7v3FklAhsu2oVBxVOYQ9od3ynVAfjevKHgekEGNmVLc=
-github.com/goki/pi v1.0.7/go.mod h1:V3Jaw+FKB0D5u2UJJA+6EWpd4bBevWN6DaxTFpOy+z8=
-github.com/goki/prof v0.0.0-20180502205428-54bc71b5d09b h1:3zU6niF8uvEaNtRBhOkmgbE/Fx7D6xuALotArTpycNc=
-github.com/goki/prof v0.0.0-20180502205428-54bc71b5d09b/go.mod h1:pgRizZOb3eUJr+ByZnXnPvt+a0fVOTn0Ujc2TqVZpW4=
-github.com/goki/vci v1.0.0 h1:ib0x+rdYF84vX6uNOIQ1dRKFx7fgK7hDcv+cp0CkRhg=
-github.com/goki/vci v1.0.0/go.mod h1:uOQl8kDy2Nb7MEY8cyz72ntp2PaTwJkm2GZNKrEKHE0=
-github.com/golang/freetype v0.0.0-20170609003504-e2365dfdc4a0 h1:DACJavvAHhabrF08vX0COfcOBJRhZ8lUbR+ZWIs0Y5g=
-github.com/golang/freetype v0.0.0-20170609003504-e2365dfdc4a0/go.mod h1:E/TSTwGwJL78qG/PmXZO1EjYhfJinVAhrmmHX6Z8B9k=
-github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q=
-github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
-github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
-github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
-github.com/golang/protobuf v1.3.3/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw=
-github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8=
-github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA=
-github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs=
-github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w=
-github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0=
-github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8=
-github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI=
-github.com/gonuts/commander v0.1.0 h1:EcDTiVw9oAVORFjQOEOuHQqcl6OXMyTgELocTq6zJ0I=
-github.com/gonuts/commander v0.1.0/go.mod h1:qkb5mSlcWodYgo7vs8ulLnXhfinhZsZcm6+H/z1JjgY=
-github.com/gonuts/flag v0.1.0 h1:fqMv/MZ+oNGu0i9gp0/IQ/ZaPIDoAZBOBaJoV7viCWM=
-github.com/gonuts/flag v0.1.0/go.mod h1:ZTmTGtrSPejTo/SRNhCqwLTmiAgyBdCkLYhHrAoBdz4=
-github.com/google/flatbuffers v1.11.0/go.mod h1:1AeVuKshWv4vARoZatz6mlQ0JxURH0Kv5+zNeJKJCa8=
-github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M=
-github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
-github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
-github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
-github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
-github.com/gorilla/css v1.0.0 h1:BQqNyPTi50JCFMTw/b67hByjMVXZRwGha6wxVGkeihY=
-github.com/gorilla/css v1.0.0/go.mod h1:Dn721qIggHpt4+EFCcTLTU/vk5ySda2ReITrtgBl60c=
-github.com/h2non/filetype v1.1.0 h1:Or/gjocJrJRNK/Cri/TDEKFjAR+cfG6eK65NGYB6gBA=
-github.com/h2non/filetype v1.1.0/go.mod h1:319b3zT68BvV+WRj7cwy856M2ehB3HqNOt6sy1HndBY=
-github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ=
-github.com/ianbruene/go-difflib v1.2.0 h1:iARmgaCq6nW5QptdoFm0PYAyNGix3xw/xRgEwphJSZw=
-github.com/ianbruene/go-difflib v1.2.0/go.mod h1:uJbrQ06VPxjRiRIrync+E6VcWFGW2dWqw2gvQp6HQPY=
-github.com/iancoleman/strcase v0.0.0-20191112232945-16388991a334/go.mod h1:SK73tn/9oHe+/Y0h39VT4UCxmurVJkR5NA7kMEAOgSE=
-github.com/iancoleman/strcase v0.1.1 h1:2I+LRClyCYB7JgZb9U0k75VHUiQe9RfknRqDyUfzp7k=
-github.com/iancoleman/strcase v0.1.1/go.mod h1:SK73tn/9oHe+/Y0h39VT4UCxmurVJkR5NA7kMEAOgSE=
-github.com/iancoleman/strcase v0.1.2 h1:gnomlvw9tnV3ITTAxzKSgTF+8kFWcU/f+TgttpXGz1U=
-github.com/iancoleman/strcase v0.1.2/go.mod h1:SK73tn/9oHe+/Y0h39VT4UCxmurVJkR5NA7kMEAOgSE=
-github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8=
-github.com/jinzhu/copier v0.0.0-20190924061706-b57f9002281a h1:zPPuIq2jAWWPTrGt70eK/BSch+gFAGrNzecsoENgu2o=
-github.com/jinzhu/copier v0.0.0-20190924061706-b57f9002281a/go.mod h1:yL958EeXv8Ylng6IfnvG4oflryUi3vgA3xPs9hmII1s=
-github.com/jinzhu/copier v0.0.0-20201025035756-632e723a6687 h1:bWXum+xWafUxxJpcXnystwg5m3iVpPYtrGJFc1rjfLc=
-github.com/jinzhu/copier v0.0.0-20201025035756-632e723a6687/go.mod h1:24xnZezI2Yqac9J61UC6/dG/k76ttpq0DdJI3QmUvro=
-github.com/jung-kurt/gofpdf v1.0.0/go.mod h1:7Id9E/uU8ce6rXgefFLlgrJj/GYY22cpxn+r32jIOes=
-github.com/jung-kurt/gofpdf v1.0.3-0.20190309125859-24315acbbda5 h1:PJr+ZMXIecYc1Ey2zucXdR73SMBtgjPgwa31099IMv0=
-github.com/jung-kurt/gofpdf v1.0.3-0.20190309125859-24315acbbda5/go.mod h1:7Id9E/uU8ce6rXgefFLlgrJj/GYY22cpxn+r32jIOes=
-github.com/jung-kurt/gofpdf v1.16.2 h1:jgbatWHfRlPYiK85qgevsZTHviWXKwB1TTiKdz5PtRc=
-github.com/jung-kurt/gofpdf v1.16.2/go.mod h1:1hl7y57EsiPAkLbOwzpzqgx1A30nQCk/YmFV8S2vmK0=
-github.com/magiconair/properties v1.8.0/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ=
-github.com/mattn/go-colorable v0.1.6/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc=
-github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU=
-github.com/mitchellh/go-homedir v1.1.0 h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG+4E0Y=
-github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0=
-github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y=
-github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic=
-github.com/phpdave11/gofpdi v1.0.7/go.mod h1:vBmVV0Do6hSBHC8uKUQ71JGW+ZGQq74llk/7bXwjDoI=
-github.com/pkg/errors v0.8.1 h1:iURUrRGxPUNPdy5/HRSm+Yj6okJ6UtLINN0Q9M4+h3I=
-github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
-github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=
-github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
-github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
-github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
-github.com/russross/blackfriday v1.5.2/go.mod h1:JO/DiYxRf+HjHt06OyowR9PTA263kcR/rfWxYHBV53g=
-github.com/ruudk/golang-pdf417 v0.0.0-20181029194003-1af4ab5afa58/go.mod h1:6lfFZQK844Gfx8o5WFuvpxWRwnSoipWe/p622j1v06w=
-github.com/sergi/go-diff v1.0.0/go.mod h1:0CfEIISq7TuYL3j771MWULgwwjU+GofnZX9QAmXWZgo=
-github.com/spf13/afero v1.1.2/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B0CQ=
-github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE=
-github.com/spf13/cobra v0.0.5/go.mod h1:3K3wKZymM7VvHMDS9+Akkh4K60UwM26emMESw8tLCHU=
-github.com/spf13/jwalterweatherman v1.0.0/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo=
-github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4=
-github.com/spf13/viper v1.3.2/go.mod h1:ZiWeW+zYFKm7srdB9IoDzzZXaJaI5eL9QjNiN/DMA2s=
-github.com/srwiley/oksvg v0.0.0-20200311192757-870daf9aa564/go.mod h1:afMbS0qvv1m5tfENCwnOdZGOF8RGR/FsZ7bvBxQGZG4=
-github.com/srwiley/rasterx v0.0.0-20200120212402-85cb7272f5e9 h1:m59mIOBO4kfcNCEzJNy71UkeF4XIx2EVmL9KLwDQdmM=
-github.com/srwiley/rasterx v0.0.0-20200120212402-85cb7272f5e9/go.mod h1:mvWM0+15UqyrFKqdRjY6LuAVJR0HOVhJlEgZ5JWtSWU=
-github.com/srwiley/scanFT v0.0.0-20190309001647-3267585b8d6d/go.mod h1:Z7vQGQxdJpx5MQ8GkOGiTJL4zq8eSyI86tTUiy9cov0=
-github.com/srwiley/scanx v0.0.0-20190309010443-e94503791388 h1:ZdkidVdpLW13BQ9a+/3uerT2ezy9J7KQWH18JCfhDmI=
-github.com/srwiley/scanx v0.0.0-20190309010443-e94503791388/go.mod h1:C/WY5lmWfMtPFYYBTd3Lzdn4FTLr+RxlIeiBNye+/os=
-github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
-github.com/stretchr/testify v1.2.0/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
-github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
-github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
-github.com/ugorji/go/codec v0.0.0-20181204163529-d75b2dcb6bc8/go.mod h1:VFNgLljTbGfSG7qAOspJ7OScBnGdDN/yBr0sguwnwf0=
-github.com/xordataexchange/crypt v0.0.3-0.20170626215501-b2862e3d0a77/go.mod h1:aYKd//L2LvnjZzWKhF00oedf4jCCReLcmhLdhm1A27Q=
-github.com/yuin/goldmark v1.1.25/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
-github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
-github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
-golang.org/x/crypto v0.0.0-20181203042331-505ab145d0a9/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
-golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
-golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
-golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
-golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
-golang.org/x/exp v0.0.0-20180321215751-8460e604b9de/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
-golang.org/x/exp v0.0.0-20180807140117-3d87b88a115f/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
-golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
-golang.org/x/exp v0.0.0-20190125153040-c74c464bbbf2/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
-golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
-golang.org/x/exp v0.0.0-20191002040644-a1355ae1e2c3/go.mod h1:NOZ3BPKG0ec/BKJQgnvsSFpcKLM5xXVWnvZS97DWHgE=
-golang.org/x/image v0.0.0-20180708004352-c73c2afc3b81/go.mod h1:ux5Hcp/YLpHSI86hEcLt0YII63i6oz57MZXIpbrjZUs=
-golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js=
-golang.org/x/image v0.0.0-20190321063152-3fc05d484e9f/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js=
-golang.org/x/image v0.0.0-20190703141733-d6a02ce849c9/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0=
-golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0=
-golang.org/x/image v0.0.0-20190910094157-69e4b8554b2a/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0=
-golang.org/x/image v0.0.0-20200119044424-58c23975cae1 h1:5h3ngYt7+vXCDZCup/HkCQgW5XwmSvR/nA2JmJ0RErg=
-golang.org/x/image v0.0.0-20200119044424-58c23975cae1/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0=
-golang.org/x/image v0.0.0-20200430140353-33d19683fad8/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0=
-golang.org/x/image v0.0.0-20200618115811-c13761719519/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0=
-golang.org/x/image v0.0.0-20200801110659-972c09e46d76 h1:U7GPaoQyQmX+CBRWXKrvRzWTbd+slqeSh8uARsIyhAw=
-golang.org/x/image v0.0.0-20200801110659-972c09e46d76/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0=
-golang.org/x/image v0.0.0-20200927104501-e162460cd6b5 h1:QelT11PB4FXiDEXucrfNckHoFxwt8USGY1ajP1ZF5lM=
-golang.org/x/image v0.0.0-20200927104501-e162460cd6b5/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0=
-golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
-golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU=
-golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
-golang.org/x/mobile v0.0.0-20190719004257-d2bd2a29d028/go.mod h1:E/iHnbuqvinMTCcRqshq8CkpyQDoeVncDDYHnLhea+o=
-golang.org/x/mod v0.1.0/go.mod h1:0QHyrYULN0/3qlju5TqG8bIK38QM8yzMo5ekMj3DlcY=
-golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
-golang.org/x/mod v0.3.0 h1:RM4zey1++hCTbCVQfnWeKs9/IEsaBLA8vTkd0WVtmH4=
-golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
-golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
-golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
-golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
-golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
-golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
-golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
-golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
-golang.org/x/net v0.0.0-20200602114024-627f9648deb9/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
-golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
-golang.org/x/net v0.0.0-20200904194848-62affa334b73 h1:MXfv8rhZWmFeqX3GNZRsd6vOLoaCHjYEX3qkRo3YBUA=
-golang.org/x/net v0.0.0-20200904194848-62affa334b73/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
-golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
-golang.org/x/net v0.0.0-20201110031124-69a78807bb2b h1:uwuIcX0g4Yl1NC5XAz37xsr2lTtcqevgzYNVt49waME=
-golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
-golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
-golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
-golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
-golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
-golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
-golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
-golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
-golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
-golang.org/x/sys v0.0.0-20181205085412-a5c9d58dba9a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
-golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
-golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200116001909-b77594299b42/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200124204421-9fbb57f87de9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200413165638-669c56c373c4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200909081042-eff7692f9009/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
-golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
-golang.org/x/text v0.3.3 h1:cokOdA+Jmi5PJGXLlLllQSgYigAEfHXJAERHVMaCc2k=
-golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
-golang.org/x/text v0.3.4 h1:0YWbFKbhXG/wIiuHDSKpS0Iy7FSA+u45VtBMfQcFTTc=
-golang.org/x/text v0.3.4/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
-golang.org/x/tools v0.0.0-20180525024113-a5b4c53f6e8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
-golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
-golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
-golang.org/x/tools v0.0.0-20190206041539-40960b6deb8e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
-golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY=
-golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
-golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
-golang.org/x/tools v0.0.0-20190823170909-c4a336ef6a2f/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
-golang.org/x/tools v0.0.0-20190927191325-030b2cf1153e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
-golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
-golang.org/x/tools v0.0.0-20200326194725-b1df9901287c h1:lOHr9KzDy5SxEmffvQO+sLmartg6RfDELX60/tv3qFg=
-golang.org/x/tools v0.0.0-20200326194725-b1df9901287c/go.mod h1:Sl4aGygMT6LrqrWclx+PTx3U+LnKx/seiNR+3G19Ar8=
-golang.org/x/tools v0.0.0-20200619210111-0f592d2728bb/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
-golang.org/x/tools v0.0.0-20200915201639-f4cefd1cb5ba/go.mod h1:z6u4i615ZeAfBE4XtMziQW1fSVJXACjjbWkB/mvPzlU=
-golang.org/x/tools v0.0.0-20200917221617-d56e4e40bc9d h1:y39d97JVttj+rkTXITl1nf9Vsk+VoRuNzIDLFldUSB4=
-golang.org/x/tools v0.0.0-20200917221617-d56e4e40bc9d/go.mod h1:z6u4i615ZeAfBE4XtMziQW1fSVJXACjjbWkB/mvPzlU=
-golang.org/x/tools v0.0.0-20201121010211-780cb80bd7fb h1:z5+u0pkAUPUWd3taoTialQ2JAMo4Wo1Z3L25U4ZV9r0=
-golang.org/x/tools v0.0.0-20201121010211-780cb80bd7fb/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
-golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
-golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
-golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
-golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 h1:go1bK/D/BFZV2I8cIQd1NKEZ+0owSTG1fDTci4IqFcE=
-golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
-gonum.org/v1/gonum v0.0.0-20180816165407-929014505bf4/go.mod h1:Y+Yx5eoAFn32cQvJDxZx5Dpnq+c3wtXuadVZAcxbbBo=
-gonum.org/v1/gonum v0.7.0 h1:Hdks0L0hgznZLG9nzXb8vZ0rRvqNvAcgAp84y7Mwkgw=
-gonum.org/v1/gonum v0.7.0/go.mod h1:L02bwd0sqlsvRv41G7wGWFCsVNZFv/k1xzGIxeANHGM=
-gonum.org/v1/gonum v0.8.1 h1:wGtP3yGpc5mCLOLeTeBdjeui9oZSz5De0eOjMLC/QuQ=
-gonum.org/v1/gonum v0.8.1/go.mod h1:oe/vMfY3deqTw+1EZJhuvEW2iwGF1bW9wwu7XCu0+v0=
-gonum.org/v1/netlib v0.0.0-20190313105609-8cb42192e0e0/go.mod h1:wa6Ws7BG/ESfp6dHfk7C6KdzKA7wR7u/rKwOGE66zvw=
-gonum.org/v1/plot v0.0.0-20190515093506-e2840ee46a6b/go.mod h1:Wt8AAjI+ypCyYX3nZBvf6cAIx93T+c/OS2HFAYskSZc=
-gonum.org/v1/plot v0.7.0 h1:Otpxyvra6Ie07ft50OX5BrCfS/BWEMvhsCUHwPEJmLI=
-gonum.org/v1/plot v0.7.0/go.mod h1:2wtU6YrrdQAhAF9+MTd5tOQjrov/zF70b1i99Npjvgo=
-gonum.org/v1/plot v0.8.1 h1:1oWyfw7tIDDtKb+t+SbR9RFruMmNJlsKiZUolHdys2I=
-gonum.org/v1/plot v0.8.1/go.mod h1:3GH8dTfoceRTELDnv+4HNwbvM/eMfdDUGHFG2bo3NeE=
-google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM=
-google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
-google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc=
-google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc=
-google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo=
-google.golang.org/genproto v0.0.0-20200911024640-645f7a48b24f/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
-google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
-google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg=
-google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY=
-google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk=
-google.golang.org/grpc v1.32.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak=
-google.golang.org/grpc/cmd/protoc-gen-go-grpc v0.0.0-20200910201057-6591123024b3/go.mod h1:6Kw0yEErY5E/yWrBtf03jp27GLLJujG4z/JK95pnjjw=
-google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8=
-google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0=
-google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM=
-google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE=
-google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo=
-google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
-google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
-google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
-google.golang.org/protobuf v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGjtUeSXeh4=
-google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c=
-gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
-gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
-honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
-honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
-rsc.io/pdf v0.1.1/go.mod h1:n8OzWcQ6Sp37PL01nO98y4iUCRdTGarVfzxY20ICaU4=
diff --git a/python/gotopy/README.md b/python/gotopy/README.md
deleted file mode 100644
index a19079ecd..000000000
--- a/python/gotopy/README.md
+++ /dev/null
@@ -1,45 +0,0 @@
-This directory has info and tools for converting Go-based simulation projects to Python.
-
-# GoToPy
-
-GoToPy does a first-pass conversion of Go syntax to Python syntax: https://github.com/go-python/gotopy
-
-```sh
-$ go get github.com/go-python/gotopy
-```
-
-This installs the tool directly, so the `gotopy` executable should end up in your `~/go/bin` directory; add that directory to your `PATH` if it isn't already there. Verify with `which gotopy`.
-
-To run directly:
-
-```sh
-$ gotopy -gogi mysim.go > mysim.py
-```
-
-The `-gogi` option is important for enabling extra conversions for the GoGi gui system.
-
-# leabra-to.py
-
-This Python program performs additional conversion steps specific to Leabra sims, to get the code closer to usable. To prepare, run:
-
-```sh
-$ pip3 install -r requirements.txt
-```
-
-You can copy this file to an appropriate place on your path, e.g.:
-
-```sh
-$ cp leabra-to.py /usr/local/bin
-```
-
-and then run it:
-
-```sh
-$ leabra-to.py mysim.go
-```
-
-which generates a file named `mysim.py` -- important: this will overwrite any existing file of that name!
-
-After running, you will need to fix the start and end of the generated file by copying from a similar existing project (use ra25 if nothing else): the CB callback functions go at the top, and the `tbar.AddAction` calls in `ConfigGui` at the end call those callbacks instead of the inline code, as sketched below. There may be other errors, which you can discover by running the result -- this conversion process hits a point of diminishing returns, so it is not designed to be complete.
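-
-Here is a minimal sketch of that hand-edited structure (hypothetical -- the callback and action names are placeholders; copy the real pattern from an existing project such as ra25.py):
-
-```python
-# callback functions at the top of the file, outside the Sim class
-def InitCB(recv, send, sig, data):
-    TheSim.Init()
-
-def TrainCB(recv, send, sig, data):
-    TheSim.Train()
-
-# ...and at the end of ConfigGui, toolbar actions that invoke the callbacks
-# (recv = win.This(), as set up earlier in ConfigGui)
-tbar.AddAction(gi.ActOpts(Label="Init", Icon="update"), recv, InitCB)
-tbar.AddAction(gi.ActOpts(Label="Train", Icon="run"), recv, TrainCB)
-```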
-
-
diff --git a/python/gotopy/leabra-to.py b/python/gotopy/leabra-to.py
deleted file mode 100755
index 070452666..000000000
--- a/python/gotopy/leabra-to.py
+++ /dev/null
@@ -1,371 +0,0 @@
-#!/usr/bin/env python3
-
-# Copyright (c) 2020, The Emergent Authors. All rights reserved.
-# Use of this source code is governed by a BSD-style
-# license that can be found in the LICENSE file.
-
-# use:
-# leabra-to.py mysim.go
-#
-# Generates a mysim.py conversion of mysim.go, attempting to get the code
-# as close to usable as possible.
-
-import os, sys, subprocess
-
-debug = False
-
-# these are defined below
-inserts = []
-replaces = []
-deletes = []
-
-def read_as_string(fnm):
- # reads file as string
- if not os.path.isfile(fnm):
- return ""
- with open(fnm, "r") as f:
- val = f.read()
- return val
-
-def write_string(fnm, stval):
- with open(fnm,"w") as f:
- f.write(stval)
-
-def gotopy(fname):
- result = subprocess.run(["gotopy","-gogi", fname], capture_output=True)
- if len(result.stderr) > 0:
- print(str(result.stderr, "utf-8"))
- return str(result.stdout, "utf-8")
-
-def repls(txt):
- txt = txt.replace("leabra.LeabraLayer(", "leabra.Layer(")
- txt = txt.replace(".AsLeabra()", "")
- txt = txt.replace("(`", "('")
- txt = txt.replace("`)", "')")
- txt = txt.replace(" = ss.TrainUpdt", " = ss.TrainUpdt.value")
- txt = txt.replace(" = ss.TestUpdt", " = ss.TestUpdt.value")
- txt = txt.replace(" ss.TrainUpdt >", " ss.TrainUpdt.value >")
- txt = txt.replace(" ss.TestUpdt >", " ss.TestUpdt.value >")
- return txt
-
-def inserttxt(txt, ati, ins):
- if debug:
- print("\n##########\nins:")
- print(ins)
- for i, v in enumerate(ins):
- txt.insert(ati+i, v)
-
-def repltxt(txt, ati, ftxt, itxt):
- if debug:
- print("\n##########\nrepl:")
- print(ftxt)
- print("with:")
- print(itxt)
- nf = len(ftxt)
- ni = len(itxt)
- for i, v in enumerate(itxt):
- if i < nf:
- txt[ati+i] = v
- else:
- txt.insert(ati+i, v)
- if nf > ni:
- del txt[ati+ni:ati+nf]
-
-def diffs(txt):
- # applies the inserts, replaces, and deletes edit lists (defined below)
- # to the gotopy output, line by line -- each list is consumed strictly
- # in order, and ni tracks the current line index in the output
- lns = txt.splitlines()
- nln = lns.copy()
- ni = 0
- insi = -1
- rpli = -1
- deli = -1
- didone = False
- for i, v in enumerate(lns):
- for j, ir in enumerate(inserts):
- if j <= insi:
- continue
- ftxt = ir[0]
- lnoff = ir[1]
- itxt = ir[2]
- if ftxt in v:
- inserttxt(nln, ni+lnoff, itxt)
- ni += len(itxt)
- insi = j
- break
- for j, rp in enumerate(replaces):
- if j <= rpli:
- continue
- ftxt = rp[0]
- itxt = rp[1]
- if ftxt[0] == v:
- repltxt(nln, ni, ftxt, itxt)
- ni += len(itxt) - len(ftxt)
- rpli = j
- break
- for j, ft in enumerate(deletes):
- if j <= deli:
- continue
- if ft[0] == v:
- if debug:
- print("\n##########\ndel:")
- print(ft)
- del nln[ni:ni+len(ft)]
- ni -= len(ft)
- deli = j
- break
- ni += 1
- return '\n'.join(nln)
-
-def column(txt):
- # rewrites etable.Schema(...) blocks: wraps the etensor column specs in
- # a python list of etable.Column entries, and closes the list at the end
- lns = txt.splitlines()
- insc = False
- start = False
- for i, v in enumerate(lns):
- if " = etable.Schema(" in v:
- insc = True
- start = True
- continue
- if insc and "etensor." in v:
- op = v.find('("')
- if op < 0:
- insc = False
- continue
- if start:
- lns[i] = v[:op] + "[etable.Column" + v[op:]
- start = False
- else:
- lns[i] = v[:op] + "etable.Column" + v[op:]
- elif insc:
- lns[i-1] = lns[i-1][:-1] + "]"
- insc = False
- continue
- return '\n'.join(lns)
-
-def main(argv):
- if len(argv) < 2 or argv[1] == "help":
- print("\n%s converts leabra .go sim file to Python .py file\n" % argv[0])
- print("usage: just takes the input filename")
- exit(0)
-
- fname = argv[1]
- outfn = os.path.splitext(fname)[0] + ".py"
- raw = gotopy(fname)
- txt = diffs(raw)
- txt = repls(txt)
- txt = column(txt)
- write_string(outfn, txt)
-
-##############################################
-### text edits
-
-# the only constraint is that these must be listed *in sequential order* -- the
-# match index is incremented after every match, so no match is revisited
-
-## tuple elements are: find, offset, text
-
-inserts = [
-("def New(ss):", -1, [
-''' self.vp = 0''',
-''' self.SetTags("vp", 'view:"-" desc:"viewport"')''',
-'',
-''' def InitParams(ss):''',
-''' """''',
-''' Sets the default set of parameters -- Base is always applied, and others can be optionally''',
-''' selected to apply on top of that''',
-''' """''',
-''' ss.Params.OpenJSON("my.params")''',
-]),
-("def Config(ss):", 4, [
-''' ss.InitParams()'''
-]),
-("viewUpdt = ss.TrainUpdt", 0, [
-''' if ss.Win != 0:''',
-''' ss.Win.PollEvents() # this is essential for GUI responsiveness while running''',
-]),
-("def Stopped(ss):", 10, [
-''' ss.UpdateClassView()'''
-]),
-("vp = win.WinViewport2D()", 1, [
-''' ss.vp = vp''',
-]),
-]
-
-replaces = [
-([
-''' err = net.Build()''',
-''' if err != 0:''',
-''' log.Println(err)''',
-''' return''',
-],[
-''' net.Build()'''
-]),
-([
-''' ss.RndSeed = time.Now().UnixNano()''',
-],[
-''' ss.RndSeed = int(datetime.now(timezone.utc).timestamp())''',
-]),
-([
-''' switch viewUpdt:''',
-''' if leabra.Cycle:''',
-],[
-''' if viewUpdt == leabra.Cycle:''',
-]),
-([
-''' if leabra.FastSpike:'''
-],[
-''' if viewUpdt == leabra.FastSpike:'''
-]),
-([
-''' if ss.ViewOn:''',
-''' switch :'''
-],[
-''' if ss.ViewOn:'''
-]),
-([
-''' epc, _, chg = ss.TrainEnv.Counter(env.Epoch)''',
-],[
-''' epc = env.CounterCur(ss.TrainEnv, env.Epoch)''',
-''' chg = env.CounterChg(ss.TrainEnv, env.Epoch)''',
-]),
-([
-''' ss.TrlSSE, ss.TrlAvgSSE = out.MSE(0.5)''',
-],[
-''' ss.TrlSSE = out.SSE(0.5) # 0.5 = per-unit tolerance -- right side of .5''',
-''' ss.TrlAvgSSE = ss.TrlSSE / len(out.Neurons)'''
-]),
-([
-''' _, _, chg = ss.TestEnv.Counter(env.Epoch)'''
-],[
-''' chg = env.CounterChg(ss.TestEnv, env.Epoch)'''
-]),
-([
-''' _, _, chg = ss.TestEnv.Counter(env.Epoch)'''
-],[
-''' chg = env.CounterChg(ss.TestEnv, env.Epoch)'''
-]),
-([
-''' err = ss.SetParamsSet("Base", sheet, setMsg)''',
-''' if ss.ParamSet != "" and ss.ParamSet != "Base":''',
-''' sps = ss.ParamSet.split()''',
-''' for ps in sps :''',
-''' err = ss.SetParamsSet(ps, sheet, setMsg)''',
-''' return err''',
-],[
-''' ss.SetParamsSet("Base", sheet, setMsg)''',
-''' if ss.ParamSet != "" and ss.ParamSet != "Base":''',
-''' sps = ss.ParamSet.split()''',
-''' for ps in sps:''',
-''' ss.SetParamsSet(ps, sheet, setMsg)''',
-]),
-([
-''' pset, err = ss.Params.SetByNameTry(setNm)''',
-''' if err != 0:''',
-''' return err''',
-''' if sheet == "" or sheet == "Network":''',
-''' netp, ok = pset.Sheets["Network"]''',
-''' if ok:''',
-''' ss.Net.ApplyParams(netp, setMsg)''',
-'',
-''' if sheet == "" or sheet == "Sim":''',
-''' simp, ok = pset.Sheets["Sim"]''',
-''' if ok:''',
-''' simp.Apply(ss, setMsg)''',
-'',
-''' return err''',
-],[
-''' pset = ss.Params.SetByNameTry(setNm)''',
-''' if sheet == "" or sheet == "Network":''',
-''' if "Network" in pset.Sheets:''',
-''' netp = pset.SheetByNameTry("Network")''',
-''' ss.Net.ApplyParams(netp, setMsg)''',
-''' if sheet == "" or sheet == "Sim":''',
-''' if "Sim" in pset.Sheets:''',
-''' simp = pset.SheetByNameTry("Sim")''',
-''' pyparams.ApplyParams(ss, simp, setMsg)''',
-]),
-([
-''' if ss.ValsTsrs == 0:''',
-''' ss.ValsTsrs = make({})''',
-''' tsr, ok = ss.ValsTsrs[name]''',
-''' if not ok:''',
-''' tsr = etensor.Float32()''',
-''' ss.ValsTsrs[name] = tsr''',
-],[
-''' if name in ss.ValsTsrs:''',
-''' return ss.ValsTsrs[name]''',
-''' tsr = etensor.Float32()''',
-''' ss.ValsTsrs[name] = tsr''',
-]),
-([
-''' sv = giv.AddNewStructView(split, "sv")''',
-''' sv.SetStruct(ss)'''
-],[
-''' cv = ss.NewClassView("sv")''',
-''' cv.AddFrame(split)''',
-''' cv.Config()'''
-]),
-([
-''' nv = *netview.NetView(tv.AddNewTab(netview.KiT_NetView, "NetView"))'''
-],[
-''' nv = netview.NetView()''',
-''' tv.AddTab(nv, "NetView")'''
-]),
-([
-''' plt = *eplot.Plot2D(tv.AddNewTab(eplot.KiT_Plot2D, "TrnEpcPlot"))'''
-],[
-''' plt = eplot.Plot2D()''',
-''' tv.AddTab(plt, "TrnEpcPlot")'''
-]),
-([
-''' plt = *eplot.Plot2D(tv.AddNewTab(eplot.KiT_Plot2D, "TstTrlPlot"))'''
-],[
-''' plt = eplot.Plot2D()''',
-''' tv.AddTab(plt, "TstTrlPlot")'''
-]),
-([
-''' plt = *eplot.Plot2D(tv.AddNewTab(eplot.KiT_Plot2D, "TstCycPlot"))'''
-],[
-''' plt = eplot.Plot2D()''',
-''' tv.AddTab(plt, "TstCycPlot")'''
-]),
-([
-''' plt = *eplot.Plot2D(tv.AddNewTab(eplot.KiT_Plot2D, "TstEpcPlot"))'''
-],[
-''' plt = eplot.Plot2D()''',
-''' tv.AddTab(plt, "TstEpcPlot")'''
-]),
-([
-''' plt = *eplot.Plot2D(tv.AddNewTab(eplot.KiT_Plot2D, "RunPlot"))'''
-],[
-''' plt = eplot.Plot2D()''',
-''' tv.AddTab(plt, "RunPlot")'''
-]),
-([
-''' plt = *eplot.Plot2D(tv.AddNewTab(eplot.KiT_Plot2D, "TrnEpcPlot"))'''
-],[
-''' plt = eplot.Plot2D()''',
-''' tv.AddTab(plt, "TrnEpcPlot")'''
-]),
-([
-''' split.SetSplits(.2, .8)'''
-],[
-''' split.SetSplitsList(go.Slice_float32([.2, .8]))''',
-''' recv = win.This()'''
-]),
-]
-
-deletes = [
-[
-''' # re-config env just in case a different set of patterns was''',
-],[
-''' # selected or patterns have been modified etc''',
-],[
-''' # ss.Win.PollEvents() // this can be used instead of running in a separate goroutine''',
-],[
-''' # update prior weight changes at start, so any DWt values remain visible at end''',
-''' # you might want to do this less frequently to achieve a mini-batch update''',
-''' # in which case, move it out to the TrainTrial method where the relevant''',
-''' # counters are being dealt with.''',
-]
-]
-
-main(sys.argv)
-
diff --git a/python/gotopy/requirements.txt b/python/gotopy/requirements.txt
deleted file mode 100644
index 8b1378917..000000000
--- a/python/gotopy/requirements.txt
+++ /dev/null
@@ -1 +0,0 @@
-
diff --git a/python/pyside/etor.py b/python/pyside/etor.py
deleted file mode 100644
index dba7f318a..000000000
--- a/python/pyside/etor.py
+++ /dev/null
@@ -1,121 +0,0 @@
-# Copyright (c) 2020, The Emergent Authors. All rights reserved.
-# Use of this source code is governed by a BSD-style
-# license that can be found in the LICENSE file.
-
-# etor is the python-side library for eTorch, for saving and copying network
-# state for visualization.
-
-# note: the code below should be updated for standalone etorch vs. leabra
-
-from etorch import go, etorch, gi, netview
-
-import torch
-
-class State(object):
- """
- State manages saving and copying of network state
- """
- def __init__(self, nn):
- self.nn = nn # our torch.nn module
- self.record = True # set to False to turn off recording
- self.rec_wts = False # set to True to turn on recording of prjn-level weight state
- self.trace = False # print out dimensions of what is recorded -- useful for initial config
- self.wtmap = {} # dict of names for prjn weights
- self.net = 0 # network that we save to
-
- def set_net(self, net):
- """
- set_net sets the etorch.Network to display to
- """
- self.net = net
-
- def rec(self, x, var):
- """
- rec records current tensor state x to variable named var
- """
- if not self.record:
- return
- if self.trace:
- print(var, x.size())
-
- sd = self.nn.state_dict()
- net = self.net
-
- nmv = var.split(".")
- vnm = nmv[-1]
- lnm = ".".join(nmv[:-1])
- ly = etorch.Layer(net.LayerByName(lnm))
- nst = ly.States[vnm]
- nst.Values.copy(torch.flatten(x))
-
- if not self.rec_wts:
- return
-
- for pi in ly.RcvPrjns:
- pj = etorch.Prjn(handle=pi)
- pnm = pj.Name()
- if pnm not in self.wtmap:
- continue
- wnm = self.wtmap[pnm]
- wts = sd[wnm + ".weight"]
- pst = pj.States["Wt"]
- pst.Values.copy(torch.flatten(wts))
- bnm = wnm + ".bias"
- if bnm in sd:
- bst = sd[bnm]
- lst = ly.States["Bias"]
- lst.Values.copy(torch.flatten(bst))
-
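-# A minimal usage sketch for State (hypothetical names -- net_mod is your
-# torch.nn module, enet is a configured etorch.Network, and the prjn name
-# in wtmap is illustrative):
-#
-#   st = State(net_mod)
-#   st.set_net(enet)
-#   st.wtmap = {"HiddenFmInput": "fc1"}  # prjn name -> torch module name
-#   x = net_mod.fc1(inp)
-#   st.rec(x, "Hidden.Act")  # record x as the "Act" variable of layer Hidden
-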
-class NetView(object):
- """
- NetView opens a separate window with the network view -- for standalone use.
- """
- def __init__(self, net):
- self.Net = net
- self.NetView = 0
- self.Win = 0
- self.vp = 0
-
- def open(ss):
- """
- open opens the window of this gui
- """
- width = 1600
- height = 1200
-
- win = gi.NewMainWindow("netview", "eTorch NetView", width, height)
- ss.Win = win
-
- vp = win.WinViewport2D()
- ss.vp = vp
- updt = vp.UpdateStart()
-
- mfr = win.SetMainFrame()
-
- nv = netview.NetView()
- mfr.AddChild(nv)
- nv.Var = "Act"
- nv.SetNet(ss.Net)
- ss.NetView = nv
-
- # main menu
- appnm = gi.AppName()
- mmen = win.MainMenu
- mmen.ConfigMenus(go.Slice_string([appnm, "File", "Edit", "Window"]))
-
- amen = gi.Action(win.MainMenu.ChildByName(appnm, 0))
- amen.Menu.AddAppMenu(win)
-
- emen = gi.Action(win.MainMenu.ChildByName("Edit", 1))
- emen.Menu.AddCopyCutPaste(win)
- win.MainMenuUpdated()
- vp.UpdateEndNoSig(updt)
- win.GoStartEventLoop()
-
- def update(ss):
- """
- call update to update display
- """
- ss.NetView.Record("") # note: can include any kind of textual state information here to display too
- ss.NetView.GoUpdate()
-
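-# A minimal standalone usage sketch for NetView (hypothetical -- enet is a
-# configured etorch.Network):
-#
-#   nv = NetView(enet)
-#   nv.open()     # builds the window and starts the GUI event loop
-#   nv.update()   # call after each step to refresh the display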
diff --git a/python/pyside/pyet.py b/python/pyside/pyet.py
deleted file mode 100644
index 62caad3de..000000000
--- a/python/pyside/pyet.py
+++ /dev/null
@@ -1,383 +0,0 @@
-# Copyright (c) 2020, The Emergent Authors. All rights reserved.
-# Use of this source code is governed by a BSD-style
-# license that can be found in the LICENSE file.
-
-# code for converting etensor.Tensor and etable.Table to / from
-# various python data formats including numpy, pandas, and pytorch `TensorDataset`,
-# which has the same structure as an `etable`, and is used in the
-# `pytorch` neural network framework.
-
-from leabra import go, etable, etensor
-
-import numpy as np
-import pandas as pd
-import torch
-import torch.utils.data as data_utils
-
-def etensor_to_numpy(et):
- """
- returns a numpy ndarray constructed from the given etensor.Tensor.
- data is copied into the numpy ndarray -- it is not a view.
- """
- nar = 0
- if et.DataType() == etensor.UINT8:
- nar = np.array(etensor.Uint8(et).Values, dtype=np.uint8)
- elif et.DataType() == etensor.INT8:
- nar = np.array(etensor.Int8(et).Values, dtype=np.int8)
- elif et.DataType() == etensor.UINT16:
- nar = np.array(etensor.Uint16(et).Values, dtype=np.uint16)
- elif et.DataType() == etensor.INT16:
- nar = np.array(etensor.Int16(et).Values, dtype=np.int16)
- elif et.DataType() == etensor.UINT32:
- nar = np.array(etensor.Uint32(et).Values, dtype=np.uint32)
- elif et.DataType() == etensor.INT32:
- nar = np.array(etensor.Int32(et).Values, dtype=np.int32)
- elif et.DataType() == etensor.UINT64:
- nar = np.array(etensor.Uint64(et).Values, dtype=np.uint64)
- elif et.DataType() == etensor.INT64:
- nar = np.array(etensor.Int64(et).Values, dtype=np.int64)
- elif et.DataType() == etensor.FLOAT32:
- nar = np.array(etensor.Float32(et).Values, dtype=np.float32)
- elif et.DataType() == etensor.FLOAT64:
- nar = np.array(etensor.Float64(et).Values, dtype=np.float64)
- elif et.DataType() == etensor.STRING:
- nar = np.array(etensor.String(et).Values)
- elif et.DataType() == etensor.INT:
- nar = np.array(etensor.Int(et).Values, dtype=np.intc)
- elif et.DataType() == etensor.BOOL:
- etb = etensor.Bits(et)
- sz = etb.Len()
- nar = np.zeros(sz, dtype=np.bool_)
- for i in range(sz):
- nar[i] = etb.Value1D(i)
- else:
- raise TypeError("tensor with type %s cannot be converted" % (et.DataType().String()))
- # there does not appear to be a way to set the shape at the same time as initializing
- return nar.reshape(et.Shapes())
-
-
-def numpy_to_etensor(nar):
- """
- returns an etensor.Tensor constructed from the given numpy ndarray.
- data is copied into the Tensor -- it is not a view.
- """
- et = 0
- narf = np.reshape(nar, -1) # flat view
- if nar.dtype == np.uint8:
- et = etensor.NewUint8(go.Slice_int(list(nar.shape)), go.nil, go.nil)
- et.Values.copy(narf)
- elif nar.dtype == np.int8:
- et = etensor.NewInt8(go.Slice_int(list(nar.shape)), go.nil, go.nil)
- et.Values.copy(narf)
- elif nar.dtype == np.uint16:
- et = etensor.NewUint16(go.Slice_int(list(nar.shape)), go.nil, go.nil)
- et.Values.copy(narf)
- elif nar.dtype == np.int16:
- et = etensor.NewInt16(go.Slice_int(list(nar.shape)), go.nil, go.nil)
- et.Values.copy(narf)
- elif nar.dtype == np.uint32:
- et = etensor.NewUint32(go.Slice_int(list(nar.shape)), go.nil, go.nil)
- et.Values.copy(narf)
- elif nar.dtype == np.int32:
- et = etensor.NewInt32(go.Slice_int(list(nar.shape)), go.nil, go.nil)
- et.Values.copy(narf)
- elif nar.dtype == np.uint64:
- et = etensor.NewUint64(go.Slice_int(list(nar.shape)), go.nil, go.nil)
- et.Values.copy(narf)
- elif nar.dtype == np.int64:
- et = etensor.NewInt64(go.Slice_int(list(nar.shape)), go.nil, go.nil)
- et.Values.copy(narf)
- elif nar.dtype == np.float32:
- et = etensor.NewFloat32(go.Slice_int(list(nar.shape)), go.nil, go.nil)
- et.Values.copy(narf)
- elif nar.dtype == np.float64:
- et = etensor.NewFloat64(go.Slice_int(list(nar.shape)), go.nil, go.nil)
- et.Values.copy(narf)
- elif nar.dtype.type is np.string_ or nar.dtype.type is np.str_:
- et = etensor.NewString(go.Slice_int(list(nar.shape)), go.nil, go.nil)
- et.Values.copy(narf)
- elif nar.dtype == np.int_ or nar.dtype == np.intc:
- et = etensor.NewInt(go.Slice_int(list(nar.shape)), go.nil, go.nil)
- et.Values.copy(narf)
- elif nar.dtype == np.bool_:
- et = etensor.NewBits(go.Slice_int(list(nar.shape)), go.nil, go.nil)
- rnar = narf
- sz = len(rnar)
- for i in range(sz):
- et.Set1D(i, rnar[i])
- else:
- raise TypeError("numpy ndarray with type %s cannot be converted" % (nar.dtype))
- return et
-
-#########################
-# Copying
-
-def copy_etensor_to_numpy(nar, et):
- """
- copies data from etensor.Tensor (et, source) to existing numpy ndarray (nar, dest).
- """
- narf = np.reshape(nar, -1)
- etv = et
- if et.DataType() == etensor.UINT8:
- etv = etensor.Uint8(et).Values
- elif et.DataType() == etensor.INT8:
- etv = etensor.Int8(et).Values
- elif et.DataType() == etensor.UINT16:
- etv = etensor.Uint16(et).Values
- elif et.DataType() == etensor.INT16:
- etv = etensor.Int16(et).Values
- elif et.DataType() == etensor.UINT32:
- etv = etensor.Uint32(et).Values
- elif et.DataType() == etensor.INT32:
- etv = etensor.Int32(et).Values
- elif et.DataType() == etensor.UINT64:
- etv = etensor.Uint64(et).Values
- elif et.DataType() == etensor.INT64:
- etv = etensor.Int64(et).Values
- elif et.DataType() == etensor.FLOAT32:
- etv = etensor.Float32(et).Values
- elif et.DataType() == etensor.FLOAT64:
- etv = etensor.Float64(et).Values
- elif et.DataType() == etensor.STRING:
- etv = etensor.String(et).Values
- elif et.DataType() == etensor.INT:
- etv = etensor.Int(et).Values
- elif et.DataType() == etensor.BOOL:
- etb = etensor.Bits(et)
- sz = min(etb.Len(), len(narf))
- for i in range(sz):
- narf[i] = etb.Value1D(i)
- return
- else:
-        raise TypeError("tensor with type %s cannot be copied" % (et.DataType().String()))
- np.copyto(narf, etv, casting='unsafe')
-
-def copy_numpy_to_etensor(et, nar):
- """
- copies data from numpy ndarray (nar, source) to existing etensor.Tensor (et, dest)
- """
- narf = np.reshape(nar, -1)
- etv = et
- if et.DataType() == etensor.UINT8:
- etv = etensor.Uint8(et).Values
- elif et.DataType() == etensor.INT8:
- etv = etensor.Int8(et).Values
- elif et.DataType() == etensor.UINT16:
- etv = etensor.Uint16(et).Values
- elif et.DataType() == etensor.INT16:
- etv = etensor.Int16(et).Values
- elif et.DataType() == etensor.UINT32:
- etv = etensor.Uint32(et).Values
- elif et.DataType() == etensor.INT32:
- etv = etensor.Int32(et).Values
- elif et.DataType() == etensor.UINT64:
- etv = etensor.Uint64(et).Values
- elif et.DataType() == etensor.INT64:
- etv = etensor.Int64(et).Values
- elif et.DataType() == etensor.FLOAT32:
- etv = etensor.Float32(et).Values
- elif et.DataType() == etensor.FLOAT64:
- etv = etensor.Float64(et).Values
- elif et.DataType() == etensor.STRING:
- etv = etensor.String(et).Values
- elif et.DataType() == etensor.INT:
- etv = etensor.Int(et).Values
- elif et.DataType() == etensor.BOOL:
- etb = etensor.Bits(et)
- sz = min(etb.Len(), len(narf))
- for i in range(sz):
-            etb.Set1D(i, narf[i])  # copy from the numpy source into the Bits tensor
- return
- else:
-        raise TypeError("tensor with type %s cannot be copied" % (et.DataType().String()))
- etv.copy(narf) # go slice copy, not python copy = clone
-
-
-##########################################
-# Tables
-
-class eTable(object):
- """
- pyet.eTable is a Python version of the Go etable.Table, with slices of columns
- as numpy ndarrays, and corresponding column names, along with a coordinated
-    dictionary of names to col indexes. This is returned by the basic
-    etable_to_py() function, which converts all data from an etable.Table,
-    and can then be used to convert into other python datatable / frame
- structures.
- """
- def __init__(self):
- self.Cols = []
- self.ColNames = []
- self.Rows = 0
- self.ColNameMap = {}
- self.MetaData = {}
-
- def __str__(dt):
- return "Columns: %s\nRows: %d Cols:\n%s\n" % (dt.ColNameMap, dt.Rows, dt.Cols)
-
- def UpdateColNameMap(dt):
- """
- UpdateColNameMap updates the column name map
- """
- dt.ColNameMap = {}
- for i, nm in enumerate(dt.ColNames):
- dt.ColNameMap[nm] = i
-
- def AddCol(dt, nar, name):
- """
- AddCol adds a numpy ndarray as a new column, with given name
- """
- dt.Cols.append(nar)
- dt.ColNames.append(name)
- dt.UpdateColNameMap()
-
- def ColByName(dt, name):
- """
- ColByName returns column of given name, or raises a LookupError if not found
- """
- if name in dt.ColNameMap:
- return dt.Cols[dt.ColNameMap[name]]
- raise LookupError("column named: %s not found" % (name))
-
- def MergeCols(dt, st_nm, n):
- """
-        MergeCols merges n sequential columns into one multidimensional column,
-        starting at the column with the given name. The merged result is stored
-        under st_nm, and the now-redundant source columns are removed.
- """
- sti = dt.ColNameMap[st_nm]
- cls = dt.Cols[sti:sti+n]
- nc = np.column_stack(cls)
- dt.Cols[sti] = nc
- del dt.Cols[sti+1:sti+n]
- del dt.ColNames[sti+1:sti+n]
- dt.UpdateColNameMap()
-
- def ReshapeCol(dt, colnm, shp):
- """
- ReshapeCol reshapes column to given shape
- """
- ci = dt.ColNameMap[colnm]
- dc = dt.Cols[ci]
- dt.Cols[ci] = dc.reshape(shp)
-
-def etable_to_py(et):
- """
- returns a pyet.eTable python version of given etable.Table.
- The eTable can then be converted into other standard Python formats,
- but most of them don't quite capture exactly the same information, so
- the eTable can be handy to keep around.
- """
- pt = eTable()
- pt.Rows = et.Rows
- nc = len(et.Cols)
- for ci in range(nc):
- dc = et.Cols[ci]
- cn = et.ColNames[ci]
- nar = etensor_to_numpy(dc)
- pt.AddCol(nar, cn)
- for md in et.MetaData:
- pt.MetaData[md[0]] = md[1]
- return pt
-
-def py_to_etable(pt):
- """
- returns an etable.Table version of given pyet.eTable.
- """
- et = etable.Table()
- et.Rows = pt.Rows
- nc = len(pt.Cols)
- for ci in range(nc):
- pc = pt.Cols[ci]
- cn = pt.ColNames[ci]
- tsr = numpy_to_etensor(pc)
- et.AddCol(tsr, cn)
- for md in pt.MetaData:
- et.SetMetaData(md, pt.MetaData[md])
- return et
-
-def copy_etable_to_py(pt, et):
- """
- copies values in columns of same name from etable.Table to pyet.eTable
- """
- nc = len(pt.Cols)
- for ci in range(nc):
- pc = pt.Cols[ci]
- cn = pt.ColNames[ci]
- try:
- dc = et.ColByNameTry(cn)
- copy_etensor_to_numpy(pc, dc)
-        except Exception:  # column not present in source table -- skip it
-            pass
-
-def copy_py_to_etable(et, pt):
- """
- copies values in columns of same name from pyet.eTable to etable.Table
- """
- nc = len(et.Cols)
- for ci in range(nc):
- dc = et.Cols[ci]
- cn = et.ColNames[ci]
- try:
- pc = pt.ColByName(cn)
- copy_numpy_to_etensor(dc, pc)
-        except Exception:  # column not present in source table -- skip it
-            pass
-
-def etable_to_torch(et):
- """
- returns a torch.utils.data.TensorDataset constructed from the numeric columns
- of the given pyet.eTable (string columns are not allowed in TensorDataset)
- """
- tsrs = []
- nc = len(et.Cols)
- for ci in range(nc):
- dc = et.Cols[ci]
-
- if dc.dtype.type is np.string_ or dc.dtype.type is np.str_:
- continue
-
- tsr = torch.from_numpy(dc)
- tsrs.append(tsr)
- ds = data_utils.TensorDataset(*tsrs)
- return ds
-
-def etable_to_pandas(et, skip_tensors=False):
- """
- returns a pandas DataFrame constructed from the columns
-    of the given pyet.eTable, spreading each tensor (multi-dimensional) cell
-    over sequential 1d columns, unless skip_tensors is set.
- """
- ed = {}
- nc = len(et.Cols)
- for ci in range(nc):
- dc = et.Cols[ci]
- cn = et.ColNames[ci]
- if dc.ndim == 1:
- ed[cn] = dc
- continue
- if skip_tensors:
- continue
- csz = int(dc.size / et.Rows) # cell size
- rs = dc.reshape([et.Rows, csz])
- for i in range(csz):
- cnn = "%s_%d" % (cn, i)
- ed[cnn] = rs[:,i]
- df = pd.DataFrame(data=ed)
- return df
-
-def pandas_to_etable(df):
- """
- returns a pyet.eTable constructed from given pandas DataFrame
- """
- pt = eTable()
- pt.Rows = len(df.index)
- for cn in df.columns:
- dc = df.loc[:, cn].values
- pt.AddCol(dc, cn)
- return pt
-
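Since this commit drops the gopy-based Python bindings wholesale, a brief, hedged sketch of how the deleted pyet converters were typically driven may help anyone migrating old scripts. It assumes a pre-removal leabra wheel is still installed; the array contents and the "Input" column name are purely illustrative.

```python
import numpy as np
import pyet  # the module removed above

# numpy -> etensor: data is copied into the tensor, not viewed
ar = np.arange(12, dtype=np.float32).reshape(3, 4)
tsr = pyet.numpy_to_etensor(ar)

# etensor -> numpy round trip preserves the shape
back = pyet.etensor_to_numpy(tsr)
assert back.shape == (3, 4)

# wrap the array in the pure-Python eTable and hand off to pandas;
# the 3x4 tensor column spreads into columns Input_0 .. Input_3
pt = pyet.eTable()
pt.Rows = 3
pt.AddCol(back, "Input")
df = pyet.etable_to_pandas(pt)
```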
diff --git a/python/pyside/pygiv.py b/python/pyside/pygiv.py
deleted file mode 100644
index eef4c5fd3..000000000
--- a/python/pyside/pygiv.py
+++ /dev/null
@@ -1,410 +0,0 @@
-# Copyright (c) 2019, The GoKi Authors. All rights reserved.
-# Use of this source code is governed by a BSD-style
-# license that can be found in the LICENSE file.
-
-from leabra import go, gi, giv, kit
-from enum import Enum
-
-class ClassViewObj(object):
- """
- ClassViewObj is the base class for Python-defined classes that support a GUI editor (View)
- that functions like the StructView in GoGi. It maintains a dict of tags for each field
- that determine tooltips and other behavior for the field GUI representation.
- """
- def __init__(self):
- self.Tags = {}
- self.ClassView = 0
- self.ClassViewInline = 0
- self.ClassViewDialog = 0
-
- def SetTags(self, field, tags):
- self.Tags[field] = tags
-
- def NewClassView(self, name):
- self.ClassView = ClassView(self, name)
- return self.ClassView
-
- def UpdateClassView(self):
- if self.ClassView != 0:
- self.ClassView.Update()
-
- def NewClassViewInline(self, name):
- self.ClassViewInline = ClassViewInline(self, name)
- return self.ClassViewInline
-
- def UpdateClassViewInline(self):
- if self.ClassViewInline != 0:
- self.ClassViewInline.Update()
-
- def OpenViewDialog(self, vp, name, tags):
- """ opens a new dialog window for this object, or if one already exists, raises it """
- if self.ClassViewDialog != 0 and self.ClassViewDialog.Win.IsVisible():
- self.ClassViewDialog.Win.Raise()
- return
- self.ClassViewDialog = ClassViewDialog(vp, self, name, tags, giv.DlgOpts(Title=name))
- return self.ClassViewDialog
-
-class ClassViewInline(object):
- """
-    ClassViewInline provides a GoGi giv.StructViewInline-like inline editor for
-    python class objects under GoGi.
-    Due to limitations on calling python callbacks across threads, you must pass a unique
-    name to the constructor. The object must be a ClassViewObj, with tags using the same
- syntax as the struct field tags in Go: https://github.com/goki/gi/wiki/Tags
- for customizing the view properties (space separated, name:"value")
- """
- def __init__(self, obj, name):
- """ note: essential to provide a distinctive name for each view """
- self.Class = obj
- self.Name = name
- classviews[name] = self
- self.Lay = 0
- self.Tags = obj.Tags
- self.Views = {} # dict of ValueView reps of Go objs
- self.Widgets = {} # dict of Widget reps of Python objs
-
- def FieldTags(self, field):
- """ returns the full string of tags for given field, empty string if none """
- if field in self.Tags:
- return self.Tags[field]
- return ""
-
- def FieldTagVal(self, field, key):
- """ returns the value for given key in tags for given field, empty string if none """
- return giv.StructTagVal(key, self.FieldTags(field))
-
- def Config(self):
- self.Lay = gi.Layout()
- self.Lay.InitName(self.Lay, self.Name)
- self.Lay.Lay = gi.LayoutHoriz
- self.Lay.SetStretchMaxWidth()
- updt = self.Lay.UpdateStart()
- flds = self.Class.__dict__
- self.Views = {}
- self.Widgets = {}
- for nm, val in flds.items():
- tags = self.FieldTags(nm)
- if HasTagValue(tags, "view", "-") or nm == "Tags" or nm.startswith("ClassView"):
- continue
- lbl = gi.Label(self.Lay.AddNewChild(gi.KiT_Label(), "lbl_" + nm))
- lbl.Redrawable = True
- lbl.SetProp("horizontal-align", "left")
- lbl.SetText(nm)
- dsc = self.FieldTagVal(nm, "desc")
- if dsc != "":
- lbl.Tooltip = dsc
- if isinstance(val, go.GoClass):
- fnm = self.Name + ":" + nm
- if kit.IfaceIsNil(val):
-                    print("Field %s is Nil in ClassViewInline for obj: %s" % (fnm, str(self.Class)))
- continue
- vv = giv.ToValueView(val, tags)
- giv.SetSoloValueIface(vv, val)
- vw = self.Lay.AddNewChild(vv.WidgetType(), fnm)
- vv.ConfigWidget(vw)
- self.Views[nm] = vv
- self.Widgets[nm] = vw
- # todo: vv.ViewSig.Connect?
- else:
- vw = PyObjView(val, nm, self.Lay, self.Name, tags)
- self.Widgets[nm] = vw
- self.Lay.UpdateEnd(updt)
-
- def Update(self):
- updt = self.Lay.UpdateStart()
- flds = self.Class.__dict__
- for nm, val in flds.items():
- if nm in self.Views:
- vv = self.Views[nm]
- giv.SetSoloValueIface(vv, val) # always update in case it might have changed
- vv.UpdateWidget()
- elif nm in self.Widgets:
- vw = self.Widgets[nm]
- PyObjUpdtView(val, vw, nm)
- self.Lay.UpdateEnd(updt)
-
-class ClassView(object):
- """
-    ClassView provides a GoGi giv.StructView-like editor for python class objects under GoGi.
-    Due to limitations on calling python callbacks across threads, you must pass a unique
-    name to the constructor. The object must be a ClassViewObj, with tags using the same
- syntax as the struct field tags in Go: https://github.com/goki/gi/wiki/Tags
- for customizing the view properties (space separated, name:"value")
- """
- def __init__(self, obj, name):
- """ note: essential to provide a distinctive name for each view """
- self.Class = obj
- self.Name = name
- classviews[name] = self
- self.Frame = 0
- self.Tags = obj.Tags
- self.Views = {} # dict of ValueView reps of Go objs
- self.Widgets = {} # dict of Widget reps of Python objs
-
- def AddFrame(self, par):
- """ Add a new gi.Frame for the view to given parent gi object """
- self.Frame = gi.Frame(par.AddNewChild(gi.KiT_Frame(), "classview"))
-
- def FieldTags(self, field):
- """ returns the full string of tags for given field, empty string if none """
- if field in self.Tags:
- return self.Tags[field]
- return ""
-
- def FieldTagVal(self, field, key):
- """ returns the value for given key in tags for given field, empty string if none """
- return giv.StructTagVal(key, self.FieldTags(field))
-
- def Config(self):
- self.Frame.SetStretchMaxWidth()
- self.Frame.SetStretchMaxHeight()
- self.Frame.Lay = gi.LayoutGrid
- self.Frame.Stripes = gi.RowStripes
- self.Frame.SetPropInt("columns", 2)
- updt = self.Frame.UpdateStart()
- self.Frame.SetFullReRender()
- self.Frame.DeleteChildren(True)
- flds = self.Class.__dict__
- self.Views = {}
- self.Widgets = {}
- for nm, val in flds.items():
- tags = self.FieldTags(nm)
- if HasTagValue(tags, "view", "-") or nm == "Tags" or nm.startswith("ClassView"):
- continue
- lbl = gi.Label(self.Frame.AddNewChild(gi.KiT_Label(), "lbl_" + nm))
- lbl.SetText(nm)
- dsc = self.FieldTagVal(nm, "desc")
- if dsc != "":
- lbl.Tooltip = dsc
- if isinstance(val, go.GoClass):
- fnm = self.Name + ":" + nm
- if kit.IfaceIsNil(val):
- print("Field %s is Nil in ClassView for obj: %s" % (fnm, str(self.Class)))
- continue
- vv = giv.ToValueView(val, tags)
- giv.SetSoloValueIface(vv, val)
- vw = self.Frame.AddNewChild(vv.WidgetType(), fnm)
- vv.ConfigWidget(vw)
- self.Views[nm] = vv
- self.Widgets[nm] = vw
- # todo: vv.ViewSig.Connect?
- else:
- vw = PyObjView(val, nm, self.Frame, self.Name, tags)
- self.Widgets[nm] = vw
- self.Frame.UpdateEnd(updt)
-
- def Update(self):
- updt = self.Frame.UpdateStart()
- flds = self.Class.__dict__
- for nm, val in flds.items():
- if nm in self.Views:
- vv = self.Views[nm]
- giv.SetSoloValueIface(vv, val) # always update in case it might have changed
- vv.UpdateWidget()
- elif nm in self.Widgets:
- vw = self.Widgets[nm]
- PyObjUpdtView(val, vw, nm)
- self.Frame.UpdateEnd(updt)
-
-def ClassViewDialog(vp, obj, name, tags, opts):
- """
- ClassViewDialog returns a dialog with ClassView editor for python
- class objects under GoGi.
- opts must be a giv.DlgOpts instance
- """
- dlg = gi.NewStdDialog(opts.ToGiOpts(), opts.Ok, opts.Cancel)
- frame = dlg.Frame()
- prIdx = dlg.PromptWidgetIdx(frame)
-
- cv = obj.NewClassView(name)
- cv.Frame = gi.Frame(frame.InsertNewChild(gi.KiT_Frame(), prIdx+1, "cv-frame"))
- cv.Config()
-
- # sv.Viewport = dlg.Embed(gi.KiT_Viewport2D).(*gi.Viewport2D)
- # if opts.Inactive {
- # sv.SetInactive()
- # }
-
- dlg.UpdateEndNoSig(True)
- dlg.Open(0, 0, vp, go.nil)
- return dlg
-
-# classviews is a dictionary of classviews -- needed for callbacks
-classviews = {}
-
-def TagValue(tags, key):
- """ returns tag value for given key """
- return giv.StructTagVal(key, tags)
-
-def HasTagValue(tags, key, value):
- """ returns true if given key has given value """
- tval = giv.StructTagVal(key, tags)
- return tval == value
-
-def PyObjView(val, nm, frame, ctxt, tags):
- """
- PyObjView returns a gi.Widget representing the given Python value,
- with given name.
- frame = gi.Frame or layout to add widgets to -- also callback recv
- ctxt = context for this object (e.g., name of owning struct)
- """
- vw = 0
- fnm = ctxt + ":" + nm
- if isinstance(val, Enum):
- vw = gi.AddNewComboBox(frame, fnm)
- vw.SetText(nm)
- vw.SetPropStr("padding", "2px")
- vw.SetPropStr("margin", "2px")
- ItemsFromEnum(vw, val)
- vw.ComboSig.Connect(frame, SetEnumCB)
- elif isinstance(val, ClassViewObj):
- if HasTagValue(tags, "view", "inline"):
- sv = val.NewClassViewInline(ctxt + "_" + nm) # new full name
- sv.Config()
- frame.AddChild(sv.Lay)
- vw = sv.Lay
- else:
- vw = gi.AddNewAction(frame, fnm)
- vw.SetText(nm)
- vw.SetPropStr("padding", "2px")
- vw.SetPropStr("margin", "2px")
- vw.SetPropStr("border-radius", "4px")
- vw.ActionSig.Connect(frame, EditObjCB)
- elif isinstance(val, bool):
- vw = gi.AddNewCheckBox(frame, fnm)
- vw.SetChecked(val)
- vw.ButtonSig.Connect(frame, SetBoolValCB)
- elif isinstance(val, (int, float)):
- vw = gi.AddNewSpinBox(frame, fnm)
- vw.SetValue(val)
- if isinstance(val, int):
- vw.SpinBoxSig.Connect(frame, SetIntValCB)
- vw.Step = 1
- else:
- vw.SpinBoxSig.Connect(frame, SetFloatValCB)
- mv = TagValue(tags, "min")
- if mv != "":
- vw.SetMin(float(mv))
- mv = TagValue(tags, "max")
- if mv != "":
- vw.SetMax(float(mv))
- mv = TagValue(tags, "step")
- if mv != "":
- vw.Step = float(mv)
- mv = TagValue(tags, "format")
- if mv != "":
- vw.Format = mv
- else:
- vw = gi.AddNewTextField(frame, fnm)
- vw.SetText(str(val))
- vw.SetPropStr("min-width", "10em")
- vw.TextFieldSig.Connect(frame, SetStrValCB)
- mv = TagValue(tags, "width")
- if mv != "":
- vw.SetProp("width", mv + "ch")
- if HasTagValue(tags, "inactive", "+"):
- vw.SetInactive()
- return vw
-
-def PyObjUpdtView(val, vw, nm):
- """
- updates the given view widget for given value
- """
- if isinstance(val, Enum):
- if isinstance(vw, gi.ComboBox):
- svw = gi.ComboBox(vw)
- svw.SetCurVal(val.name)
- else:
-            print("pygiv; Enum value: %s doesn't have ComboBox widget" % nm)
- elif isinstance(val, go.GoClass):
- pass
- elif isinstance(val, ClassViewObj):
- val.UpdateClassViewInline()
- val.UpdateClassView()
- elif isinstance(val, bool):
- if isinstance(vw, gi.CheckBox):
- svw = gi.CheckBox(vw)
- svw.SetChecked(val)
- else:
-            print("pygiv; bool value: %s doesn't have CheckBox widget" % nm)
- elif isinstance(val, (int, float)):
- if isinstance(vw, gi.SpinBox):
- svw = gi.SpinBox(vw)
- svw.SetValue(val)
- else:
-            print("pygiv; numerical value: %s doesn't have SpinBox widget" % nm)
- else:
- if isinstance(vw, gi.TextField):
- tvw = gi.TextField(vw)
- tvw.SetText(str(val))
- else:
-            print("pygiv; object %s = %s doesn't have expected TextField widget" % (nm, val))
-
-def SetIntValCB(recv, send, sig, data):
- vw = gi.SpinBox(handle=send)
- nm = vw.Name()
- nms = nm.split(':')
- cv = classviews[nms[0]]
- setattr(cv.Class, nms[1], int(vw.Value))
-
-def SetFloatValCB(recv, send, sig, data):
- vw = gi.SpinBox(handle=send)
- nm = vw.Name()
- nms = nm.split(':')
- cv = classviews[nms[0]]
- setattr(cv.Class, nms[1], float(vw.Value))
-
-def EditObjCB(recv, send, sig, data):
- vw = gi.Action(handle=send)
- nm = vw.Name()
- nms = nm.split(':')
- cv = classviews[nms[0]]
- fld = getattr(cv.Class, nms[1])
- tags = cv.FieldTags(nms[1])
- nnm = nm.replace(":", "_")
- return fld.OpenViewDialog(vw.Viewport, nnm, tags)
-
-def SetStrValCB(recv, send, sig, data):
- if sig != gi.TextFieldDone:
- return
- vw = gi.TextField(handle=send)
- nm = vw.Name()
- nms = nm.split(':')
- cv = classviews[nms[0]]
- setattr(cv.Class, nms[1], vw.Text())
-
-def SetBoolValCB(recv, send, sig, data):
- if sig != gi.ButtonToggled:
- return
- vw = gi.CheckBox(handle=send)
- nm = vw.Name()
- # print("cb name:", nm)
- nms = nm.split(':')
- cv = classviews[nms[0]]
- setattr(cv.Class, nms[1], vw.IsChecked() != 0)
-
-##############
-# Enums
-
-def ItemsFromEnum(cb, enm):
- nms = []
- typ = type(enm)
- nnm = typ.__name__ + "N" # common convention of using the type name + N for last item in list
- for en in typ:
- if en.name != nnm:
- nms.append(en.name)
- cb.ItemsFromStringList(go.Slice_string(nms), False, 0)
- cb.SetCurVal(enm.name)
-
-def SetEnumCB(recv, send, sig, data):
- vw = gi.ComboBox(handle=send)
- nm = vw.Name()
- nms = nm.split(':')
- idx = vw.CurIndex
- cv = classviews[nms[0]]
- flds = cv.Class.__dict__
- typ = type(flds[nms[1]])
- vl = typ(idx)
- setattr(cv.Class, nms[1], vl)
-
-
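Similarly, a hedged sketch of how the removed ClassView machinery was used from Python. The SimParams class, its fields, and the tag strings are illustrative, and vp stands for an existing gi.Viewport in a running GoGi app.

```python
from pygiv import ClassViewObj

class SimParams(ClassViewObj):
    """Illustrative parameter object edited via ClassView."""
    def __init__(self):
        super().__init__()
        self.Lrate = 0.04
        self.Epochs = 100
        self.Notes = ""
        # Go struct-tag syntax (space separated, name:"value") drives the widgets
        self.SetTags("Lrate", 'min:"0" max:"1" step:"0.01" desc:"learning rate"')
        self.SetTags("Epochs", 'min:"1" step:"1" desc:"number of training epochs"')
        self.SetTags("Notes", 'width:"40" desc:"free-form notes"')

# pars = SimParams()
# pars.OpenViewDialog(vp, "sim-params", "")  # a unique name is required
```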
diff --git a/python/pyside/pyparams.py b/python/pyside/pyparams.py
deleted file mode 100644
index f9e8f9654..000000000
--- a/python/pyside/pyparams.py
+++ /dev/null
@@ -1,36 +0,0 @@
-# Copyright (c) 2019, The Emergent Authors. All rights reserved.
-# Use of this source code is governed by a BSD-style
-# license that can be found in the LICENSE file.
-
-from leabra import go, params
-
-def ApplyParams(cls, sheet, setMsg):
- """
-    ApplyParams applies the settings in a params.Sheet to the fields of cls
-    """
-    for sl in sheet:
-        sel = params.Sel(handle=sl)
-        for nm, val in sel.Params:
-            path = nm.split('.')[1:]  # drop leading object name, e.g. "Layer" in "Layer.Act.Gain"
-            tcls = cls
-            for i, flnm in enumerate(path):
-                # print("name: %s, value: %s\n" % (flnm, val))
-                if hasattr(tcls, flnm):
-                    cur = getattr(tcls, flnm)
-                    if isinstance(cur, int):
-                        setattr(tcls, flnm, int(val))
-                    elif isinstance(cur, float):
-                        setattr(tcls, flnm, float(val))
-                    else:
-                        if i == len(path)-1:
-                            setattr(tcls, flnm, val)
-                        else:
-                            tcls = cur  # descend into nested object
-                            continue
-                    if setMsg:
-                        print("Field named: %s set to value: %s" % (flnm, val))
-                else:
-                    print("ApplyParams error: field: %s not found in class" % flnm)
-
-
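And for pyparams, a hedged sketch of the calling convention: the params.Sheet comes from a simulation's loaded params.Sets (not shown here), and the dotted parameter paths and SimParams fields are illustrative only.

```python
from pyparams import ApplyParams

class SimParams(object):
    def __init__(self):
        self.Lrate = 0.04
        self.Batch = 16

# each Sel in a params.Sheet maps dotted paths like "Sim.Lrate" to values;
# ApplyParams walks the path on the Python object, coercing int/float fields
# sheet = ...  # obtained from a loaded params.Sets
# ApplyParams(SimParams(), sheet, True)  # True -> print each field as it is set
```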
diff --git a/python/requirements.txt b/python/requirements.txt
deleted file mode 100644
index 1133ac0e9..000000000
--- a/python/requirements.txt
+++ /dev/null
@@ -1,7 +0,0 @@
-pybindgen
-setuptools
-wheel
-numpy
-pandas
-torch
-
diff --git a/python/setup.py b/python/setup.py
deleted file mode 100644
index d9a5c0d3b..000000000
--- a/python/setup.py
+++ /dev/null
@@ -1,22 +0,0 @@
-import setuptools
-
-with open("README.md", "r") as fh:
- long_description = fh.read()
-
-setuptools.setup(
- name="leabra",
- version="1.1.15",
- author="emergent",
- author_email="oreilly@ucdavis.edu",
- description="Python interface to emergent neural network simulation system, in Go",
- long_description=long_description,
- long_description_content_type="text/markdown",
- url="https://github.com/go-python/gopy",
- packages=setuptools.find_packages(),
- classifiers=[
- "Programming Language :: Python :: 3",
- "License :: OSI Approved :: BSD License",
- "Operating System :: OS Independent",
- ],
- include_package_data=True,
-)