diff --git a/axon/act.go b/axon/act.go
index 803441deb..2f1228f7a 100644
--- a/axon/act.go
+++ b/axon/act.go
@@ -6,10 +6,10 @@ package axon
import (
"github.com/emer/axon/chans"
- "github.com/emer/emergent/erand"
- "github.com/emer/etable/minmax"
+ "github.com/emer/emergent/v2/erand"
"github.com/goki/gosl/slbool"
- "github.com/goki/mat32"
+ "goki.dev/etable/v2/minmax"
+ "goki.dev/mat32/v2"
)
///////////////////////////////////////////////////////////////////////
@@ -31,38 +31,38 @@ import (
// the AdEx adaptive exponential function (adapt is KNaAdapt)
type SpikeParams struct {
- // [def: 0.5] threshold value Theta (Q) for firing output activation (.5 is more accurate value based on AdEx biological parameters and normalization
- Thr float32 `def:"0.5" desc:"threshold value Theta (Q) for firing output activation (.5 is more accurate value based on AdEx biological parameters and normalization"`
+ // threshold value Theta (Q) for firing output activation (.5 is a more accurate value based on AdEx biological parameters and normalization)
+ Thr float32 `def:"0.5"`
- // [def: 0.3] post-spiking membrane potential to reset to, produces refractory effect if lower than VmInit -- 0.3 is apropriate biologically-based value for AdEx (Brette & Gurstner, 2005) parameters. See also RTau
- VmR float32 `def:"0.3" desc:"post-spiking membrane potential to reset to, produces refractory effect if lower than VmInit -- 0.3 is apropriate biologically-based value for AdEx (Brette & Gurstner, 2005) parameters. See also RTau"`
+ // post-spiking membrane potential to reset to, produces a refractory effect if lower than VmInit -- 0.3 is an appropriate biologically-based value for AdEx (Brette & Gerstner, 2005) parameters. See also RTau
+ VmR float32 `def:"0.3"`
- // [def: 3] [min: 1] post-spiking explicit refractory period, in cycles -- prevents Vm updating for this number of cycles post firing -- Vm is reduced in exponential steps over this period according to RTau, being fixed at Tr to VmR exactly
- Tr int32 `min:"1" def:"3" desc:"post-spiking explicit refractory period, in cycles -- prevents Vm updating for this number of cycles post firing -- Vm is reduced in exponential steps over this period according to RTau, being fixed at Tr to VmR exactly"`
+ // post-spiking explicit refractory period, in cycles -- prevents Vm updating for this number of cycles post firing -- Vm is reduced in exponential steps over this period according to RTau, being fixed at Tr to VmR exactly
+ Tr int32 `min:"1" def:"3"`
- // [def: 1.6667] time constant for decaying Vm down to VmR -- at end of Tr it is set to VmR exactly -- this provides a more realistic shape of the post-spiking Vm which is only relevant for more realistic channels that key off of Vm -- does not otherwise affect standard computation
- RTau float32 `def:"1.6667" desc:"time constant for decaying Vm down to VmR -- at end of Tr it is set to VmR exactly -- this provides a more realistic shape of the post-spiking Vm which is only relevant for more realistic channels that key off of Vm -- does not otherwise affect standard computation"`
+ // time constant for decaying Vm down to VmR -- at end of Tr it is set to VmR exactly -- this provides a more realistic shape of the post-spiking Vm which is only relevant for more realistic channels that key off of Vm -- does not otherwise affect standard computation
+ RTau float32 `def:"1.6667"`
- // [def: true] if true, turn on exponential excitatory current that drives Vm rapidly upward for spiking as it gets past its nominal firing threshold (Thr) -- nicely captures the Hodgkin Huxley dynamics of Na and K channels -- uses Brette & Gurstner 2005 AdEx formulation
- Exp slbool.Bool `def:"true" desc:"if true, turn on exponential excitatory current that drives Vm rapidly upward for spiking as it gets past its nominal firing threshold (Thr) -- nicely captures the Hodgkin Huxley dynamics of Na and K channels -- uses Brette & Gurstner 2005 AdEx formulation"`
+ // if true, turn on exponential excitatory current that drives Vm rapidly upward for spiking as it gets past its nominal firing threshold (Thr) -- nicely captures the Hodgkin-Huxley dynamics of Na and K channels -- uses the Brette & Gerstner 2005 AdEx formulation
+ Exp slbool.Bool `def:"true"`
- // [def: 0.02] [viewif: Exp] slope in Vm (2 mV = .02 in normalized units) for extra exponential excitatory current that drives Vm rapidly upward for spiking as it gets past its nominal firing threshold (Thr) -- nicely captures the Hodgkin Huxley dynamics of Na and K channels -- uses Brette & Gurstner 2005 AdEx formulation
- ExpSlope float32 `viewif:"Exp" def:"0.02" desc:"slope in Vm (2 mV = .02 in normalized units) for extra exponential excitatory current that drives Vm rapidly upward for spiking as it gets past its nominal firing threshold (Thr) -- nicely captures the Hodgkin Huxley dynamics of Na and K channels -- uses Brette & Gurstner 2005 AdEx formulation"`
+ // slope in Vm (2 mV = .02 in normalized units) for extra exponential excitatory current that drives Vm rapidly upward for spiking as it gets past its nominal firing threshold (Thr) -- nicely captures the Hodgkin-Huxley dynamics of Na and K channels -- uses the Brette & Gerstner 2005 AdEx formulation
+ ExpSlope float32 `viewif:"Exp" def:"0.02"`
- // [def: 0.9] [viewif: Exp] membrane potential threshold for actually triggering a spike when using the exponential mechanism
- ExpThr float32 `viewif:"Exp" def:"0.9" desc:"membrane potential threshold for actually triggering a spike when using the exponential mechanism"`
+ // membrane potential threshold for actually triggering a spike when using the exponential mechanism
+ ExpThr float32 `viewif:"Exp" def:"0.9"`
- // [def: 180] [min: 1] for translating spiking interval (rate) into rate-code activation equivalent, what is the maximum firing rate associated with a maximum activation value of 1
- MaxHz float32 `def:"180" min:"1" desc:"for translating spiking interval (rate) into rate-code activation equivalent, what is the maximum firing rate associated with a maximum activation value of 1"`
+ // for translating spiking interval (rate) into rate-code activation equivalent, what is the maximum firing rate associated with a maximum activation value of 1
+ MaxHz float32 `def:"180" min:"1"`
- // [def: 5] [min: 1] constant for integrating the spiking interval in estimating spiking rate
- ISITau float32 `def:"5" min:"1" desc:"constant for integrating the spiking interval in estimating spiking rate"`
+ // constant for integrating the spiking interval in estimating spiking rate
+ ISITau float32 `def:"5" min:"1"`
- // [view: -] rate = 1 / tau
- ISIDt float32 `view:"-" desc:"rate = 1 / tau"`
+ // rate = 1 / tau
+ ISIDt float32 `view:"-"`
- // [view: -] rate = 1 / tau
- RDt float32 `view:"-" desc:"rate = 1 / tau"`
+ // rate = 1 / tau
+ RDt float32 `view:"-"`
pad int32
}
@@ -124,23 +124,23 @@ func (sk *SpikeParams) AvgFmISI(avg float32, isi float32) float32 {
// DendParams are the parameters for updating dendrite-specific dynamics
type DendParams struct {
- // [def: 0.2,0.5] dendrite-specific strength multiplier of the exponential spiking drive on Vm -- e.g., .5 makes it half as strong as at the soma (which uses Gbar.L as a strength multiplier per the AdEx standard model)
- GbarExp float32 `def:"0.2,0.5" desc:"dendrite-specific strength multiplier of the exponential spiking drive on Vm -- e.g., .5 makes it half as strong as at the soma (which uses Gbar.L as a strength multiplier per the AdEx standard model)"`
+ // dendrite-specific strength multiplier of the exponential spiking drive on Vm -- e.g., .5 makes it half as strong as at the soma (which uses Gbar.L as a strength multiplier per the AdEx standard model)
+ GbarExp float32 `def:"0.2,0.5"`
- // [def: 3,6] dendrite-specific conductance of Kdr delayed rectifier currents, used to reset membrane potential for dendrite -- applied for Tr msec
- GbarR float32 `def:"3,6" desc:"dendrite-specific conductance of Kdr delayed rectifier currents, used to reset membrane potential for dendrite -- applied for Tr msec"`
+ // dendrite-specific conductance of Kdr delayed rectifier currents, used to reset membrane potential for dendrite -- applied for Tr msec
+ GbarR float32 `def:"3,6"`
- // [def: 0,2] SST+ somatostatin positive slow spiking inhibition level specifically affecting dendritic Vm (VmDend) -- this is important for countering a positive feedback loop from NMDA getting stronger over the course of learning -- also typically requires SubMean = 1 for TrgAvgAct and learning to fully counter this feedback loop.
- SSGi float32 `def:"0,2" desc:"SST+ somatostatin positive slow spiking inhibition level specifically affecting dendritic Vm (VmDend) -- this is important for countering a positive feedback loop from NMDA getting stronger over the course of learning -- also typically requires SubMean = 1 for TrgAvgAct and learning to fully counter this feedback loop."`
+ // SST+ somatostatin positive slow spiking inhibition level specifically affecting dendritic Vm (VmDend) -- this is important for countering a positive feedback loop from NMDA getting stronger over the course of learning -- also typically requires SubMean = 1 for TrgAvgAct and learning to fully counter this feedback loop.
+ SSGi float32 `def:"0,2"`
// set automatically based on whether this layer has any recv projections that have a GType conductance type of Modulatory -- if so, then multiply GeSyn etc by GModSyn
- HasMod slbool.Bool `inactive:"+" desc:"set automatically based on whether this layer has any recv projections that have a GType conductance type of Modulatory -- if so, then multiply GeSyn etc by GModSyn"`
+ HasMod slbool.Bool `inactive:"+"`
// multiplicative gain factor on the total modulatory input -- this can also be controlled by the PrjnScale.Abs factor on ModulatoryG inputs, but it is convenient to be able to control on the layer as well.
- ModGain float32 `desc:"multiplicative gain factor on the total modulatory input -- this can also be controlled by the PrjnScale.Abs factor on ModulatoryG inputs, but it is convenient to be able to control on the layer as well."`
+ ModGain float32
// baseline modulatory level for modulatory effects -- net modulation is ModBase + ModGain * GModSyn
- ModBase float32 `desc:"baseline modulatory level for modulatory effects -- net modulation is ModBase + ModGain * GModSyn"`
+ ModBase float32
pad, pad1 int32
}
@@ -163,23 +163,23 @@ func (dp *DendParams) Update() {
// Initialized in InitActs called by InitWts, and provides target values for DecayState.
type ActInitParams struct {
- // [def: 0.3] initial membrane potential -- see Erev.L for the resting potential (typically .3)
- Vm float32 `def:"0.3" desc:"initial membrane potential -- see Erev.L for the resting potential (typically .3)"`
+ // initial membrane potential -- see Erev.L for the resting potential (typically .3)
+ Vm float32 `def:"0.3"`
- // [def: 0] initial activation value -- typically 0
- Act float32 `def:"0" desc:"initial activation value -- typically 0"`
+ // initial activation value -- typically 0
+ Act float32 `def:"0"`
- // [def: 0] baseline level of excitatory conductance (net input) -- Ge is initialized to this value, and it is added in as a constant background level of excitatory input -- captures all the other inputs not represented in the model, and intrinsic excitability, etc
- GeBase float32 `def:"0" desc:"baseline level of excitatory conductance (net input) -- Ge is initialized to this value, and it is added in as a constant background level of excitatory input -- captures all the other inputs not represented in the model, and intrinsic excitability, etc"`
+ // baseline level of excitatory conductance (net input) -- Ge is initialized to this value, and it is added in as a constant background level of excitatory input -- captures all the other inputs not represented in the model, and intrinsic excitability, etc
+ GeBase float32 `def:"0"`
- // [def: 0] baseline level of inhibitory conductance (net input) -- Gi is initialized to this value, and it is added in as a constant background level of inhibitory input -- captures all the other inputs not represented in the model
- GiBase float32 `def:"0" desc:"baseline level of inhibitory conductance (net input) -- Gi is initialized to this value, and it is added in as a constant background level of inhibitory input -- captures all the other inputs not represented in the model"`
+ // baseline level of inhibitory conductance (net input) -- Gi is initialized to this value, and it is added in as a constant background level of inhibitory input -- captures all the other inputs not represented in the model
+ GiBase float32 `def:"0"`
- // [def: 0] variance (sigma) of gaussian distribution around baseline Ge values, per unit, to establish variability in intrinsic excitability. value never goes < 0
- GeVar float32 `def:"0" desc:"variance (sigma) of gaussian distribution around baseline Ge values, per unit, to establish variability in intrinsic excitability. value never goes < 0"`
+ // variance (sigma) of gaussian distribution around baseline Ge values, per unit, to establish variability in intrinsic excitability. value never goes < 0
+ GeVar float32 `def:"0"`
- // [def: 0] variance (sigma) of gaussian distribution around baseline Gi values, per unit, to establish variability in intrinsic excitability. value never goes < 0
- GiVar float32 `def:"0" desc:"variance (sigma) of gaussian distribution around baseline Gi values, per unit, to establish variability in intrinsic excitability. value never goes < 0"`
+ // variance (sigma) of gaussian distribution around baseline Gi values, per unit, to establish variability in intrinsic excitability. value never goes < 0
+ GiVar float32 `def:"0"`
pad, pad1 int32
}
@@ -231,20 +231,20 @@ func (ai *ActInitParams) GetGiBase(rnd erand.Rand) float32 {
// called in NewState when a new state is to be processed.
type DecayParams struct {
- // [def: 0,0.2,0.5,1] [min: 0] [max: 1] proportion to decay most activation state variables toward initial values at start of every ThetaCycle (except those controlled separately below) -- if 1 it is effectively equivalent to full clear, resetting other derived values. ISI is reset every AlphaCycle to get a fresh sample of activations (doesn't affect direct computation -- only readout).
- Act float32 `def:"0,0.2,0.5,1" max:"1" min:"0" desc:"proportion to decay most activation state variables toward initial values at start of every ThetaCycle (except those controlled separately below) -- if 1 it is effectively equivalent to full clear, resetting other derived values. ISI is reset every AlphaCycle to get a fresh sample of activations (doesn't affect direct computation -- only readout)."`
+ // proportion to decay most activation state variables toward initial values at start of every ThetaCycle (except those controlled separately below) -- if 1 it is effectively equivalent to full clear, resetting other derived values. ISI is reset every AlphaCycle to get a fresh sample of activations (doesn't affect direct computation -- only readout).
+ Act float32 `def:"0,0.2,0.5,1" max:"1" min:"0"`
- // [def: 0,0.6] [min: 0] [max: 1] proportion to decay long-lasting conductances, NMDA and GABA, and also the dendritic membrane potential -- when using random stimulus order, it is important to decay this significantly to allow a fresh start -- but set Act to 0 to enable ongoing activity to keep neurons in their sensitive regime.
- Glong float32 `def:"0,0.6" max:"1" min:"0" desc:"proportion to decay long-lasting conductances, NMDA and GABA, and also the dendritic membrane potential -- when using random stimulus order, it is important to decay this significantly to allow a fresh start -- but set Act to 0 to enable ongoing activity to keep neurons in their sensitive regime."`
+ // proportion to decay long-lasting conductances, NMDA and GABA, and also the dendritic membrane potential -- when using random stimulus order, it is important to decay this significantly to allow a fresh start -- but set Act to 0 to enable ongoing activity to keep neurons in their sensitive regime.
+ Glong float32 `def:"0,0.6" max:"1" min:"0"`
- // [def: 0] [min: 0] [max: 1] decay of afterhyperpolarization currents, including mAHP, sAHP, and KNa -- has a separate decay because often useful to have this not decay at all even if decay is on.
- AHP float32 `def:"0" max:"1" min:"0" desc:"decay of afterhyperpolarization currents, including mAHP, sAHP, and KNa -- has a separate decay because often useful to have this not decay at all even if decay is on."`
+ // decay of afterhyperpolarization currents, including mAHP, sAHP, and KNa -- has a separate decay because often useful to have this not decay at all even if decay is on.
+ AHP float32 `def:"0" max:"1" min:"0"`
- // [def: 0] [min: 0] [max: 1] decay of Ca variables driven by spiking activity used in learning: CaSpk* and Ca* variables. These are typically not decayed but may need to be in some situations.
- LearnCa float32 `def:"0" max:"1" min:"0" desc:"decay of Ca variables driven by spiking activity used in learning: CaSpk* and Ca* variables. These are typically not decayed but may need to be in some situations."`
+ // decay of Ca variables driven by spiking activity used in learning: CaSpk* and Ca* variables. These are typically not decayed but may need to be in some situations.
+ LearnCa float32 `def:"0" max:"1" min:"0"`
// decay layer at end of ThetaCycle when there is a global reward -- true by default for PTPred, PTMaint and PFC Super layers
- OnRew slbool.Bool `desc:"decay layer at end of ThetaCycle when there is a global reward -- true by default for PTPred, PTMaint and PFC Super layers"`
+ OnRew slbool.Bool
pad, pad1, pad2 float32
}
@@ -265,53 +265,53 @@ func (dp *DecayParams) Defaults() {
// DtParams are time and rate constants for temporal derivatives in Axon (Vm, G)
type DtParams struct {
- // [def: 1,0.5] [min: 0] overall rate constant for numerical integration, for all equations at the unit level -- all time constants are specified in millisecond units, with one cycle = 1 msec -- if you instead want to make one cycle = 2 msec, you can do this globally by setting this integ value to 2 (etc). However, stability issues will likely arise if you go too high. For improved numerical stability, you may even need to reduce this value to 0.5 or possibly even lower (typically however this is not necessary). MUST also coordinate this with network.time_inc variable to ensure that global network.time reflects simulated time accurately
- Integ float32 `def:"1,0.5" min:"0" desc:"overall rate constant for numerical integration, for all equations at the unit level -- all time constants are specified in millisecond units, with one cycle = 1 msec -- if you instead want to make one cycle = 2 msec, you can do this globally by setting this integ value to 2 (etc). However, stability issues will likely arise if you go too high. For improved numerical stability, you may even need to reduce this value to 0.5 or possibly even lower (typically however this is not necessary). MUST also coordinate this with network.time_inc variable to ensure that global network.time reflects simulated time accurately"`
+ // overall rate constant for numerical integration, for all equations at the unit level -- all time constants are specified in millisecond units, with one cycle = 1 msec -- if you instead want to make one cycle = 2 msec, you can do this globally by setting this integ value to 2 (etc). However, stability issues will likely arise if you go too high. For improved numerical stability, you may even need to reduce this value to 0.5 or possibly even lower (typically however this is not necessary). MUST also coordinate this with network.time_inc variable to ensure that global network.time reflects simulated time accurately
+ Integ float32 `def:"1,0.5" min:"0"`
- // [def: 2.81] [min: 1] membrane potential time constant in cycles, which should be milliseconds typically (tau is roughly how long it takes for value to change significantly -- 1.4x the half-life) -- reflects the capacitance of the neuron in principle -- biological default for AdEx spiking model C = 281 pF = 2.81 normalized
- VmTau float32 `def:"2.81" min:"1" desc:"membrane potential time constant in cycles, which should be milliseconds typically (tau is roughly how long it takes for value to change significantly -- 1.4x the half-life) -- reflects the capacitance of the neuron in principle -- biological default for AdEx spiking model C = 281 pF = 2.81 normalized"`
+ // membrane potential time constant in cycles, which should be milliseconds typically (tau is roughly how long it takes for value to change significantly -- 1.4x the half-life) -- reflects the capacitance of the neuron in principle -- biological default for AdEx spiking model C = 281 pF = 2.81 normalized
+ VmTau float32 `def:"2.81" min:"1"`
- // [def: 5] [min: 1] dendritic membrane potential time constant in cycles, which should be milliseconds typically (tau is roughly how long it takes for value to change significantly -- 1.4x the half-life) -- reflects the capacitance of the neuron in principle -- biological default for AdEx spiking model C = 281 pF = 2.81 normalized
- VmDendTau float32 `def:"5" min:"1" desc:"dendritic membrane potential time constant in cycles, which should be milliseconds typically (tau is roughly how long it takes for value to change significantly -- 1.4x the half-life) -- reflects the capacitance of the neuron in principle -- biological default for AdEx spiking model C = 281 pF = 2.81 normalized"`
+ // dendritic membrane potential time constant in cycles, which should be milliseconds typically (tau is roughly how long it takes for value to change significantly -- 1.4x the half-life) -- reflects the capacitance of the neuron in principle -- biological default for AdEx spiking model C = 281 pF = 2.81 normalized
+ VmDendTau float32 `def:"5" min:"1"`
- // [def: 2] [min: 1] number of integration steps to take in computing new Vm value -- this is the one computation that can be most numerically unstable so taking multiple steps with proportionally smaller dt is beneficial
- VmSteps int32 `def:"2" min:"1" desc:"number of integration steps to take in computing new Vm value -- this is the one computation that can be most numerically unstable so taking multiple steps with proportionally smaller dt is beneficial"`
+ // number of integration steps to take in computing new Vm value -- this is the one computation that can be most numerically unstable so taking multiple steps with proportionally smaller dt is beneficial
+ VmSteps int32 `def:"2" min:"1"`
- // [def: 5] [min: 1] time constant for decay of excitatory AMPA receptor conductance.
- GeTau float32 `def:"5" min:"1" desc:"time constant for decay of excitatory AMPA receptor conductance."`
+ // time constant for decay of excitatory AMPA receptor conductance.
+ GeTau float32 `def:"5" min:"1"`
- // [def: 7] [min: 1] time constant for decay of inhibitory GABAa receptor conductance.
- GiTau float32 `def:"7" min:"1" desc:"time constant for decay of inhibitory GABAa receptor conductance."`
+ // time constant for decay of inhibitory GABAa receptor conductance.
+ GiTau float32 `def:"7" min:"1"`
- // [def: 40] [min: 1] time constant for integrating values over timescale of an individual input state (e.g., roughly 200 msec -- theta cycle), used in computing ActInt, GeInt from Ge, and GiInt from GiSyn -- this is used for scoring performance, not for learning, in cycles, which should be milliseconds typically (tau is roughly how long it takes for value to change significantly -- 1.4x the half-life),
- IntTau float32 `def:"40" min:"1" desc:"time constant for integrating values over timescale of an individual input state (e.g., roughly 200 msec -- theta cycle), used in computing ActInt, GeInt from Ge, and GiInt from GiSyn -- this is used for scoring performance, not for learning, in cycles, which should be milliseconds typically (tau is roughly how long it takes for value to change significantly -- 1.4x the half-life), "`
+ // time constant for integrating values over timescale of an individual input state (e.g., roughly 200 msec -- theta cycle), used in computing ActInt, GeInt from Ge, and GiInt from GiSyn -- this is used for scoring performance, not for learning, in cycles, which should be milliseconds typically (tau is roughly how long it takes for value to change significantly -- 1.4x the half-life)
+ IntTau float32 `def:"40" min:"1"`
- // [def: 20] [min: 1] time constant for integrating slower long-time-scale averages, such as nrn.ActAvg, Pool.ActsMAvg, ActsPAvg -- computed in NewState when a new input state is present (i.e., not msec but in units of a theta cycle) (tau is roughly how long it takes for value to change significantly) -- set lower for smaller models
- LongAvgTau float32 `def:"20" min:"1" desc:"time constant for integrating slower long-time-scale averages, such as nrn.ActAvg, Pool.ActsMAvg, ActsPAvg -- computed in NewState when a new input state is present (i.e., not msec but in units of a theta cycle) (tau is roughly how long it takes for value to change significantly) -- set lower for smaller models"`
+ // time constant for integrating slower long-time-scale averages, such as nrn.ActAvg, Pool.ActsMAvg, ActsPAvg -- computed in NewState when a new input state is present (i.e., not msec but in units of a theta cycle) (tau is roughly how long it takes for value to change significantly) -- set lower for smaller models
+ LongAvgTau float32 `def:"20" min:"1"`
- // [def: 10] [min: 0] cycle to start updating the SpkMaxCa, SpkMax values within a theta cycle -- early cycles often reflect prior state
- MaxCycStart int32 `def:"10" min:"0" desc:"cycle to start updating the SpkMaxCa, SpkMax values within a theta cycle -- early cycles often reflect prior state"`
+ // cycle to start updating the SpkMaxCa, SpkMax values within a theta cycle -- early cycles often reflect prior state
+ MaxCycStart int32 `def:"10" min:"0"`
- // [view: -] nominal rate = Integ / tau
- VmDt float32 `view:"-" json:"-" xml:"-" desc:"nominal rate = Integ / tau"`
+ // nominal rate = Integ / tau
+ VmDt float32 `view:"-" json:"-" xml:"-"`
- // [view: -] nominal rate = Integ / tau
- VmDendDt float32 `view:"-" json:"-" xml:"-" desc:"nominal rate = Integ / tau"`
+ // nominal rate = Integ / tau
+ VmDendDt float32 `view:"-" json:"-" xml:"-"`
- // [view: -] 1 / VmSteps
- DtStep float32 `view:"-" json:"-" xml:"-" desc:"1 / VmSteps"`
+ // 1 / VmSteps
+ DtStep float32 `view:"-" json:"-" xml:"-"`
- // [view: -] rate = Integ / tau
- GeDt float32 `view:"-" json:"-" xml:"-" desc:"rate = Integ / tau"`
+ // rate = Integ / tau
+ GeDt float32 `view:"-" json:"-" xml:"-"`
- // [view: -] rate = Integ / tau
- GiDt float32 `view:"-" json:"-" xml:"-" desc:"rate = Integ / tau"`
+ // rate = Integ / tau
+ GiDt float32 `view:"-" json:"-" xml:"-"`
- // [view: -] rate = Integ / tau
- IntDt float32 `view:"-" json:"-" xml:"-" desc:"rate = Integ / tau"`
+ // rate = Integ / tau
+ IntDt float32 `view:"-" json:"-" xml:"-"`
- // [view: -] rate = 1 / tau
- LongAvgDt float32 `view:"-" json:"-" xml:"-" desc:"rate = 1 / tau"`
+ // rate = 1 / tau
+ LongAvgDt float32 `view:"-" json:"-" xml:"-"`
}
func (dp *DtParams) Update() {
@@ -393,25 +393,25 @@ func (dp *DtParams) AvgVarUpdt(avg, vr *float32, val float32) {
type SpikeNoiseParams struct {
// add noise simulating background spiking levels
- On slbool.Bool `desc:"add noise simulating background spiking levels"`
+ On slbool.Bool
- // [def: 100] [viewif: On] mean frequency of excitatory spikes -- typically 50Hz but multiple inputs increase rate -- poisson lambda parameter, also the variance
- GeHz float32 `viewif:"On" def:"100" desc:"mean frequency of excitatory spikes -- typically 50Hz but multiple inputs increase rate -- poisson lambda parameter, also the variance"`
+ // mean frequency of excitatory spikes -- typically 50Hz but multiple inputs increase rate -- Poisson lambda parameter, also the variance
+ GeHz float32 `viewif:"On" def:"100"`
- // [viewif: On] [min: 0] excitatory conductance per spike -- .001 has minimal impact, .01 can be strong, and .15 is needed to influence timing of clamped inputs
- Ge float32 `viewif:"On" min:"0" desc:"excitatory conductance per spike -- .001 has minimal impact, .01 can be strong, and .15 is needed to influence timing of clamped inputs"`
+ // excitatory conductance per spike -- .001 has minimal impact, .01 can be strong, and .15 is needed to influence timing of clamped inputs
+ Ge float32 `viewif:"On" min:"0"`
- // [def: 200] [viewif: On] mean frequency of inhibitory spikes -- typically 100Hz fast spiking but multiple inputs increase rate -- poisson lambda parameter, also the variance
- GiHz float32 `viewif:"On" def:"200" desc:"mean frequency of inhibitory spikes -- typically 100Hz fast spiking but multiple inputs increase rate -- poisson lambda parameter, also the variance"`
+ // mean frequency of inhibitory spikes -- typically 100Hz fast spiking but multiple inputs increase rate -- Poisson lambda parameter, also the variance
+ GiHz float32 `viewif:"On" def:"200"`
- // [viewif: On] [min: 0] excitatory conductance per spike -- .001 has minimal impact, .01 can be strong, and .15 is needed to influence timing of clamped inputs
- Gi float32 `viewif:"On" min:"0" desc:"excitatory conductance per spike -- .001 has minimal impact, .01 can be strong, and .15 is needed to influence timing of clamped inputs"`
+ // inhibitory conductance per spike -- .001 has minimal impact, .01 can be strong, and .15 is needed to influence timing of clamped inputs
+ Gi float32 `viewif:"On" min:"0"`
- // [view: -] Exp(-Interval) which is the threshold for GeNoiseP as it is updated
- GeExpInt float32 `view:"-" json:"-" xml:"-" desc:"Exp(-Interval) which is the threshold for GeNoiseP as it is updated"`
+ // Exp(-Interval) which is the threshold for GeNoiseP as it is updated
+ GeExpInt float32 `view:"-" json:"-" xml:"-"`
- // [view: -] Exp(-Interval) which is the threshold for GiNoiseP as it is updated
- GiExpInt float32 `view:"-" json:"-" xml:"-" desc:"Exp(-Interval) which is the threshold for GiNoiseP as it is updated"`
+ // Exp(-Interval) which is the threshold for GiNoiseP as it is updated
+ GiExpInt float32 `view:"-" json:"-" xml:"-"`
pad int32
}
@@ -460,19 +460,19 @@ func (an *SpikeNoiseParams) PGi(ctx *Context, p *float32, ni uint32) float32 {
type ClampParams struct {
// is this a clamped input layer? set automatically based on layer type at initialization
- IsInput slbool.Bool `inactive:"+" desc:"is this a clamped input layer? set automatically based on layer type at initialization"`
+ IsInput slbool.Bool `inactive:"+"`
// is this a target layer? set automatically based on layer type at initialization
- IsTarget slbool.Bool `inactive:"+" desc:"is this a target layer? set automatically based on layer type at initialization"`
+ IsTarget slbool.Bool `inactive:"+"`
- // [def: 0.8,1.5] amount of Ge driven for clamping -- generally use 0.8 for Target layers, 1.5 for Input layers
- Ge float32 `def:"0.8,1.5" desc:"amount of Ge driven for clamping -- generally use 0.8 for Target layers, 1.5 for Input layers"`
+ // amount of Ge driven for clamping -- generally use 0.8 for Target layers, 1.5 for Input layers
+ Ge float32 `def:"0.8,1.5"`
- // [def: false] [view: add external conductance on top of any existing -- generally this is not a good idea for target layers (creates a main effect that learning can never match), but may be ok for input layers]
+ // add external conductance on top of any existing -- generally this is not a good idea for target layers (creates a main effect that learning can never match), but may be ok for input layers
Add slbool.Bool `def:"false" view:"add external conductance on top of any existing -- generally this is not a good idea for target layers (creates a main effect that learning can never match), but may be ok for input layers"`
- // [def: 0.5] threshold on neuron Act activity to count as active for computing error relative to target in PctErr method
- ErrThr float32 `def:"0.5" desc:"threshold on neuron Act activity to count as active for computing error relative to target in PctErr method"`
+ // threshold on neuron Act activity to count as active for computing error relative to target in PctErr method
+ ErrThr float32 `def:"0.5"`
pad, pad1, pad2 float32
}
@@ -492,13 +492,13 @@ func (cp *ClampParams) Defaults() {
type AttnParams struct {
// is attentional modulation active?
- On slbool.Bool `desc:"is attentional modulation active?"`
+ On slbool.Bool
- // [viewif: On] minimum act multiplier if attention is 0
- Min float32 `viewif:"On" desc:"minimum act multiplier if attention is 0"`
+ // minimum act multiplier if attention is 0
+ Min float32 `viewif:"On"`
// threshold on CaSpkP for determining the reaction time for the Layer -- starts after MaxCycStart to ensure that prior trial activity has had a chance to dissipate.
- RTThr float32 `desc:"threshold on CaSpkP for determining the reaction time for the Layer -- starts after MaxCycStart to ensure that prior trial activity has had a chance to dissipate."`
+ RTThr float32
pad int32
}
@@ -535,28 +535,28 @@ func (at *AttnParams) ModVal(val float32, attn float32) float32 {
type PopCodeParams struct {
// use popcode encoding of variable(s) that this layer represents
- On slbool.Bool `desc:"use popcode encoding of variable(s) that this layer represents"`
+ On slbool.Bool
- // [def: 0.1] [viewif: On] Ge multiplier for driving excitatory conductance based on PopCode -- multiplies normalized activation values
- Ge float32 `viewif:"On" def:"0.1" desc:"Ge multiplier for driving excitatory conductance based on PopCode -- multiplies normalized activation values"`
+ // Ge multiplier for driving excitatory conductance based on PopCode -- multiplies normalized activation values
+ Ge float32 `viewif:"On" def:"0.1"`
- // [def: -0.1] [viewif: On] minimum value representable -- for GaussBump, typically include extra to allow mean with activity on either side to represent the lowest value you want to encode
- Min float32 `viewif:"On" def:"-0.1" desc:"minimum value representable -- for GaussBump, typically include extra to allow mean with activity on either side to represent the lowest value you want to encode"`
+ // minimum value representable -- for GaussBump, typically include extra to allow mean with activity on either side to represent the lowest value you want to encode
+ Min float32 `viewif:"On" def:"-0.1"`
- // [def: 1.1] [viewif: On] maximum value representable -- for GaussBump, typically include extra to allow mean with activity on either side to represent the lowest value you want to encode
- Max float32 `viewif:"On" def:"1.1" desc:"maximum value representable -- for GaussBump, typically include extra to allow mean with activity on either side to represent the lowest value you want to encode"`
+ // maximum value representable -- for GaussBump, typically include extra to allow mean with activity on either side to represent the highest value you want to encode
+ Max float32 `viewif:"On" def:"1.1"`
- // [def: 1,0.5] [viewif: On] activation multiplier for values at Min end of range, where values at Max end have an activation of 1 -- if this is < 1, then there is a rate code proportional to the value in addition to the popcode pattern -- see also MinSigma, MaxSigma
- MinAct float32 `viewif:"On" def:"1,0.5" desc:"activation multiplier for values at Min end of range, where values at Max end have an activation of 1 -- if this is < 1, then there is a rate code proportional to the value in addition to the popcode pattern -- see also MinSigma, MaxSigma"`
+ // activation multiplier for values at Min end of range, where values at Max end have an activation of 1 -- if this is < 1, then there is a rate code proportional to the value in addition to the popcode pattern -- see also MinSigma, MaxSigma
+ MinAct float32 `viewif:"On" def:"1,0.5"`
- // [def: 0.1,0.08] [viewif: On] sigma parameter of a gaussian specifying the tuning width of the coarse-coded units, in normalized 0-1 range -- for Min value -- if MinSigma < MaxSigma then more units are activated for Max values vs. Min values, proportionally
- MinSigma float32 `viewif:"On" def:"0.1,0.08" desc:"sigma parameter of a gaussian specifying the tuning width of the coarse-coded units, in normalized 0-1 range -- for Min value -- if MinSigma < MaxSigma then more units are activated for Max values vs. Min values, proportionally"`
+ // sigma parameter of a gaussian specifying the tuning width of the coarse-coded units, in normalized 0-1 range -- for Min value -- if MinSigma < MaxSigma then more units are activated for Max values vs. Min values, proportionally
+ MinSigma float32 `viewif:"On" def:"0.1,0.08"`
- // [def: 0.1,0.12] [viewif: On] sigma parameter of a gaussian specifying the tuning width of the coarse-coded units, in normalized 0-1 range -- for Min value -- if MinSigma < MaxSigma then more units are activated for Max values vs. Min values, proportionally
- MaxSigma float32 `viewif:"On" def:"0.1,0.12" desc:"sigma parameter of a gaussian specifying the tuning width of the coarse-coded units, in normalized 0-1 range -- for Min value -- if MinSigma < MaxSigma then more units are activated for Max values vs. Min values, proportionally"`
+ // sigma parameter of a gaussian specifying the tuning width of the coarse-coded units, in normalized 0-1 range -- for Max value -- if MinSigma < MaxSigma then more units are activated for Max values vs. Min values, proportionally
+ MaxSigma float32 `viewif:"On" def:"0.1,0.12"`
- // [viewif: On] ensure that encoded and decoded value remains within specified range
- Clip slbool.Bool `viewif:"On" desc:"ensure that encoded and decoded value remains within specified range"`
+ // ensure that encoded and decoded value remains within specified range
+ Clip slbool.Bool `viewif:"On"`
}
func (pc *PopCodeParams) Defaults() {
@@ -635,68 +635,68 @@ func (pc *PopCodeParams) EncodeGe(i, n uint32, val float32) float32 {
// This is included in axon.Layer to drive the computation.
type ActParams struct {
- // [view: inline] Spiking function parameters
- Spikes SpikeParams `view:"inline" desc:"Spiking function parameters"`
+ // Spiking function parameters
+ Spikes SpikeParams `view:"inline"`
- // [view: inline] dendrite-specific parameters
- Dend DendParams `view:"inline" desc:"dendrite-specific parameters"`
+ // dendrite-specific parameters
+ Dend DendParams `view:"inline"`
- // [view: inline] initial values for key network state variables -- initialized in InitActs called by InitWts, and provides target values for DecayState
- Init ActInitParams `view:"inline" desc:"initial values for key network state variables -- initialized in InitActs called by InitWts, and provides target values for DecayState"`
+ // initial values for key network state variables -- initialized in InitActs called by InitWts, and provides target values for DecayState
+ Init ActInitParams `view:"inline"`
- // [view: inline] amount to decay between AlphaCycles, simulating passage of time and effects of saccades etc, especially important for environments with random temporal structure (e.g., most standard neural net training corpora)
- Decay DecayParams `view:"inline" desc:"amount to decay between AlphaCycles, simulating passage of time and effects of saccades etc, especially important for environments with random temporal structure (e.g., most standard neural net training corpora) "`
+ // amount to decay between AlphaCycles, simulating passage of time and effects of saccades etc, especially important for environments with random temporal structure (e.g., most standard neural net training corpora)
+ Decay DecayParams `view:"inline"`
- // [view: inline] time and rate constants for temporal derivatives / updating of activation state
- Dt DtParams `view:"inline" desc:"time and rate constants for temporal derivatives / updating of activation state"`
+ // time and rate constants for temporal derivatives / updating of activation state
+ Dt DtParams `view:"inline"`
- // [view: inline] [Defaults: 1, .2, 1, 1] maximal conductances levels for channels
- Gbar chans.Chans `view:"inline" desc:"[Defaults: 1, .2, 1, 1] maximal conductances levels for channels"`
+ // maximal conductance levels for channels -- defaults: 1, .2, 1, 1
+ Gbar chans.Chans `view:"inline"`
- // [view: inline] [Defaults: 1, .3, .25, .1] reversal potentials for each channel
- Erev chans.Chans `view:"inline" desc:"[Defaults: 1, .3, .25, .1] reversal potentials for each channel"`
+ // reversal potentials for each channel -- defaults: 1, .3, .25, .1
+ Erev chans.Chans `view:"inline"`
- // [view: inline] how external inputs drive neural activations
- Clamp ClampParams `view:"inline" desc:"how external inputs drive neural activations"`
+ // how external inputs drive neural activations
+ Clamp ClampParams `view:"inline"`
- // [view: inline] how, where, when, and how much noise to add
- Noise SpikeNoiseParams `view:"inline" desc:"how, where, when, and how much noise to add"`
+ // how, where, when, and how much noise to add
+ Noise SpikeNoiseParams `view:"inline"`
- // [view: inline] range for Vm membrane potential -- [0.1, 1.0] -- important to keep just at extreme range of reversal potentials to prevent numerical instability
- VmRange minmax.F32 `view:"inline" desc:"range for Vm membrane potential -- [0.1, 1.0] -- important to keep just at extreme range of reversal potentials to prevent numerical instability"`
+ // range for Vm membrane potential -- [0.1, 1.0] -- important to keep just at extreme range of reversal potentials to prevent numerical instability
+ VmRange minmax.F32 `view:"inline"`
- // [view: inline] M-type medium time-scale afterhyperpolarization mAHP current -- this is the primary form of adaptation on the time scale of multiple sequences of spikes
- Mahp chans.MahpParams `view:"inline" desc:"M-type medium time-scale afterhyperpolarization mAHP current -- this is the primary form of adaptation on the time scale of multiple sequences of spikes"`
+ // M-type medium time-scale afterhyperpolarization mAHP current -- this is the primary form of adaptation on the time scale of multiple sequences of spikes
+ Mahp chans.MahpParams `view:"inline"`
- // [view: inline] slow time-scale afterhyperpolarization sAHP current -- integrates CaSpkD at theta cycle intervals and produces a hard cutoff on sustained activity for any neuron
- Sahp chans.SahpParams `view:"inline" desc:"slow time-scale afterhyperpolarization sAHP current -- integrates CaSpkD at theta cycle intervals and produces a hard cutoff on sustained activity for any neuron"`
+ // slow time-scale afterhyperpolarization sAHP current -- integrates CaSpkD at theta cycle intervals and produces a hard cutoff on sustained activity for any neuron
+ Sahp chans.SahpParams `view:"inline"`
- // [view: inline] sodium-gated potassium channel adaptation parameters -- activates a leak-like current as a function of neural activity (firing = Na influx) at two different time-scales (Slick = medium, Slack = slow)
- KNa chans.KNaMedSlow `view:"inline" desc:"sodium-gated potassium channel adaptation parameters -- activates a leak-like current as a function of neural activity (firing = Na influx) at two different time-scales (Slick = medium, Slack = slow)"`
+ // sodium-gated potassium channel adaptation parameters -- activates a leak-like current as a function of neural activity (firing = Na influx) at two different time-scales (Slick = medium, Slack = slow)
+ KNa chans.KNaMedSlow `view:"inline"`
- // [view: inline] NMDA channel parameters used in computing Gnmda conductance for bistability, and postsynaptic calcium flux used in learning. Note that Learn.Snmda has distinct parameters used in computing sending NMDA parameters used in learning.
- NMDA chans.NMDAParams `view:"inline" desc:"NMDA channel parameters used in computing Gnmda conductance for bistability, and postsynaptic calcium flux used in learning. Note that Learn.Snmda has distinct parameters used in computing sending NMDA parameters used in learning."`
+ // NMDA channel parameters used in computing Gnmda conductance for bistability, and postsynaptic calcium flux used in learning. Note that Learn.Snmda has distinct parameters used in computing sending NMDA parameters used in learning.
+ NMDA chans.NMDAParams `view:"inline"`
- // [view: inline] NMDA channel parameters used in computing Gnmda conductance for bistability, and postsynaptic calcium flux used in learning. Note that Learn.Snmda has distinct parameters used in computing sending NMDA parameters used in learning.
- MaintNMDA chans.NMDAParams `view:"inline" desc:"NMDA channel parameters used in computing Gnmda conductance for bistability, and postsynaptic calcium flux used in learning. Note that Learn.Snmda has distinct parameters used in computing sending NMDA parameters used in learning."`
+ // NMDA channel parameters used in computing Gnmda conductance for bistability, and postsynaptic calcium flux used in learning. Note that Learn.Snmda has distinct parameters used in computing sending NMDA parameters used in learning.
+ MaintNMDA chans.NMDAParams `view:"inline"`
- // [view: inline] GABA-B / GIRK channel parameters
- GabaB chans.GABABParams `view:"inline" desc:"GABA-B / GIRK channel parameters"`
+ // GABA-B / GIRK channel parameters
+ GabaB chans.GABABParams `view:"inline"`
- // [view: inline] voltage gated calcium channels -- provide a key additional source of Ca for learning and positive-feedback loop upstate for active neurons
- VGCC chans.VGCCParams `view:"inline" desc:"voltage gated calcium channels -- provide a key additional source of Ca for learning and positive-feedback loop upstate for active neurons"`
+ // voltage gated calcium channels -- provide a key additional source of Ca for learning and positive-feedback loop upstate for active neurons
+ VGCC chans.VGCCParams `view:"inline"`
- // [view: inline] A-type potassium (K) channel that is particularly important for limiting the runaway excitation from VGCC channels
- AK chans.AKsParams `view:"inline" desc:"A-type potassium (K) channel that is particularly important for limiting the runaway excitation from VGCC channels"`
+ // A-type potassium (K) channel that is particularly important for limiting the runaway excitation from VGCC channels
+ AK chans.AKsParams `view:"inline"`
- // [view: inline] small-conductance calcium-activated potassium channel produces the pausing function as a consequence of rapid bursting.
- SKCa chans.SKCaParams `view:"inline" desc:"small-conductance calcium-activated potassium channel produces the pausing function as a consequence of rapid bursting."`
+ // small-conductance calcium-activated potassium channel produces the pausing function as a consequence of rapid bursting.
+ SKCa chans.SKCaParams `view:"inline"`
- // [view: inline] Attentional modulation parameters: how Attn modulates Ge
- AttnMod AttnParams `view:"inline" desc:"Attentional modulation parameters: how Attn modulates Ge"`
+ // Attentional modulation parameters: how Attn modulates Ge
+ AttnMod AttnParams `view:"inline"`
- // [view: inline] provides encoding population codes, used to represent a single continuous (scalar) value, across a population of units / neurons (1 dimensional)
- PopCode PopCodeParams `view:"inline" desc:"provides encoding population codes, used to represent a single continuous (scalar) value, across a population of units / neurons (1 dimensional)"`
+ // provides encoding population codes, used to represent a single continuous (scalar) value, across a population of units / neurons (1 dimensional)
+ PopCode PopCodeParams `view:"inline"`
}
func (ac *ActParams) Defaults() {
diff --git a/axon/act_prjn.go b/axon/act_prjn.go
index 41fe2bb2b..f7872ce7a 100644
--- a/axon/act_prjn.go
+++ b/axon/act_prjn.go
@@ -7,11 +7,11 @@ package axon
import (
"log"
- "github.com/emer/emergent/erand"
+ "github.com/emer/emergent/v2/erand"
"github.com/goki/gosl/slbool"
"github.com/goki/ki/ints"
"github.com/goki/ki/kit"
- "github.com/goki/mat32"
+ "goki.dev/mat32/v2"
)
//go:generate stringer -type=PrjnGTypes
@@ -63,22 +63,22 @@ const (
type SynComParams struct {
// type of conductance (G) communicated by this projection
- GType PrjnGTypes `desc:"type of conductance (G) communicated by this projection"`
+ GType PrjnGTypes
- // [def: 2] [min: 0] additional synaptic delay in msec for inputs arriving at this projection. Must be <= MaxDelay which is set during network building based on MaxDelay of any existing Prjn in the network. Delay = 0 means a spike reaches receivers in the next Cycle, which is the minimum time (1 msec). Biologically, subtract 1 from biological synaptic delay values to set corresponding Delay value.
- Delay uint32 `min:"0" def:"2" desc:"additional synaptic delay in msec for inputs arriving at this projection. Must be <= MaxDelay which is set during network building based on MaxDelay of any existing Prjn in the network. Delay = 0 means a spike reaches receivers in the next Cycle, which is the minimum time (1 msec). Biologically, subtract 1 from biological synaptic delay values to set corresponding Delay value."`
+ // additional synaptic delay in msec for inputs arriving at this projection. Must be <= MaxDelay which is set during network building based on MaxDelay of any existing Prjn in the network. Delay = 0 means a spike reaches receivers in the next Cycle, which is the minimum time (1 msec). Biologically, subtract 1 from biological synaptic delay values to set corresponding Delay value.
+ Delay uint32 `min:"0" def:"2"`
// maximum value of Delay -- based on MaxDelay values when the BuildGBuf function was called when the network was built -- cannot set it longer than this, except by calling BuildGBuf on network after changing MaxDelay to a larger value in any projection in the network.
- MaxDelay uint32 `inactive:"+" desc:"maximum value of Delay -- based on MaxDelay values when the BuildGBuf function was called when the network was built -- cannot set it longer than this, except by calling BuildGBuf on network after changing MaxDelay to a larger value in any projection in the network."`
+ MaxDelay uint32 `inactive:"+"`
// probability of synaptic transmission failure -- if > 0, then weights are turned off at random as a function of PFail (times 1-SWt if PFailSwt)
- PFail float32 `desc:"probability of synaptic transmission failure -- if > 0, then weights are turned off at random as a function of PFail (times 1-SWt if PFailSwt)"`
+ PFail float32
// if true, then probability of failure is inversely proportional to SWt structural / slow weight value (i.e., multiply PFail * (1-SWt)))
- PFailSWt slbool.Bool `desc:"if true, then probability of failure is inversely proportional to SWt structural / slow weight value (i.e., multiply PFail * (1-SWt)))"`
+ PFailSWt slbool.Bool
- // [view: -] delay length = actual length of the GBuf buffer per neuron = Delay+1 -- just for speed
- DelLen uint32 `view:"-" desc:"delay length = actual length of the GBuf buffer per neuron = Delay+1 -- just for speed"`
+ // delay length = actual length of the GBuf buffer per neuron = Delay+1 -- just for speed
+ DelLen uint32 `view:"-"`
pad, pad1 float32
}
@@ -222,11 +222,11 @@ func (sc *SynComParams) Fail(ctx *Context, syni uint32, swt float32) {
// using both absolute and relative factors.
type PrjnScaleParams struct {
- // [min: 0] [Defaults: Forward=1, Back=0.2] relative scaling that shifts balance between different projections -- this is subject to normalization across all other projections into receiving neuron, and determines the GScale.Target for adapting scaling
- Rel float32 `min:"0" desc:"[Defaults: Forward=1, Back=0.2] relative scaling that shifts balance between different projections -- this is subject to normalization across all other projections into receiving neuron, and determines the GScale.Target for adapting scaling"`
+ // relative scaling that shifts balance between different projections -- defaults: Forward=1, Back=0.2 -- this is subject to normalization across all other projections into receiving neuron, and determines the GScale.Target for adapting scaling
+ Rel float32 `min:"0"`
- // [def: 1] [min: 0] absolute multiplier adjustment factor for the prjn scaling -- can be used to adjust for idiosyncrasies not accommodated by the standard scaling based on initial target activation level and relative scaling factors -- any adaptation operates by directly adjusting scaling factor from the initially computed value
- Abs float32 `def:"1" min:"0" desc:"absolute multiplier adjustment factor for the prjn scaling -- can be used to adjust for idiosyncrasies not accommodated by the standard scaling based on initial target activation level and relative scaling factors -- any adaptation operates by directly adjusting scaling factor from the initially computed value"`
+ // absolute multiplier adjustment factor for the prjn scaling -- can be used to adjust for idiosyncrasies not accommodated by the standard scaling based on initial target activation level and relative scaling factors -- any adaptation operates by directly adjusting scaling factor from the initially computed value
+ Abs float32 `def:"1" min:"0"`
pad, pad1 float32
}
diff --git a/axon/act_test.go b/axon/act_test.go
index 51395411f..9d233c97a 100644
--- a/axon/act_test.go
+++ b/axon/act_test.go
@@ -9,7 +9,7 @@ package axon
import (
"testing"
- "github.com/goki/mat32"
+ "goki.dev/mat32/v2"
)
// TOLERANCE is the numerical difference tolerance for comparing vs. target values
diff --git a/axon/axon.go b/axon/axon.go
index d96aef570..e811b35da 100644
--- a/axon/axon.go
+++ b/axon/axon.go
@@ -5,7 +5,7 @@
package axon
import (
- "github.com/emer/emergent/emer"
+ "github.com/emer/emergent/v2/emer"
)
// AxonNetwork defines the essential algorithmic API for Axon, at the network level.
diff --git a/axon/basic_test.go b/axon/basic_test.go
index 1fab1b8c2..caa2a7495 100644
--- a/axon/basic_test.go
+++ b/axon/basic_test.go
@@ -17,13 +17,13 @@ import (
"strings"
"testing"
- "github.com/emer/emergent/erand"
- "github.com/emer/emergent/etime"
- "github.com/emer/emergent/params"
- "github.com/emer/emergent/prjn"
- "github.com/emer/etable/etensor"
+ "github.com/emer/emergent/v2/erand"
+ "github.com/emer/emergent/v2/etime"
+ "github.com/emer/emergent/v2/params"
+ "github.com/emer/emergent/v2/prjn"
"github.com/goki/ki/kit"
- "github.com/goki/mat32"
+ "goki.dev/etable/v2/etensor"
+ "goki.dev/mat32/v2"
"golang.org/x/exp/maps"
)
diff --git a/axon/context.go b/axon/context.go
index 85e6ea448..4f5239e68 100644
--- a/axon/context.go
+++ b/axon/context.go
@@ -7,10 +7,10 @@ package axon
import (
"math"
- "github.com/emer/emergent/etime"
+ "github.com/emer/emergent/v2/etime"
"github.com/goki/gosl/slbool"
"github.com/goki/gosl/slrand"
- "github.com/goki/ki/bools"
+ "goki.dev/glop/num"
)
var (
@@ -244,50 +244,50 @@ func (ctx *Context) CopyNetStridesFrom(srcCtx *Context) {
// NetIdxs are indexes and sizes for processing network
type NetIdxs struct {
- // [min: 1] number of data parallel items to process currently
- NData uint32 `min:"1" desc:"number of data parallel items to process currently"`
+ // number of data parallel items to process currently
+ NData uint32 `min:"1"`
// network index in global Networks list of networks -- needed for GPU shader kernel compatible network variable access functions (e.g., NrnV, SynV etc) in CPU mode
- NetIdx uint32 `inactive:"+" desc:"network index in global Networks list of networks -- needed for GPU shader kernel compatible network variable access functions (e.g., NrnV, SynV etc) in CPU mode"`
+ NetIdx uint32 `inactive:"+"`
// maximum amount of data parallel
- MaxData uint32 `inactive:"+" desc:"maximum amount of data parallel"`
+ MaxData uint32 `inactive:"+"`
// number of layers in the network
- NLayers uint32 `inactive:"+" desc:"number of layers in the network"`
+ NLayers uint32 `inactive:"+"`
// total number of neurons
- NNeurons uint32 `inactive:"+" desc:"total number of neurons"`
+ NNeurons uint32 `inactive:"+"`
// total number of pools excluding * MaxData factor
- NPools uint32 `inactive:"+" desc:"total number of pools excluding * MaxData factor"`
+ NPools uint32 `inactive:"+"`
// total number of synapses
- NSyns uint32 `inactive:"+" desc:"total number of synapses"`
+ NSyns uint32 `inactive:"+"`
// maximum size in float32 (4 bytes) of a GPU buffer -- needed for GPU access
- GPUMaxBuffFloats uint32 `inactive:"+" desc:"maximum size in float32 (4 bytes) of a GPU buffer -- needed for GPU access"`
+ GPUMaxBuffFloats uint32 `inactive:"+"`
// total number of SynCa banks of GPUMaxBufferBytes arrays in GPU
- GPUSynCaBanks uint32 `inactive:"+" desc:"total number of SynCa banks of GPUMaxBufferBytes arrays in GPU"`
+ GPUSynCaBanks uint32 `inactive:"+"`
// total number of PVLV Drives / positive USs
- PVLVNPosUSs uint32 `inactive:"+" desc:"total number of PVLV Drives / positive USs"`
+ PVLVNPosUSs uint32 `inactive:"+"`
// total number of PVLV Negative USs
- PVLVNNegUSs uint32 `inactive:"+" desc:"total number of PVLV Negative USs"`
+ PVLVNNegUSs uint32 `inactive:"+"`
// offset into GlobalVars for USneg values
- GvUSnegOff uint32 `inactive:"+" desc:"offset into GlobalVars for USneg values"`
+ GvUSnegOff uint32 `inactive:"+"`
// stride into GlobalVars for USneg values
- GvUSnegStride uint32 `inactive:"+" desc:"stride into GlobalVars for USneg values"`
+ GvUSnegStride uint32 `inactive:"+"`
// offset into GlobalVars for USpos, Drive, VSPatch values
- GvUSposOff uint32 `inactive:"+" desc:"offset into GlobalVars for USpos, Drive, VSPatch values values"`
+ GvUSposOff uint32 `inactive:"+"`
// stride into GlobalVars for USpos, Drive, VSPatch values
- GvUSposStride uint32 `inactive:"+" desc:"stride into GlobalVars for USpos, Drive, VSPatch values"`
+ GvUSposStride uint32 `inactive:"+"`
pad uint32
}
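// The desc strings above now live only in the doc comments; the remaining
// tags (min, def, inactive, view, ...) are still ordinary Go struct tags and
// can be read via reflection. A minimal stand-alone sketch using a
// hypothetical struct that mirrors the NetIdxs pattern (not part of the axon API):
package main

import (
	"fmt"
	"reflect"
)

type example struct {
	NData       uint32 `min:"1"`
	ThetaCycles int32  `def:"200"`
}

func main() {
	t := reflect.TypeOf(example{})
	for i := 0; i < t.NumField(); i++ {
		f := t.Field(i)
		fmt.Printf("%s: min=%q def=%q\n", f.Name, f.Tag.Get("min"), f.Tag.Get("def"))
	}
}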
@@ -349,72 +349,72 @@ func (ctx *NetIdxs) SynIdxIsValid(si uint32) bool {
type Context struct {
// current evaluation mode, e.g., Train, Test, etc
- Mode etime.Modes `desc:"current evaluation mode, e.g., Train, Test, etc"`
+ Mode etime.Modes
// if true, the model is being run in a testing mode, so no weight changes or other associated computations are needed. this flag should only affect learning-related behavior. Is automatically updated based on Mode != Train
- Testing slbool.Bool `inactive:"+" desc:"if true, the model is being run in a testing mode, so no weight changes or other associated computations are needed. this flag should only affect learning-related behavior. Is automatically updated based on Mode != Train"`
+ Testing slbool.Bool `inactive:"+"`
// phase counter: typically 0-1 for minus-plus but can be more phases for other algorithms
- Phase int32 `desc:"phase counter: typicaly 0-1 for minus-plus but can be more phases for other algorithms"`
+ Phase int32
// true if this is the plus phase, when the outcome / bursting is occurring, driving positive learning -- else minus phase
- PlusPhase slbool.Bool `desc:"true if this is the plus phase, when the outcome / bursting is occurring, driving positive learning -- else minus phase"`
+ PlusPhase slbool.Bool
// cycle within current phase -- minus or plus
- PhaseCycle int32 `desc:"cycle within current phase -- minus or plus"`
+ PhaseCycle int32
// cycle counter: number of iterations of activation updating (settling) on the current state -- this counts time sequentially until reset with NewState
- Cycle int32 `desc:"cycle counter: number of iterations of activation updating (settling) on the current state -- this counts time sequentially until reset with NewState"`
+ Cycle int32
- // [def: 200] length of the theta cycle in terms of 1 msec Cycles -- some network update steps depend on doing something at the end of the theta cycle (e.g., CTCtxtPrjn).
- ThetaCycles int32 `def:"200" desc:"length of the theta cycle in terms of 1 msec Cycles -- some network update steps depend on doing something at the end of the theta cycle (e.g., CTCtxtPrjn)."`
+ // length of the theta cycle in terms of 1 msec Cycles -- some network update steps depend on doing something at the end of the theta cycle (e.g., CTCtxtPrjn).
+ ThetaCycles int32 `def:"200"`
// total cycle count -- increments continuously from whenever it was last reset -- typically this is number of milliseconds in simulation time -- is int32 and not uint32 b/c used with Synapse CaUpT which needs to have a -1 case for expired update time
- CyclesTotal int32 `desc:"total cycle count -- increments continuously from whenever it was last reset -- typically this is number of milliseconds in simulation time -- is int32 and not uint32 b/c used with Synapse CaUpT which needs to have a -1 case for expired update time"`
+ CyclesTotal int32
// accumulated amount of time the network has been running, in simulation-time (not real world time), in seconds
- Time float32 `desc:"accumulated amount of time the network has been running, in simulation-time (not real world time), in seconds"`
+ Time float32
// total trial count -- increments continuously in NewState call *only in Train mode* from whenever it was last reset -- can be used for synchronizing weight updates across nodes
- TrialsTotal int32 `desc:"total trial count -- increments continuously in NewState call *only in Train mode* from whenever it was last reset -- can be used for synchronizing weight updates across nodes"`
+ TrialsTotal int32
- // [def: 0.001] amount of time to increment per cycle
- TimePerCycle float32 `def:"0.001" desc:"amount of time to increment per cycle"`
+ // amount of time to increment per cycle
+ TimePerCycle float32 `def:"0.001"`
- // [def: 100] how frequently to perform slow adaptive processes such as synaptic scaling, inhibition adaptation, associated in the brain with sleep, in the SlowAdapt method. This should be long enough for meaningful changes to accumulate -- 100 is default but could easily be longer in larger models. Because SlowCtr is incremented by NData, high NData cases (e.g. 16) likely need to increase this value -- e.g., 400 seems to produce overall consistent results in various models.
- SlowInterval int32 `def:"100" desc:"how frequently to perform slow adaptive processes such as synaptic scaling, inhibition adaptation, associated in the brain with sleep, in the SlowAdapt method. This should be long enough for meaningful changes to accumulate -- 100 is default but could easily be longer in larger models. Because SlowCtr is incremented by NData, high NData cases (e.g. 16) likely need to increase this value -- e.g., 400 seems to produce overall consistent results in various models."`
+ // how frequently to perform slow adaptive processes such as synaptic scaling, inhibition adaptation, associated in the brain with sleep, in the SlowAdapt method. This should be long enough for meaningful changes to accumulate -- 100 is default but could easily be longer in larger models. Because SlowCtr is incremented by NData, high NData cases (e.g. 16) likely need to increase this value -- e.g., 400 seems to produce overall consistent results in various models.
+ SlowInterval int32 `def:"100"`
// counter for how long it has been since last SlowAdapt step. Note that this is incremented by NData to maintain consistency across different values of this parameter.
- SlowCtr int32 `inactive:"+" desc:"counter for how long it has been since last SlowAdapt step. Note that this is incremented by NData to maintain consistency across different values of this parameter."`
+ SlowCtr int32 `inactive:"+"`
// synaptic calcium counter, which drives the CaUpT synaptic value to optimize updating of this computationally expensive factor. It is incremented by 1 for each cycle, and reset at the SlowInterval, at which point the synaptic calcium values are all reset.
- SynCaCtr float32 `inactive:"+" desc:"synaptic calcium counter, which drives the CaUpT synaptic value to optimize updating of this computationally expensive factor. It is incremented by 1 for each cycle, and reset at the SlowInterval, at which point the synaptic calcium values are all reset."`
+ SynCaCtr float32 `inactive:"+"`
pad, pad1 float32
- // [view: inline] indexes and sizes of current network
- NetIdxs NetIdxs `view:"inline" desc:"indexes and sizes of current network"`
+ // indexes and sizes of current network
+ NetIdxs NetIdxs `view:"inline"`
- // [view: -] stride offsets for accessing neuron variables
- NeuronVars NeuronVarStrides `view:"-" desc:"stride offsets for accessing neuron variables"`
+ // stride offsets for accessing neuron variables
+ NeuronVars NeuronVarStrides `view:"-"`
- // [view: -] stride offsets for accessing neuron average variables
- NeuronAvgVars NeuronAvgVarStrides `view:"-" desc:"stride offsets for accessing neuron average variables"`
+ // stride offsets for accessing neuron average variables
+ NeuronAvgVars NeuronAvgVarStrides `view:"-"`
- // [view: -] stride offsets for accessing neuron indexes
- NeuronIdxs NeuronIdxStrides `view:"-" desc:"stride offsets for accessing neuron indexes"`
+ // stride offsets for accessing neuron indexes
+ NeuronIdxs NeuronIdxStrides `view:"-"`
- // [view: -] stride offsets for accessing synapse variables
- SynapseVars SynapseVarStrides `view:"-" desc:"stride offsets for accessing synapse variables"`
+ // stride offsets for accessing synapse variables
+ SynapseVars SynapseVarStrides `view:"-"`
- // [view: -] stride offsets for accessing synapse Ca variables
- SynapseCaVars SynapseCaStrides `view:"-" desc:"stride offsets for accessing synapse Ca variables"`
+ // stride offsets for accessing synapse Ca variables
+ SynapseCaVars SynapseCaStrides `view:"-"`
- // [view: -] stride offsets for accessing synapse indexes
- SynapseIdxs SynapseIdxStrides `view:"-" desc:"stride offsets for accessing synapse indexes"`
+ // stride offsets for accessing synapse indexes
+ SynapseIdxs SynapseIdxStrides `view:"-"`
// random counter -- incremented by maximum number of possible random numbers generated per cycle, regardless of how many are actually used -- this is shared across all layers so must encompass all possible param settings.
- RandCtr slrand.Counter `desc:"random counter -- incremented by maximum number of possible random numbers generated per cycle, regardless of how many are actually used -- this is shared across all layers so must encompass all possible param settings."`
+ RandCtr slrand.Counter
}
// Defaults sets default values
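// A hedged sketch of the per-cycle bookkeeping implied by the counter fields
// above (Cycle, PhaseCycle, CyclesTotal, Time, TimePerCycle, SynCaCtr); the
// function name is hypothetical and stands in for the library's own cycle
// increment method:
func cycleIncSketch(ctx *Context) {
	ctx.PhaseCycle++
	ctx.Cycle++
	ctx.CyclesTotal++
	ctx.Time += ctx.TimePerCycle // TimePerCycle defaults to 0.001 sec (1 msec)
	ctx.SynCaCtr++               // reset at SlowInterval along with synaptic Ca
}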
@@ -742,7 +742,7 @@ func GlobalsReset(ctx *Context) {
// GlobalSetRew is a convenience function for setting the external reward
// state in Globals variables
func GlobalSetRew(ctx *Context, di uint32, rew float32, hasRew bool) {
- SetGlbV(ctx, di, GvHasRew, bools.ToFloat32(hasRew))
+ SetGlbV(ctx, di, GvHasRew, num.FromBool[float32](hasRew))
if hasRew {
SetGlbV(ctx, di, GvRew, rew)
} else {
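// The change above replaces bools.ToFloat32 with the generic num.FromBool
// helper from goki.dev/glop/num. A minimal stand-alone check of the same
// conversion as instantiated here (1 for true, 0 for false is assumed, which
// is what GvHasRew expects):
package main

import (
	"fmt"

	"goki.dev/glop/num"
)

func main() {
	fmt.Println(num.FromBool[float32](true), num.FromBool[float32](false)) // 1 0
}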
diff --git a/axon/deep_layers.go b/axon/deep_layers.go
index 93232381a..489efc93f 100644
--- a/axon/deep_layers.go
+++ b/axon/deep_layers.go
@@ -5,8 +5,8 @@
package axon
import (
- "github.com/emer/emergent/params"
- "github.com/goki/mat32"
+ "github.com/emer/emergent/v2/params"
+ "goki.dev/mat32/v2"
)
//gosl: start deep_layers
@@ -15,11 +15,11 @@ import (
// CaSpkP integrated spiking values in Super layers -- thresholded.
type BurstParams struct {
- // [def: 0.1] [max: 1] Relative component of threshold on superficial activation value, below which it does not drive Burst (and above which, Burst = CaSpkP). This is the distance between the average and maximum activation values within layer (e.g., 0 = average, 1 = max). Overall effective threshold is MAX of relative and absolute thresholds.
- ThrRel float32 `max:"1" def:"0.1" desc:"Relative component of threshold on superficial activation value, below which it does not drive Burst (and above which, Burst = CaSpkP). This is the distance between the average and maximum activation values within layer (e.g., 0 = average, 1 = max). Overall effective threshold is MAX of relative and absolute thresholds."`
+ // Relative component of threshold on superficial activation value, below which it does not drive Burst (and above which, Burst = CaSpkP). This is the distance between the average and maximum activation values within layer (e.g., 0 = average, 1 = max). Overall effective threshold is MAX of relative and absolute thresholds.
+ ThrRel float32 `max:"1" def:"0.1"`
- // [def: 0.1] [min: 0] [max: 1] Absolute component of threshold on superficial activation value, below which it does not drive Burst (and above which, Burst = CaSpkP). Overall effective threshold is MAX of relative and absolute thresholds.
- ThrAbs float32 `min:"0" max:"1" def:"0.1" desc:"Absolute component of threshold on superficial activation value, below which it does not drive Burst (and above which, Burst = CaSpkP). Overall effective threshold is MAX of relative and absolute thresholds."`
+ // Absolute component of threshold on superficial activation value, below which it does not drive Burst (and above which, Burst = CaSpkP). Overall effective threshold is MAX of relative and absolute thresholds.
+ ThrAbs float32 `min:"0" max:"1" def:"0.1"`
pad, pad1 float32
}
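// A hedged sketch of the effective Burst threshold described by ThrRel and
// ThrAbs above (hypothetical helper, not the library's ThrFmAvgMax below):
// the relative threshold sits between the layer average and maximum
// activation (0 = avg, 1 = max), and the overall threshold is the MAX of the
// relative and absolute components.
func burstThrSketch(thrRel, thrAbs, avg, mx float32) float32 {
	rel := avg + thrRel*(mx-avg)
	if rel > thrAbs {
		return rel
	}
	return thrAbs
}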
@@ -42,14 +42,14 @@ func (bp *BurstParams) ThrFmAvgMax(avg, mx float32) float32 {
// CTParams control the CT corticothalamic neuron special behavior
type CTParams struct {
- // [def: 0.05,0.1,1,2] gain factor for context excitatory input, which is constant as compared to the spiking input from other projections, so it must be downscaled accordingly. This can make a difference and may need to be scaled up or down.
- GeGain float32 `def:"0.05,0.1,1,2" desc:"gain factor for context excitatory input, which is constant as compared to the spiking input from other projections, so it must be downscaled accordingly. This can make a difference and may need to be scaled up or down."`
+ // gain factor for context excitatory input, which is constant as compared to the spiking input from other projections, so it must be downscaled accordingly. This can make a difference and may need to be scaled up or down.
+ GeGain float32 `def:"0.05,0.1,1,2"`
- // [def: 0,50] decay time constant for context Ge input -- if > 0, decays over time so intrinsic circuit dynamics have to take over. For single-step copy-based cases, set to 0, while longer-time-scale dynamics should use 50
- DecayTau float32 `def:"0,50" desc:"decay time constant for context Ge input -- if > 0, decays over time so intrinsic circuit dynamics have to take over. For single-step copy-based cases, set to 0, while longer-time-scale dynamics should use 50"`
+ // decay time constant for context Ge input -- if > 0, decays over time so intrinsic circuit dynamics have to take over. For single-step copy-based cases, set to 0, while longer-time-scale dynamics should use 50
+ DecayTau float32 `def:"0,50"`
- // [view: -] 1 / tau
- DecayDt float32 `view:"-" json:"-" xml:"-" desc:"1 / tau"`
+ // 1 / tau
+ DecayDt float32 `view:"-" json:"-" xml:"-"`
pad float32
}
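// A hedged sketch of how a nonzero DecayTau would be applied to the context
// Ge input each cycle, using the precomputed DecayDt = 1/DecayTau above
// (illustrative only; the actual update is in the CT layer code):
func decayCtxtGeSketch(ctxtGe, decayDt float32) float32 {
	return ctxtGe - decayDt*ctxtGe // exponential decay toward zero when DecayTau > 0
}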
@@ -73,14 +73,14 @@ func (cp *CTParams) Defaults() {
// the corresponding driver neuron Burst activation (or CaSpkP if not Super)
type PulvParams struct {
- // [def: 0.1] [min: 0.0] multiplier on driver input strength, multiplies CaSpkP from driver layer to produce Ge excitatory input to Pulv unit.
- DriveScale float32 `def:"0.1" min:"0.0" desc:"multiplier on driver input strength, multiplies CaSpkP from driver layer to produce Ge excitatory input to Pulv unit."`
+ // multiplier on driver input strength, multiplies CaSpkP from driver layer to produce Ge excitatory input to Pulv unit.
+ DriveScale float32 `def:"0.1" min:"0.0"`
- // [def: 0.6] [min: 0.01] Level of Max driver layer CaSpkP at which the drivers fully drive the burst phase activation. If there is weaker driver input, then (Max/FullDriveAct) proportion of the non-driver inputs remain and this critically prevents the network from learning to turn activation off, which is difficult and severely degrades learning.
- FullDriveAct float32 `def:"0.6" min:"0.01" desc:"Level of Max driver layer CaSpkP at which the drivers fully drive the burst phase activation. If there is weaker driver input, then (Max/FullDriveAct) proportion of the non-driver inputs remain and this critically prevents the network from learning to turn activation off, which is difficult and severely degrades learning."`
+ // Level of Max driver layer CaSpkP at which the drivers fully drive the burst phase activation. If there is weaker driver input, then (Max/FullDriveAct) proportion of the non-driver inputs remain and this critically prevents the network from learning to turn activation off, which is difficult and severely degrades learning.
+ FullDriveAct float32 `def:"0.6" min:"0.01"`
// index of layer that generates the driving activity into this one -- set via SetBuildConfig(DriveLayName) setting
- DriveLayIdx int32 `inactive:"+" desc:"index of layer that generates the driving activity into this one -- set via SetBuildConfig(DriveLayName) setting"`
+ DriveLayIdx int32 `inactive:"+"`
pad float32
}
diff --git a/axon/deep_net.go b/axon/deep_net.go
index 6baa5f15c..cb81fd49e 100644
--- a/axon/deep_net.go
+++ b/axon/deep_net.go
@@ -8,9 +8,9 @@ import (
"fmt"
"strings"
- "github.com/emer/emergent/params"
- "github.com/emer/emergent/prjn"
- "github.com/emer/emergent/relpos"
+ "github.com/emer/emergent/v2/params"
+ "github.com/emer/emergent/v2/prjn"
+ "github.com/emer/emergent/v2/relpos"
"golang.org/x/exp/maps"
)
diff --git a/axon/gpu.go b/axon/gpu.go
index cd599dda0..a9fa6e7a6 100644
--- a/axon/gpu.go
+++ b/axon/gpu.go
@@ -10,10 +10,10 @@ import (
"math"
"unsafe"
- "github.com/emer/empi/mpi"
- "github.com/goki/gi/oswin"
+ "github.com/emer/empi/v2/mpi"
"github.com/goki/vgpu/vgpu"
vk "github.com/goki/vulkan"
+ "goki.dev/goosi"
)
//go:embed shaders/*.spv
@@ -116,7 +116,7 @@ const CyclesN = 10
type PushOff struct {
// offset
- Off uint32 `desc:"offset"`
+ Off uint32
pad, pad1, pad2 uint32
}
@@ -126,71 +126,71 @@ type PushOff struct {
type GPU struct {
// if true, actually use the GPU
- On bool `desc:"if true, actually use the GPU"`
+ On bool
- RecFunTimes bool `desc:"if true, slower separate shader pipeline runs are used, with a CPU-sync Wait at the end, to enable timing information about each individual shader to be collected using the network FunTimer system. otherwise, only aggregate information is available about the entire Cycle call.`
+ // if true, slower separate shader pipeline runs are used, with a CPU-sync Wait at the end, to enable timing information about each individual shader to be collected using the network FunTimer system. Otherwise, only aggregate information is available about the entire Cycle call.
+ RecFunTimes bool
// if true, process each cycle one at a time. Otherwise, 10 cycles at a time are processed in one batch.
- CycleByCycle bool `desc:"if true, process each cycle one at a time. Otherwise, 10 cycles at a time are processed in one batch."`
+ CycleByCycle bool
- // [view: -] the network we operate on -- we live under this net
- Net *Network `view:"-" desc:"the network we operate on -- we live under this net"`
+ // the network we operate on -- we live under this net
+ Net *Network `view:"-"`
- // [view: -] the context we use
- Ctx *Context `view:"-" desc:"the context we use"`
+ // the context we use
+ Ctx *Context `view:"-"`
- // [view: -] the vgpu compute system
- Sys *vgpu.System `view:"-" desc:"the vgpu compute system"`
+ // the vgpu compute system
+ Sys *vgpu.System `view:"-"`
- // [view: -] VarSet = 0: the uniform LayerParams
- Params *vgpu.VarSet `view:"-" desc:"VarSet = 0: the uniform LayerParams"`
+ // VarSet = 0: the uniform LayerParams
+ Params *vgpu.VarSet `view:"-"`
- // [view: -] VarSet = 1: the storage indexes and PrjnParams
- Idxs *vgpu.VarSet `view:"-" desc:"VarSet = 1: the storage indexes and PrjnParams"`
+ // VarSet = 1: the storage indexes and PrjnParams
+ Idxs *vgpu.VarSet `view:"-"`
- // [view: -] VarSet = 2: the Storage buffer for RW state structs and neuron floats
- Structs *vgpu.VarSet `view:"-" desc:"VarSet = 2: the Storage buffer for RW state structs and neuron floats"`
+ // VarSet = 2: the Storage buffer for RW state structs and neuron floats
+ Structs *vgpu.VarSet `view:"-"`
- // [view: -] Varset = 3: the Storage buffer for synapses
- Syns *vgpu.VarSet `view:"-" desc:"Varset = 3: the Storage buffer for synapses"`
+ // VarSet = 3: the Storage buffer for synapses
+ Syns *vgpu.VarSet `view:"-"`
- // [view: -] Varset = 4: the Storage buffer for SynCa banks
- SynCas *vgpu.VarSet `view:"-" desc:"Varset = 4: the Storage buffer for SynCa banks"`
+ // VarSet = 4: the Storage buffer for SynCa banks
+ SynCas *vgpu.VarSet `view:"-"`
- // [view: -] for sequencing commands
- Semaphores map[string]vk.Semaphore `view:"-" desc:"for sequencing commands"`
+ // for sequencing commands
+ Semaphores map[string]vk.Semaphore `view:"-"`
- // [def: 64] [view: -] number of warp threads -- typically 64 -- must update all hlsl files if changed!
- NThreads int `view:"-" inactive:"-" def:"64" desc:"number of warp threads -- typically 64 -- must update all hlsl files if changed!"`
+ // number of warp threads -- typically 64 -- must update all hlsl files if changed!
+ NThreads int `view:"-" inactive:"-" def:"64"`
- // [view: -] maximum number of bytes per individual storage buffer element, from GPUProps.Limits.MaxStorageBufferRange
- MaxBufferBytes uint32 `view:"-" desc:"maximum number of bytes per individual storage buffer element, from GPUProps.Limits.MaxStorageBufferRange"`
+ // maximum number of bytes per individual storage buffer element, from GPUProps.Limits.MaxStorageBufferRange
+ MaxBufferBytes uint32 `view:"-"`
- // [view: -] bank of floats for GPU access
- SynapseCas0 []float32 `view:"-" desc:"bank of floats for GPU access"`
+ // bank of floats for GPU access
+ SynapseCas0 []float32 `view:"-"`
- // [view: -] bank of floats for GPU access
- SynapseCas1 []float32 `view:"-" desc:"bank of floats for GPU access"`
+ // bank of floats for GPU access
+ SynapseCas1 []float32 `view:"-"`
- // [view: -] bank of floats for GPU access
- SynapseCas2 []float32 `view:"-" desc:"bank of floats for GPU access"`
+ // bank of floats for GPU access
+ SynapseCas2 []float32 `view:"-"`
- // [view: -] bank of floats for GPU access
- SynapseCas3 []float32 `view:"-" desc:"bank of floats for GPU access"`
+ // bank of floats for GPU access
+ SynapseCas3 []float32 `view:"-"`
- // [view: -] bank of floats for GPU access
- SynapseCas4 []float32 `view:"-" desc:"bank of floats for GPU access"`
+ // bank of floats for GPU access
+ SynapseCas4 []float32 `view:"-"`
- // [view: -] bank of floats for GPU access
- SynapseCas5 []float32 `view:"-" desc:"bank of floats for GPU access"`
+ // bank of floats for GPU access
+ SynapseCas5 []float32 `view:"-"`
- // [view: -] bank of floats for GPU access
- SynapseCas6 []float32 `view:"-" desc:"bank of floats for GPU access"`
+ // bank of floats for GPU access
+ SynapseCas6 []float32 `view:"-"`
- // [view: -] bank of floats for GPU access
- SynapseCas7 []float32 `view:"-" desc:"bank of floats for GPU access"`
+ // bank of floats for GPU access
+ SynapseCas7 []float32 `view:"-"`
- // [view: -] tracks var binding
- DidBind map[string]bool `view:"-" desc:"tracks var binding"`
+ // tracks var binding
+ DidBind map[string]bool `view:"-"`
}
// ConfigGPUwithGUI turns on GPU mode in context of an active GUI where Vulkan
@@ -198,7 +198,7 @@ type GPU struct {
// Configures the GPU -- call after Network is Built, initialized, params are set,
// and everything is ready to run.
func (nt *Network) ConfigGPUwithGUI(ctx *Context) {
- oswin.TheApp.RunOnMain(func() {
+ goosi.TheApp.RunOnMain(func() {
nt.GPU.Config(ctx, nt)
})
fmt.Printf("Running on GPU: %s\n", TheGPU.DeviceName)
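// A hedged sketch of how a global synapse-Ca index maps onto the fixed-size
// banks above (SynapseCas0..SynapseCas7), given the per-buffer float limit
// (GPUMaxBuffFloats in NetIdxs). Hypothetical helper; the real indexing lives
// in the SynCa access functions:
func synCaBankSketch(idx, maxBuffFloats uint32) (bank, off uint32) {
	return idx / maxBuffFloats, idx % maxBuffFloats
}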
diff --git a/axon/helpers.go b/axon/helpers.go
index 66d67394e..81e9a9584 100644
--- a/axon/helpers.go
+++ b/axon/helpers.go
@@ -7,9 +7,9 @@ package axon
import (
"fmt"
- "github.com/emer/emergent/ecmd"
- "github.com/emer/empi/mpi"
- "github.com/goki/gi/gi"
+ "github.com/emer/emergent/v2/ecmd"
+ "github.com/emer/empi/v2/mpi"
+ "goki.dev/gi/v2/gi"
)
////////////////////////////////////////////////////
diff --git a/axon/hip_net.go b/axon/hip_net.go
index 5d1d0e358..dfb40b37b 100644
--- a/axon/hip_net.go
+++ b/axon/hip_net.go
@@ -5,79 +5,79 @@
package axon
import (
- "github.com/emer/emergent/emer"
- "github.com/emer/emergent/etime"
- "github.com/emer/emergent/evec"
- "github.com/emer/emergent/looper"
- "github.com/emer/emergent/prjn"
- "github.com/emer/etable/norm"
+ "github.com/emer/emergent/v2/emer"
+ "github.com/emer/emergent/v2/etime"
+ "github.com/emer/emergent/v2/evec"
+ "github.com/emer/emergent/v2/looper"
+ "github.com/emer/emergent/v2/prjn"
+ "goki.dev/etable/v2/norm"
)
// HipConfig have the hippocampus size and connectivity parameters
type HipConfig struct {
// size of EC2
- EC2Size evec.Vec2i `nest:"+" desc:"size of EC2"`
+ EC2Size evec.Vec2i `nest:"+"`
// number of EC3 pools (outer dimension)
- EC3NPool evec.Vec2i `nest:"+" desc:"number of EC3 pools (outer dimension)"`
+ EC3NPool evec.Vec2i `nest:"+"`
// number of neurons in one EC3 pool
- EC3NNrn evec.Vec2i `nest:"+" desc:"number of neurons in one EC3 pool"`
+ EC3NNrn evec.Vec2i `nest:"+"`
// number of neurons in one CA1 pool
- CA1NNrn evec.Vec2i `nest:"+" desc:"number of neurons in one CA1 pool"`
+ CA1NNrn evec.Vec2i `nest:"+"`
// size of CA3
- CA3Size evec.Vec2i `nest:"+" desc:"size of CA3"`
+ CA3Size evec.Vec2i `nest:"+"`
- // [def: 2.236] size of DG / CA3
- DGRatio float32 `def:"2.236" desc:"size of DG / CA3"`
+ // size of DG / CA3
+ DGRatio float32 `def:"2.236"`
- // [def: 0.1] percent connectivity from EC3 to EC2
- EC3ToEC2PCon float32 `def:"0.1" desc:"percent connectivity from EC3 to EC2"`
+ // percent connectivity from EC3 to EC2
+ EC3ToEC2PCon float32 `def:"0.1"`
- // [def: 0.25] percent connectivity from EC2 to DG
- EC2ToDGPCon float32 `def:"0.25" desc:"percent connectivity from EC2 to DG"`
+ // percent connectivity from EC2 to DG
+ EC2ToDGPCon float32 `def:"0.25"`
- // [def: 0.25] percent connectivity from EC2 to CA3
- EC2ToCA3PCon float32 `def:"0.25" desc:"percent connectivity from EC2 to CA3"`
+ // percent connectivity from EC2 to CA3
+ EC2ToCA3PCon float32 `def:"0.25"`
- // [def: 0.25] percent connectivity from CA3 to CA1
- CA3ToCA1PCon float32 `def:"0.25" desc:"percent connectivity from CA3 to CA1"`
+ // percent connectivity from CA3 to CA1
+ CA3ToCA1PCon float32 `def:"0.25"`
- // [def: 0.02] percent connectivity into CA3 from DG
- DGToCA3PCon float32 `def:"0.02" desc:"percent connectivity into CA3 from DG"`
+ // percent connectivity into CA3 from DG
+ DGToCA3PCon float32 `def:"0.02"`
// lateral radius of connectivity in EC2
- EC2LatRadius int `desc:"lateral radius of connectivity in EC2"`
+ EC2LatRadius int
// lateral gaussian sigma in EC2 for how quickly weights fall off with distance
- EC2LatSigma float32 `desc:"lateral gaussian sigma in EC2 for how quickly weights fall off with distance"`
+ EC2LatSigma float32
- // [def: 1] proportion of full mossy fiber strength (PrjnScale.Rel) for CA3 EDL in training, applied at the start of a trial to reduce DG -> CA3 strength. 1 = fully reduce strength, .5 = 50% reduction, etc
- MossyDelta float32 `def:"1" desc:"proportion of full mossy fiber strength (PrjnScale.Rel) for CA3 EDL in training, applied at the start of a trial to reduce DG -> CA3 strength. 1 = fully reduce strength, .5 = 50% reduction, etc"`
+ // proportion of full mossy fiber strength (PrjnScale.Rel) for CA3 EDL in training, applied at the start of a trial to reduce DG -> CA3 strength. 1 = fully reduce strength, .5 = 50% reduction, etc
+ MossyDelta float32 `def:"1"`
- // [def: 0.75] proportion of full mossy fiber strength (PrjnScale.Rel) for CA3 EDL in testing, applied during 2nd-3rd quarters to reduce DG -> CA3 strength. 1 = fully reduce strength, .5 = 50% reduction, etc
- MossyDeltaTest float32 `def:"0.75" desc:"proportion of full mossy fiber strength (PrjnScale.Rel) for CA3 EDL in testing, applied during 2nd-3rd quarters to reduce DG -> CA3 strength. 1 = fully reduce strength, .5 = 50% reduction, etc"`
+ // proportion of full mossy fiber strength (PrjnScale.Rel) for CA3 EDL in testing, applied during 2nd-3rd quarters to reduce DG -> CA3 strength. 1 = fully reduce strength, .5 = 50% reduction, etc
+ MossyDeltaTest float32 `def:"0.75"`
- // [def: 0.9] low theta modulation value for temporal difference EDL -- sets PrjnScale.Rel on CA1 <-> EC prjns consistent with Theta phase model
- ThetaLow float32 `def:"0.9" desc:"low theta modulation value for temporal difference EDL -- sets PrjnScale.Rel on CA1 <-> EC prjns consistent with Theta phase model"`
+ // low theta modulation value for temporal difference EDL -- sets PrjnScale.Rel on CA1 <-> EC prjns consistent with Theta phase model
+ ThetaLow float32 `def:"0.9"`
- // [def: 1] high theta modulation value for temporal difference EDL -- sets PrjnScale.Rel on CA1 <-> EC prjns consistent with Theta phase model
- ThetaHigh float32 `def:"1" desc:"high theta modulation value for temporal difference EDL -- sets PrjnScale.Rel on CA1 <-> EC prjns consistent with Theta phase model"`
+ // high theta modulation value for temporal difference EDL -- sets PrjnScale.Rel on CA1 <-> EC prjns consistent with Theta phase model
+ ThetaHigh float32 `def:"1"`
- // [def: true] flag for clamping the EC5 from EC5ClampSrc
- EC5Clamp bool `def:"true" desc:"flag for clamping the EC5 from EC5ClampSrc"`
+ // flag for clamping the EC5 from EC5ClampSrc
+ EC5Clamp bool `def:"true"`
- // [def: EC3] source layer for EC5 clamping activations in the plus phase -- biologically it is EC3 but can use an Input layer if available
- EC5ClampSrc string `def:"EC3" desc:"source layer for EC5 clamping activations in the plus phase -- biologically it is EC3 but can use an Input layer if available"`
+ // source layer for EC5 clamping activations in the plus phase -- biologically it is EC3 but can use an Input layer if available
+ EC5ClampSrc string `def:"EC3"`
- // [def: true] clamp the EC5 from EC5ClampSrc during testing as well as training -- this will overwrite any target values that might be used in stats (e.g., in the basic hip example), so it must be turned off there
- EC5ClampTest bool `def:"true" desc:"clamp the EC5 from EC5ClampSrc during testing as well as training -- this will overwrite any target values that might be used in stats (e.g., in the basic hip example), so it must be turned off there"`
+ // clamp the EC5 from EC5ClampSrc during testing as well as training -- this will overwrite any target values that might be used in stats (e.g., in the basic hip example), so it must be turned off there
+ EC5ClampTest bool `def:"true"`
- // [def: 0.1] threshold for binarizing EC5 clamp values -- any value above this is clamped to 1, else 0 -- helps produce a cleaner learning signal. Set to 0 to not perform any binarization.
- EC5ClampThr float32 `def:"0.1" desc:"threshold for binarizing EC5 clamp values -- any value above this is clamped to 1, else 0 -- helps produce a cleaner learning signal. Set to 0 to not perform any binarization."`
+ // threshold for binarizing EC5 clamp values -- any value above this is clamped to 1, else 0 -- helps produce a cleaner learning signal. Set to 0 to not perform any binarization.
+ EC5ClampThr float32 `def:"0.1"`
}
func (hip *HipConfig) Defaults() {
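// A hedged sketch of how DGRatio above ("size of DG / CA3") scales the DG
// layer relative to CA3Size; 2.236 ~= sqrt(5), i.e. roughly 5x the units of
// CA3. Hypothetical helper, not the library's own network builder:
func dgSizeSketch(ca3X, ca3Y int, dgRatio float32) (dgX, dgY int) {
	return int(float32(ca3X) * dgRatio), int(float32(ca3Y) * dgRatio)
}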
diff --git a/axon/inhib.go b/axon/inhib.go
index 9e5707f80..4364298ff 100644
--- a/axon/inhib.go
+++ b/axon/inhib.go
@@ -7,7 +7,7 @@ package axon
import (
"github.com/emer/axon/fsfffb"
"github.com/goki/gosl/slbool"
- "github.com/goki/mat32"
+ "goki.dev/mat32/v2"
)
//gosl: hlsl inhib
@@ -24,23 +24,23 @@ import (
// average activity within a target range.
type ActAvgParams struct {
- // [min: 0] [step: 0.01] [typically 0.01 - 0.2] nominal estimated average activity level in the layer, which is used in computing the scaling factor on sending projections from this layer. In general it should roughly match the layer ActAvg.ActMAvg value, which can be logged using the axon.LogAddDiagnosticItems function. If layers receiving from this layer are not getting enough Ge excitation, then this Nominal level can be lowered to increase projection strength (fewer active neurons means each one contributes more, so scaling factor goes as the inverse of activity level), or vice-versa if Ge is too high. It is also the basis for the target activity level used for the AdaptGi option -- see the Offset which is added to this value.
- Nominal float32 `min:"0" step:"0.01" desc:"[typically 0.01 - 0.2] nominal estimated average activity level in the layer, which is used in computing the scaling factor on sending projections from this layer. In general it should roughly match the layer ActAvg.ActMAvg value, which can be logged using the axon.LogAddDiagnosticItems function. If layers receiving from this layer are not getting enough Ge excitation, then this Nominal level can be lowered to increase projection strength (fewer active neurons means each one contributes more, so scaling factor goes as the inverse of activity level), or vice-versa if Ge is too high. It is also the basis for the target activity level used for the AdaptGi option -- see the Offset which is added to this value."`
+ // nominal estimated average activity level in the layer (typically 0.01 - 0.2), which is used in computing the scaling factor on sending projections from this layer. In general it should roughly match the layer ActAvg.ActMAvg value, which can be logged using the axon.LogAddDiagnosticItems function. If layers receiving from this layer are not getting enough Ge excitation, then this Nominal level can be lowered to increase projection strength (fewer active neurons means each one contributes more, so scaling factor goes as the inverse of activity level), or vice-versa if Ge is too high. It is also the basis for the target activity level used for the AdaptGi option -- see the Offset which is added to this value.
+ Nominal float32 `min:"0" step:"0.01"`
// enable adapting of layer inhibition Gi multiplier factor (stored in layer GiMult value) to maintain a Target layer level of ActAvg.ActMAvg. This generally works well and improves the long-term stability of the models. It is not enabled by default because it depends on having established a reasonable Nominal + Offset target activity level.
- AdaptGi slbool.Bool `desc:"enable adapting of layer inhibition Gi multiplier factor (stored in layer GiMult value) to maintain a Target layer level of ActAvg.ActMAvg. This generally works well and improves the long-term stability of the models. It is not enabled by default because it depends on having established a reasonable Nominal + Offset target activity level."`
+ AdaptGi slbool.Bool
- // [def: 0] [viewif: AdaptGi] [min: 0] [step: 0.01] offset to add to Nominal for the target average activity that drives adaptation of Gi for this layer. Typically the Nominal level is good, but sometimes Nominal must be adjusted up or down to achieve desired Ge scaling, so this Offset can compensate accordingly.
- Offset float32 `def:"0" min:"0" step:"0.01" viewif:"AdaptGi" desc:"offset to add to Nominal for the target average activity that drives adaptation of Gi for this layer. Typically the Nominal level is good, but sometimes Nominal must be adjusted up or down to achieve desired Ge scaling, so this Offset can compensate accordingly."`
+ // offset to add to Nominal for the target average activity that drives adaptation of Gi for this layer. Typically the Nominal level is good, but sometimes Nominal must be adjusted up or down to achieve desired Ge scaling, so this Offset can compensate accordingly.
+ Offset float32 `def:"0" min:"0" step:"0.01" viewif:"AdaptGi"`
- // [def: 0] [viewif: AdaptGi] tolerance for higher than Target target average activation as a proportion of that target value (0 = exactly the target, 0.2 = 20% higher than target) -- only once activations move outside this tolerance are inhibitory values adapted.
- HiTol float32 `def:"0" viewif:"AdaptGi" desc:"tolerance for higher than Target target average activation as a proportion of that target value (0 = exactly the target, 0.2 = 20% higher than target) -- only once activations move outside this tolerance are inhibitory values adapted."`
+ // tolerance for average activation higher than the Target, expressed as a proportion of that target value (0 = exactly the target, 0.2 = 20% higher than target) -- only once activations move outside this tolerance are inhibitory values adapted.
+ HiTol float32 `def:"0" viewif:"AdaptGi"`
- // [def: 0.8] [viewif: AdaptGi] tolerance for lower than Target target average activation as a proportion of that target value (0 = exactly the target, 0.5 = 50% lower than target) -- only once activations move outside this tolerance are inhibitory values adapted.
- LoTol float32 `def:"0.8" viewif:"AdaptGi" desc:"tolerance for lower than Target target average activation as a proportion of that target value (0 = exactly the target, 0.5 = 50% lower than target) -- only once activations move outside this tolerance are inhibitory values adapted."`
+ // tolerance for average activation lower than the Target, expressed as a proportion of that target value (0 = exactly the target, 0.5 = 50% lower than target) -- only once activations move outside this tolerance are inhibitory values adapted.
+ LoTol float32 `def:"0.8" viewif:"AdaptGi"`
- // [def: 0.1] [viewif: AdaptGi] rate of Gi adaptation as function of AdaptRate * (Target - ActMAvg) / Target -- occurs at spaced intervals determined by Network.SlowInterval value -- slower values such as 0.01 may be needed for large networks and sparse layers.
- AdaptRate float32 `def:"0.1" viewif:"AdaptGi" desc:"rate of Gi adaptation as function of AdaptRate * (Target - ActMAvg) / Target -- occurs at spaced intervals determined by Network.SlowInterval value -- slower values such as 0.01 may be needed for large networks and sparse layers."`
+ // rate of Gi adaptation as function of AdaptRate * (Target - ActMAvg) / Target -- occurs at spaced intervals determined by Network.SlowInterval value -- slower values such as 0.01 may be needed for large networks and sparse layers.
+ AdaptRate float32 `def:"0.1" viewif:"AdaptGi"`
pad, pad1 float32
}
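// A hedged sketch of the Gi adaptation described by the fields above, assuming
// the sign convention that above-target activity increases the layer GiMult
// (more inhibition); the target is Nominal + Offset, and adaptation only
// occurs once ActMAvg leaves the LoTol..HiTol band around that target.
// Hypothetical helper; the library's own version is ActAvgParams.Adapt below:
func adaptGiSketch(giMult *float32, nominal, offset, hiTol, loTol, adaptRate, actMAvg float32) bool {
	trg := nominal + offset
	del := (actMAvg - trg) / trg
	if del < -loTol || del > hiTol {
		*giMult += adaptRate * del
		return true
	}
	return false
}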
@@ -85,31 +85,31 @@ func (aa *ActAvgParams) Adapt(gimult *float32, act float32) bool {
type TopoInhibParams struct {
// use topographic inhibition
- On slbool.Bool `desc:"use topographic inhibition"`
+ On slbool.Bool
- // [viewif: On] half-width of topographic inhibition within layer
- Width int32 `viewif:"On" desc:"half-width of topographic inhibition within layer"`
+ // half-width of topographic inhibition within layer
+ Width int32 `viewif:"On"`
- // [viewif: On] normalized gaussian sigma as proportion of Width, for gaussian weighting
- Sigma float32 `viewif:"On" desc:"normalized gaussian sigma as proportion of Width, for gaussian weighting"`
+ // normalized gaussian sigma as proportion of Width, for gaussian weighting
+ Sigma float32 `viewif:"On"`
- // [viewif: On] half-width of topographic inhibition within layer
- Wrap slbool.Bool `viewif:"On" desc:"half-width of topographic inhibition within layer"`
+ // whether to wrap the topographic inhibition pattern around the layer edges (toroidal geometry) instead of clipping at the edges
+ Wrap slbool.Bool `viewif:"On"`
- // [viewif: On] overall inhibition multiplier for topographic inhibition (generally <= 1)
- Gi float32 `viewif:"On" desc:"overall inhibition multiplier for topographic inhibition (generally <= 1)"`
+ // overall inhibition multiplier for topographic inhibition (generally <= 1)
+ Gi float32 `viewif:"On"`
- // [viewif: On] overall inhibitory contribution from feedforward inhibition -- multiplies average Ge from pools or Ge from neurons
- FF float32 `viewif:"On" desc:"overall inhibitory contribution from feedforward inhibition -- multiplies average Ge from pools or Ge from neurons"`
+ // overall inhibitory contribution from feedforward inhibition -- multiplies average Ge from pools or Ge from neurons
+ FF float32 `viewif:"On"`
- // [viewif: On] overall inhibitory contribution from feedback inhibition -- multiplies average activation from pools or Act from neurons
- FB float32 `viewif:"On" desc:"overall inhibitory contribution from feedback inhibition -- multiplies average activation from pools or Act from neurons"`
+ // overall inhibitory contribution from feedback inhibition -- multiplies average activation from pools or Act from neurons
+ FB float32 `viewif:"On"`
- // [viewif: On] feedforward zero point for Ge per neuron (summed Ge is compared to N * FF0) -- below this level, no FF inhibition is computed, above this it is FF * (Sum Ge - N * FF0)
- FF0 float32 `viewif:"On" desc:"feedforward zero point for Ge per neuron (summed Ge is compared to N * FF0) -- below this level, no FF inhibition is computed, above this it is FF * (Sum Ge - N * FF0)"`
+ // feedforward zero point for Ge per neuron (summed Ge is compared to N * FF0) -- below this level, no FF inhibition is computed, above this it is FF * (Sum Ge - N * FF0)
+ FF0 float32 `viewif:"On"`
// weight value at width -- to assess the value of Sigma
- WidthWt float32 `inactive:"+" desc:"weight value at width -- to assess the value of Sigma"`
+ WidthWt float32 `inactive:"+"`
pad, pad1, pad2 float32
}
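// A hedged sketch of the feedforward term of topographic inhibition described
// by FF and FF0 above: no FF inhibition below the N*FF0 zero point, and
// FF * (sum Ge - N*FF0) above it. Hypothetical helper; the library's own
// computation is GiFmGeAct below:
func topoFFSketch(ff, ff0, sumGe float32, n int) float32 {
	thr := float32(n) * ff0
	if sumGe <= thr {
		return 0
	}
	return ff * (sumGe - thr)
}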
@@ -146,14 +146,14 @@ func (ti *TopoInhibParams) GiFmGeAct(ge, act, ff0 float32) float32 {
// which is used for Ge rescaling and potentially for adapting inhibition over time
type InhibParams struct {
- // [view: inline] layer-level and pool-level average activation initial values and updating / adaptation thereof -- initial values help determine initial scaling factors.
- ActAvg ActAvgParams `view:"inline" desc:"layer-level and pool-level average activation initial values and updating / adaptation thereof -- initial values help determine initial scaling factors."`
+ // layer-level and pool-level average activation initial values and updating / adaptation thereof -- initial values help determine initial scaling factors.
+ ActAvg ActAvgParams `view:"inline"`
- // [view: inline] inhibition across the entire layer -- inputs generally use Gi = 0.8 or 0.9, 1.3 or higher for sparse layers. If the layer has sub-pools (4D shape) then this is effectively between-pool inhibition.
- Layer fsfffb.GiParams `view:"inline" desc:"inhibition across the entire layer -- inputs generally use Gi = 0.8 or 0.9, 1.3 or higher for sparse layers. If the layer has sub-pools (4D shape) then this is effectively between-pool inhibition."`
+ // inhibition across the entire layer -- inputs generally use Gi = 0.8 or 0.9, 1.3 or higher for sparse layers. If the layer has sub-pools (4D shape) then this is effectively between-pool inhibition.
+ Layer fsfffb.GiParams `view:"inline"`
- // [view: inline] inhibition within sub-pools of units, for layers with 4D shape -- almost always need this if the layer has pools.
- Pool fsfffb.GiParams `view:"inline" desc:"inhibition within sub-pools of units, for layers with 4D shape -- almost always need this if the layer has pools."`
+ // inhibition within sub-pools of units, for layers with 4D shape -- almost always need this if the layer has pools.
+ Pool fsfffb.GiParams `view:"inline"`
}
func (ip *InhibParams) Update() {
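// A hedged illustration of setting the layer-level Gi mentioned above via the
// emergent params system; the "Layer.Inhib.Layer.Gi" path follows the usual
// params convention and the value is model-dependent (assumed example):
var exampleInhibParams = params.Params{
	"Layer.Inhib.Layer.Gi": "0.9", // inputs generally 0.8-0.9; 1.3+ for sparse layers
}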
diff --git a/axon/layer.go b/axon/layer.go
index 0357b2fb8..0e43aa24b 100644
--- a/axon/layer.go
+++ b/axon/layer.go
@@ -10,11 +10,11 @@ import (
"math/rand"
"strings"
- "github.com/emer/emergent/erand"
- "github.com/emer/etable/etensor"
+ "github.com/emer/emergent/v2/erand"
"github.com/goki/ki/ints"
"github.com/goki/ki/ki"
"github.com/goki/ki/kit"
+ "goki.dev/etable/v2/etensor"
)
// index naming:
@@ -27,7 +27,7 @@ type Layer struct {
LayerBase
// all layer-level parameters -- these must remain constant once configured
- Params *LayerParams `desc:"all layer-level parameters -- these must remain constant once configured"`
+ Params *LayerParams
}
var KiT_Layer = kit.Types.AddType(&Layer{}, LayerProps)
diff --git a/axon/layer_compute.go b/axon/layer_compute.go
index bd0be08ab..a9af52336 100644
--- a/axon/layer_compute.go
+++ b/axon/layer_compute.go
@@ -8,8 +8,8 @@ import (
"fmt"
"log"
- "github.com/emer/etable/minmax"
- "github.com/goki/mat32"
+ "goki.dev/etable/v2/minmax"
+ "goki.dev/mat32/v2"
)
// index naming:
diff --git a/axon/layer_test.go b/axon/layer_test.go
index 526df87d9..22608730e 100644
--- a/axon/layer_test.go
+++ b/axon/layer_test.go
@@ -5,10 +5,10 @@ import (
"os"
"testing"
- "github.com/emer/emergent/prjn"
- "github.com/emer/etable/etensor"
+ "github.com/emer/emergent/v2/prjn"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
+ "goki.dev/etable/v2/etensor"
)
func TestLayer(t *testing.T) {
diff --git a/axon/layerbase.go b/axon/layerbase.go
index e6e228771..d08e0119c 100644
--- a/axon/layerbase.go
+++ b/axon/layerbase.go
@@ -12,14 +12,14 @@ import (
"math"
"strconv"
- "github.com/emer/emergent/emer"
- "github.com/emer/emergent/params"
- "github.com/emer/emergent/relpos"
- "github.com/emer/emergent/weights"
- "github.com/emer/etable/etensor"
- "github.com/goki/gi/giv"
+ "github.com/emer/emergent/v2/emer"
+ "github.com/emer/emergent/v2/params"
+ "github.com/emer/emergent/v2/relpos"
+ "github.com/emer/emergent/v2/weights"
"github.com/goki/ki/indent"
- "github.com/goki/mat32"
+ "goki.dev/etable/v2/etensor"
+ "goki.dev/gi/v2/giv"
+ "goki.dev/mat32/v2"
)
// LayerBase manages the structural elements of the layer, which are common
@@ -30,77 +30,77 @@ import (
// accessed via the AxonLay field.
type LayerBase struct {
- // [view: -] we need a pointer to ourselves as an AxonLayer (which subsumes emer.Layer), which can always be used to extract the true underlying type of object when layer is embedded in other structs -- function receivers do not have this ability so this is necessary.
- AxonLay AxonLayer `copy:"-" json:"-" xml:"-" view:"-" desc:"we need a pointer to ourselves as an AxonLayer (which subsumes emer.Layer), which can always be used to extract the true underlying type of object when layer is embedded in other structs -- function receivers do not have this ability so this is necessary."`
+ // we need a pointer to ourselves as an AxonLayer (which subsumes emer.Layer), which can always be used to extract the true underlying type of object when layer is embedded in other structs -- function receivers do not have this ability so this is necessary.
+ AxonLay AxonLayer `copy:"-" json:"-" xml:"-" view:"-"`
- // [view: -] our parent network, in case we need to use it to find other layers etc -- set when added by network
- Network *Network `copy:"-" json:"-" xml:"-" view:"-" desc:"our parent network, in case we need to use it to find other layers etc -- set when added by network"`
+ // our parent network, in case we need to use it to find other layers etc -- set when added by network
+ Network *Network `copy:"-" json:"-" xml:"-" view:"-"`
// Name of the layer -- this must be unique within the network, which has a map for quick lookup and layers are typically accessed directly by name
- Nm string `desc:"Name of the layer -- this must be unique within the network, which has a map for quick lookup and layers are typically accessed directly by name"`
+ Nm string
// Class is for applying parameter styles, can be space separated multiple tags
- Cls string `desc:"Class is for applying parameter styles, can be space separated multple tags"`
+ Cls string
// inactivate this layer -- allows for easy experimentation
- Off bool `desc:"inactivate this layer -- allows for easy experimentation"`
+ Off bool
// shape of the layer -- can be 2D for basic layers and 4D for layers with sub-groups (hypercolumns) -- order is outer-to-inner (row major) so Y then X for 2D and for 4D: Y-X unit pools then Y-X neurons within pools
- Shp etensor.Shape `desc:"shape of the layer -- can be 2D for basic layers and 4D for layers with sub-groups (hypercolumns) -- order is outer-to-inner (row major) so Y then X for 2D and for 4D: Y-X unit pools then Y-X neurons within pools"`
+ Shp etensor.Shape
// type of layer -- Hidden, Input, Target, Compare, or extended type in specialized algorithms -- matches against .Class parameter styles (e.g., .Hidden etc)
- Typ LayerTypes `desc:"type of layer -- Hidden, Input, Target, Compare, or extended type in specialized algorithms -- matches against .Class parameter styles (e.g., .Hidden etc)"`
+ Typ LayerTypes
- // [view: inline] [tableview: -] Spatial relationship to other layer, determines positioning
- Rel relpos.Rel `tableview:"-" view:"inline" desc:"Spatial relationship to other layer, determines positioning"`
+ // Spatial relationship to other layer, determines positioning
+ Rel relpos.Rel `tableview:"-" view:"inline"`
- // [tableview: -] position of lower-left-hand corner of layer in 3D space, computed from Rel. Layers are in X-Y width - height planes, stacked vertically in Z axis.
- Ps mat32.Vec3 `tableview:"-" desc:"position of lower-left-hand corner of layer in 3D space, computed from Rel. Layers are in X-Y width - height planes, stacked vertically in Z axis."`
+ // position of lower-left-hand corner of layer in 3D space, computed from Rel. Layers are in X-Y width - height planes, stacked vertically in Z axis.
+ Ps mat32.Vec3 `tableview:"-"`
- // [view: -] a 0..n-1 index of the position of the layer within list of layers in the network. For Axon networks, it only has significance in determining who gets which weights for enforcing initial weight symmetry -- higher layers get weights from lower layers.
- Idx int `view:"-" inactive:"-" desc:"a 0..n-1 index of the position of the layer within list of layers in the network. For Axon networks, it only has significance in determining who gets which weights for enforcing initial weight symmetry -- higher layers get weights from lower layers."`
+ // a 0..n-1 index of the position of the layer within list of layers in the network. For Axon networks, it only has significance in determining who gets which weights for enforcing initial weight symmetry -- higher layers get weights from lower layers.
+ Idx int `view:"-" inactive:"-"`
- // [view: -] number of neurons in the layer
- NNeurons uint32 `view:"-" desc:"number of neurons in the layer"`
+ // number of neurons in the layer
+ NNeurons uint32 `view:"-"`
- // [view: -] starting index of neurons for this layer within the global Network list
- NeurStIdx uint32 `view:"-" inactive:"-" desc:"starting index of neurons for this layer within the global Network list"`
+ // starting index of neurons for this layer within the global Network list
+ NeurStIdx uint32 `view:"-" inactive:"-"`
- // [view: -] number of pools based on layer shape -- at least 1 for layer pool + 4D subpools
- NPools uint32 `view:"-" desc:"number of pools based on layer shape -- at least 1 for layer pool + 4D subpools"`
+ // number of pools based on layer shape -- at least 1 for layer pool + 4D subpools
+ NPools uint32 `view:"-"`
- // [view: -] maximum amount of input data that can be processed in parallel in one pass of the network. Neuron, Pool, Vals storage is allocated to hold this amount.
- MaxData uint32 `view:"-" desc:"maximum amount of input data that can be processed in parallel in one pass of the network. Neuron, Pool, Vals storage is allocated to hold this amount."`
+ // maximum amount of input data that can be processed in parallel in one pass of the network. Neuron, Pool, Vals storage is allocated to hold this amount.
+ MaxData uint32 `view:"-"`
- // [view: -] indexes of representative units in the layer, for computationally expensive stats or displays -- also set RepShp
- RepIxs []int `view:"-" desc:"indexes of representative units in the layer, for computationally expensive stats or displays -- also set RepShp"`
+ // indexes of representative units in the layer, for computationally expensive stats or displays -- also set RepShp
+ RepIxs []int `view:"-"`
- // [view: -] shape of representative units in the layer -- if RepIxs is empty or .Shp is nil, use overall layer shape
- RepShp etensor.Shape `view:"-" desc:"shape of representative units in the layer -- if RepIxs is empty or .Shp is nil, use overall layer shape"`
+ // shape of representative units in the layer -- if RepIxs is empty or .Shp is nil, use overall layer shape
+ RepShp etensor.Shape `view:"-"`
// list of receiving projections into this layer from other layers
- RcvPrjns AxonPrjns `desc:"list of receiving projections into this layer from other layers"`
+ RcvPrjns AxonPrjns
// list of sending projections from this layer to other layers
- SndPrjns AxonPrjns `desc:"list of sending projections from this layer to other layers"`
+ SndPrjns AxonPrjns
// layer-level state values that are updated during computation -- one for each data parallel -- is a sub-slice of network full set
- Vals []LayerVals `desc:"layer-level state values that are updated during computation -- one for each data parallel -- is a sub-slice of network full set"`
+ Vals []LayerVals
// computes FS-FFFB inhibition and other pooled, aggregate state variables -- has at least 1 for entire layer (lpl = layer pool), and one for each sub-pool if shape supports that (4D) * 1 per data parallel (inner loop). This is a sub-slice from overall Network Pools slice. You must iterate over index and use pointer to modify values.
- Pools []Pool `desc:"computes FS-FFFB inhibition and other pooled, aggregate state variables -- has at least 1 for entire layer (lpl = layer pool), and one for each sub-pool if shape supports that (4D) * 1 per data parallel (inner loop). This is a sub-slice from overall Network Pools slice. You must iterate over index and use pointer to modify values."`
+ Pools []Pool
- // [view: -] [Neurons][Data] external input values for this layer, allocated from network global Exts slice
- Exts []float32 `view:"-" desc:"[Neurons][Data] external input values for this layer, allocated from network global Exts slice"`
+ // external input values for this layer, in [Neurons][Data] order, allocated from network global Exts slice
+ Exts []float32 `view:"-"`
- // [tableview: -] configuration data set when the network is configured, that is used during the network Build() process via PostBuild method, after all the structure of the network has been fully constructed. In particular, the Params is nil until Build, so setting anything specific in there (e.g., an index to another layer) must be done as a second pass. Note that Params are all applied after Build and can set user-modifiable params, so this is for more special algorithm structural parameters set during ConfigNet() methods.,
- BuildConfig map[string]string `tableview:"-" desc:"configuration data set when the network is configured, that is used during the network Build() process via PostBuild method, after all the structure of the network has been fully constructed. In particular, the Params is nil until Build, so setting anything specific in there (e.g., an index to another layer) must be done as a second pass. Note that Params are all applied after Build and can set user-modifiable params, so this is for more special algorithm structural parameters set during ConfigNet() methods.,"`
+ // configuration data set when the network is configured, that is used during the network Build() process via the PostBuild method, after all the structure of the network has been fully constructed. In particular, the Params is nil until Build, so setting anything specific in there (e.g., an index to another layer) must be done as a second pass. Note that Params are all applied after Build and can set user-modifiable params, so this is for more special algorithm structural parameters set during ConfigNet() methods.
+ BuildConfig map[string]string `tableview:"-"`
- // [tableview: -] default parameters that are applied prior to user-set parameters -- these are useful for specific layer functionality in specialized brain areas (e.g., PVLV, BG etc) not associated with a layer type, which otherwise is used to hard-code initial default parameters -- typically just set to a literal map.
- DefParams params.Params `tableview:"-" desc:"default parameters that are applied prior to user-set parameters -- these are useful for specific layer functionality in specialized brain areas (e.g., PVLV, BG etc) not associated with a layer type, which otherwise is used to hard-code initial default parameters -- typically just set to a literal map."`
+ // default parameters that are applied prior to user-set parameters -- these are useful for specific layer functionality in specialized brain areas (e.g., PVLV, BG etc) not associated with a layer type, which otherwise is used to hard-code initial default parameters -- typically just set to a literal map.
+ DefParams params.Params `tableview:"-"`
- // [tableview: -] provides a history of parameters applied to the layer
- ParamsHistory params.HistoryImpl `tableview:"-" desc:"provides a history of parameters applied to the layer"`
+ // provides a history of parameters applied to the layer
+ ParamsHistory params.HistoryImpl `tableview:"-"`
}
// emer.Layer interface methods
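// A hedged sketch of the "literal map" usage described for BuildConfig above,
// e.g. naming the driver layer for a Pulvinar layer. The DriveLayName key is
// the one referenced by the DriveLayIdx comment in deep_layers.go; the layer
// name "V1" and the helper itself are just illustrative:
func configPulvDriverSketch(ly *Layer) {
	ly.BuildConfig = map[string]string{"DriveLayName": "V1"}
}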
diff --git a/axon/layerparams.go b/axon/layerparams.go
index e2b126b2b..6849e3458 100644
--- a/axon/layerparams.go
+++ b/axon/layerparams.go
@@ -7,7 +7,7 @@ package axon
import (
"encoding/json"
- "github.com/goki/mat32"
+ "goki.dev/mat32/v2"
)
//gosl: hlsl layerparams
@@ -29,46 +29,46 @@ import (
type LayerIdxs struct {
// layer index
- LayIdx uint32 `inactive:"+" desc:"layer index"`
+ LayIdx uint32 `inactive:"+"`
// maximum number of data parallel elements
- MaxData uint32 `inactive:"+" desc:"maximum number of data parallel elements"`
+ MaxData uint32 `inactive:"+"`
// start of pools for this layer -- first one is always the layer-wide pool
- PoolSt uint32 `inactive:"+" desc:"start of pools for this layer -- first one is always the layer-wide pool"`
+ PoolSt uint32 `inactive:"+"`
// start of neurons for this layer in global array (same as Layer.NeurStIdx)
- NeurSt uint32 `inactive:"+" desc:"start of neurons for this layer in global array (same as Layer.NeurStIdx)"`
+ NeurSt uint32 `inactive:"+"`
// number of neurons in layer
- NeurN uint32 `inactive:"+" desc:"number of neurons in layer"`
+ NeurN uint32 `inactive:"+"`
// start index into RecvPrjns global array
- RecvSt uint32 `inactive:"+" desc:"start index into RecvPrjns global array"`
+ RecvSt uint32 `inactive:"+"`
// number of recv projections
- RecvN uint32 `inactive:"+" desc:"number of recv projections"`
+ RecvN uint32 `inactive:"+"`
// start index into SendPrjns global array
- SendSt uint32 `inactive:"+" desc:"start index into RecvPrjns global array"`
+ SendSt uint32 `inactive:"+"`
// number of send projections
- SendN uint32 `inactive:"+" desc:"number of recv projections"`
+ SendN uint32 `inactive:"+"`
// starting index in network global Exts list of external input for this layer -- only for Input / Target / Compare layer types
- ExtsSt uint32 `inactive:"+" desc:"starting index in network global Exts list of external input for this layer -- only for Input / Target / Compare layer types"`
+ ExtsSt uint32 `inactive:"+"`
// layer shape Pools Y dimension -- 1 for 2D
- ShpPlY int32 `inactive:"+" desc:"layer shape Pools Y dimension -- 1 for 2D"`
+ ShpPlY int32 `inactive:"+"`
// layer shape Pools X dimension -- 1 for 2D
- ShpPlX int32 `inactive:"+" desc:"layer shape Pools X dimension -- 1 for 2D"`
+ ShpPlX int32 `inactive:"+"`
// layer shape Units Y dimension
- ShpUnY int32 `inactive:"+" desc:"layer shape Units Y dimension"`
+ ShpUnY int32 `inactive:"+"`
// layer shape Units X dimension
- ShpUnX int32 `inactive:"+" desc:"layer shape Units X dimension"`
+ ShpUnX int32 `inactive:"+"`
pad, pad1 uint32
}
@@ -95,16 +95,16 @@ func (lx *LayerIdxs) ExtIdx(ni, di uint32) uint32 {
type LayerInhibIdxs struct {
// idx of Layer to get layer-level inhibition from -- set during Build from BuildConfig LayInhib1Name if present -- -1 if not used
- Idx1 int32 `inactive:"+" desc:"idx of Layer to get layer-level inhibition from -- set during Build from BuildConfig LayInhib1Name if present -- -1 if not used"`
+ Idx1 int32 `inactive:"+"`
// idx of Layer to get layer-level inhibition from -- set during Build from BuildConfig LayInhib2Name if present -- -1 if not used
- Idx2 int32 `inactive:"+" desc:"idx of Layer to get layer-level inhibition from -- set during Build from BuildConfig LayInhib2Name if present -- -1 if not used"`
+ Idx2 int32 `inactive:"+"`
// idx of Layer to get layer-level inhibition from -- set during Build from BuildConfig LayInhib3Name if present -- -1 if not used
- Idx3 int32 `inactive:"+" desc:"idx of Layer to get layer-level inhibition from -- set during Build from BuildConfig LayInhib3Name if present -- -1 if not used"`
+ Idx3 int32 `inactive:"+"`
// idx of Layer to get layer-level inhibition from -- set during Build from BuildConfig LayInhib4Name if present -- -1 if not used
- Idx4 int32 `inactive:"+" desc:"idx of Layer to geta layer-level inhibition from -- set during Build from BuildConfig LayInhib4Name if present -- -1 if not used"`
+ Idx4 int32 `inactive:"+"`
}
// note: the following must appear above LayerParams for GPU usage which is order sensitive
@@ -134,60 +134,60 @@ func SetNeuronExtPosNeg(ctx *Context, ni, di uint32, val float32) {
type LayerParams struct {
// functional type of layer -- determines functional code path for specialized layer types, and is synchronized with the Layer.Typ value
- LayType LayerTypes `desc:"functional type of layer -- determines functional code path for specialized layer types, and is synchronized with the Layer.Typ value"`
+ LayType LayerTypes
pad, pad1, pad2 int32
- // [view: add-fields] Activation parameters and methods for computing activations
- Acts ActParams `view:"add-fields" desc:"Activation parameters and methods for computing activations"`
+ // Activation parameters and methods for computing activations
+ Acts ActParams `view:"add-fields"`
- // [view: add-fields] Inhibition parameters and methods for computing layer-level inhibition
- Inhib InhibParams `view:"add-fields" desc:"Inhibition parameters and methods for computing layer-level inhibition"`
+ // Inhibition parameters and methods for computing layer-level inhibition
+ Inhib InhibParams `view:"add-fields"`
- // [view: inline] indexes of layers that contribute between-layer inhibition to this layer -- set these indexes via BuildConfig LayInhibXName (X = 1, 2...)
- LayInhib LayerInhibIdxs `view:"inline" desc:"indexes of layers that contribute between-layer inhibition to this layer -- set these indexes via BuildConfig LayInhibXName (X = 1, 2...)"`
+ // indexes of layers that contribute between-layer inhibition to this layer -- set these indexes via BuildConfig LayInhibXName (X = 1, 2...)
+ LayInhib LayerInhibIdxs `view:"inline"`
- // [view: add-fields] Learning parameters and methods that operate at the neuron level
- Learn LearnNeurParams `view:"add-fields" desc:"Learning parameters and methods that operate at the neuron level"`
+ // Learning parameters and methods that operate at the neuron level
+ Learn LearnNeurParams `view:"add-fields"`
- // [view: inline] [viewif: LayType=SuperLayer] BurstParams determine how the 5IB Burst activation is computed from CaSpkP integrated spiking values in Super layers -- thresholded.
- Bursts BurstParams `viewif:"LayType=SuperLayer" view:"inline" desc:"BurstParams determine how the 5IB Burst activation is computed from CaSpkP integrated spiking values in Super layers -- thresholded."`
+ // BurstParams determine how the 5IB Burst activation is computed from CaSpkP integrated spiking values in Super layers -- thresholded.
+ Bursts BurstParams `viewif:"LayType=SuperLayer" view:"inline"`
- // [view: inline] [viewif: LayType=[CTLayer,PTPredLayer,PTNotMaintLayer,BLALayer]] params for the CT corticothalamic layer and PTPred layer that generates predictions over the Pulvinar using context -- uses the CtxtGe excitatory input plus stronger NMDA channels to maintain context trace
- CT CTParams `viewif:"LayType=[CTLayer,PTPredLayer,PTNotMaintLayer,BLALayer]" view:"inline" desc:"params for the CT corticothalamic layer and PTPred layer that generates predictions over the Pulvinar using context -- uses the CtxtGe excitatory input plus stronger NMDA channels to maintain context trace"`
+ // params for the CT corticothalamic layer and PTPred layer that generates predictions over the Pulvinar using context -- uses the CtxtGe excitatory input plus stronger NMDA channels to maintain context trace
+ CT CTParams `viewif:"LayType=[CTLayer,PTPredLayer,PTNotMaintLayer,BLALayer]" view:"inline"`
- // [view: inline] [viewif: LayType=PulvinarLayer] provides parameters for how the plus-phase (outcome) state of Pulvinar thalamic relay cell neurons is computed from the corresponding driver neuron Burst activation (or CaSpkP if not Super)
- Pulv PulvParams `viewif:"LayType=PulvinarLayer" view:"inline" desc:"provides parameters for how the plus-phase (outcome) state of Pulvinar thalamic relay cell neurons is computed from the corresponding driver neuron Burst activation (or CaSpkP if not Super)"`
+ // provides parameters for how the plus-phase (outcome) state of Pulvinar thalamic relay cell neurons is computed from the corresponding driver neuron Burst activation (or CaSpkP if not Super)
+ Pulv PulvParams `viewif:"LayType=PulvinarLayer" view:"inline"`
- // [view: inline] [viewif: LayType=MatrixLayer] parameters for BG Striatum Matrix MSN layers, which are the main Go / NoGo gating units in BG.
- Matrix MatrixParams `viewif:"LayType=MatrixLayer" view:"inline" desc:"parameters for BG Striatum Matrix MSN layers, which are the main Go / NoGo gating units in BG."`
+ // parameters for BG Striatum Matrix MSN layers, which are the main Go / NoGo gating units in BG.
+ Matrix MatrixParams `viewif:"LayType=MatrixLayer" view:"inline"`
- // [view: inline] [viewif: LayType=GPLayer] type of GP Layer.
- GP GPParams `viewif:"LayType=GPLayer" view:"inline" desc:"type of GP Layer."`
+ // type of GP Layer.
+ GP GPParams `viewif:"LayType=GPLayer" view:"inline"`
- // [view: inline] [viewif: LayType=VSPatchLayer] parameters for VSPatch learning
- VSPatch VSPatchParams `viewif:"LayType=VSPatchLayer" view:"inline" desc:"parameters for VSPatch learning"`
+ // parameters for VSPatch learning
+ VSPatch VSPatchParams `viewif:"LayType=VSPatchLayer" view:"inline"`
- // [view: inline] [viewif: LayType=LDTLayer] parameterizes laterodorsal tegmentum ACh salience neuromodulatory signal, driven by superior colliculus stimulus novelty, US input / absence, and OFC / ACC inhibition
- LDT LDTParams `viewif:"LayType=LDTLayer" view:"inline" desc:"parameterizes laterodorsal tegmentum ACh salience neuromodulatory signal, driven by superior colliculus stimulus novelty, US input / absence, and OFC / ACC inhibition"`
+ // parameterizes laterodorsal tegmentum ACh salience neuromodulatory signal, driven by superior colliculus stimulus novelty, US input / absence, and OFC / ACC inhibition
+ LDT LDTParams `viewif:"LayType=LDTLayer" view:"inline"`
- // [view: inline] [viewif: LayType=VTALayer] parameterizes computing overall VTA DA based on LHb PVDA (primary value -- at US time, computed at start of each trial and stored in LHbPVDA global value) and Amygdala (CeM) CS / learned value (LV) activations, which update every cycle.
- VTA VTAParams `viewif:"LayType=VTALayer" view:"inline" desc:"parameterizes computing overall VTA DA based on LHb PVDA (primary value -- at US time, computed at start of each trial and stored in LHbPVDA global value) and Amygdala (CeM) CS / learned value (LV) activations, which update every cycle."`
+ // parameterizes computing overall VTA DA based on LHb PVDA (primary value -- at US time, computed at start of each trial and stored in LHbPVDA global value) and Amygdala (CeM) CS / learned value (LV) activations, which update every cycle.
+ VTA VTAParams `viewif:"LayType=VTALayer" view:"inline"`
- // [view: inline] [viewif: LayType=RWPredLayer] parameterizes reward prediction for a simple Rescorla-Wagner learning dynamic (i.e., PV learning in the PVLV framework).
- RWPred RWPredParams `viewif:"LayType=RWPredLayer" view:"inline" desc:"parameterizes reward prediction for a simple Rescorla-Wagner learning dynamic (i.e., PV learning in the PVLV framework)."`
+ // parameterizes reward prediction for a simple Rescorla-Wagner learning dynamic (i.e., PV learning in the PVLV framework).
+ RWPred RWPredParams `viewif:"LayType=RWPredLayer" view:"inline"`
- // [view: inline] [viewif: LayType=RWDaLayer] parameterizes reward prediction dopamine for a simple Rescorla-Wagner learning dynamic (i.e., PV learning in the PVLV framework).
- RWDa RWDaParams `viewif:"LayType=RWDaLayer" view:"inline" desc:"parameterizes reward prediction dopamine for a simple Rescorla-Wagner learning dynamic (i.e., PV learning in the PVLV framework)."`
+ // parameterizes reward prediction dopamine for a simple Rescorla-Wagner learning dynamic (i.e., PV learning in the PVLV framework).
+ RWDa RWDaParams `viewif:"LayType=RWDaLayer" view:"inline"`
- // [view: inline] [viewif: LayType=TDIntegLayer] parameterizes TD reward integration layer
- TDInteg TDIntegParams `viewif:"LayType=TDIntegLayer" view:"inline" desc:"parameterizes TD reward integration layer"`
+ // parameterizes TD reward integration layer
+ TDInteg TDIntegParams `viewif:"LayType=TDIntegLayer" view:"inline"`
- // [view: inline] [viewif: LayType=TDDaLayer] parameterizes dopamine (DA) signal as the temporal difference (TD) between the TDIntegLayer activations in the minus and plus phase.
- TDDa TDDaParams `viewif:"LayType=TDDaLayer" view:"inline" desc:"parameterizes dopamine (DA) signal as the temporal difference (TD) between the TDIntegLayer activations in the minus and plus phase."`
+ // parameterizes dopamine (DA) signal as the temporal difference (TD) between the TDIntegLayer activations in the minus and plus phase.
+ TDDa TDDaParams `viewif:"LayType=TDDaLayer" view:"inline"`
// recv and send projection array access info
- Idxs LayerIdxs `desc:"recv and send projection array access info"`
+ Idxs LayerIdxs
}
func (ly *LayerParams) Update() {
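
Editor note: LayerIdxs above is built from start offsets, counts, and a MaxData data-parallel dimension that address one flat, network-wide array. The sketch below illustrates that start-offset/stride idea with hypothetical names; the actual striding in axon is defined by its accessor functions (e.g., NrnV), not by this formula.

package main

import "fmt"

// layerIdxs mimics the start-offset/count fields of LayerIdxs; the flat
// addressing here is illustrative only.
type layerIdxs struct {
	NeurSt  uint32 // start of this layer's neurons in the global array
	NeurN   uint32 // number of neurons in the layer
	MaxData uint32 // number of data-parallel copies per neuron
}

// flat returns a flat index for neuron ni and data-parallel index di,
// assuming neuron-major storage with MaxData copies per neuron.
func (lx layerIdxs) flat(ni, di uint32) uint32 {
	return (lx.NeurSt+ni)*lx.MaxData + di
}

func main() {
	lx := layerIdxs{NeurSt: 100, NeurN: 20, MaxData: 4}
	global := make([]float32, (lx.NeurSt+lx.NeurN)*lx.MaxData)
	global[lx.flat(3, 1)] = 0.5 // neuron 3 of this layer, data copy 1
	fmt.Println(global[lx.flat(3, 1)])
}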
diff --git a/axon/learn.go b/axon/learn.go
index f228f6a34..a434ef299 100644
--- a/axon/learn.go
+++ b/axon/learn.go
@@ -7,10 +7,10 @@ package axon
import (
"github.com/emer/axon/chans"
"github.com/emer/axon/kinase"
- "github.com/emer/emergent/erand"
- "github.com/emer/etable/minmax"
+ "github.com/emer/emergent/v2/erand"
"github.com/goki/gosl/slbool"
- "github.com/goki/mat32"
+ "goki.dev/etable/v2/minmax"
+ "goki.dev/mat32/v2"
)
///////////////////////////////////////////////////////////////////////
@@ -30,29 +30,29 @@ import (
// CaM (as in calmodulin), CaP (ltP, CaMKII, plus phase), CaD (ltD, DAPK1, minus phase).
type CaLrnParams struct {
- // [def: 80] denomenator used for normalizing CaLrn, so the max is roughly 1 - 1.5 or so, which works best in terms of previous standard learning rules, and overall learning performance
- Norm float32 `def:"80" desc:"denomenator used for normalizing CaLrn, so the max is roughly 1 - 1.5 or so, which works best in terms of previous standard learning rules, and overall learning performance"`
+ // denominator used for normalizing CaLrn, so the max is roughly 1 - 1.5 or so, which works best in terms of previous standard learning rules, and overall learning performance
+ Norm float32 `def:"80"`
- // [def: true] use spikes to generate VGCC instead of actual VGCC current -- see SpkVGCCa for calcium contribution from each spike
- SpkVGCC slbool.Bool `def:"true" desc:"use spikes to generate VGCC instead of actual VGCC current -- see SpkVGCCa for calcium contribution from each spike"`
+ // use spikes to generate VGCC instead of actual VGCC current -- see SpkVGCCa for calcium contribution from each spike
+ SpkVGCC slbool.Bool `def:"true"`
- // [def: 35] multiplier on spike for computing Ca contribution to CaLrn in SpkVGCC mode
- SpkVgccCa float32 `def:"35" desc:"multiplier on spike for computing Ca contribution to CaLrn in SpkVGCC mode"`
+ // multiplier on spike for computing Ca contribution to CaLrn in SpkVGCC mode
+ SpkVgccCa float32 `def:"35"`
- // [def: 10] time constant of decay for VgccCa calcium -- it is highly transient around spikes, so decay and diffusion factors are more important than for long-lasting NMDA factor. VgccCa is integrated separately int VgccCaInt prior to adding into NMDA Ca in CaLrn
- VgccTau float32 `def:"10" desc:"time constant of decay for VgccCa calcium -- it is highly transient around spikes, so decay and diffusion factors are more important than for long-lasting NMDA factor. VgccCa is integrated separately int VgccCaInt prior to adding into NMDA Ca in CaLrn"`
+ // time constant of decay for VgccCa calcium -- it is highly transient around spikes, so decay and diffusion factors are more important than for long-lasting NMDA factor. VgccCa is integrated separately into VgccCaInt prior to adding into NMDA Ca in CaLrn
+ VgccTau float32 `def:"10"`
- // [view: inline] time constants for integrating CaLrn across M, P and D cascading levels
- Dt kinase.CaDtParams `view:"inline" desc:"time constants for integrating CaLrn across M, P and D cascading levels"`
+ // time constants for integrating CaLrn across M, P and D cascading levels
+ Dt kinase.CaDtParams `view:"inline"`
- // [def: 0.01,0.02,0.5] Threshold on CaSpkP CaSpkD value for updating synapse-level Ca values (SynCa) -- this is purely a performance optimization that excludes random infrequent spikes -- 0.05 works well on larger networks but not smaller, which require the .01 default.
- UpdtThr float32 `def:"0.01,0.02,0.5" desc:"Threshold on CaSpkP CaSpkD value for updating synapse-level Ca values (SynCa) -- this is purely a performance optimization that excludes random infrequent spikes -- 0.05 works well on larger networks but not smaller, which require the .01 default."`
+ // Threshold on CaSpkP CaSpkD value for updating synapse-level Ca values (SynCa) -- this is purely a performance optimization that excludes random infrequent spikes -- 0.05 works well on larger networks but not smaller, which require the .01 default.
+ UpdtThr float32 `def:"0.01,0.02,0.5"`
- // [view: -] rate = 1 / tau
- VgccDt float32 `view:"-" json:"-" xml:"-" inactive:"+" desc:"rate = 1 / tau"`
+ // rate = 1 / tau
+ VgccDt float32 `view:"-" json:"-" xml:"-" inactive:"+"`
- // [view: -] = 1 / Norm
- NormInv float32 `view:"-" json:"-" xml:"-" inactive:"+" desc:"= 1 / Norm"`
+ // = 1 / Norm
+ NormInv float32 `view:"-" json:"-" xml:"-" inactive:"+"`
pad int32
}
@@ -104,19 +104,19 @@ func (np *CaLrnParams) CaLrns(ctx *Context, ni, di uint32) {
// and RLRate as a proxy for the activation (spiking) based learning signal.
type CaSpkParams struct {
- // [def: 8,12] gain multiplier on spike for computing CaSpk: increasing this directly affects the magnitude of the trace values, learning rate in Target layers, and other factors that depend on CaSpk values: RLRate, UpdtThr. Prjn.KinaseCa.SpikeG provides an additional gain factor specific to the synapse-level trace factors, without affecting neuron-level CaSpk values. Larger networks require higher gain factors at the neuron level -- 12, vs 8 for smaller.
- SpikeG float32 `def:"8,12" desc:"gain multiplier on spike for computing CaSpk: increasing this directly affects the magnitude of the trace values, learning rate in Target layers, and other factors that depend on CaSpk values: RLRate, UpdtThr. Prjn.KinaseCa.SpikeG provides an additional gain factor specific to the synapse-level trace factors, without affecting neuron-level CaSpk values. Larger networks require higher gain factors at the neuron level -- 12, vs 8 for smaller."`
+ // gain multiplier on spike for computing CaSpk: increasing this directly affects the magnitude of the trace values, learning rate in Target layers, and other factors that depend on CaSpk values: RLRate, UpdtThr. Prjn.KinaseCa.SpikeG provides an additional gain factor specific to the synapse-level trace factors, without affecting neuron-level CaSpk values. Larger networks require higher gain factors at the neuron level -- 12, vs 8 for smaller.
+ SpikeG float32 `def:"8,12"`
- // [def: 30] [min: 1] time constant for integrating spike-driven calcium trace at sender and recv neurons, CaSyn, which then drives synapse-level integration of the joint pre * post synapse-level activity, in cycles (msec). Note: if this param is changed, then there will be a change in effective learning rate that can be compensated for by multiplying PrjnParams.Learn.KinaseCa.SpikeG by sqrt(30 / sqrt(SynTau)
- SynTau float32 `def:"30" min:"1" desc:"time constant for integrating spike-driven calcium trace at sender and recv neurons, CaSyn, which then drives synapse-level integration of the joint pre * post synapse-level activity, in cycles (msec). Note: if this param is changed, then there will be a change in effective learning rate that can be compensated for by multiplying PrjnParams.Learn.KinaseCa.SpikeG by sqrt(30 / sqrt(SynTau)"`
+ // time constant for integrating spike-driven calcium trace at sender and recv neurons, CaSyn, which then drives synapse-level integration of the joint pre * post synapse-level activity, in cycles (msec). Note: if this param is changed, then there will be a change in effective learning rate that can be compensated for by multiplying PrjnParams.Learn.KinaseCa.SpikeG by sqrt(30 / sqrt(SynTau))
+ SynTau float32 `def:"30" min:"1"`
- // [view: -] rate = 1 / tau
- SynDt float32 `view:"-" json:"-" xml:"-" inactive:"+" desc:"rate = 1 / tau"`
+ // rate = 1 / tau
+ SynDt float32 `view:"-" json:"-" xml:"-" inactive:"+"`
pad int32
- // [view: inline] time constants for integrating CaSpk across M, P and D cascading levels -- these are typically the same as in CaLrn and Prjn level for synaptic integration, except for the M factor.
- Dt kinase.CaDtParams `view:"inline" desc:"time constants for integrating CaSpk across M, P and D cascading levels -- these are typically the same as in CaLrn and Prjn level for synaptic integration, except for the M factor."`
+ // time constants for integrating CaSpk across M, P and D cascading levels -- these are typically the same as in CaLrn and Prjn level for synaptic integration, except for the M factor.
+ Dt kinase.CaDtParams `view:"inline"`
}
func (np *CaSpkParams) Defaults() {
@@ -149,30 +149,30 @@ func (np *CaSpkParams) CaFmSpike(ctx *Context, ni, di uint32) {
type TrgAvgActParams struct {
// whether to use target average activity mechanism to scale synaptic weights
- On slbool.Bool `desc:"whether to use target average activity mechanism to scale synaptic weights"`
+ On slbool.Bool
// if this is > 0, then each neuron's GiBase is initialized as this proportion of TrgRange.Max - TrgAvg -- gives neurons differences in intrinsic inhibition / leak as a starting bias
- GiBaseInit float32 `desc:"if this is > 0, then each neuron's GiBase is initialized as this proportion of TrgRange.Max - TrgAvg -- gives neurons differences in intrinsic inhibition / leak as a starting bias"`
+ GiBaseInit float32
- // [def: 0.02] [viewif: On] learning rate for adjustments to Trg value based on unit-level error signal. Population TrgAvg values are renormalized to fixed overall average in TrgRange. Generally, deviating from the default doesn't make much difference.
- ErrLRate float32 `viewif:"On" def:"0.02" desc:"learning rate for adjustments to Trg value based on unit-level error signal. Population TrgAvg values are renormalized to fixed overall average in TrgRange. Generally, deviating from the default doesn't make much difference."`
+ // learning rate for adjustments to Trg value based on unit-level error signal. Population TrgAvg values are renormalized to fixed overall average in TrgRange. Generally, deviating from the default doesn't make much difference.
+ ErrLRate float32 `viewif:"On" def:"0.02"`
- // [def: 0.005,0.0002] [viewif: On] rate parameter for how much to scale synaptic weights in proportion to the AvgDif between target and actual proportion activity -- this determines the effective strength of the constraint, and larger models may need more than the weaker default value.
- SynScaleRate float32 `viewif:"On" def:"0.005,0.0002" desc:"rate parameter for how much to scale synaptic weights in proportion to the AvgDif between target and actual proportion activity -- this determines the effective strength of the constraint, and larger models may need more than the weaker default value."`
+ // rate parameter for how much to scale synaptic weights in proportion to the AvgDif between target and actual proportion activity -- this determines the effective strength of the constraint, and larger models may need more than the weaker default value.
+ SynScaleRate float32 `viewif:"On" def:"0.005,0.0002"`
- // [def: 0,1] [viewif: On] amount of mean trg change to subtract -- 1 = full zero sum. 1 works best in general -- but in some cases it may be better to start with 0 and then increase using network SetSubMean method at a later point.
- SubMean float32 `viewif:"On" def:"0,1" desc:"amount of mean trg change to subtract -- 1 = full zero sum. 1 works best in general -- but in some cases it may be better to start with 0 and then increase using network SetSubMean method at a later point."`
+ // amount of mean trg change to subtract -- 1 = full zero sum. 1 works best in general -- but in some cases it may be better to start with 0 and then increase using network SetSubMean method at a later point.
+ SubMean float32 `viewif:"On" def:"0,1"`
- // [def: true] [viewif: On] permute the order of TrgAvg values within layer -- otherwise they are just assigned in order from highest to lowest for easy visualization -- generally must be true if any topographic weights are being used
- Permute slbool.Bool `viewif:"On" def:"true" desc:"permute the order of TrgAvg values within layer -- otherwise they are just assigned in order from highest to lowest for easy visualization -- generally must be true if any topographic weights are being used"`
+ // permute the order of TrgAvg values within layer -- otherwise they are just assigned in order from highest to lowest for easy visualization -- generally must be true if any topographic weights are being used
+ Permute slbool.Bool `viewif:"On" def:"true"`
- // [viewif: On] use pool-level target values if pool-level inhibition and 4D pooled layers are present -- if pool sizes are relatively small, then may not be useful to distribute targets just within pool
- Pool slbool.Bool `viewif:"On" desc:"use pool-level target values if pool-level inhibition and 4D pooled layers are present -- if pool sizes are relatively small, then may not be useful to distribute targets just within pool"`
+ // use pool-level target values if pool-level inhibition and 4D pooled layers are present -- if pool sizes are relatively small, then may not be useful to distribute targets just within pool
+ Pool slbool.Bool `viewif:"On"`
pad int32
- // [def: {'Min':0.5,'Max':2}] [viewif: On] range of target normalized average activations -- individual neurons are assigned values within this range to TrgAvg, and clamped within this range.
- TrgRange minmax.F32 `viewif:"On" def:"{'Min':0.5,'Max':2}" desc:"range of target normalized average activations -- individual neurons are assigned values within this range to TrgAvg, and clamped within this range."`
+ // range of target normalized average activations -- individual neurons are assigned values within this range to TrgAvg, and clamped within this range.
+ TrgRange minmax.F32 `viewif:"On" def:"{'Min':0.5,'Max':2}"`
}
func (ta *TrgAvgActParams) Update() {
@@ -197,23 +197,23 @@ func (ta *TrgAvgActParams) Defaults() {
// activity levels, and based on the phase-wise differences in activity (Diff).
type RLRateParams struct {
- // [def: true] use learning rate modulation
- On slbool.Bool `def:"true" desc:"use learning rate modulation"`
+ // use learning rate modulation
+ On slbool.Bool `def:"true"`
- // [def: 0.05,1] [viewif: On] minimum learning rate multiplier for sigmoidal act (1-act) factor -- prevents lrate from going too low for extreme values. Set to 1 to disable Sigmoid derivative factor, which is default for Target layers.
- SigmoidMin float32 `viewif:"On" def:"0.05,1" desc:"minimum learning rate multiplier for sigmoidal act (1-act) factor -- prevents lrate from going too low for extreme values. Set to 1 to disable Sigmoid derivative factor, which is default for Target layers."`
+ // minimum learning rate multiplier for sigmoidal act (1-act) factor -- prevents lrate from going too low for extreme values. Set to 1 to disable Sigmoid derivative factor, which is default for Target layers.
+ SigmoidMin float32 `viewif:"On" def:"0.05,1"`
- // [viewif: On] modulate learning rate as a function of plus - minus differences
- Diff slbool.Bool `viewif:"On" desc:"modulate learning rate as a function of plus - minus differences"`
+ // modulate learning rate as a function of plus - minus differences
+ Diff slbool.Bool `viewif:"On"`
- // [def: 0.1] [viewif: On&&Diff] threshold on Max(CaSpkP, CaSpkD) below which Min lrate applies -- must be > 0 to prevent div by zero
- SpkThr float32 `viewif:"On&&Diff" def:"0.1" desc:"threshold on Max(CaSpkP, CaSpkD) below which Min lrate applies -- must be > 0 to prevent div by zero"`
+ // threshold on Max(CaSpkP, CaSpkD) below which Min lrate applies -- must be > 0 to prevent div by zero
+ SpkThr float32 `viewif:"On&&Diff" def:"0.1"`
- // [def: 0.02] [viewif: On&&Diff] threshold on recv neuron error delta, i.e., |CaSpkP - CaSpkD| below which lrate is at Min value
- DiffThr float32 `viewif:"On&&Diff" def:"0.02" desc:"threshold on recv neuron error delta, i.e., |CaSpkP - CaSpkD| below which lrate is at Min value"`
+ // threshold on recv neuron error delta, i.e., |CaSpkP - CaSpkD| below which lrate is at Min value
+ DiffThr float32 `viewif:"On&&Diff" def:"0.02"`
- // [def: 0.001] [viewif: On&&Diff] for Diff component, minimum learning rate value when below ActDiffThr
- Min float32 `viewif:"On&&Diff" def:"0.001" desc:"for Diff component, minimum learning rate value when below ActDiffThr"`
+ // for Diff component, minimum learning rate value when below DiffThr
+ Min float32 `viewif:"On&&Diff" def:"0.001"`
pad, pad1 int32
}
@@ -270,23 +270,23 @@ func (rl *RLRateParams) RLRateDiff(scap, scad float32) float32 {
// This is mainly the running average activations that drive learning
type LearnNeurParams struct {
- // [view: inline] parameterizes the neuron-level calcium signals driving learning: CaLrn = NMDA + VGCC Ca sources, where VGCC can be simulated from spiking or use the more complex and dynamic VGCC channel directly. CaLrn is then integrated in a cascading manner at multiple time scales: CaM (as in calmodulin), CaP (ltP, CaMKII, plus phase), CaD (ltD, DAPK1, minus phase).
- CaLearn CaLrnParams `view:"inline" desc:"parameterizes the neuron-level calcium signals driving learning: CaLrn = NMDA + VGCC Ca sources, where VGCC can be simulated from spiking or use the more complex and dynamic VGCC channel directly. CaLrn is then integrated in a cascading manner at multiple time scales: CaM (as in calmodulin), CaP (ltP, CaMKII, plus phase), CaD (ltD, DAPK1, minus phase)."`
+ // parameterizes the neuron-level calcium signals driving learning: CaLrn = NMDA + VGCC Ca sources, where VGCC can be simulated from spiking or use the more complex and dynamic VGCC channel directly. CaLrn is then integrated in a cascading manner at multiple time scales: CaM (as in calmodulin), CaP (ltP, CaMKII, plus phase), CaD (ltD, DAPK1, minus phase).
+ CaLearn CaLrnParams `view:"inline"`
- // [view: inline] parameterizes the neuron-level spike-driven calcium signals, starting with CaSyn that is integrated at the neuron level, and drives synapse-level, pre * post Ca integration, which provides the Tr trace that multiplies error signals, and drives learning directly for Target layers. CaSpk* values are integrated separately at the Neuron level and used for UpdtThr and RLRate as a proxy for the activation (spiking) based learning signal.
- CaSpk CaSpkParams `view:"inline" desc:"parameterizes the neuron-level spike-driven calcium signals, starting with CaSyn that is integrated at the neuron level, and drives synapse-level, pre * post Ca integration, which provides the Tr trace that multiplies error signals, and drives learning directly for Target layers. CaSpk* values are integrated separately at the Neuron level and used for UpdtThr and RLRate as a proxy for the activation (spiking) based learning signal."`
+ // parameterizes the neuron-level spike-driven calcium signals, starting with CaSyn that is integrated at the neuron level, and drives synapse-level, pre * post Ca integration, which provides the Tr trace that multiplies error signals, and drives learning directly for Target layers. CaSpk* values are integrated separately at the Neuron level and used for UpdtThr and RLRate as a proxy for the activation (spiking) based learning signal.
+ CaSpk CaSpkParams `view:"inline"`
- // [view: inline] NMDA channel parameters used for learning, vs. the ones driving activation -- allows exploration of learning parameters independent of their effects on active maintenance contributions of NMDA, and may be supported by different receptor subtypes
- LrnNMDA chans.NMDAParams `view:"inline" desc:"NMDA channel parameters used for learning, vs. the ones driving activation -- allows exploration of learning parameters independent of their effects on active maintenance contributions of NMDA, and may be supported by different receptor subtypes"`
+ // NMDA channel parameters used for learning, vs. the ones driving activation -- allows exploration of learning parameters independent of their effects on active maintenance contributions of NMDA, and may be supported by different receptor subtypes
+ LrnNMDA chans.NMDAParams `view:"inline"`
- // [view: inline] synaptic scaling parameters for regulating overall average activity compared to neuron's own target level
- TrgAvgAct TrgAvgActParams `view:"inline" desc:"synaptic scaling parameters for regulating overall average activity compared to neuron's own target level"`
+ // synaptic scaling parameters for regulating overall average activity compared to neuron's own target level
+ TrgAvgAct TrgAvgActParams `view:"inline"`
- // [view: inline] recv neuron learning rate modulation params -- an additional error-based modulation of learning for receiver side: RLRate = |CaSpkP - CaSpkD| / Max(CaSpkP, CaSpkD)
- RLRate RLRateParams `view:"inline" desc:"recv neuron learning rate modulation params -- an additional error-based modulation of learning for receiver side: RLRate = |CaSpkP - CaSpkD| / Max(CaSpkP, CaSpkD)"`
+ // recv neuron learning rate modulation params -- an additional error-based modulation of learning for receiver side: RLRate = |CaSpkP - CaSpkD| / Max(CaSpkP, CaSpkD)
+ RLRate RLRateParams `view:"inline"`
- // [view: inline] neuromodulation effects on learning rate and activity, as a function of layer-level DA and ACh values, which are updated from global Context values, and computed from reinforcement learning algorithms
- NeuroMod NeuroModParams `view:"inline" desc:"neuromodulation effects on learning rate and activity, as a function of layer-level DA and ACh values, which are updated from global Context values, and computed from reinforcement learning algorithms"`
+ // neuromodulation effects on learning rate and activity, as a function of layer-level DA and ACh values, which are updated from global Context values, and computed from reinforcement learning algorithms
+ NeuroMod NeuroModParams `view:"inline"`
}
func (ln *LearnNeurParams) Update() {
@@ -419,17 +419,17 @@ func SigInvFun61(w float32) float32 {
// SWtInitParams for initial SWt values
type SWtInitParams struct {
- // [def: 0,1,0.5] [min: 0] [max: 1] how much of the initial random weights are captured in the SWt values -- rest goes into the LWt values. 1 gives the strongest initial biasing effect, for larger models that need more structural support. 0.5 should work for most models where stronger constraints are not needed.
- SPct float32 `min:"0" max:"1" def:"0,1,0.5" desc:"how much of the initial random weights are captured in the SWt values -- rest goes into the LWt values. 1 gives the strongest initial biasing effect, for larger models that need more structural support. 0.5 should work for most models where stronger constraints are not needed."`
+ // how much of the initial random weights are captured in the SWt values -- rest goes into the LWt values. 1 gives the strongest initial biasing effect, for larger models that need more structural support. 0.5 should work for most models where stronger constraints are not needed.
+ SPct float32 `min:"0" max:"1" def:"0,1,0.5"`
- // [def: 0.5,0.4] target mean weight values across receiving neuron's projection -- the mean SWt values are constrained to remain at this value. some projections may benefit from lower mean of .4
- Mean float32 `def:"0.5,0.4" desc:"target mean weight values across receiving neuron's projection -- the mean SWt values are constrained to remain at this value. some projections may benefit from lower mean of .4"`
+ // target mean weight values across receiving neuron's projection -- the mean SWt values are constrained to remain at this value. some projections may benefit from lower mean of .4
+ Mean float32 `def:"0.5,0.4"`
- // [def: 0.25] initial variance in weight values, prior to constraints.
- Var float32 `def:"0.25" desc:"initial variance in weight values, prior to constraints."`
+ // initial variance in weight values, prior to constraints.
+ Var float32 `def:"0.25"`
- // [def: true] symmetrize the initial weight values with those in reciprocal projection -- typically true for bidirectional excitatory connections
- Sym slbool.Bool `def:"true" desc:"symmetrize the initial weight values with those in reciprocal projection -- typically true for bidirectional excitatory connections"`
+ // symmetrize the initial weight values with those in reciprocal projection -- typically true for bidirectional excitatory connections
+ Sym slbool.Bool `def:"true"`
}
func (sp *SWtInitParams) Defaults() {
@@ -446,16 +446,16 @@ func (sp *SWtInitParams) Update() {
type SWtAdaptParams struct {
// if true, adaptation is active -- if false, SWt values are not updated, in which case it is generally good to have Init.SPct=0 too.
- On slbool.Bool `desc:"if true, adaptation is active -- if false, SWt values are not updated, in which case it is generally good to have Init.SPct=0 too."`
+ On slbool.Bool
- // [def: 0.1,0.01,0.001,0.0002] [viewif: On] learning rate multiplier on the accumulated DWt values (which already have fast LRate applied) to incorporate into SWt during slow outer loop updating -- lower values impose stronger constraints, for larger networks that need more structural support, e.g., 0.001 is better after 1,000 epochs in large models. 0.1 is fine for smaller models.
- LRate float32 `viewif:"On" def:"0.1,0.01,0.001,0.0002" desc:"learning rate multiplier on the accumulated DWt values (which already have fast LRate applied) to incorporate into SWt during slow outer loop updating -- lower values impose stronger constraints, for larger networks that need more structural support, e.g., 0.001 is better after 1,000 epochs in large models. 0.1 is fine for smaller models."`
+ // learning rate multiplier on the accumulated DWt values (which already have fast LRate applied) to incorporate into SWt during slow outer loop updating -- lower values impose stronger constraints, for larger networks that need more structural support, e.g., 0.001 is better after 1,000 epochs in large models. 0.1 is fine for smaller models.
+ LRate float32 `viewif:"On" def:"0.1,0.01,0.001,0.0002"`
- // [def: 1] [viewif: On] amount of mean to subtract from SWt delta when updating -- generally best to set to 1
- SubMean float32 `viewif:"On" def:"1" desc:"amount of mean to subtract from SWt delta when updating -- generally best to set to 1"`
+ // amount of mean to subtract from SWt delta when updating -- generally best to set to 1
+ SubMean float32 `viewif:"On" def:"1"`
- // [def: 6] [viewif: On] gain of sigmoidal constrast enhancement function used to transform learned, linear LWt values into Wt values
- SigGain float32 `viewif:"On" def:"6" desc:"gain of sigmoidal constrast enhancement function used to transform learned, linear LWt values into Wt values"`
+ // gain of sigmoidal contrast enhancement function used to transform learned, linear LWt values into Wt values
+ SigGain float32 `viewif:"On" def:"6"`
}
func (sp *SWtAdaptParams) Defaults() {
@@ -492,14 +492,14 @@ func (sp *SWtInitParams) RndVar(rnd erand.Rand) float32 {
// more dynamic and supported by the regular learned weights.
type SWtParams struct {
- // [view: inline] initialization of SWt values
- Init SWtInitParams `view:"inline" desc:"initialization of SWt values"`
+ // initialization of SWt values
+ Init SWtInitParams `view:"inline"`
- // [view: inline] adaptation of SWt values in response to LWt learning
- Adapt SWtAdaptParams `view:"inline" desc:"adaptation of SWt values in response to LWt learning"`
+ // adaptation of SWt values in response to LWt learning
+ Adapt SWtAdaptParams `view:"inline"`
- // [def: {'Min':0.2,'Max':0.8}] [view: inline] range limits for SWt values
- Limit minmax.F32 `def:"{'Min':0.2,'Max':0.8}" view:"inline" desc:"range limits for SWt values"`
+ // range limits for SWt values
+ Limit minmax.F32 `def:"{'Min':0.2,'Max':0.8}" view:"inline"`
}
func (sp *SWtParams) Defaults() {
@@ -633,17 +633,17 @@ func (sp *SWtParams) InitWtsSyn(ctx *Context, syni uint32, rnd erand.Rand, mean,
// LRateParams manages learning rate parameters
type LRateParams struct {
- // [def: 0.04,0.1,0.2] base learning rate for this projection -- can be modulated by other factors below -- for larger networks, use slower rates such as 0.04, smaller networks can use faster 0.2.
- Base float32 `def:"0.04,0.1,0.2" desc:"base learning rate for this projection -- can be modulated by other factors below -- for larger networks, use slower rates such as 0.04, smaller networks can use faster 0.2."`
+ // base learning rate for this projection -- can be modulated by other factors below -- for larger networks, use slower rates such as 0.04, smaller networks can use faster 0.2.
+ Base float32 `def:"0.04,0.1,0.2"`
// scheduled learning rate multiplier, simulating reduction in plasticity over aging
- Sched float32 `desc:"scheduled learning rate multiplier, simulating reduction in plasticity over aging"`
+ Sched float32
// dynamic learning rate modulation due to neuromodulatory or other such factors
- Mod float32 `desc:"dynamic learning rate modulation due to neuromodulatory or other such factors"`
+ Mod float32
// effective actual learning rate multiplier used in computing DWt: Eff = Mod * Sched * Base
- Eff float32 `inactive:"+" desc:"effective actual learning rate multiplier used in computing DWt: Eff = eMod * Sched * Base"`
+ Eff float32 `inactive:"+"`
}
func (ls *LRateParams) Defaults() {
@@ -671,17 +671,17 @@ func (ls *LRateParams) Init() {
// TraceParams manages temporal trace learning parameters
type TraceParams struct {
- // [def: 1,2,4] time constant for integrating trace over theta cycle timescales -- governs the decay rate of syanptic trace
- Tau float32 `def:"1,2,4" desc:"time constant for integrating trace over theta cycle timescales -- governs the decay rate of syanptic trace"`
+ // time constant for integrating trace over theta cycle timescales -- governs the decay rate of synaptic trace
+ Tau float32 `def:"1,2,4"`
- // [def: 0,1] amount of the mean dWt to subtract, producing a zero-sum effect -- 1.0 = full zero-sum dWt -- only on non-zero DWts. typically set to 0 for standard trace learning projections, although some require it for stability over the long haul. can use SetSubMean to set to 1 after significant early learning has occurred with 0. Some special prjn types (e.g., Hebb) benefit from SubMean = 1 always
- SubMean float32 `def:"0,1" desc:"amount of the mean dWt to subtract, producing a zero-sum effect -- 1.0 = full zero-sum dWt -- only on non-zero DWts. typically set to 0 for standard trace learning projections, although some require it for stability over the long haul. can use SetSubMean to set to 1 after significant early learning has occurred with 0. Some special prjn types (e.g., Hebb) benefit from SubMean = 1 always"`
+ // amount of the mean dWt to subtract, producing a zero-sum effect -- 1.0 = full zero-sum dWt -- only on non-zero DWts. typically set to 0 for standard trace learning projections, although some require it for stability over the long haul. can use SetSubMean to set to 1 after significant early learning has occurred with 0. Some special prjn types (e.g., Hebb) benefit from SubMean = 1 always
+ SubMean float32 `def:"0,1"`
// threshold for learning, depending on different algorithms -- in Matrix and VSPatch it applies to normalized GeIntNorm value -- setting this relatively high encourages sparser representations
- LearnThr float32 `desc:"threshold for learning, depending on different algorithms -- in Matrix and VSPatch it applies to normalized GeIntNorm value -- setting this relatively high encourages sparser representations"`
+ LearnThr float32
- // [view: -] rate = 1 / tau
- Dt float32 `view:"-" json:"-" xml:"-" inactive:"+" desc:"rate = 1 / tau"`
+ // rate = 1 / tau
+ Dt float32 `view:"-" json:"-" xml:"-" inactive:"+"`
}
func (tp *TraceParams) Defaults() {
@@ -712,15 +712,15 @@ func (tp *TraceParams) TrFmCa(tr float32, ca float32) float32 {
type LRateMod struct {
// toggle use of this modulation factor
- On slbool.Bool `desc:"toggle use of this modulation factor"`
+ On slbool.Bool
- // [viewif: On] [min: 0] [max: 1] baseline learning rate -- what you get for correct cases
- Base float32 `viewif:"On" min:"0" max:"1" desc:"baseline learning rate -- what you get for correct cases"`
+ // baseline learning rate -- what you get for correct cases
+ Base float32 `viewif:"On" min:"0" max:"1"`
pad, pad1 int32
- // [viewif: On] defines the range over which modulation occurs for the modulator factor -- Min and below get the Base level of learning rate modulation, Max and above get a modulation of 1
- Range minmax.F32 `viewif:"On" desc:"defines the range over which modulation occurs for the modulator factor -- Min and below get the Base level of learning rate modulation, Max and above get a modulation of 1"`
+ // defines the range over which modulation occurs for the modulator factor -- Min and below get the Base level of learning rate modulation, Max and above get a modulation of 1
+ Range minmax.F32 `viewif:"On"`
}
func (lr *LRateMod) Defaults() {
@@ -770,18 +770,18 @@ func (lr *LRateMod) LRateMod(net *Network, fact float32) float32 {
type LearnSynParams struct {
// enable learning for this projection
- Learn slbool.Bool `desc:"enable learning for this projection"`
+ Learn slbool.Bool
pad, pad1, pad2 int32
- // [viewif: Learn] learning rate parameters, supporting two levels of modulation on top of base learning rate.
- LRate LRateParams `viewif:"Learn" desc:"learning rate parameters, supporting two levels of modulation on top of base learning rate."`
+ // learning rate parameters, supporting two levels of modulation on top of base learning rate.
+ LRate LRateParams `viewif:"Learn"`
- // [viewif: Learn] trace-based learning parameters
- Trace TraceParams `viewif:"Learn" desc:"trace-based learning parameters"`
+ // trace-based learning parameters
+ Trace TraceParams `viewif:"Learn"`
- // [view: inline] [viewif: Learn] kinase calcium Ca integration parameters
- KinaseCa kinase.CaParams `viewif:"Learn" view:"inline" desc:"kinase calcium Ca integration parameters"`
+ // kinase calcium Ca integration parameters
+ KinaseCa kinase.CaParams `viewif:"Learn" view:"inline"`
}
func (ls *LearnSynParams) Update() {
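
Editor note: several fields in this file cache a rate constant ("rate = 1 / tau") and an effective learning rate that is the product of Base, Sched, and Mod, per the comments above. The sketch below shows how such Update() methods typically derive those cached values; it is an assumption-based illustration, and the real axon Defaults()/Update() methods may differ in detail.

package main

import "fmt"

// traceParams and lrateParams sketch the cached-rate pattern described in the
// comments: Dt = 1 / Tau, and Eff = Mod * Sched * Base.
type traceParams struct {
	Tau float32 // integration time constant, in theta cycles
	Dt  float32 // cached rate = 1 / Tau
}

func (tp *traceParams) update() { tp.Dt = 1 / tp.Tau }

type lrateParams struct {
	Base  float32 // base learning rate for the projection
	Sched float32 // scheduled multiplier (e.g., aging-related reduction)
	Mod   float32 // dynamic modulation (e.g., neuromodulatory)
	Eff   float32 // effective rate used in computing DWt
}

func (ls *lrateParams) update() { ls.Eff = ls.Mod * ls.Sched * ls.Base }

func main() {
	tp := traceParams{Tau: 2}
	tp.update()
	ls := lrateParams{Base: 0.04, Sched: 1, Mod: 0.5}
	ls.update()
	fmt.Println(tp.Dt, ls.Eff) // 0.5 0.02
}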
diff --git a/axon/logging.go b/axon/logging.go
index 8dd13aaff..cff928567 100644
--- a/axon/logging.go
+++ b/axon/logging.go
@@ -7,19 +7,19 @@ package axon
import (
"strconv"
- "github.com/emer/emergent/egui"
- "github.com/emer/emergent/elog"
- "github.com/emer/emergent/estats"
- "github.com/emer/emergent/etime"
- "github.com/emer/etable/agg"
- "github.com/emer/etable/eplot"
- "github.com/emer/etable/etable"
- "github.com/emer/etable/etensor"
- "github.com/emer/etable/metric"
- "github.com/emer/etable/minmax"
- "github.com/emer/etable/norm"
- "github.com/emer/etable/split"
- "github.com/emer/etable/tsragg"
+ "github.com/emer/emergent/v2/egui"
+ "github.com/emer/emergent/v2/elog"
+ "github.com/emer/emergent/v2/estats"
+ "github.com/emer/emergent/v2/etime"
+ "goki.dev/etable/v2/agg"
+ "goki.dev/etable/v2/eplot"
+ "goki.dev/etable/v2/etable"
+ "goki.dev/etable/v2/etensor"
+ "goki.dev/etable/v2/metric"
+ "goki.dev/etable/v2/minmax"
+ "goki.dev/etable/v2/norm"
+ "goki.dev/etable/v2/split"
+ "goki.dev/etable/v2/tsragg"
)
// LogTestErrors records all errors made across TestTrials, at Test Epoch scope
@@ -627,11 +627,13 @@ func LayerActsLogRecReset(lg *elog.Logs) {
// LayerActsLogConfigGUI configures GUI for LayerActsLog Plot and LayerActs Avg Plot
func LayerActsLogConfigGUI(lg *elog.Logs, gui *egui.GUI) {
- plt := gui.TabView.AddNewTab(eplot.KiT_Plot2D, "LayerActs Plot").(*eplot.Plot2D)
+ pt := gui.Tabs.NewTab("LayerActs Plot")
+ plt := eplot.NewPlot2D(pt)
gui.Plots["LayerActs"] = plt
plt.SetTable(lg.MiscTables["LayerActs"])
- plt = gui.TabView.AddNewTab(eplot.KiT_Plot2D, "LayerActs Avg Plot").(*eplot.Plot2D)
+ pt = gui.Tabs.NewTab("LayerActs Avg Plot")
+ plt = eplot.NewPlot2D(pt)
gui.Plots["LayerActsAvg"] = plt
plt.SetTable(lg.MiscTables["LayerActsAvg"])
}
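
Editor note: the logging.go hunk above migrates plot creation from the old TabView.AddNewTab/KiT_Plot2D cast to Tabs.NewTab plus eplot.NewPlot2D. The helper below is a hypothetical consolidation of that repeated pattern, assuming only the v2 egui/elog/eplot calls already shown in the hunk; addPlotTab itself is not part of this codebase.

package axonexample

import (
	"github.com/emer/emergent/v2/egui"
	"github.com/emer/emergent/v2/elog"
	"goki.dev/etable/v2/eplot"
)

// addPlotTab wraps the tab-plus-plot pattern used in LayerActsLogConfigGUI:
// create a named tab, attach a Plot2D, register it, and bind its table.
func addPlotTab(gui *egui.GUI, lg *elog.Logs, tabName, tableName string) *eplot.Plot2D {
	pt := gui.Tabs.NewTab(tabName)         // new egui API: named tab frame
	plt := eplot.NewPlot2D(pt)             // attach a Plot2D to that frame
	gui.Plots[tableName] = plt             // register so other code can update it
	plt.SetTable(lg.MiscTables[tableName]) // bind the backing table
	return plt
}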
diff --git a/axon/looper.go b/axon/looper.go
index e5f5b9557..91ddd9e03 100644
--- a/axon/looper.go
+++ b/axon/looper.go
@@ -5,11 +5,11 @@
package axon
import (
- "github.com/emer/emergent/egui"
- "github.com/emer/emergent/elog"
- "github.com/emer/emergent/etime"
- "github.com/emer/emergent/looper"
- "github.com/emer/emergent/netview"
+ "github.com/emer/emergent/v2/egui"
+ "github.com/emer/emergent/v2/elog"
+ "github.com/emer/emergent/v2/etime"
+ "github.com/emer/emergent/v2/looper"
+ "github.com/emer/emergent/v2/netview"
)
// LooperStdPhases adds the minus and plus phases of the theta cycle,
diff --git a/axon/network.go b/axon/network.go
index 776a2d32a..74253f8dc 100644
--- a/axon/network.go
+++ b/axon/network.go
@@ -9,11 +9,11 @@ import (
"strings"
"github.com/c2h5oh/datasize"
- "github.com/emer/emergent/emer"
- "github.com/emer/emergent/prjn"
- "github.com/emer/etable/etensor"
+ "github.com/emer/emergent/v2/emer"
+ "github.com/emer/emergent/v2/prjn"
"github.com/goki/ki/ki"
"github.com/goki/ki/kit"
+ "goki.dev/etable/v2/etensor"
)
// axon.Network implements the Axon spiking model,
diff --git a/axon/network_test.go b/axon/network_test.go
index dfa7d38ef..1ebb36635 100644
--- a/axon/network_test.go
+++ b/axon/network_test.go
@@ -5,7 +5,7 @@ package axon
import (
"testing"
- "github.com/emer/emergent/emer"
+ "github.com/emer/emergent/v2/emer"
"github.com/stretchr/testify/assert"
)
diff --git a/axon/networkbase.go b/axon/networkbase.go
index 792fd19dc..c021f120f 100644
--- a/axon/networkbase.go
+++ b/axon/networkbase.go
@@ -20,150 +20,150 @@ import (
"strings"
"time"
- "github.com/emer/emergent/econfig"
- "github.com/emer/emergent/emer"
- "github.com/emer/emergent/erand"
- "github.com/emer/emergent/netparams"
- "github.com/emer/emergent/params"
- "github.com/emer/emergent/prjn"
- "github.com/emer/emergent/relpos"
- "github.com/emer/emergent/timer"
- "github.com/emer/emergent/weights"
- "github.com/goki/gi/gi"
+ "github.com/emer/emergent/v2/econfig"
+ "github.com/emer/emergent/v2/emer"
+ "github.com/emer/emergent/v2/erand"
+ "github.com/emer/emergent/v2/netparams"
+ "github.com/emer/emergent/v2/params"
+ "github.com/emer/emergent/v2/prjn"
+ "github.com/emer/emergent/v2/relpos"
+ "github.com/emer/emergent/v2/timer"
+ "github.com/emer/emergent/v2/weights"
"github.com/goki/ki/indent"
"github.com/goki/kigen/dedupe"
- "github.com/goki/mat32"
+ "goki.dev/gi/v2/gi"
+ "goki.dev/mat32/v2"
)
// NetworkBase manages the basic structural components of a network (layers).
// The main Network then can just have the algorithm-specific code.
type NetworkBase struct {
- // [view: -] we need a pointer to ourselves as an emer.Network, which can always be used to extract the true underlying type of object when network is embedded in other structs -- function receivers do not have this ability so this is necessary.
- EmerNet emer.Network `copy:"-" json:"-" xml:"-" view:"-" desc:"we need a pointer to ourselves as an emer.Network, which can always be used to extract the true underlying type of object when network is embedded in other structs -- function receivers do not have this ability so this is necessary."`
+ // we need a pointer to ourselves as an emer.Network, which can always be used to extract the true underlying type of object when network is embedded in other structs -- function receivers do not have this ability so this is necessary.
+ EmerNet emer.Network `copy:"-" json:"-" xml:"-" view:"-"`
// overall name of network -- helps discriminate if there are multiple
- Nm string `desc:"overall name of network -- helps discriminate if there are multiple"`
+ Nm string
// filename of last weights file loaded or saved
- WtsFile string `desc:"filename of last weights file loaded or saved"`
+ WtsFile string
// PVLV system for phasic dopamine signaling, including internal drives, US outcomes. Core LHb (lateral habenula) and VTA (ventral tegmental area) dopamine are computed in equations using inputs from specialized network layers (LDTLayer driven by BLA, CeM layers, VSPatchLayer). Renders USLayer, PVLayer, DrivesLayer representations based on state updated here.
- PVLV PVLV `desc:"PVLV system for phasic dopamine signaling, including internal drives, US outcomes. Core LHb (lateral habenula) and VTA (ventral tegmental area) dopamine are computed in equations using inputs from specialized network layers (LDTLayer driven by BLA, CeM layers, VSPatchLayer). Renders USLayer, PVLayer, DrivesLayer representations based on state updated here."`
+ PVLV PVLV
- // [view: -] map of name to layers -- layer names must be unique
- LayMap map[string]*Layer `view:"-" desc:"map of name to layers -- layer names must be unique"`
+ // map of name to layers -- layer names must be unique
+ LayMap map[string]*Layer `view:"-"`
- // [view: -] map of layer classes -- made during Build
- LayClassMap map[string][]string `view:"-" desc:"map of layer classes -- made during Build"`
+ // map of layer classes -- made during Build
+ LayClassMap map[string][]string `view:"-"`
- // [view: -] minimum display position in network
- MinPos mat32.Vec3 `view:"-" desc:"minimum display position in network"`
+ // minimum display position in network
+ MinPos mat32.Vec3 `view:"-"`
- // [view: -] maximum display position in network
- MaxPos mat32.Vec3 `view:"-" desc:"maximum display position in network"`
+ // maximum display position in network
+ MaxPos mat32.Vec3 `view:"-"`
// optional metadata that is saved in network weights files -- e.g., can indicate number of epochs that were trained, or any other information about this network that would be useful to save
- MetaData map[string]string `desc:"optional metadata that is saved in network weights files -- e.g., can indicate number of epochs that were trained, or any other information about this network that would be useful to save"`
+ MetaData map[string]string
// if true, the neuron and synapse variables will be organized into a gpu-optimized memory order, otherwise cpu-optimized. This must be set before network Build() is called.
- UseGPUOrder bool `inactive:"+" desc:"if true, the neuron and synapse variables will be organized into a gpu-optimized memory order, otherwise cpu-optimized. This must be set before network Build() is called."`
+ UseGPUOrder bool `inactive:"+"`
- // [view: -] network index in global Networks list of networks -- needed for GPU shader kernel compatible network variable access functions (e.g., NrnV, SynV etc) in CPU mode
- NetIdx uint32 `view:"-" desc:"network index in global Networks list of networks -- needed for GPU shader kernel compatible network variable access functions (e.g., NrnV, SynV etc) in CPU mode"`
+ // network index in global Networks list of networks -- needed for GPU shader kernel compatible network variable access functions (e.g., NrnV, SynV etc) in CPU mode
+ NetIdx uint32 `view:"-"`
- // [view: -] maximum synaptic delay across any projection in the network -- used for sizing the GBuf accumulation buffer.
- MaxDelay uint32 `inactive:"+" view:"-" desc:"maximum synaptic delay across any projection in the network -- used for sizing the GBuf accumulation buffer."`
+ // maximum synaptic delay across any projection in the network -- used for sizing the GBuf accumulation buffer.
+ MaxDelay uint32 `inactive:"+" view:"-"`
// maximum number of data inputs that can be processed in parallel in one pass of the network. Neuron storage is allocated to hold this amount during Build process, and this value reflects that.
- MaxData uint32 `inactive:"+" desc:"maximum number of data inputs that can be processed in parallel in one pass of the network. Neuron storage is allocated to hold this amount during Build process, and this value reflects that."`
+ MaxData uint32 `inactive:"+"`
// total number of neurons
- NNeurons uint32 `inactive:"+" desc:"total number of neurons"`
+ NNeurons uint32 `inactive:"+"`
// total number of synapses
- NSyns uint32 `inactive:"+" desc:"total number of synapses"`
+ NSyns uint32 `inactive:"+"`
- // [view: -] storage for global vars
- Globals []float32 `view:"-" desc:"storage for global vars"`
+ // storage for global vars
+ Globals []float32 `view:"-"`
// array of layers
- Layers []*Layer `desc:"array of layers"`
+ Layers []*Layer
- // [view: -] [Layers] array of layer parameters, in 1-to-1 correspondence with Layers
- LayParams []LayerParams `view:"-" desc:"[Layers] array of layer parameters, in 1-to-1 correspondence with Layers"`
+ // array of layer parameters, in 1-to-1 correspondence with Layers
+ LayParams []LayerParams `view:"-"`
- // [view: -] [Layers][MaxData] array of layer values, with extra per data
- LayVals []LayerVals `view:"-" desc:"[Layers][MaxData] array of layer values, with extra per data"`
+ // array of layer values, with extra per data
+ LayVals []LayerVals `view:"-"`
- // [view: -] [Layers][Pools][MaxData] array of inhibitory pools for all layers.
- Pools []Pool `view:"-" desc:"[Layers][Pools][MaxData] array of inhibitory pools for all layers."`
+ // array of inhibitory pools for all layers.
+ Pools []Pool `view:"-"`
- // [view: -] [Layers][Neurons][MaxData] entire network's allocation of neuron variables, accessed via NrnV function with flexible striding
- Neurons []float32 `view:"-" desc:"[Layers][Neurons][MaxData] entire network's allocation of neuron variables, accessed via NrnV function with flexible striding"`
+ // entire network's allocation of neuron variables, accessed via NrnV function with flexible striding
+ Neurons []float32 `view:"-"`
- // [view: -] [Layers][Neurons][MaxData]] entire network's allocation of neuron average avariables, accessed via NrnAvgV function with flexible striding
- NeuronAvgs []float32 `view:"-" desc:"[Layers][Neurons][MaxData]] entire network's allocation of neuron average avariables, accessed via NrnAvgV function with flexible striding"`
+ // entire network's allocation of neuron average variables, accessed via NrnAvgV function with flexible striding
+ NeuronAvgs []float32 `view:"-"`
- // [view: -] [Layers][Neurons] entire network's allocation of neuron index variables, accessed via NrnI function with flexible striding
- NeuronIxs []uint32 `view:"-" desc:"[Layers][Neurons] entire network's allocation of neuron index variables, accessed via NrnI function with flexible striding"`
+ // entire network's allocation of neuron index variables, accessed via NrnI function with flexible striding
+ NeuronIxs []uint32 `view:"-"`
- // [view: -] [Layers][SendPrjns] pointers to all projections in the network, sender-based
- Prjns []*Prjn `view:"-" desc:"[Layers][SendPrjns] pointers to all projections in the network, sender-based"`
+ // pointers to all projections in the network, sender-based
+ Prjns []*Prjn `view:"-"`
- // [view: -] [Layers][SendPrjns] array of projection parameters, in 1-to-1 correspondence with Prjns, sender-based
- PrjnParams []PrjnParams `view:"-" desc:"[Layers][SendPrjns] array of projection parameters, in 1-to-1 correspondence with Prjns, sender-based"`
+ // array of projection parameters, in 1-to-1 correspondence with Prjns, sender-based
+ PrjnParams []PrjnParams `view:"-"`
- // [view: -] [Layers][SendPrjns][SendNeurons][RecvNeurons] entire network's allocation of synapse idx vars, organized sender-based, with flexible striding, accessed via SynI function
- SynapseIxs []uint32 `view:"-" desc:"[Layers][SendPrjns][SendNeurons][RecvNeurons] entire network's allocation of synapse idx vars, organized sender-based, with flexible striding, accessed via SynI function"`
+ // entire network's allocation of synapse idx vars, organized sender-based, with flexible striding, accessed via SynI function
+ SynapseIxs []uint32 `view:"-"`
- // [view: -] [Layers][SendPrjns][SendNeurons][RecvNeurons] entire network's allocation of synapses, organized sender-based, with flexible striding, accessed via SynV function
- Synapses []float32 `view:"-" desc:"[Layers][SendPrjns][SendNeurons][RecvNeurons] entire network's allocation of synapses, organized sender-based, with flexible striding, accessed via SynV function"`
+ // entire network's allocation of synapses, organized sender-based, with flexible striding, accessed via SynV function
+ Synapses []float32 `view:"-"`
- // [view: -] [Layers][SendPrjns][SendNeurons][RecvNeurons][MaxData] entire network's allocation of synapse Ca vars, organized sender-based, with flexible striding, accessed via SynCaV function
- SynapseCas []float32 `view:"-" desc:"[Layers][SendPrjns][SendNeurons][RecvNeurons][MaxData] entire network's allocation of synapse Ca vars, organized sender-based, with flexible striding, accessed via SynCaV function"`
+ // entire network's allocation of synapse Ca vars, organized sender-based, with flexible striding, accessed via SynCaV function
+ SynapseCas []float32 `view:"-"`
- // [view: -] [Layers][SendPrjns][SendNeurons] starting offset and N cons for each sending neuron, for indexing into the Syns synapses, which are organized sender-based.
- PrjnSendCon []StartN `view:"-" desc:"[Layers][SendPrjns][SendNeurons] starting offset and N cons for each sending neuron, for indexing into the Syns synapses, which are organized sender-based."`
+ // starting offset and N cons for each sending neuron, for indexing into the Syns synapses, which are organized sender-based.
+ PrjnSendCon []StartN `view:"-"`
- // [view: -] [Layers][RecvPrjns][RecvNeurons] starting offset and N cons for each recv neuron, for indexing into the RecvSynIdx array of indexes into the Syns synapses, which are organized sender-based.
- PrjnRecvCon []StartN `view:"-" desc:"[Layers][RecvPrjns][RecvNeurons] starting offset and N cons for each recv neuron, for indexing into the RecvSynIdx array of indexes into the Syns synapses, which are organized sender-based."`
+ // starting offset and N cons for each recv neuron, for indexing into the RecvSynIdx array of indexes into the Syns synapses, which are organized sender-based.
+ PrjnRecvCon []StartN `view:"-"`
- // [view: -] [Layers][RecvPrjns][RecvNeurons][MaxDelay][MaxData] conductance buffer for accumulating spikes -- subslices are allocated to each projection -- uses int-encoded float values for faster GPU atomic integration
- PrjnGBuf []int32 `view:"-" desc:"[Layers][RecvPrjns][RecvNeurons][MaxDelay][MaxData] conductance buffer for accumulating spikes -- subslices are allocated to each projection -- uses int-encoded float values for faster GPU atomic integration"`
+ // conductance buffer for accumulating spikes -- subslices are allocated to each projection -- uses int-encoded float values for faster GPU atomic integration
+ PrjnGBuf []int32 `view:"-"`
- // [view: -] [Layers][RecvPrjns][RecvNeurons][MaxData] synaptic conductance integrated over time per projection per recv neurons -- spikes come in via PrjnBuf -- subslices are allocated to each projection
- PrjnGSyns []float32 `view:"-" desc:"[Layers][RecvPrjns][RecvNeurons][MaxData] synaptic conductance integrated over time per projection per recv neurons -- spikes come in via PrjnBuf -- subslices are allocated to each projection"`
+ // synaptic conductance integrated over time per projection per recv neurons -- spikes come in via PrjnBuf -- subslices are allocated to each projection
+ PrjnGSyns []float32 `view:"-"`
- // [view: -] [Layers][RecvPrjns] indexes into Prjns (organized by SendPrjn) organized by recv projections -- needed for iterating through recv prjns efficiently on GPU.
- RecvPrjnIdxs []uint32 `view:"-" desc:"[Layers][RecvPrjns] indexes into Prjns (organized by SendPrjn) organized by recv projections -- needed for iterating through recv prjns efficiently on GPU."`
+ // indexes into Prjns (organized by SendPrjn) organized by recv projections -- needed for iterating through recv prjns efficiently on GPU.
+ RecvPrjnIdxs []uint32 `view:"-"`
- // [view: -] [Layers][RecvPrjns][RecvNeurons][Syns] indexes into Synapses for each recv neuron, organized into blocks according to PrjnRecvCon, for receiver-based access.
- RecvSynIdxs []uint32 `view:"-" desc:"[Layers][RecvPrjns][RecvNeurons][Syns] indexes into Synapses for each recv neuron, organized into blocks according to PrjnRecvCon, for receiver-based access."`
+ // indexes into Synapses for each recv neuron, organized into blocks according to PrjnRecvCon, for receiver-based access.
+ RecvSynIdxs []uint32 `view:"-"`
- // [In / Targ Layers][Neurons][Data] external input values for all Input / Target / Compare layers in the network -- the ApplyExt methods write to this per layer, and it is then actually applied in one consistent method.
- Exts []float32 `desc:"[In / Targ Layers][Neurons][Data] external input values for all Input / Target / Compare layers in the network -- the ApplyExt methods write to this per layer, and it is then actually applied in one consistent method."`
+ // external input values for all Input / Target / Compare layers in the network -- the ApplyExt methods write to this per layer, and it is then actually applied in one consistent method.
+ Exts []float32
- // [view: -] context used only for accessing neurons for display -- NetIdxs.NData in here is copied from active context in NewState
- Ctx Context `view:"-" desc:"context used only for accessing neurons for display -- NetIdxs.NData in here is copied from active context in NewState"`
+ // context used only for accessing neurons for display -- NetIdxs.NData in here is copied from active context in NewState
+ Ctx Context `view:"-"`
- // [view: -] random number generator for the network -- all random calls must use this -- set seed here for weight initialization values
- Rand erand.SysRand `view:"-" desc:"random number generator for the network -- all random calls must use this -- set seed here for weight initialization values"`
+ // random number generator for the network -- all random calls must use this -- set seed here for weight initialization values
+ Rand erand.SysRand `view:"-"`
// random seed to be set at the start of configuring the network and initializing the weights -- set this to get a different set of weights
- RndSeed int64 `inactive:"+" desc:"random seed to be set at the start of configuring the network and initializing the weights -- set this to get a different set of weights"`
+ RndSeed int64 `inactive:"+"`
// number of threads to use for parallel processing
- NThreads int `desc:"number of threads to use for parallel processing"`
+ NThreads int
- // [view: inline] GPU implementation
- GPU GPU `view:"inline" desc:"GPU implementation"`
+ // GPU implementation
+ GPU GPU `view:"inline"`
- // [view: -] record function timer information
- RecFunTimes bool `view:"-" desc:"record function timer information"`
+ // record function timer information
+ RecFunTimes bool `view:"-"`
- // [view: -] timers for each major function (step of processing)
- FunTimes map[string]*timer.Time `view:"-" desc:"timers for each major function (step of processing)"`
+ // timers for each major function (step of processing)
+ FunTimes map[string]*timer.Time `view:"-"`
}
// emer.Network interface methods:
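The flat Neurons, NeuronAvgs, and Synapses slices above only work together with the strided accessor functions they mention (NrnV, NrnAvgV, SynV, etc.). As a rough sketch only -- the actual axon accessors, variable enums, and stride order are defined elsewhere and may differ -- a flexible-strided lookup over a flat float32 slice can look like this, with nrnValue being a hypothetical stand-in:

package main

import "fmt"

// nrnValue is a hypothetical stand-in for a strided accessor like NrnV:
// all neuron variables live in one flat []float32, indexed as
// (varIdx*nNeurons + ni)*maxData + di, so the same code can run on the CPU
// and in a GPU shader that only sees flat arrays.
func nrnValue(neurons []float32, nNeurons, maxData, varIdx, ni, di uint32) float32 {
	return neurons[(varIdx*nNeurons+ni)*maxData+di]
}

func main() {
	const nNeurons, maxData, nVars = 4, 2, 3
	neurons := make([]float32, nVars*nNeurons*maxData)
	neurons[(1*nNeurons+2)*maxData+0] = 0.5 // variable 1, neuron 2, data index 0
	fmt.Println(nrnValue(neurons, nNeurons, maxData, 1, 2, 0)) // 0.5
}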
diff --git a/axon/networkbase_test.go b/axon/networkbase_test.go
index d91cd0a4f..57eba9b0b 100644
--- a/axon/networkbase_test.go
+++ b/axon/networkbase_test.go
@@ -3,7 +3,7 @@ package axon
import (
"testing"
- "github.com/emer/emergent/prjn"
+ "github.com/emer/emergent/v2/prjn"
"github.com/stretchr/testify/assert"
)
diff --git a/axon/neuromod.go b/axon/neuromod.go
index 2979b08c7..3a39f66b2 100644
--- a/axon/neuromod.go
+++ b/axon/neuromod.go
@@ -7,7 +7,7 @@ package axon
import (
"github.com/goki/gosl/slbool"
"github.com/goki/ki/kit"
- "github.com/goki/mat32"
+ "goki.dev/mat32/v2"
)
//go:generate stringer -type=DAModTypes
@@ -63,31 +63,31 @@ const (
type NeuroModParams struct {
// dopamine receptor-based effects of dopamine modulation on excitatory and inhibitory conductances: D1 is excitatory, D2 is inhibitory as a function of increasing dopamine
- DAMod DAModTypes `desc:"dopamine receptor-based effects of dopamine modulation on excitatory and inhibitory conductances: D1 is excitatory, D2 is inhibitory as a function of increasing dopamine"`
+ DAMod DAModTypes
// valence coding of this layer -- may affect specific layer types but does not directly affect neuromodulators currently
- Valence ValenceTypes `desc:"valence coding of this layer -- may affect specific layer types but does not directly affect neuromodulators currently"`
+ Valence ValenceTypes
- // [viewif: DAMod!=NoDAMod] multiplicative factor on overall DA modulation specified by DAMod -- resulting overall gain factor is: 1 + DAModGain * DA, where DA is appropriate DA-driven factor
- DAModGain float32 `viewif:"DAMod!=NoDAMod" desc:"multiplicative factor on overall DA modulation specified by DAMod -- resulting overall gain factor is: 1 + DAModGain * DA, where DA is appropriate DA-driven factor"`
+ // multiplicative factor on overall DA modulation specified by DAMod -- resulting overall gain factor is: 1 + DAModGain * DA, where DA is appropriate DA-driven factor
+ DAModGain float32 `viewif:"DAMod!=NoDAMod"`
// modulate the sign of the learning rate factor according to the DA sign, taking into account the DAMod sign reversal for D2Mod, also using BurstGain and DipGain to modulate DA value -- otherwise, only the magnitude of the learning rate is modulated as a function of raw DA magnitude according to DALRateMod (without additional gain factors)
- DALRateSign slbool.Bool `desc:"modulate the sign of the learning rate factor according to the DA sign, taking into account the DAMod sign reversal for D2Mod, also using BurstGain and DipGain to modulate DA value -- otherwise, only the magnitude of the learning rate is modulated as a function of raw DA magnitude according to DALRateMod (without additional gain factors)"`
+ DALRateSign slbool.Bool
- // [viewif: !DALRateSign] [min: 0] [max: 1] if not using DALRateSign, this is the proportion of maximum learning rate that Abs(DA) magnitude can modulate -- e.g., if 0.2, then DA = 0 = 80% of std learning rate, 1 = 100%
- DALRateMod float32 `min:"0" max:"1" viewif:"!DALRateSign" desc:"if not using DALRateSign, this is the proportion of maximum learning rate that Abs(DA) magnitude can modulate -- e.g., if 0.2, then DA = 0 = 80% of std learning rate, 1 = 100%"`
+ // if not using DALRateSign, this is the proportion of maximum learning rate that Abs(DA) magnitude can modulate -- e.g., if 0.2, then DA = 0 = 80% of std learning rate, 1 = 100%
+ DALRateMod float32 `min:"0" max:"1" viewif:"!DALRateSign"`
- // [min: 0] [max: 1] proportion of maximum learning rate that ACh can modulate -- e.g., if 0.2, then ACh = 0 = 80% of std learning rate, 1 = 100%
- AChLRateMod float32 `min:"0" max:"1" desc:"proportion of maximum learning rate that ACh can modulate -- e.g., if 0.2, then ACh = 0 = 80% of std learning rate, 1 = 100%"`
+ // proportion of maximum learning rate that ACh can modulate -- e.g., if 0.2, then ACh = 0 = 80% of std learning rate, 1 = 100%
+ AChLRateMod float32 `min:"0" max:"1"`
- // [def: 0,5] [min: 0] amount of extra Gi inhibition added in proportion to 1 - ACh level -- makes ACh disinhibitory
- AChDisInhib float32 `min:"0" def:"0,5" desc:"amount of extra Gi inhibition added in proportion to 1 - ACh level -- makes ACh disinhibitory"`
+ // amount of extra Gi inhibition added in proportion to 1 - ACh level -- makes ACh disinhibitory
+ AChDisInhib float32 `min:"0" def:"0,5"`
- // [def: 1] [min: 0] multiplicative gain factor applied to positive dopamine signals -- this operates on the raw dopamine signal prior to any effect of D2 receptors in reversing its sign!
- BurstGain float32 `min:"0" def:"1" desc:"multiplicative gain factor applied to positive dopamine signals -- this operates on the raw dopamine signal prior to any effect of D2 receptors in reversing its sign!"`
+ // multiplicative gain factor applied to positive dopamine signals -- this operates on the raw dopamine signal prior to any effect of D2 receptors in reversing its sign!
+ BurstGain float32 `min:"0" def:"1"`
- // [def: 1] [min: 0] multiplicative gain factor applied to negative dopamine signals -- this operates on the raw dopamine signal prior to any effect of D2 receptors in reversing its sign! should be small for acq, but roughly equal to burst for ext
- DipGain float32 `min:"0" def:"1" desc:"multiplicative gain factor applied to negative dopamine signals -- this operates on the raw dopamine signal prior to any effect of D2 receptors in reversing its sign! should be small for acq, but roughly equal to burst for ext"`
+ // multiplicative gain factor applied to negative dopamine signals -- this operates on the raw dopamine signal prior to any effect of D2 receptors in reversing its sign! should be small for acq, but roughly equal to burst for ext
+ DipGain float32 `min:"0" def:"1"`
pad, pad1, pad2 float32
}
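For reference, the DAModGain and DALRateMod comments above already specify simple formulas; the following is a minimal sketch of those two factors (illustrative only, not the exact axon code, and ignoring the D2 sign reversal and the BurstGain / DipGain terms):

// daGainFactor follows the DAModGain comment: overall gain = 1 + DAModGain * DA.
func daGainFactor(daModGain, da float32) float32 {
	return 1 + daModGain*da
}

// lrateFactor follows the DALRateMod comment: with mod = 0.2, |DA| = 0 yields
// 0.8 (80% of the standard learning rate) and |DA| = 1 yields 1.0.
func lrateFactor(daLRateMod, absDA float32) float32 {
	return (1 - daLRateMod) + daLRateMod*absDA
}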
diff --git a/axon/neuron.go b/axon/neuron.go
index 93bb153b8..bf0194121 100644
--- a/axon/neuron.go
+++ b/axon/neuron.go
@@ -7,7 +7,7 @@ package axon
import (
"fmt"
- "github.com/emer/emergent/netview"
+ "github.com/emer/emergent/v2/netview"
"github.com/goki/ki/kit"
)
diff --git a/axon/pcore_layers.go b/axon/pcore_layers.go
index 601610139..56bba03c0 100644
--- a/axon/pcore_layers.go
+++ b/axon/pcore_layers.go
@@ -9,8 +9,8 @@ import (
"strings"
"github.com/goki/gosl/slbool"
- "github.com/goki/ki/bools"
"github.com/goki/ki/kit"
+ "goki.dev/glop/num"
)
//gosl: start pcore_layers
@@ -23,32 +23,32 @@ import (
// Must set Learn.NeuroMod.DAMod = D1Mod or D2Mod via SetBuildConfig("DAMod").
type MatrixParams struct {
- // [def: 0.05] threshold on layer Avg SpkMax for Matrix Go and VThal layers to count as having gated
- GateThr float32 `def:"0.05" desc:"threshold on layer Avg SpkMax for Matrix Go and VThal layers to count as having gated"`
+ // threshold on layer Avg SpkMax for Matrix Go and VThal layers to count as having gated
+ GateThr float32 `def:"0.05"`
// is this a ventral striatum (VS) matrix layer? if true, the gating status of this layer is recorded in the Global state, and used for updating effort and other factors.
- IsVS slbool.Bool `desc:"is this a ventral striatum (VS) matrix layer? if true, the gating status of this layer is recorded in the Global state, and used for updating effort and other factors."`
+ IsVS slbool.Bool
// index of other matrix (Go if we are NoGo and vice-versa). Set during Build from BuildConfig OtherMatrixName
- OtherMatrixIdx int32 `inactive:"+" desc:"index of other matrix (Go if we are NoGo and vice-versa). Set during Build from BuildConfig OtherMatrixName"`
+ OtherMatrixIdx int32 `inactive:"+"`
// index of thalamus layer that we gate. needed to get gating information. Set during Build from BuildConfig ThalLay1Name if present -- -1 if not used
- ThalLay1Idx int32 `inactive:"+" desc:"index of thalamus layer that we gate. needed to get gating information. Set during Build from BuildConfig ThalLay1Name if present -- -1 if not used"`
+ ThalLay1Idx int32 `inactive:"+"`
// index of thalamus layer that we gate. needed to get gating information. Set during Build from BuildConfig ThalLay2Name if present -- -1 if not used
- ThalLay2Idx int32 `inactive:"+" desc:"index of thalamus layer that we gate. needed to get gating information. Set during Build from BuildConfig ThalLay2Name if present -- -1 if not used"`
+ ThalLay2Idx int32 `inactive:"+"`
// index of thalamus layer that we gate. needed to get gating information. Set during Build from BuildConfig ThalLay3Name if present -- -1 if not used
- ThalLay3Idx int32 `inactive:"+" desc:"index of thalamus layer that we gate. needed to get gating information. Set during Build from BuildConfig ThalLay3Name if present -- -1 if not used"`
+ ThalLay3Idx int32 `inactive:"+"`
// index of thalamus layer that we gate. needed to get gating information. Set during Build from BuildConfig ThalLay4Name if present -- -1 if not used
- ThalLay4Idx int32 `inactive:"+" desc:"index of thalamus layer that we gate. needed to get gating information. Set during Build from BuildConfig ThalLay4Name if present -- -1 if not used"`
+ ThalLay4Idx int32 `inactive:"+"`
// index of thalamus layer that we gate. needed to get gating information. Set during Build from BuildConfig ThalLay5Name if present -- -1 if not used
- ThalLay5Idx int32 `inactive:"+" desc:"index of thalamus layer that we gate. needed to get gating information. Set during Build from BuildConfig ThalLay5Name if present -- -1 if not used"`
+ ThalLay5Idx int32 `inactive:"+"`
// index of thalamus layer that we gate. needed to get gating information. Set during Build from BuildConfig ThalLay6Name if present -- -1 if not used
- ThalLay6Idx int32 `inactive:"+" desc:"index of thalamus layer that we gate. needed to get gating information. Set during Build from BuildConfig ThalLay6Name if present -- -1 if not used"`
+ ThalLay6Idx int32 `inactive:"+"`
pad, pad1, pad2 int32
}
@@ -87,8 +87,8 @@ const (
// Typically just a single unit per Pool representing a given stripe.
type GPParams struct {
- // [view: inline] [viewif: LayType=GPLayer] type of GP Layer -- must set during config using SetBuildConfig of GPType.
- GPType GPLayerTypes `viewif:"LayType=GPLayer" view:"inline" desc:"type of GP Layer -- must set during config using SetBuildConfig of GPType."`
+ // type of GP Layer -- must set during config using SetBuildConfig of GPType.
+ GPType GPLayerTypes `viewif:"LayType=GPLayer" view:"inline"`
pad, pad1, pad2 uint32
}
@@ -164,7 +164,7 @@ func (ly *Layer) MatrixGated(ctx *Context) {
}
}
if ctx.PlusPhase.IsTrue() && ly.Params.Matrix.IsVS.IsTrue() {
- SetGlbV(ctx, di, GvVSMatrixJustGated, bools.ToFloat32(mtxGated))
+ SetGlbV(ctx, di, GvVSMatrixJustGated, num.FromBool[float32](mtxGated))
if mtxGated {
SetGlbUSposV(ctx, di, GvVSMatrixPoolGated, uint32(poolIdx), 1)
}
diff --git a/axon/pcore_net.go b/axon/pcore_net.go
index 6567d068c..545b02aae 100644
--- a/axon/pcore_net.go
+++ b/axon/pcore_net.go
@@ -5,7 +5,7 @@
package axon
import (
- "github.com/emer/emergent/prjn"
+ "github.com/emer/emergent/v2/prjn"
)
// AddBG adds MtxGo, MtxNo, GPeOut, GPeIn, GPeTA, STNp, STNs, GPi layers,
diff --git a/axon/pool.go b/axon/pool.go
index 14abedc62..672c2914e 100644
--- a/axon/pool.go
+++ b/axon/pool.go
@@ -7,7 +7,7 @@ package axon
import (
"github.com/emer/axon/fsfffb"
"github.com/goki/gosl/slbool"
- "github.com/goki/mat32"
+ "goki.dev/mat32/v2"
)
//gosl: hlsl pool
@@ -24,17 +24,17 @@ import (
// based on values from the prior cycle -- thus are 1 cycle behind in general.
type AvgMaxPhases struct {
- // [view: inline] updated every cycle -- this is the source of all subsequent time scales
- Cycle AvgMaxI32 `view:"inline" desc:"updated every cycle -- this is the source of all subsequent time scales"`
+ // updated every cycle -- this is the source of all subsequent time scales
+ Cycle AvgMaxI32 `view:"inline"`
- // [view: inline] at the end of the minus phase
- Minus AvgMaxI32 `view:"inline" desc:"at the end of the minus phase"`
+ // at the end of the minus phase
+ Minus AvgMaxI32 `view:"inline"`
- // [view: inline] at the end of the plus phase
- Plus AvgMaxI32 `view:"inline" desc:"at the end of the plus phase"`
+ // at the end of the plus phase
+ Plus AvgMaxI32 `view:"inline"`
- // [view: inline] at the end of the previous plus phase
- Prev AvgMaxI32 `view:"inline" desc:"at the end of the previous plus phase"`
+ // at the end of the previous plus phase
+ Prev AvgMaxI32 `view:"inline"`
}
// CycleToMinus grabs current Cycle values into the Minus phase values
@@ -67,23 +67,23 @@ func (am *AvgMaxPhases) Zero() {
// based on values from the prior cycle -- thus are 1 cycle behind in general.
type PoolAvgMax struct {
- // [view: inline] avg and maximum CaSpkP (continuously updated at roughly 40 msec integration window timescale, ends up capturing potentiation, plus-phase signal) -- this is the primary variable to use for tracking overall pool activity
- CaSpkP AvgMaxPhases `inactive:"+" view:"inline" desc:"avg and maximum CaSpkP (continuously updated at roughly 40 msec integration window timescale, ends up capturing potentiation, plus-phase signal) -- this is the primary variable to use for tracking overall pool activity"`
+ // avg and maximum CaSpkP (continuously updated at roughly 40 msec integration window timescale, ends up capturing potentiation, plus-phase signal) -- this is the primary variable to use for tracking overall pool activity
+ CaSpkP AvgMaxPhases `inactive:"+" view:"inline"`
- // [view: inline] avg and maximum CaSpkD longer-term depression / DAPK1 signal in layer
- CaSpkD AvgMaxPhases `inactive:"+" view:"inline" desc:"avg and maximum CaSpkD longer-term depression / DAPK1 signal in layer"`
+ // avg and maximum CaSpkD longer-term depression / DAPK1 signal in layer
+ CaSpkD AvgMaxPhases `inactive:"+" view:"inline"`
- // [view: inline] avg and maximum SpkMax value (based on CaSpkP) -- reflects peak activity at any point across the cycle
- SpkMax AvgMaxPhases `inactive:"+" view:"inline" desc:"avg and maximum SpkMax value (based on CaSpkP) -- reflects peak activity at any point across the cycle"`
+ // avg and maximum SpkMax value (based on CaSpkP) -- reflects peak activity at any point across the cycle
+ SpkMax AvgMaxPhases `inactive:"+" view:"inline"`
- // [view: inline] avg and maximum Act firing rate value
- Act AvgMaxPhases `inactive:"+" view:"inline" desc:"avg and maximum Act firing rate value"`
+ // avg and maximum Act firing rate value
+ Act AvgMaxPhases `inactive:"+" view:"inline"`
- // [view: inline] avg and maximum GeInt integrated running-average excitatory conductance value
- GeInt AvgMaxPhases `inactive:"+" view:"inline" desc:"avg and maximum GeInt integrated running-average excitatory conductance value"`
+ // avg and maximum GeInt integrated running-average excitatory conductance value
+ GeInt AvgMaxPhases `inactive:"+" view:"inline"`
- // [view: inline] avg and maximum GiInt integrated running-average inhibitory conductance value
- GiInt AvgMaxPhases `inactive:"+" view:"inline" desc:"avg and maximum GiInt integrated running-average inhibitory conductance value"`
+ // avg and maximum GiInt integrated running-average inhibitory conductance value
+ GiInt AvgMaxPhases `inactive:"+" view:"inline"`
}
// SetN sets the N for aggregation
@@ -177,33 +177,33 @@ func (am *PoolAvgMax) Calc(refIdx int32) {
type Pool struct {
// starting and ending (exlusive) layer-wise indexes for the list of neurons in this pool
- StIdx, EdIdx uint32 `inactive:"+" desc:"starting and ending (exlusive) layer-wise indexes for the list of neurons in this pool"`
+ StIdx, EdIdx uint32 `inactive:"+"`
- // [view: -] layer index in global layer list
- LayIdx uint32 `view:"-" desc:"layer index in global layer list"`
+ // layer index in global layer list
+ LayIdx uint32 `view:"-"`
- // [view: -] data parallel index (innermost index per layer)
- DataIdx uint32 `view:"-" desc:"data parallel index (innermost index per layer)"`
+ // data parallel index (innermost index per layer)
+ DataIdx uint32 `view:"-"`
- // [view: -] pool index in global pool list: [Layer][Pool][Data]
- PoolIdx uint32 `view:"-" desc:"pool index in global pool list: [Layer][Pool][Data]"`
+ // pool index in global pool list: [Layer][Pool][Data]
+ PoolIdx uint32 `view:"-"`
// is this a layer-wide pool? if not, it represents a sub-pool of units within a 4D layer
- IsLayPool slbool.Bool `inactive:"+" desc:"is this a layer-wide pool? if not, it represents a sub-pool of units within a 4D layer"`
+ IsLayPool slbool.Bool `inactive:"+"`
// for special types where relevant (e.g., MatrixLayer, BGThalLayer), indicates if the pool was gated
- Gated slbool.Bool `inactive:"+" desc:"for special types where relevant (e.g., MatrixLayer, BGThalLayer), indicates if the pool was gated"`
+ Gated slbool.Bool `inactive:"+"`
pad uint32
// fast-slow FFFB inhibition values
- Inhib fsfffb.Inhib `inactive:"+" desc:"fast-slow FFFB inhibition values"`
+ Inhib fsfffb.Inhib `inactive:"+"`
// average and max values for relevant variables in this pool, at different time scales
- AvgMax PoolAvgMax `desc:"average and max values for relevant variables in this pool, at different time scales"`
+ AvgMax PoolAvgMax
- // [view: inline] absolute value of AvgDif differences from actual neuron ActPct relative to TrgAvg
- AvgDif AvgMaxI32 `inactive:"+" view:"inline" desc:"absolute value of AvgDif differences from actual neuron ActPct relative to TrgAvg"`
+ // absolute value of AvgDif differences from actual neuron ActPct relative to TrgAvg
+ AvgDif AvgMaxI32 `inactive:"+" view:"inline"`
}
// Init is callled during InitActs
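The StIdx / EdIdx convention above (exclusive end) implies the usual half-open loop over a pool's neurons; a hypothetical helper, not actual axon code:

// forPoolNeurons visits the layer-wise neuron indexes belonging to one pool,
// using the half-open [StIdx, EdIdx) range described above.
func forPoolNeurons(p *Pool, fn func(ni uint32)) {
	for ni := p.StIdx; ni < p.EdIdx; ni++ {
		fn(ni)
	}
}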
diff --git a/axon/pool_test.go b/axon/pool_test.go
index 6beba9bc2..b77a1e32c 100644
--- a/axon/pool_test.go
+++ b/axon/pool_test.go
@@ -10,8 +10,8 @@ import (
"os"
"testing"
- "github.com/emer/emergent/params"
- "github.com/emer/emergent/prjn"
+ "github.com/emer/emergent/v2/params"
+ "github.com/emer/emergent/v2/prjn"
)
// Note: subsequent params applied after Base
diff --git a/axon/prjn.go b/axon/prjn.go
index 5a936cb6e..f8e1cd076 100644
--- a/axon/prjn.go
+++ b/axon/prjn.go
@@ -9,12 +9,12 @@ import (
"io"
"strconv"
- "github.com/emer/emergent/erand"
- "github.com/emer/emergent/weights"
- "github.com/emer/etable/etensor"
+ "github.com/emer/emergent/v2/erand"
+ "github.com/emer/emergent/v2/weights"
"github.com/goki/ki/indent"
"github.com/goki/ki/ki"
"github.com/goki/ki/kit"
+ "goki.dev/etable/v2/etensor"
)
// https://github.com/kisvegabor/abbreviations-in-code suggests Buf instead of Buff
@@ -28,7 +28,7 @@ type Prjn struct {
PrjnBase
// all prjn-level parameters -- these must remain constant once configured
- Params *PrjnParams `desc:"all prjn-level parameters -- these must remain constant once configured"`
+ Params *PrjnParams
}
var KiT_Prjn = kit.Types.AddType(&Prjn{}, PrjnProps)
diff --git a/axon/prjnbase.go b/axon/prjnbase.go
index cec124f93..a6ef5dabd 100644
--- a/axon/prjnbase.go
+++ b/axon/prjnbase.go
@@ -8,13 +8,13 @@ import (
"errors"
"log"
- "github.com/emer/emergent/emer"
- "github.com/emer/emergent/params"
- "github.com/emer/emergent/prjn"
- "github.com/emer/etable/etensor"
- "github.com/emer/etable/minmax"
- "github.com/goki/gi/giv"
- "github.com/goki/mat32"
+ "github.com/emer/emergent/v2/emer"
+ "github.com/emer/emergent/v2/params"
+ "github.com/emer/emergent/v2/prjn"
+ "goki.dev/etable/v2/etensor"
+ "goki.dev/etable/v2/minmax"
+ "goki.dev/gi/v2/giv"
+ "goki.dev/mat32/v2"
)
// index naming:
@@ -31,68 +31,68 @@ import (
// accessed via the AxonPrj field.
type PrjnBase struct {
- // [view: -] we need a pointer to ourselves as an AxonPrjn, which can always be used to extract the true underlying type of object when prjn is embedded in other structs -- function receivers do not have this ability so this is necessary.
- AxonPrj AxonPrjn `copy:"-" json:"-" xml:"-" view:"-" desc:"we need a pointer to ourselves as an AxonPrjn, which can always be used to extract the true underlying type of object when prjn is embedded in other structs -- function receivers do not have this ability so this is necessary."`
+ // we need a pointer to ourselves as an AxonPrjn, which can always be used to extract the true underlying type of object when prjn is embedded in other structs -- function receivers do not have this ability so this is necessary.
+ AxonPrj AxonPrjn `copy:"-" json:"-" xml:"-" view:"-"`
// inactivate this projection -- allows for easy experimentation
- Off bool `desc:"inactivate this projection -- allows for easy experimentation"`
+ Off bool
// Class is for applying parameter styles, can be space separated multple tags
- Cls string `desc:"Class is for applying parameter styles, can be space separated multple tags"`
+ Cls string
// can record notes about this projection here
- Notes string `desc:"can record notes about this projection here"`
+ Notes string
// sending layer for this projection
- Send *Layer `desc:"sending layer for this projection"`
+ Send *Layer
// receiving layer for this projection
- Recv *Layer `desc:"receiving layer for this projection"`
+ Recv *Layer
- // [tableview: -] pattern of connectivity
- Pat prjn.Pattern `tableview:"-" desc:"pattern of connectivity"`
+ // pattern of connectivity
+ Pat prjn.Pattern `tableview:"-"`
// type of projection -- Forward, Back, Lateral, or extended type in specialized algorithms -- matches against .Cls parameter styles (e.g., .Back etc)
- Typ PrjnTypes `desc:"type of projection -- Forward, Back, Lateral, or extended type in specialized algorithms -- matches against .Cls parameter styles (e.g., .Back etc)"`
+ Typ PrjnTypes
- // [tableview: -] default parameters that are applied prior to user-set parameters -- these are useful for specific functionality in specialized brain areas (e.g., PVLV, BG etc) not associated with a prjn type, which otherwise is used to hard-code initial default parameters -- typically just set to a literal map.
- DefParams params.Params `tableview:"-" desc:"default parameters that are applied prior to user-set parameters -- these are useful for specific functionality in specialized brain areas (e.g., PVLV, BG etc) not associated with a prjn type, which otherwise is used to hard-code initial default parameters -- typically just set to a literal map."`
+ // default parameters that are applied prior to user-set parameters -- these are useful for specific functionality in specialized brain areas (e.g., PVLV, BG etc) not associated with a prjn type, which otherwise is used to hard-code initial default parameters -- typically just set to a literal map.
+ DefParams params.Params `tableview:"-"`
- // [tableview: -] provides a history of parameters applied to the layer
- ParamsHistory params.HistoryImpl `tableview:"-" desc:"provides a history of parameters applied to the layer"`
+ // provides a history of parameters applied to the layer
+ ParamsHistory params.HistoryImpl `tableview:"-"`
- // [view: inline] [tableview: -] average and maximum number of recv connections in the receiving layer
- RecvConNAvgMax minmax.AvgMax32 `tableview:"-" inactive:"+" view:"inline" desc:"average and maximum number of recv connections in the receiving layer"`
+ // average and maximum number of recv connections in the receiving layer
+ RecvConNAvgMax minmax.AvgMax32 `tableview:"-" inactive:"+" view:"inline"`
- // [view: inline] [tableview: -] average and maximum number of sending connections in the sending layer
- SendConNAvgMax minmax.AvgMax32 `tableview:"-" inactive:"+" view:"inline" desc:"average and maximum number of sending connections in the sending layer"`
+ // average and maximum number of sending connections in the sending layer
+ SendConNAvgMax minmax.AvgMax32 `tableview:"-" inactive:"+" view:"inline"`
- // [view: -] start index into global Synapse array: [Layer][SendPrjns][Synapses]
- SynStIdx uint32 `view:"-" desc:"start index into global Synapse array: [Layer][SendPrjns][Synapses]"`
+ // start index into global Synapse array: [Layer][SendPrjns][Synapses]
+ SynStIdx uint32 `view:"-"`
- // [view: -] number of synapses in this projection
- NSyns uint32 `view:"-" desc:"number of synapses in this projection"`
+ // number of synapses in this projection
+ NSyns uint32 `view:"-"`
- // [view: -] [RecvNeurons] starting offset and N cons for each recv neuron, for indexing into the RecvSynIdx array of indexes into the Syns synapses, which are organized sender-based. This is locally-managed during build process, but also copied to network global PrjnRecvCons slice for GPU usage.
- RecvCon []StartN `view:"-" desc:"[RecvNeurons] starting offset and N cons for each recv neuron, for indexing into the RecvSynIdx array of indexes into the Syns synapses, which are organized sender-based. This is locally-managed during build process, but also copied to network global PrjnRecvCons slice for GPU usage."`
+ // starting offset and N cons for each recv neuron, for indexing into the RecvSynIdx array of indexes into the Syns synapses, which are organized sender-based. This is locally-managed during build process, but also copied to network global PrjnRecvCons slice for GPU usage.
+ RecvCon []StartN `view:"-"`
- // [view: -] [SendNeurons][SendCon.N RecvNeurons] index into Syns synaptic state for each sending unit and connection within that, for the sending projection which does not own the synapses, and instead indexes into recv-ordered list
- RecvSynIdx []uint32 `view:"-" desc:"[SendNeurons][SendCon.N RecvNeurons] index into Syns synaptic state for each sending unit and connection within that, for the sending projection which does not own the synapses, and instead indexes into recv-ordered list"`
+ // index into Syns synaptic state for each sending unit and connection within that, for the sending projection which does not own the synapses, and instead indexes into recv-ordered list
+ RecvSynIdx []uint32 `view:"-"`
- // [view: -] [RecvNeurons][RecvCon.N SendingNeurons] for each recv synapse, this is index of *sending* neuron It is generally preferable to use the Synapse SendIdx where needed, instead of this slice, because then the memory access will be close by other values on the synapse.
- RecvConIdx []uint32 `view:"-" desc:"[RecvNeurons][RecvCon.N SendingNeurons] for each recv synapse, this is index of *sending* neuron It is generally preferable to use the Synapse SendIdx where needed, instead of this slice, because then the memory access will be close by other values on the synapse."`
+ // for each recv synapse, this is the index of the *sending* neuron. It is generally preferable to use the Synapse SendIdx where needed, instead of this slice, because then the memory access will be close to other values on the synapse.
+ RecvConIdx []uint32 `view:"-"`
- // [view: -] [SendNeurons] starting offset and N cons for each sending neuron, for indexing into the Syns synapses, which are organized sender-based. This is locally-managed during build process, but also copied to network global PrjnSendCons slice for GPU usage.
- SendCon []StartN `view:"-" desc:"[SendNeurons] starting offset and N cons for each sending neuron, for indexing into the Syns synapses, which are organized sender-based. This is locally-managed during build process, but also copied to network global PrjnSendCons slice for GPU usage."`
+ // starting offset and N cons for each sending neuron, for indexing into the Syns synapses, which are organized sender-based. This is locally-managed during build process, but also copied to network global PrjnSendCons slice for GPU usage.
+ SendCon []StartN `view:"-"`
- // [view: -] [SendNeurons[[SendCon.N RecvNeurons] index of other neuron that receives the sender's synaptic input, ordered by the sending layer's order of units as the outer loop, and SendCon.N receiving units within that. It is generally preferable to use the Synapse RecvIdx where needed, instead of this slice, because then the memory access will be close by other values on the synapse.
- SendConIdx []uint32 `view:"-" desc:"[SendNeurons[[SendCon.N RecvNeurons] index of other neuron that receives the sender's synaptic input, ordered by the sending layer's order of units as the outer loop, and SendCon.N receiving units within that. It is generally preferable to use the Synapse RecvIdx where needed, instead of this slice, because then the memory access will be close by other values on the synapse."`
+ // index of the other neuron that receives the sender's synaptic input, ordered by the sending layer's order of units as the outer loop, and SendCon.N receiving units within that. It is generally preferable to use the Synapse RecvIdx where needed, instead of this slice, because then the memory access will be close to other values on the synapse.
+ SendConIdx []uint32 `view:"-"`
- // [view: -] [RecvNeurons][Params.Com.MaxDelay][MaxData] Ge or Gi conductance ring buffer for each neuron, accessed through Params.Com.ReadIdx, WriteIdx -- scale * weight is added with Com delay offset -- a subslice from network PrjnGBuf. Uses int-encoded float values for faster GPU atomic integration
- GBuf []int32 `view:"-" desc:"[RecvNeurons][Params.Com.MaxDelay][MaxData] Ge or Gi conductance ring buffer for each neuron, accessed through Params.Com.ReadIdx, WriteIdx -- scale * weight is added with Com delay offset -- a subslice from network PrjnGBuf. Uses int-encoded float values for faster GPU atomic integration"`
+ // Ge or Gi conductance ring buffer for each neuron, accessed through Params.Com.ReadIdx, WriteIdx -- scale * weight is added with Com delay offset -- a subslice from network PrjnGBuf. Uses int-encoded float values for faster GPU atomic integration
+ GBuf []int32 `view:"-"`
- // [view: -] [RecvNeurons][MaxData] projection-level synaptic conductance values, integrated by prjn before being integrated at the neuron level, which enables the neuron to perform non-linear integration as needed -- a subslice from network PrjnGSyn.
- GSyns []float32 `view:"-" desc:"[RecvNeurons][MaxData] projection-level synaptic conductance values, integrated by prjn before being integrated at the neuron level, which enables the neuron to perform non-linear integration as needed -- a subslice from network PrjnGSyn."`
+ // projection-level synaptic conductance values, integrated by prjn before being integrated at the neuron level, which enables the neuron to perform non-linear integration as needed -- a subslice from network PrjnGSyn.
+ GSyns []float32 `view:"-"`
}
// emer.Prjn interface
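GBuf and the network-level PrjnGBuf are described as int-encoded float values so that spike conductances can be accumulated with integer atomics on the GPU. Below is a minimal sketch of one such encoding, assuming a fixed scale factor; the actual axon scale, rounding, and ring-buffer indexing may differ:

package example // illustrative sketch, not actual axon code

import "sync/atomic"

// gScale converts small conductance values to an integer range; the usable
// float range is limited to roughly |g| < 2^31 / gScale, which is ample for
// per-cycle conductance increments.
const gScale = float32(1 << 20)

// addG atomically accumulates a float conductance into an int32 buffer slot.
func addG(buf []int32, idx int, g float32) {
	atomic.AddInt32(&buf[idx], int32(g*gScale))
}

// readG decodes the accumulated integer back into a float conductance.
func readG(buf []int32, idx int) float32 {
	return float32(atomic.LoadInt32(&buf[idx])) / gScale
}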
diff --git a/axon/prjnparams.go b/axon/prjnparams.go
index 6994bbdc4..69096b7c6 100644
--- a/axon/prjnparams.go
+++ b/axon/prjnparams.go
@@ -8,7 +8,7 @@ import (
"encoding/json"
"strings"
- "github.com/goki/mat32"
+ "goki.dev/mat32/v2"
)
//gosl: hlsl prjnparams
@@ -31,10 +31,10 @@ import (
type StartN struct {
// starting offset
- Start uint32 `desc:"starting offset"`
+ Start uint32
- // number of items -- [Start:Start+N]
- N uint32 `desc:"number of items -- [Start:Start+N]"`
+ // number of items -- [Start:Start+N]
+ N uint32
pad, pad1 uint32 // todo: see if we can do without these?
}
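StartN defines a half-open block into one of the flat, sender-organized synapse arrays; a hypothetical iteration helper (not actual axon code):

// forStartN visits each index covered by one StartN entry, i.e. the
// half-open range [Start, Start+N).
func forStartN(sn StartN, fn func(syni uint32)) {
	for syni := sn.Start; syni < sn.Start+sn.N; syni++ {
		fn(syni)
	}
}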
@@ -77,10 +77,10 @@ func (pi *PrjnIdxs) SendNIdxToLayIdx(ni uint32) uint32 {
type GScaleVals struct {
// scaling factor for integrating synaptic input conductances (G's), originally computed as a function of sending layer activity and number of connections, and typically adapted from there -- see Prjn.PrjnScale adapt params
- Scale float32 `inactive:"+" desc:"scaling factor for integrating synaptic input conductances (G's), originally computed as a function of sending layer activity and number of connections, and typically adapted from there -- see Prjn.PrjnScale adapt params"`
+ Scale float32 `inactive:"+"`
// normalized relative proportion of total receiving conductance for this projection: PrjnScale.Rel / sum(PrjnScale.Rel across relevant prjns)
- Rel float32 `inactive:"+" desc:"normalized relative proportion of total receiving conductance for this projection: PrjnScale.Rel / sum(PrjnScale.Rel across relevant prjns)"`
+ Rel float32 `inactive:"+"`
pad, pad1 float32
}
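The Rel comment above defines a straightforward normalization (PrjnScale.Rel divided by the sum over relevant projections); a compact sketch of that computation, illustrative only:

// relNorm normalizes per-projection relative scale factors so they sum to 1,
// matching the GScaleVals.Rel description above; a zero sum yields all zeros.
func relNorm(rels []float32) []float32 {
	var sum float32
	for _, r := range rels {
		sum += r
	}
	out := make([]float32, len(rels))
	if sum == 0 {
		return out
	}
	for i, r := range rels {
		out[i] = r / sum
	}
	return out
}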
@@ -91,39 +91,39 @@ type GScaleVals struct {
type PrjnParams struct {
// functional type of prjn -- determines functional code path for specialized layer types, and is synchronized with the Prjn.Typ value
- PrjnType PrjnTypes `desc:"functional type of prjn -- determines functional code path for specialized layer types, and is synchronized with the Prjn.Typ value"`
+ PrjnType PrjnTypes
pad, pad1, pad2 int32
- // [view: -] recv and send neuron-level projection index array access info
- Idxs PrjnIdxs `view:"-" desc:"recv and send neuron-level projection index array access info"`
+ // recv and send neuron-level projection index array access info
+ Idxs PrjnIdxs `view:"-"`
- // [view: inline] synaptic communication parameters: delay, probability of failure
- Com SynComParams `view:"inline" desc:"synaptic communication parameters: delay, probability of failure"`
+ // synaptic communication parameters: delay, probability of failure
+ Com SynComParams `view:"inline"`
- // [view: inline] projection scaling parameters for computing GScale: modulates overall strength of projection, using both absolute and relative factors, with adaptation option to maintain target max conductances
- PrjnScale PrjnScaleParams `view:"inline" desc:"projection scaling parameters for computing GScale: modulates overall strength of projection, using both absolute and relative factors, with adaptation option to maintain target max conductances"`
+ // projection scaling parameters for computing GScale: modulates overall strength of projection, using both absolute and relative factors, with adaptation option to maintain target max conductances
+ PrjnScale PrjnScaleParams `view:"inline"`
- // [view: add-fields] slowly adapting, structural weight value parameters, which control initial weight values and slower outer-loop adjustments
- SWts SWtParams `view:"add-fields" desc:"slowly adapting, structural weight value parameters, which control initial weight values and slower outer-loop adjustments"`
+ // slowly adapting, structural weight value parameters, which control initial weight values and slower outer-loop adjustments
+ SWts SWtParams `view:"add-fields"`
- // [view: add-fields] synaptic-level learning parameters for learning in the fast LWt values.
- Learn LearnSynParams `view:"add-fields" desc:"synaptic-level learning parameters for learning in the fast LWt values."`
+ // synaptic-level learning parameters for learning in the fast LWt values.
+ Learn LearnSynParams `view:"add-fields"`
- // [view: inline] conductance scaling values
- GScale GScaleVals `view:"inline" desc:"conductance scaling values"`
+ // conductance scaling values
+ GScale GScaleVals `view:"inline"`
- // [view: inline] [viewif: PrjnType=[RWPrjn,TDPredPrjn]] Params for RWPrjn and TDPredPrjn for doing dopamine-modulated learning for reward prediction: Da * Send activity. Use in RWPredLayer or TDPredLayer typically to generate reward predictions. If the Da sign is positive, the first recv unit learns fully; for negative, second one learns fully. Lower lrate applies for opposite cases. Weights are positive-only.
- RLPred RLPredPrjnParams `viewif:"PrjnType=[RWPrjn,TDPredPrjn]" view:"inline" desc:"Params for RWPrjn and TDPredPrjn for doing dopamine-modulated learning for reward prediction: Da * Send activity. Use in RWPredLayer or TDPredLayer typically to generate reward predictions. If the Da sign is positive, the first recv unit learns fully; for negative, second one learns fully. Lower lrate applies for opposite cases. Weights are positive-only."`
+ // Params for RWPrjn and TDPredPrjn for doing dopamine-modulated learning for reward prediction: Da * Send activity. Use in RWPredLayer or TDPredLayer typically to generate reward predictions. If the Da sign is positive, the first recv unit learns fully; for negative, second one learns fully. Lower lrate applies for opposite cases. Weights are positive-only.
+ RLPred RLPredPrjnParams `viewif:"PrjnType=[RWPrjn,TDPredPrjn]" view:"inline"`
- // [view: inline] [viewif: PrjnType=MatrixPrjn] for trace-based learning in the MatrixPrjn. A trace of synaptic co-activity is formed, and then modulated by dopamine whenever it occurs. This bridges the temporal gap between gating activity and subsequent activity, and is based biologically on synaptic tags. Trace is reset at time of reward based on ACh level from CINs.
- Matrix MatrixPrjnParams `viewif:"PrjnType=MatrixPrjn" view:"inline" desc:"for trace-based learning in the MatrixPrjn. A trace of synaptic co-activity is formed, and then modulated by dopamine whenever it occurs. This bridges the temporal gap between gating activity and subsequent activity, and is based biologically on synaptic tags. Trace is reset at time of reward based on ACh level from CINs."`
+ // for trace-based learning in the MatrixPrjn. A trace of synaptic co-activity is formed, and then modulated by dopamine whenever it occurs. This bridges the temporal gap between gating activity and subsequent activity, and is based biologically on synaptic tags. Trace is reset at time of reward based on ACh level from CINs.
+ Matrix MatrixPrjnParams `viewif:"PrjnType=MatrixPrjn" view:"inline"`
- // [view: inline] [viewif: PrjnType=BLAPrjn] Basolateral Amygdala projection parameters.
- BLA BLAPrjnParams `viewif:"PrjnType=BLAPrjn" view:"inline" desc:"Basolateral Amygdala projection parameters."`
+ // Basolateral Amygdala projection parameters.
+ BLA BLAPrjnParams `viewif:"PrjnType=BLAPrjn" view:"inline"`
- // [view: inline] [viewif: PrjnType=HipPrjn] Hip bench parameters.
- Hip HipPrjnParams `viewif:"PrjnType=HipPrjn" view:"inline" desc:"Hip bench parameters."`
+ // Hip bench parameters.
+ Hip HipPrjnParams `viewif:"PrjnType=HipPrjn" view:"inline"`
}
func (pj *PrjnParams) Defaults() {
diff --git a/axon/pvlv.go b/axon/pvlv.go
index 9a8e6cf48..da0c9c8ad 100644
--- a/axon/pvlv.go
+++ b/axon/pvlv.go
@@ -5,9 +5,9 @@
package axon
import (
- "github.com/emer/emergent/erand"
- "github.com/goki/ki/bools"
- "github.com/goki/mat32"
+ "github.com/emer/emergent/v2/erand"
+ "goki.dev/glop/num"
+ "goki.dev/mat32/v2"
)
// DriveParams manages the drive parameters for computing and updating drive state.
@@ -17,19 +17,19 @@ import (
type DriveParams struct {
// minimum effective drive value -- this is an automatic baseline ensuring that a positive US results in at least some minimal level of reward. Unlike Base values, this is not reflected in the activity of the drive values -- applies at the time of reward calculation as a minimum baseline.
- DriveMin float32 `desc:"minimum effective drive value -- this is an automatic baseline ensuring that a positive US results in at least some minimal level of reward. Unlike Base values, this is not reflected in the activity of the drive values -- applies at the time of reward calculation as a minimum baseline."`
+ DriveMin float32
// baseline levels for each drive -- what they naturally trend toward in the absence of any input. Set inactive drives to 0 baseline, active ones typically elevated baseline (0-1 range).
- Base []float32 `desc:"baseline levels for each drive -- what they naturally trend toward in the absence of any input. Set inactive drives to 0 baseline, active ones typically elevated baseline (0-1 range)."`
+ Base []float32
// time constants in ThetaCycle (trial) units for natural update toward Base values -- 0 values means no natural update.
- Tau []float32 `desc:"time constants in ThetaCycle (trial) units for natural update toward Base values -- 0 values means no natural update."`
+ Tau []float32
// decrement in drive value when US is consumed, thus partially satisfying the drive -- positive values are subtracted from current Drive value.
- Satisfaction []float32 `desc:"decrement in drive value when US is consumed, thus partially satisfying the drive -- positive values are subtracted from current Drive value."`
+ Satisfaction []float32
- // [view: -] 1/Tau
- Dt []float32 `view:"-" desc:"1/Tau"`
+ // 1/Tau
+ Dt []float32 `view:"-"`
}
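With Dt = 1/Tau, the "natural update toward Base values" described above is a standard exponential drift; a one-line sketch (illustrative, not the exact axon update):

// driveNaturalUpdate moves a drive toward its baseline at rate dt = 1/Tau per
// ThetaCycle; dt = 0 (Tau = 0) leaves the drive unchanged, per the comments above.
func driveNaturalUpdate(drive, base, dt float32) float32 {
	return drive + dt*(base-drive)
}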
func (dp *DriveParams) Alloc(nDrives int) {
@@ -151,13 +151,13 @@ func (dp *DriveParams) EffectiveDrive(ctx *Context, di uint32, i uint32) float32
type UrgencyParams struct {
// value of raw urgency where the urgency activation level is 50%
- U50 float32 `desc:"value of raw urgency where the urgency activation level is 50%"`
+ U50 float32
- // [def: 4] exponent on the urge factor -- valid numbers are 1,2,4,6
- Power int32 `def:"4" desc:"exponent on the urge factor -- valid numbers are 1,2,4,6"`
+ // exponent on the urge factor -- valid numbers are 1,2,4,6
+ Power int32 `def:"4"`
- // [def: 0.2] threshold for urge -- cuts off small baseline values
- Thr float32 `def:"0.2" desc:"threshold for urge -- cuts off small baseline values"`
+ // threshold for urge -- cuts off small baseline values
+ Thr float32 `def:"0.2"`
}
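The U50 and Power fields suggest a saturating function of raw urgency; the sketch below assumes a Hill-style form (raw^Power / (raw^Power + U50^Power)) with Thr zeroing small outputs, using the standard library math package. This is an assumption for illustration only, since the actual urgency function is defined elsewhere in the package:

// urgeFun is an assumed Hill-style reading of U50 ("raw value giving 50%
// activation"), Power, and Thr; the real UrgencyParams function may differ.
func urgeFun(ur *UrgencyParams, raw float32) float32 {
	rp := math.Pow(float64(raw), float64(ur.Power))
	up := math.Pow(float64(ur.U50), float64(ur.Power))
	urge := float32(rp / (rp + up))
	if urge < ur.Thr {
		return 0
	}
	return urge
}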
func (ur *UrgencyParams) Defaults() {
@@ -220,26 +220,26 @@ func PVLVNormFun(raw float32) float32 {
// weighted and integrated to compute an overall PV primary value.
type USParams struct {
- // [def: 0.5] threshold for a negative US increment, _after_ multiplying by the USnegGains factor for that US (to allow for normalized input magnitudes that may translate into different magnitude of effects), to drive a phasic ACh response and associated VSMatrix gating and dopamine firing -- i.e., a full negative US outcome event (global NegUSOutcome flag is set)
- NegUSOutcomeThr float32 `def:"0.5" desc:"threshold for a negative US increment, _after_ multiplying by the USnegGains factor for that US (to allow for normalized input magnitudes that may translate into different magnitude of effects), to drive a phasic ACh response and associated VSMatrix gating and dopamine firing -- i.e., a full negative US outcome event (global NegUSOutcome flag is set)"`
+ // threshold for a negative US increment, _after_ multiplying by the USnegGains factor for that US (to allow for normalized input magnitudes that may translate into different magnitude of effects), to drive a phasic ACh response and associated VSMatrix gating and dopamine firing -- i.e., a full negative US outcome event (global NegUSOutcome flag is set)
+ NegUSOutcomeThr float32 `def:"0.5"`
- // [def: 2] gain factor applied to sum of weighted, drive-scaled positive USs to compute PVpos primary value summary -- multiplied prior to 1/(1+x) normalization. Use this to adjust the overall scaling of PVpos reward within 0-1 normalized range (see also PVnegGain). Each USpos is assumed to be in 0-1 range, default 1.
- PVposGain float32 `def:"2" desc:"gain factor applied to sum of weighted, drive-scaled positive USs to compute PVpos primary value summary -- multiplied prior to 1/(1+x) normalization. Use this to adjust the overall scaling of PVpos reward within 0-1 normalized range (see also PVnegGain). Each USpos is assumed to be in 0-1 range, default 1."`
+ // gain factor applied to sum of weighted, drive-scaled positive USs to compute PVpos primary value summary -- multiplied prior to 1/(1+x) normalization. Use this to adjust the overall scaling of PVpos reward within 0-1 normalized range (see also PVnegGain). Each USpos is assumed to be in 0-1 range, default 1.
+ PVposGain float32 `def:"2"`
- // [def: 1] gain factor applied to sum of weighted negative USs to compute PVneg primary value summary -- multiplied prior to 1/(1+x) normalization. Use this to adjust overall scaling of PVneg within 0-1 normalized range (see also PVposGain).
- PVnegGain float32 `def:"1" desc:"gain factor applied to sum of weighted negative USs to compute PVneg primary value summary -- multiplied prior to 1/(1+x) normalization. Use this to adjust overall scaling of PVneg within 0-1 normalized range (see also PVposGain)."`
+ // gain factor applied to sum of weighted negative USs to compute PVneg primary value summary -- multiplied prior to 1/(1+x) normalization. Use this to adjust overall scaling of PVneg within 0-1 normalized range (see also PVposGain).
+ PVnegGain float32 `def:"1"`
// gain factor for each individual negative US, multiplied prior to 1/(1+x) normalization of each term for activating the OFCnegUS pools. These gains are _not_ applied in computing summary PVneg value (see PVnegWts), and generally must be larger than the weights to leverage the dynamic range within each US pool.
- USnegGains []float32 `desc:"gain factor for each individual negative US, multiplied prior to 1/(1+x) normalization of each term for activating the OFCnegUS pools. These gains are _not_ applied in computing summary PVneg value (see PVnegWts), and generally must be larger than the weights to leverage the dynamic range within each US pool."`
+ USnegGains []float32
// weight factor applied to each separate positive US on the way to computing the overall PVpos summary value, to control the weighting of each US relative to the others. Each pos US is also multiplied by its dynamic Drive factor as well. Use PVposGain to control the overall scaling of the PVpos value.
- PVposWts []float32 `desc:"weight factor applied to each separate positive US on the way to computing the overall PVpos summary value, to control the weighting of each US relative to the others. Each pos US is also multiplied by its dynamic Drive factor as well. Use PVposGain to control the overall scaling of the PVpos value."`
+ PVposWts []float32
// weight factor applied to each separate negative US on the way to computing the overall PVneg summary value, to control the weighting of each US relative to the others. The first pool is Time, second is Effort, and these are typically weighted lower (.02) than salient simulation-specific USs (1).
- PVnegWts []float32 `desc:"weight factor applied to each separate negative US on the way to computing the overall PVneg summary value, to control the weighting of each US relative to the others. The first pool is Time, second is Effort, and these are typically weighted lower (.02) than salient simulation-specific USs (1)."`
+ PVnegWts []float32
// computed estimated US values, based on OFCposUSPT and VSMatrix gating, in PVposEst
- USposEst []float32 `inactive:"+" desc:"computed estimated US values, based on OFCposUSPT and VSMatrix gating, in PVposEst"`
+ USposEst []float32 `inactive:"+"`
}
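Several comments above refer to a "1/(1+x) normalization" applied after the gain factors; one reading of that (an assumption -- the actual PVLVNormFun may differ) is the squashing function x/(1+x), which maps a non-negative, gain-scaled sum into [0, 1):

// pvNorm is an assumed reading of the "1/(1+x) normalization" mentioned above:
// v = x/(1+x) = 1 - 1/(1+x), monotonic and bounded in [0, 1) for x >= 0.
// e.g. a summary positive value could be pvNorm(PVposGain * weightedSum).
func pvNorm(x float32) float32 {
	return x / (1 + x)
}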
func (us *USParams) Alloc(nPos, nNeg int) {
@@ -323,14 +323,14 @@ func (us *USParams) NegUSOutcome(ctx *Context, di uint32, usIdx int, mag float32
// or "relief" burst when actual neg < predicted.
type LHbParams struct {
- // [def: 1] threshold factor that multiplies integrated pvNeg value to establish a threshold for whether the integrated pvPos value is good enough to drive overall net positive reward
- NegThr float32 `def:"1" desc:"threshold factor that multiplies integrated pvNeg value to establish a threshold for whether the integrated pvPos value is good enough to drive overall net positive reward"`
+ // threshold factor that multiplies integrated pvNeg value to establish a threshold for whether the integrated pvPos value is good enough to drive overall net positive reward
+ NegThr float32 `def:"1"`
- // [def: 1] gain multiplier on PVpos for purposes of generating bursts (not for discounting negative dips) -- 4 renormalizes for typical ~.5 values (.5 * .5 = .25)
- BurstGain float32 `def:"1" desc:"gain multiplier on PVpos for purposes of generating bursts (not for discounting negative dips) -- 4 renormalizes for typical ~.5 values (.5 * .5 = .25)"`
+ // gain multiplier on PVpos for purposes of generating bursts (not for discounting negative dips) -- 4 renormalizes for typical ~.5 values (.5 * .5 = .25)
+ BurstGain float32 `def:"1"`
- // [def: 1] gain multiplier on PVneg for purposes of generating dips (not for discounting positive bursts) -- 4 renormalizes for typical ~.5 values (.5 * .5 = .25)
- DipGain float32 `def:"1" desc:"gain multiplier on PVneg for purposes of generating dips (not for discounting positive bursts) -- 4 renormalizes for typical ~.5 values (.5 * .5 = .25)"`
+ // gain multiplier on PVneg for purposes of generating dips (not for discounting positive bursts) -- 4 renormalizes for typical ~.5 values (.5 * .5 = .25)
+ DipGain float32 `def:"1"`
}
func (lh *LHbParams) Defaults() {
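For orientation, here is a hedged sketch of how the three LHbParams fields above could combine into a net burst (+) or dip (-) signal. The comparison of pvPos against NegThr*pvNeg and the separate BurstGain/DipGain scaling follow the field comments, but the exact form used by the axon LHb update is not shown in this diff and is assumed here.

```go
// Assumed combination of NegThr, BurstGain, DipGain -- a sketch, not the
// actual axon LHb computation.
package main

import "fmt"

type lhb struct{ NegThr, BurstGain, DipGain float32 }

// da returns a net burst (+) or dip (-) value from integrated PV values.
func (l lhb) da(pvPos, pvNeg float32) float32 {
	if pvPos > l.NegThr*pvNeg { // positive outweighs thresholded negative
		return l.BurstGain * (pvPos - l.NegThr*pvNeg)
	}
	return -l.DipGain * (l.NegThr*pvNeg - pvPos)
}

func main() {
	l := lhb{NegThr: 1, BurstGain: 1, DipGain: 1}
	fmt.Println(l.da(0.8, 0.2), l.da(0.1, 0.5))
}
```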
@@ -405,14 +405,14 @@ func (lh *LHbParams) DAforNoUS(ctx *Context, di uint32, vsPatchPos float32) floa
// GiveUpParams are parameters for computing when to give up
type GiveUpParams struct {
- // [def: 1] threshold factor that multiplies integrated pvNeg value to establish a threshold for whether the integrated pvPos value is good enough to drive overall net positive reward
- NegThr float32 `def:"1" desc:"threshold factor that multiplies integrated pvNeg value to establish a threshold for whether the integrated pvPos value is good enough to drive overall net positive reward"`
+ // threshold factor that multiplies integrated pvNeg value to establish a threshold for whether the integrated pvPos value is good enough to drive overall net positive reward
+ NegThr float32 `def:"1"`
- // [def: 10] multiplier on pos - neg for logistic probability function -- higher gain values produce more binary give up behavior and lower values produce more graded stochastic behavior around the threshold
- Gain float32 `def:"10" desc:"multiplier on pos - neg for logistic probability function -- higher gain values produce more binary give up behavior and lower values produce more graded stochastic behavior around the threshold"`
+ // multiplier on pos - neg for logistic probability function -- higher gain values produce more binary give up behavior and lower values produce more graded stochastic behavior around the threshold
+ Gain float32 `def:"10"`
// minimum estimated PVpos value -- deals with any errors in the estimation process to make sure that erroneous GiveUp doesn't happen.
- MinPVposEst float32 `desc:"minimum estimated PVpos value -- deals with any errors in the estimation process to make sure that erroneous GiveUp doesn't happen."`
+ MinPVposEst float32
}
func (gp *GiveUpParams) Defaults() {
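The Gain comment above describes a logistic probability on the pos - neg difference. The snippet below is a small self-contained sketch of that idea; it uses math/rand instead of the erand.Rand source that the actual Prob method takes, and the sign convention (larger positive difference means lower give-up probability) is assumed.

```go
// Minimal sketch of a logistic give-up probability: p = 1/(1 + exp(Gain*pvDiff)).
// The sign convention and the random draw are assumptions for illustration.
package main

import (
	"fmt"
	"math"
	"math/rand"
)

// giveUpProb maps a pos-minus-neg difference to a give-up probability and a
// sampled decision; higher gain makes the choice more binary around 0.
func giveUpProb(gain, pvDiff float32, rnd *rand.Rand) (float32, bool) {
	p := float32(1.0 / (1.0 + math.Exp(float64(gain*pvDiff))))
	return p, rnd.Float32() < p
}

func main() {
	rnd := rand.New(rand.NewSource(1))
	for _, d := range []float32{-0.5, 0, 0.5} {
		p, giveUp := giveUpProb(10, d, rnd)
		fmt.Printf("diff=%+.2f p=%.3f giveUp=%v\n", d, p, giveUp)
	}
}
```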
@@ -450,25 +450,25 @@ func (gp *GiveUpParams) Prob(pvDiff float32, rnd erand.Rand) (float32, bool) {
type PVLV struct {
// number of possible positive US states and corresponding drives -- the first is always reserved for novelty / curiosity. Must be set programmatically via SetNUSs method, which allocates corresponding parameters.
- NPosUSs uint32 `inactive:"+" desc:"number of possible positive US states and corresponding drives -- the first is always reserved for novelty / curiosity. Must be set programmatically via SetNUSs method, which allocates corresponding parameters."`
+ NPosUSs uint32 `inactive:"+"`
- // number of possible negative US states -- [0] is reserved for accumulated time, [1] the accumulated effort cost. Must be set programmatically via SetNUSs method, which allocates corresponding parameters.
- NNegUSs uint32 `inactive:"+" desc:"number of possible negative US states -- [0] is reserved for accumulated time, [1] the accumulated effort cost. Must be set programmatically via SetNUSs method, which allocates corresponding parameters."`
+ // number of possible negative US states -- [0] is reserved for accumulated time, [1] the accumulated effort cost. Must be set programmatically via SetNUSs method, which allocates corresponding parameters.
+ NNegUSs uint32 `inactive:"+"`
// parameters and state for built-in drives that form the core motivations of agent, controlled by lateral hypothalamus and associated body state monitoring such as glucose levels and thirst.
- Drive DriveParams `desc:"parameters and state for built-in drives that form the core motivations of agent, controlled by lateral hypothalamus and associated body state monitoring such as glucose levels and thirst."`
+ Drive DriveParams
- // [view: inline] urgency (increasing pressure to do something) and parameters for updating it. Raw urgency is incremented by same units as effort, but is only reset with a positive US.
- Urgency UrgencyParams `view:"inline" desc:"urgency (increasing pressure to do something) and parameters for updating it. Raw urgency is incremented by same units as effort, but is only reset with a positive US."`
+ // urgency (increasing pressure to do something) and parameters for updating it. Raw urgency is incremented by same units as effort, but is only reset with a positive US.
+ Urgency UrgencyParams `view:"inline"`
// controls how positive and negative USs are weighted and integrated to compute an overall PV primary value.
- USs USParams `desc:"controls how positive and negative USs are weighted and integrated to compute an overall PV primary value."`
+ USs USParams
- // [view: inline] lateral habenula (LHb) parameters and state, which drives dipping / pausing in dopamine when the predicted positive outcome > actual, or actual negative outcome > predicted. Can also drive bursting for the converse, and via matrix phasic firing
- LHb LHbParams `view:"inline" desc:"lateral habenula (LHb) parameters and state, which drives dipping / pausing in dopamine when the predicted positive outcome > actual, or actual negative outcome > predicted. Can also drive bursting for the converse, and via matrix phasic firing"`
+ // lateral habenula (LHb) parameters and state, which drives dipping / pausing in dopamine when the predicted positive outcome > actual, or actual negative outcome > predicted. Can also drive bursting for the converse, and via matrix phasic firing
+ LHb LHbParams `view:"inline"`
// parameters for giving up based on PV pos - neg difference
- GiveUp GiveUpParams `desc:"parameters for giving up based on PV pos - neg difference"`
+ GiveUp GiveUpParams
}
func (pp *PVLV) Defaults() {
@@ -682,7 +682,7 @@ func (pp *PVLV) ResetGiveUp(ctx *Context, di uint32) {
// after reward.
func (pp *PVLV) NewState(ctx *Context, di uint32, rnd erand.Rand) {
hadRewF := GlbV(ctx, di, GvHasRew)
- hadRew := bools.FromFloat32(hadRewF)
+ hadRew := num.ToBool(hadRewF)
SetGlbV(ctx, di, GvHadRew, hadRewF)
SetGlbV(ctx, di, GvHadPosUS, GlbV(ctx, di, GvHasPosUS))
SetGlbV(ctx, di, GvHadNegUSOutcome, GlbV(ctx, di, GvNegUSOutcome))
@@ -760,7 +760,7 @@ func (pp *PVLV) PVsFmUSs(ctx *Context, di uint32) {
pvPosSum, pvPos := pp.PVpos(ctx, di)
SetGlbV(ctx, di, GvPVposSum, pvPosSum)
SetGlbV(ctx, di, GvPVpos, pvPos)
- SetGlbV(ctx, di, GvHasPosUS, bools.ToFloat32(pp.HasPosUS(ctx, di)))
+ SetGlbV(ctx, di, GvHasPosUS, num.FromBool[float32](pp.HasPosUS(ctx, di)))
pvNegSum, pvNeg := pp.PVneg(ctx, di)
SetGlbV(ctx, di, GvPVnegSum, pvNegSum)
@@ -876,7 +876,7 @@ func (pp *PVLV) GiveUpFmPV(ctx *Context, di uint32, pvNeg float32, rnd erand.Ran
SetGlbV(ctx, di, GvPVposEstDisc, posDisc)
SetGlbV(ctx, di, GvGiveUpDiff, diff)
SetGlbV(ctx, di, GvGiveUpProb, prob)
- SetGlbV(ctx, di, GvGiveUp, bools.ToFloat32(giveUp))
+ SetGlbV(ctx, di, GvGiveUp, num.FromBool[float32](giveUp))
return giveUp
}
diff --git a/axon/pvlv_layers.go b/axon/pvlv_layers.go
index 09b32df27..e48d16d2a 100644
--- a/axon/pvlv_layers.go
+++ b/axon/pvlv_layers.go
@@ -9,7 +9,7 @@ import (
"strings"
"github.com/goki/gosl/slbool"
- "github.com/goki/mat32"
+ "goki.dev/mat32/v2"
)
//gosl: start pvlv_layers
@@ -18,29 +18,29 @@ import (
// as a function of the MAX activation of its inputs.
type LDTParams struct {
- // [def: 0.05] threshold per input source, on absolute value (magnitude), to count as a significant reward event, which then drives maximal ACh -- set to 0 to disable this nonlinear behavior
- SrcThr float32 `def:"0.05" desc:"threshold per input source, on absolute value (magnitude), to count as a significant reward event, which then drives maximal ACh -- set to 0 to disable this nonlinear behavior"`
+ // threshold per input source, on absolute value (magnitude), to count as a significant reward event, which then drives maximal ACh -- set to 0 to disable this nonlinear behavior
+ SrcThr float32 `def:"0.05"`
- // [def: true] use the global Context.NeuroMod.HasRew flag -- if there is some kind of external reward being given, then ACh goes to 1, else 0 for this component
- Rew slbool.Bool `def:"true" desc:"use the global Context.NeuroMod.HasRew flag -- if there is some kind of external reward being given, then ACh goes to 1, else 0 for this component"`
+ // use the global Context.NeuroMod.HasRew flag -- if there is some kind of external reward being given, then ACh goes to 1, else 0 for this component
+ Rew slbool.Bool `def:"true"`
- // [def: 2] extent to which active maintenance (via Context.NeuroMod.NotMaint PTNotMaintLayer activity) inhibits ACh signals -- when goal engaged, distractability is lower.
- MaintInhib float32 `def:"2" desc:"extent to which active maintenance (via Context.NeuroMod.NotMaint PTNotMaintLayer activity) inhibits ACh signals -- when goal engaged, distractability is lower."`
+ // extent to which active maintenance (via Context.NeuroMod.NotMaint PTNotMaintLayer activity) inhibits ACh signals -- when goal engaged, distractibility is lower.
+ MaintInhib float32 `def:"2"`
- // [def: 0.4] maximum NeuroMod.NotMaint activity for computing Maint as 1-NotMaint -- when NotMaint is >= NotMaintMax, then Maint = 0.
- NotMaintMax float32 `def:"0.4" desc:"maximum NeuroMod.NotMaint activity for computing Maint as 1-NotMaint -- when NotMaint is >= NotMaintMax, then Maint = 0."`
+ // maximum NeuroMod.NotMaint activity for computing Maint as 1-NotMaint -- when NotMaint is >= NotMaintMax, then Maint = 0.
+ NotMaintMax float32 `def:"0.4"`
// idx of Layer to get max activity from -- set during Build from BuildConfig SrcLay1Name if present -- -1 if not used
- SrcLay1Idx int32 `inactive:"+" desc:"idx of Layer to get max activity from -- set during Build from BuildConfig SrcLay1Name if present -- -1 if not used"`
+ SrcLay1Idx int32 `inactive:"+"`
// idx of Layer to get max activity from -- set during Build from BuildConfig SrcLay2Name if present -- -1 if not used
- SrcLay2Idx int32 `inactive:"+" desc:"idx of Layer to get max activity from -- set during Build from BuildConfig SrcLay2Name if present -- -1 if not used"`
+ SrcLay2Idx int32 `inactive:"+"`
// idx of Layer to get max activity from -- set during Build from BuildConfig SrcLay3Name if present -- -1 if not used
- SrcLay3Idx int32 `inactive:"+" desc:"idx of Layer to get max activity from -- set during Build from BuildConfig SrcLay3Name if present -- -1 if not used"`
+ SrcLay3Idx int32 `inactive:"+"`
// idx of Layer to get max activity from -- set during Build from BuildConfig SrcLay4Name if present -- -1 if not used
- SrcLay4Idx int32 `inactive:"+" desc:"idx of Layer to get max activity from -- set during Build from BuildConfig SrcLay4Name if present -- -1 if not used"`
+ SrcLay4Idx int32 `inactive:"+"`
}
func (lp *LDTParams) Defaults() {
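The SrcThr comment above describes a nonlinear per-source threshold: any input whose magnitude exceeds SrcThr counts as a significant reward event and drives maximal ACh. A standalone sketch of that idea follows; it omits the Rew flag and the MaintInhib / NotMaintMax maintenance inhibition, and the below-threshold fallback (max raw magnitude) is an assumption.

```go
// Rough sketch of the SrcThr nonlinearity, under stated assumptions.
package main

import (
	"fmt"
	"math"
)

// achFromSrcs returns an ACh drive in [0,1] from source-layer activations.
func achFromSrcs(srcThr float32, srcs ...float32) float32 {
	mx := float32(0)
	for _, s := range srcs {
		a := float32(math.Abs(float64(s)))
		if srcThr > 0 && a >= srcThr {
			return 1 // significant event drives maximal ACh
		}
		if a > mx {
			mx = a
		}
	}
	return mx
}

func main() {
	fmt.Println(achFromSrcs(0.05, 0.01, 0.02)) // all below threshold
	fmt.Println(achFromSrcs(0.05, 0.01, 0.2))  // one above threshold -> 1
}
```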
@@ -109,17 +109,17 @@ func (lp *LDTParams) ACh(ctx *Context, di uint32, srcLay1Act, srcLay2Act, srcLay
// VSPatchParams parameters for VSPatch learning
type VSPatchParams struct {
- // [def: 3] multiplier applied after Thr threshold
- Gain float32 `def:"3" desc:"multiplier applied after Thr threshold"`
+ // multiplier applied after Thr threshold
+ Gain float32 `def:"3"`
- // [def: 0.15] initial value for overall threshold, which adapts over time -- stored in LayerVals.ActAvgVals.AdaptThr
- ThrInit float32 `def:"0.15" desc:"initial value for overall threshold, which adapts over time -- stored in LayerVals.ActAvgVals.AdaptThr"`
+ // initial value for overall threshold, which adapts over time -- stored in LayerVals.ActAvgVals.AdaptThr
+ ThrInit float32 `def:"0.15"`
- // [def: 0,0.002] learning rate for the threshold -- moves in proportion to same predictive error signal that drives synaptic learning
- ThrLRate float32 `def:"0,0.002" desc:"learning rate for the threshold -- moves in proportion to same predictive error signal that drives synaptic learning"`
+ // learning rate for the threshold -- moves in proportion to same predictive error signal that drives synaptic learning
+ ThrLRate float32 `def:"0,0.002"`
- // [def: 10] extra gain factor for non-reward trials, which is the most critical
- ThrNonRew float32 `def:"10" desc:"extra gain factor for non-reward trials, which is the most critical"`
+ // extra gain factor for non-reward trials, which is the most critical
+ ThrNonRew float32 `def:"10"`
}
func (vp *VSPatchParams) Defaults() {
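Per the Gain comment ("multiplier applied after Thr threshold"), the thresholding is assumed to be a rectify-then-scale operation, as in this sketch (not copied from the axon ThrVal implementation):

```go
// Sketch of threshold-then-gain for VSPatch values; assumed form.
package main

import "fmt"

// thrVal rectifies activity at thr and then scales the remainder by gain.
func thrVal(gain, act, thr float32) float32 {
	v := act - thr
	if v < 0 {
		return 0
	}
	return gain * v
}

func main() {
	fmt.Println(thrVal(3, 0.1, 0.15)) // below threshold -> 0
	fmt.Println(thrVal(3, 0.4, 0.15)) // (0.4-0.15)*3 = 0.75
}
```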
@@ -149,11 +149,11 @@ func (vp *VSPatchParams) ThrVal(act, thr float32) float32 {
// every cycle.
type VTAParams struct {
- // [def: 0.75] gain on CeM activity difference (CeMPos - CeMNeg) for generating LV CS-driven dopamine values
- CeMGain float32 `def:"0.75" desc:"gain on CeM activity difference (CeMPos - CeMNeg) for generating LV CS-driven dopamine values"`
+ // gain on CeM activity difference (CeMPos - CeMNeg) for generating LV CS-driven dopamine values
+ CeMGain float32 `def:"0.75"`
- // [def: 1.25] gain on computed LHb DA (Burst - Dip) -- for controlling DA levels
- LHbGain float32 `def:"1.25" desc:"gain on computed LHb DA (Burst - Dip) -- for controlling DA levels"`
+ // gain on computed LHb DA (Burst - Dip) -- for controlling DA levels
+ LHbGain float32 `def:"1.25"`
pad, pad1 float32
}
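The two VTAParams gains above describe separate contributions: a CS-driven term on the CeM activity difference and an LHb burst-minus-dip term. The sketch below simply sums the two gained terms; this additive combination is an assumption for illustration, not the axon VTA cycle update itself.

```go
// Assumed combination of CeMGain and LHbGain into a single phasic DA value.
package main

import "fmt"

type vta struct{ CeMGain, LHbGain float32 }

// da mixes the CS-driven amygdala (CeM) difference with the LHb burst-dip DA.
func (v vta) da(ceMPos, ceMNeg, lhbBurst, lhbDip float32) float32 {
	csDA := v.CeMGain * (ceMPos - ceMNeg)
	lhbDA := v.LHbGain * (lhbBurst - lhbDip)
	return csDA + lhbDA
}

func main() {
	v := vta{CeMGain: 0.75, LHbGain: 1.25}
	fmt.Println(v.da(0.6, 0.1, 0.2, 0.0))
}
```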
diff --git a/axon/pvlv_net.go b/axon/pvlv_net.go
index 9dc8bbf5e..a58395dda 100644
--- a/axon/pvlv_net.go
+++ b/axon/pvlv_net.go
@@ -5,9 +5,9 @@
package axon
import (
- "github.com/emer/emergent/params"
- "github.com/emer/emergent/prjn"
- "github.com/emer/emergent/relpos"
+ "github.com/emer/emergent/v2/params"
+ "github.com/emer/emergent/v2/prjn"
+ "github.com/emer/emergent/v2/relpos"
)
// AddLDTLayer adds a LDTLayer
diff --git a/axon/rl_layers.go b/axon/rl_layers.go
index da5a61d96..8fe1076db 100644
--- a/axon/rl_layers.go
+++ b/axon/rl_layers.go
@@ -5,7 +5,7 @@
package axon
import (
- "github.com/emer/etable/minmax"
+ "goki.dev/etable/v2/minmax"
)
//gosl: start rl_layers
@@ -15,7 +15,7 @@ import (
type RWPredParams struct {
// default 0.1..0.99 range of predictions that can be represented -- having a truncated range preserves some sensitivity in dopamine at the extremes of good or poor performance
- PredRange minmax.F32 `desc:"default 0.1..0.99 range of predictions that can be represented -- having a truncated range preserves some sensitivity in dopamine at the extremes of good or poor performance"`
+ PredRange minmax.F32
}
func (rp *RWPredParams) Defaults() {
@@ -30,10 +30,10 @@ func (rp *RWPredParams) Update() {
type RWDaParams struct {
// tonic baseline Ge level for DA = 0 -- +/- are between 0 and 2*TonicGe -- just for spiking display of computed DA value
- TonicGe float32 `desc:"tonic baseline Ge level for DA = 0 -- +/- are between 0 and 2*TonicGe -- just for spiking display of computed DA value"`
+ TonicGe float32
// idx of RWPredLayer to get reward prediction from -- set during Build from BuildConfig RWPredLayName
- RWPredLayIdx int32 `inactive:"+" desc:"idx of RWPredLayer to get reward prediction from -- set during Build from BuildConfig RWPredLayName"`
+ RWPredLayIdx int32 `inactive:"+"`
pad, pad1 uint32
}
@@ -58,13 +58,13 @@ func (rp *RWDaParams) GeFmDA(da float32) float32 {
type TDIntegParams struct {
// discount factor -- how much to discount the future prediction from TDPred
- Discount float32 `desc:"discount factor -- how much to discount the future prediction from TDPred"`
+ Discount float32
// gain factor on TD rew pred activations
- PredGain float32 `desc:"gain factor on TD rew pred activations"`
+ PredGain float32
// idx of TDPredLayer to get reward prediction from -- set during Build from BuildConfig TDPredLayName
- TDPredLayIdx int32 `inactive:"+" desc:"idx of TDPredLayer to get reward prediction from -- set during Build from BuildConfig TDPredLayName"`
+ TDPredLayIdx int32 `inactive:"+"`
pad uint32
}
@@ -82,10 +82,10 @@ func (tp *TDIntegParams) Update() {
type TDDaParams struct {
// tonic baseline Ge level for DA = 0 -- +/- are between 0 and 2*TonicGe -- just for spiking display of computed DA value
- TonicGe float32 `desc:"tonic baseline Ge level for DA = 0 -- +/- are between 0 and 2*TonicGe -- just for spiking display of computed DA value"`
+ TonicGe float32
// idx of TDIntegLayer to get reward prediction from -- set during Build from BuildConfig TDIntegLayName
- TDIntegLayIdx int32 `inactive:"+" desc:"idx of TDIntegLayer to get reward prediction from -- set during Build from BuildConfig TDIntegLayName"`
+ TDIntegLayIdx int32 `inactive:"+"`
pad, pad1 uint32
}
diff --git a/axon/rl_net.go b/axon/rl_net.go
index a5c5c2377..d5e1591bf 100644
--- a/axon/rl_net.go
+++ b/axon/rl_net.go
@@ -5,8 +5,8 @@
package axon
import (
- "github.com/emer/emergent/prjn"
- "github.com/emer/emergent/relpos"
+ "github.com/emer/emergent/v2/prjn"
+ "github.com/emer/emergent/v2/relpos"
)
// AddRewLayer adds a RewLayer of given name
diff --git a/axon/threads.go b/axon/threads.go
index 41535a85f..ffcdceae0 100644
--- a/axon/threads.go
+++ b/axon/threads.go
@@ -11,7 +11,7 @@ import (
"sort"
"sync"
- "github.com/emer/emergent/timer"
+ "github.com/emer/emergent/v2/timer"
"github.com/goki/ki/atomctr"
"github.com/goki/ki/ints"
)
diff --git a/axon/threads_test.go b/axon/threads_test.go
index 46ef3794f..e55339326 100644
--- a/axon/threads_test.go
+++ b/axon/threads_test.go
@@ -11,14 +11,14 @@ import (
"math/rand"
"testing"
- "github.com/emer/emergent/etime"
- "github.com/emer/emergent/patgen"
- "github.com/emer/emergent/prjn"
- "github.com/emer/etable/etable"
- "github.com/emer/etable/etensor"
+ "github.com/emer/emergent/v2/etime"
+ "github.com/emer/emergent/v2/patgen"
+ "github.com/emer/emergent/v2/prjn"
"github.com/goki/ki/ints"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
+ "goki.dev/etable/v2/etable"
+ "goki.dev/etable/v2/etensor"
)
const (
diff --git a/chans/ak.go b/chans/ak.go
index 824d100d8..6d3f157b2 100644
--- a/chans/ak.go
+++ b/chans/ak.go
@@ -4,7 +4,7 @@
package chans
-import "github.com/goki/mat32"
+import "goki.dev/mat32/v2"
// AKParams control an A-type K channel, which is voltage gated with maximal
// activation around -37 mV. It has two state variables, M (v-gated opening)
@@ -17,23 +17,23 @@ import "github.com/goki/mat32"
// not simulated, as in our standard axon models.
type AKParams struct {
- // [def: 1,0.1,0.01] strength of AK current
- Gbar float32 `def:"1,0.1,0.01" desc:"strength of AK current"`
+ // strength of AK current
+ Gbar float32 `def:"1,0.1,0.01"`
- // [def: 0.01446,02039] [viewif: Gbar>0] multiplier for the beta term; 0.01446 for distal, 0.02039 for proximal dendrites
- Beta float32 `viewif:"Gbar>0" def:"0.01446,02039" desc:"multiplier for the beta term; 0.01446 for distal, 0.02039 for proximal dendrites"`
+ // multiplier for the beta term; 0.01446 for distal, 0.02039 for proximal dendrites
+ Beta float32 `viewif:"Gbar>0" def:"0.01446,0.02039"`
- // [def: 0.5,0.25] [viewif: Gbar>0] Dm factor: 0.5 for distal, 0.25 for proximal
- Dm float32 `viewif:"Gbar>0" def:"0.5,0.25" desc:"Dm factor: 0.5 for distal, 0.25 for proximal"`
+ // Dm factor: 0.5 for distal, 0.25 for proximal
+ Dm float32 `viewif:"Gbar>0" def:"0.5,0.25"`
- // [def: 1.8,1.5] [viewif: Gbar>0] offset for K, 1.8 for distal, 1.5 for proximal
- Koff float32 `viewif:"Gbar>0" def:"1.8,1.5" desc:"offset for K, 1.8 for distal, 1.5 for proximal"`
+ // offset for K, 1.8 for distal, 1.5 for proximal
+ Koff float32 `viewif:"Gbar>0" def:"1.8,1.5"`
- // [def: 1,11] [viewif: Gbar>0] voltage offset for alpha and beta functions: 1 for distal, 11 for proximal
- Voff float32 `viewif:"Gbar>0" def:"1,11" desc:"voltage offset for alpha and beta functions: 1 for distal, 11 for proximal"`
+ // voltage offset for alpha and beta functions: 1 for distal, 11 for proximal
+ Voff float32 `viewif:"Gbar>0" def:"1,11"`
- // [def: 0.1133,0.1112] [viewif: Gbar>0] h multiplier factor, 0.1133 for distal, 0.1112 for proximal
- Hf float32 `viewif:"Gbar>0" def:"0.1133,0.1112" desc:"h multiplier factor, 0.1133 for distal, 0.1112 for proximal"`
+ // h multiplier factor, 0.1133 for distal, 0.1112 for proximal
+ Hf float32 `viewif:"Gbar>0" def:"0.1133,0.1112"`
pad, pad1 float32
}
@@ -139,19 +139,19 @@ func (ap *AKParams) Gak(m, h float32) float32 {
// voltage gated calcium channels which can otherwise drive runaway excitatory currents.
type AKsParams struct {
- // [def: 2,0.1,0.01] strength of AK current
- Gbar float32 `def:"2,0.1,0.01" desc:"strength of AK current"`
+ // strength of AK current
+ Gbar float32 `def:"2,0.1,0.01"`
- // [def: 0.076] [viewif: Gbar>0] H factor as a constant multiplier on overall M factor result -- rescales M to level consistent with H being present at full strength
- Hf float32 `viewif:"Gbar>0" def:"0.076" desc:"H factor as a constant multiplier on overall M factor result -- rescales M to level consistent with H being present at full strength"`
+ // H factor as a constant multiplier on overall M factor result -- rescales M to level consistent with H being present at full strength
+ Hf float32 `viewif:"Gbar>0" def:"0.076"`
- // [def: 0.075] [viewif: Gbar>0] multiplier for M -- determines slope of function
- Mf float32 `viewif:"Gbar>0" def:"0.075" desc:"multiplier for M -- determines slope of function"`
+ // multiplier for M -- determines slope of function
+ Mf float32 `viewif:"Gbar>0" def:"0.075"`
- // [def: 2] [viewif: Gbar>0] voltage offset in biological units for M function
- Voff float32 `viewif:"Gbar>0" def:"2" desc:"voltage offset in biological units for M function"`
+ // voltage offset in biological units for M function
+ Voff float32 `viewif:"Gbar>0" def:"2"`
- // [viewif: Gbar>0]
+ // voltage level of maximum channel opening -- stays flat above that
- Vmax float32 `viewif:"Gbar>0" def:-37" desc:"voltage level of maximum channel opening -- stays flat above that"`
+ Vmax float32 `viewif:"Gbar>0" def:"-37"`
pad, pad1, pad2 int32
diff --git a/chans/gabab.go b/chans/gabab.go
index 626a2a1eb..37cf70e53 100644
--- a/chans/gabab.go
+++ b/chans/gabab.go
@@ -5,7 +5,7 @@
package chans
import (
- "github.com/goki/mat32"
+ "goki.dev/mat32/v2"
)
//gosl: start chans
@@ -14,32 +14,32 @@ import (
// based on Brunel & Wang (2001) parameters.
type GABABParams struct {
- // [def: 0,0.012,0.015] overall strength multiplier of GABA-B current. The 0.015 default is a high value that works well in smaller networks -- larger networks may benefit from lower levels (e.g., 0.012).
- Gbar float32 `def:"0,0.012,0.015" desc:"overall strength multiplier of GABA-B current. The 0.015 default is a high value that works well in smaller networks -- larger networks may benefit from lower levels (e.g., 0.012)."`
+ // overall strength multiplier of GABA-B current. The 0.015 default is a high value that works well in smaller networks -- larger networks may benefit from lower levels (e.g., 0.012).
+ Gbar float32 `def:"0,0.012,0.015"`
- // [def: 45] [viewif: Gbar>0] rise time for bi-exponential time dynamics of GABA-B
- RiseTau float32 `viewif:"Gbar>0" def:"45" desc:"rise time for bi-exponential time dynamics of GABA-B"`
+ // rise time for bi-exponential time dynamics of GABA-B
+ RiseTau float32 `viewif:"Gbar>0" def:"45"`
- // [def: 50] [viewif: Gbar>0] decay time for bi-exponential time dynamics of GABA-B
- DecayTau float32 `viewif:"Gbar>0" def:"50" desc:"decay time for bi-exponential time dynamics of GABA-B"`
+ // decay time for bi-exponential time dynamics of GABA-B
+ DecayTau float32 `viewif:"Gbar>0" def:"50"`
- // [def: 0.2] [viewif: Gbar>0] baseline level of GABA-B channels open independent of inhibitory input (is added to spiking-produced conductance)
- Gbase float32 `viewif:"Gbar>0" def:"0.2" desc:"baseline level of GABA-B channels open independent of inhibitory input (is added to spiking-produced conductance)"`
+ // baseline level of GABA-B channels open independent of inhibitory input (is added to spiking-produced conductance)
+ Gbase float32 `viewif:"Gbar>0" def:"0.2"`
- // [def: 10] [viewif: Gbar>0] multiplier for converting Gi to equivalent GABA spikes
- GiSpike float32 `viewif:"Gbar>0" def:"10" desc:"multiplier for converting Gi to equivalent GABA spikes"`
+ // multiplier for converting Gi to equivalent GABA spikes
+ GiSpike float32 `viewif:"Gbar>0" def:"10"`
- // [viewif: Gbar>0] time offset when peak conductance occurs, in msec, computed from RiseTau and DecayTau
- MaxTime float32 `viewif:"Gbar>0" inactive:"+" desc:"time offset when peak conductance occurs, in msec, computed from RiseTau and DecayTau"`
+ // time offset when peak conductance occurs, in msec, computed from RiseTau and DecayTau
+ MaxTime float32 `viewif:"Gbar>0" inactive:"+"`
- // [view: -] time constant factor used in integration: (Decay / Rise) ^ (Rise / (Decay - Rise))
- TauFact float32 `view:"-" desc:"time constant factor used in integration: (Decay / Rise) ^ (Rise / (Decay - Rise))"`
+ // time constant factor used in integration: (Decay / Rise) ^ (Rise / (Decay - Rise))
+ TauFact float32 `view:"-"`
- // [view: -] 1/Tau
- RiseDt float32 `view:"-" inactive:"+" desc:"1/Tau"`
+ // 1/Tau
+ RiseDt float32 `view:"-" inactive:"+"`
- // [view: -] 1/Tau
- DecayDt float32 `view:"-" inactive:"+" desc:"1/Tau"`
+ // 1/Tau
+ DecayDt float32 `view:"-" inactive:"+"`
pad, pad1, pad2 float32
}
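The derived GABABParams fields above state their own formulas: TauFact = (Decay / Rise) ^ (Rise / (Decay - Rise)), RiseDt and DecayDt as 1/tau, and MaxTime as the peak-conductance time "computed from RiseTau and DecayTau". The worked example below evaluates these at the documented defaults (45 and 50 msec), assuming MaxTime is the standard peak time of a rise/decay bi-exponential.

```go
// Worked example of the GABA-B derived constants at the documented defaults.
// MaxTime uses the standard bi-exponential peak-time expression (assumed).
package main

import (
	"fmt"
	"math"
)

func main() {
	rise, decay := 45.0, 50.0 // msec defaults from the comments above
	tauFact := math.Pow(decay/rise, rise/(decay-rise))
	riseDt, decayDt := 1/rise, 1/decay
	maxTime := (rise * decay / (decay - rise)) * math.Log(decay/rise)
	fmt.Printf("TauFact=%.3f RiseDt=%.4f DecayDt=%.4f MaxTime=%.1f msec\n",
		tauFact, riseDt, decayDt, maxTime)
}
```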
diff --git a/chans/mahp.go b/chans/mahp.go
index b7560211c..06d0da11d 100644
--- a/chans/mahp.go
+++ b/chans/mahp.go
@@ -4,7 +4,7 @@
package chans
-import "github.com/goki/mat32"
+import "goki.dev/mat32/v2"
//gosl: start chans
@@ -18,22 +18,22 @@ import "github.com/goki/mat32"
type MahpParams struct {
// strength of mAHP current
- Gbar float32 `desc:"strength of mAHP current"`
+ Gbar float32
- // [def: -30] [viewif: Gbar>0] voltage offset (threshold) in biological units for infinite time N gating function -- where the gate is at 50% strength
- Voff float32 `viewif:"Gbar>0" def:"-30" desc:"voltage offset (threshold) in biological units for infinite time N gating function -- where the gate is at 50% strength"`
+ // voltage offset (threshold) in biological units for infinite time N gating function -- where the gate is at 50% strength
+ Voff float32 `viewif:"Gbar>0" def:"-30"`
- // [def: 9] [viewif: Gbar>0] slope of the arget (infinite time) gating function
- Vslope float32 `viewif:"Gbar>0" def:"9" desc:"slope of the arget (infinite time) gating function"`
+ // slope of the target (infinite time) gating function
+ Vslope float32 `viewif:"Gbar>0" def:"9"`
- // [def: 1000] [viewif: Gbar>0] maximum slow rate time constant in msec for activation / deactivation. The effective Tau is much slower -- 1/20th in original temp, and 1/60th in standard 37 C temp
- TauMax float32 `viewif:"Gbar>0" def:"1000" desc:"maximum slow rate time constant in msec for activation / deactivation. The effective Tau is much slower -- 1/20th in original temp, and 1/60th in standard 37 C temp"`
+ // maximum slow rate time constant in msec for activation / deactivation. The effective Tau is much slower -- 1/20th in original temp, and 1/60th in standard 37 C temp
+ TauMax float32 `viewif:"Gbar>0" def:"1000"`
- // [view: -] [viewif: Gbar>0] temperature adjustment factor: assume temp = 37 C, whereas original units were at 23 C
- Tadj float32 `viewif:"Gbar>0" view:"-" inactive:"+" desc:"temperature adjustment factor: assume temp = 37 C, whereas original units were at 23 C"`
+ // temperature adjustment factor: assume temp = 37 C, whereas original units were at 23 C
+ Tadj float32 `viewif:"Gbar>0" view:"-" inactive:"+"`
- // [view: -] 1/Tau
- DtMax float32 `view:"-" inactive:"+" desc:"1/Tau"`
+ // 1/Tau
+ DtMax float32 `view:"-" inactive:"+"`
pad, pad2 int32
}
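The Voff / Vslope comments above define an infinite-time N gating function that is at 50% strength at Voff with slope Vslope. A standard logistic form consistent with that description is sketched below; the actual axon mAHP code (including TauMax and the Tadj temperature factor) is not reproduced here.

```go
// Sketch of a logistic steady-state gate matching the Voff/Vslope description.
package main

import (
	"fmt"
	"math"
)

// nInf returns the steady-state gate value at biological voltage vBio (mV).
func nInf(vBio, voff, vslope float32) float32 {
	return 1 / (1 + float32(math.Exp(float64(-(vBio-voff)/vslope))))
}

func main() {
	fmt.Println(nInf(-30, -30, 9)) // 0.5 exactly at the offset voltage
	fmt.Println(nInf(-10, -30, 9)) // more depolarized -> gate more open
}
```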
diff --git a/chans/nmda.go b/chans/nmda.go
index dcb93f520..160391721 100644
--- a/chans/nmda.go
+++ b/chans/nmda.go
@@ -4,7 +4,7 @@
package chans
-import "github.com/goki/mat32"
+import "goki.dev/mat32/v2"
//gosl: start chans
@@ -15,29 +15,29 @@ import "github.com/goki/mat32"
// increments
type NMDAParams struct {
- // [def: 0,0.006,0.007] overall multiplier for strength of NMDA current -- multiplies GnmdaSyn to get net conductance.
- Gbar float32 `def:"0,0.006,0.007" desc:"overall multiplier for strength of NMDA current -- multiplies GnmdaSyn to get net conductance."`
+ // overall multiplier for strength of NMDA current -- multiplies GnmdaSyn to get net conductance.
+ Gbar float32 `def:"0,0.006,0.007"`
- // [def: 30,50,100,200,300] [viewif: Gbar>0] decay time constant for NMDA channel activation -- rise time is 2 msec and not worth extra effort for biexponential. 30 fits the Urakubo et al (2008) model with ITau = 100, but 100 works better in practice is small networks so far.
- Tau float32 `viewif:"Gbar>0" def:"30,50,100,200,300" desc:"decay time constant for NMDA channel activation -- rise time is 2 msec and not worth extra effort for biexponential. 30 fits the Urakubo et al (2008) model with ITau = 100, but 100 works better in practice is small networks so far."`
+ // decay time constant for NMDA channel activation -- rise time is 2 msec and not worth extra effort for biexponential. 30 fits the Urakubo et al (2008) model with ITau = 100, but 100 works better in practice in small networks so far.
+ Tau float32 `viewif:"Gbar>0" def:"30,50,100,200,300"`
- // [def: 1,100] [viewif: Gbar>0] decay time constant for NMDA channel inhibition, which captures the Urakubo et al (2008) allosteric dynamics (100 fits their model well) -- set to 1 to eliminate that mechanism.
- ITau float32 `viewif:"Gbar>0" def:"1,100" desc:"decay time constant for NMDA channel inhibition, which captures the Urakubo et al (2008) allosteric dynamics (100 fits their model well) -- set to 1 to eliminate that mechanism."`
+ // decay time constant for NMDA channel inhibition, which captures the Urakubo et al (2008) allosteric dynamics (100 fits their model well) -- set to 1 to eliminate that mechanism.
+ ITau float32 `viewif:"Gbar>0" def:"1,100"`
- // [def: 1:1.5] [viewif: Gbar>0] magnesium ion concentration: Brunel & Wang (2001) and Sanders et al (2013) use 1 mM, based on Jahr & Stevens (1990). Urakubo et al (2008) use 1.5 mM. 1.4 with Voff = 5 works best so far in large models, 1.2, Voff = 0 best in smaller nets.
- MgC float32 `viewif:"Gbar>0" def:"1:1.5" desc:"magnesium ion concentration: Brunel & Wang (2001) and Sanders et al (2013) use 1 mM, based on Jahr & Stevens (1990). Urakubo et al (2008) use 1.5 mM. 1.4 with Voff = 5 works best so far in large models, 1.2, Voff = 0 best in smaller nets."`
+ // magnesium ion concentration: Brunel & Wang (2001) and Sanders et al (2013) use 1 mM, based on Jahr & Stevens (1990). Urakubo et al (2008) use 1.5 mM. 1.4 with Voff = 5 works best so far in large models, 1.2, Voff = 0 best in smaller nets.
+ MgC float32 `viewif:"Gbar>0" def:"1:1.5"`
- // [def: 0] [viewif: Gbar>0] offset in membrane potential in biological units for voltage-dependent functions. 5 corresponds to the -65 mV rest, -45 threshold of the Urakubo et al (2008) model. 5 was used before in a buggy version of NMDA equation -- 0 is new default.
- Voff float32 `viewif:"Gbar>0" def:"0" desc:"offset in membrane potential in biological units for voltage-dependent functions. 5 corresponds to the -65 mV rest, -45 threshold of the Urakubo et al (2008) model. 5 was used before in a buggy version of NMDA equation -- 0 is new default."`
+ // offset in membrane potential in biological units for voltage-dependent functions. 5 corresponds to the -65 mV rest, -45 threshold of the Urakubo et al (2008) model. 5 was used before in a buggy version of NMDA equation -- 0 is new default.
+ Voff float32 `viewif:"Gbar>0" def:"0"`
- // [view: -] rate = 1 / tau
- Dt float32 `view:"-" json:"-" xml:"-" desc:"rate = 1 / tau"`
+ // rate = 1 / tau
+ Dt float32 `view:"-" json:"-" xml:"-"`
- // [view: -] rate = 1 / tau
- IDt float32 `view:"-" json:"-" xml:"-" desc:"rate = 1 / tau"`
+ // rate = 1 / tau
+ IDt float32 `view:"-" json:"-" xml:"-"`
- // [view: -] MgFact = MgC / 3.57
- MgFact float32 `view:"-" json:"-" xml:"-" desc:"MgFact = MgC / 3.57"`
+ // MgFact = MgC / 3.57
+ MgFact float32 `view:"-" json:"-" xml:"-"`
}
func (np *NMDAParams) Defaults() {
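The MgFact = MgC / 3.57 comment above points to the classic Jahr & Stevens (1990) magnesium-block voltage dependence. The sketch below assumes that standard form, g(V) = 1 / (1 + MgFact * exp(-0.062 * (V - Voff))) with V in mV, as an illustration rather than the exact axon expression.

```go
// Assumed Jahr & Stevens style Mg2+ block for the NMDA conductance.
package main

import (
	"fmt"
	"math"
)

// mgBlock returns the fraction of NMDA conductance unblocked at voltage vBio.
func mgBlock(vBio, mgC, voff float32) float32 {
	mgFact := mgC / 3.57
	return 1 / (1 + mgFact*float32(math.Exp(float64(-0.062*(vBio-voff)))))
}

func main() {
	for _, v := range []float32{-70, -50, -20, 0} {
		fmt.Printf("V=%5.1f unblocked=%.3f\n", v, mgBlock(v, 1.4, 0))
	}
}
```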
diff --git a/chans/sahp.go b/chans/sahp.go
index 763ef19de..b768314b9 100644
--- a/chans/sahp.go
+++ b/chans/sahp.go
@@ -4,7 +4,7 @@
package chans
-import "github.com/goki/mat32"
+import "goki.dev/mat32/v2"
//gosl: start chans
@@ -18,26 +18,26 @@ import "github.com/goki/mat32"
// of the n gating value, but tau is computed in any case.
type SahpParams struct {
- // [def: 0.05,0.1] strength of sAHP current
- Gbar float32 `def:"0.05,0.1" desc:"strength of sAHP current"`
+ // strength of sAHP current
+ Gbar float32 `def:"0.05,0.1"`
- // [def: 5,10] [viewif: Gbar>0] time constant for integrating Ca across theta cycles
- CaTau float32 `viewif:"Gbar>0" def:"5,10" desc:"time constant for integrating Ca across theta cycles"`
+ // time constant for integrating Ca across theta cycles
+ CaTau float32 `viewif:"Gbar>0" def:"5,10"`
- // [def: 0.8] [viewif: Gbar>0] integrated Ca offset (threshold) for infinite time N gating function -- where the gate is at 50% strength
- Off float32 `viewif:"Gbar>0" def:"0.8" desc:"integrated Ca offset (threshold) for infinite time N gating function -- where the gate is at 50% strength"`
+ // integrated Ca offset (threshold) for infinite time N gating function -- where the gate is at 50% strength
+ Off float32 `viewif:"Gbar>0" def:"0.8"`
- // [def: 0.02] [viewif: Gbar>0] slope of the infinite time logistic gating function
- Slope float32 `viewif:"Gbar>0" def:"0.02" desc:"slope of the infinite time logistic gating function"`
+ // slope of the infinite time logistic gating function
+ Slope float32 `viewif:"Gbar>0" def:"0.02"`
- // [def: 1] [viewif: Gbar>0] maximum slow rate time constant in msec for activation / deactivation. The effective Tau is much slower -- 1/20th in original temp, and 1/60th in standard 37 C temp
- TauMax float32 `viewif:"Gbar>0" def:"1" desc:"maximum slow rate time constant in msec for activation / deactivation. The effective Tau is much slower -- 1/20th in original temp, and 1/60th in standard 37 C temp"`
+ // maximum slow rate time constant in msec for activation / deactivation. The effective Tau is much slower -- 1/20th in original temp, and 1/60th in standard 37 C temp
+ TauMax float32 `viewif:"Gbar>0" def:"1"`
- // [view: -] 1/Tau
- CaDt float32 `view:"-" inactive:"+" desc:"1/Tau"`
+ // 1/Tau
+ CaDt float32 `view:"-" inactive:"+"`
- // [view: -] 1/Tau
- DtMax float32 `view:"-" inactive:"+" desc:"1/Tau"`
+ // 1/Tau
+ DtMax float32 `view:"-" inactive:"+"`
pad int32
}
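As with mAHP, the sAHP Off / Slope comments above describe an infinite-time logistic gate, here driven by theta-cycle integrated calcium rather than voltage. The sketch below shows only that gate (a standard logistic form, assumed); the TauMax and CaDt dynamics are omitted.

```go
// Sketch of the infinite-time logistic sAHP gate on integrated calcium.
package main

import (
	"fmt"
	"math"
)

// nInfCa returns the steady-state sAHP gate for integrated Ca level ca.
func nInfCa(ca, off, slope float32) float32 {
	return 1 / (1 + float32(math.Exp(float64(-(ca-off)/slope))))
}

func main() {
	fmt.Println(nInfCa(0.8, 0.8, 0.02)) // 0.5 at the offset
	fmt.Println(nInfCa(0.9, 0.8, 0.02)) // well above offset -> near 1
}
```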
diff --git a/chans/skca.go b/chans/skca.go
index e9da40727..98423cb7f 100644
--- a/chans/skca.go
+++ b/chans/skca.go
@@ -5,7 +5,7 @@
package chans
import (
- "github.com/goki/mat32"
+ "goki.dev/mat32/v2"
)
//gosl: start chans
@@ -23,41 +23,41 @@ import (
// (also Muddapu & Chakravarthy, 2021): X^h / (X^h + C50^h) where h ~= 4 (hard coded)
type SKCaParams struct {
- // [def: 0,2,3] overall strength of sKCa current -- inactive if 0
- Gbar float32 `def:"0,2,3" desc:"overall strength of sKCa current -- inactive if 0"`
+ // overall strength of sKCa current -- inactive if 0
+ Gbar float32 `def:"0,2,3"`
- // [def: 0.4,0.5] [viewif: Gbar>0] 50% Ca concentration baseline value in Hill equation -- set this to level that activates at reasonable levels of SKCaR
- C50 float32 `viewif:"Gbar>0" def:"0.4,0.5" desc:"50% Ca concentration baseline value in Hill equation -- set this to level that activates at reasonable levels of SKCaR"`
+ // 50% Ca concentration baseline value in Hill equation -- set this to level that activates at reasonable levels of SKCaR
+ C50 float32 `viewif:"Gbar>0" def:"0.4,0.5"`
- // [def: 15] [viewif: Gbar>0] K channel gating factor activation time constant -- roughly 5-15 msec in literature
- ActTau float32 `viewif:"Gbar>0" def:"15" desc:"K channel gating factor activation time constant -- roughly 5-15 msec in literature"`
+ // K channel gating factor activation time constant -- roughly 5-15 msec in literature
+ ActTau float32 `viewif:"Gbar>0" def:"15"`
- // [def: 30] [viewif: Gbar>0] K channel gating factor deactivation time constant -- roughly 30-50 msec in literature
- DeTau float32 `viewif:"Gbar>0" def:"30" desc:"K channel gating factor deactivation time constant -- roughly 30-50 msec in literature"`
+ // K channel gating factor deactivation time constant -- roughly 30-50 msec in literature
+ DeTau float32 `viewif:"Gbar>0" def:"30"`
- // [def: 0.4,0.8] [viewif: Gbar>0] proportion of CaIn intracellular stores that are released per spike, going into CaR
- KCaR float32 `viewif:"Gbar>0" def:"0.4,0.8" desc:"proportion of CaIn intracellular stores that are released per spike, going into CaR"`
+ // proportion of CaIn intracellular stores that are released per spike, going into CaR
+ KCaR float32 `viewif:"Gbar>0" def:"0.4,0.8"`
- // [def: 150,200] [viewif: Gbar>0] SKCaR released calcium decay time constant
- CaRDecayTau float32 `viewif:"Gbar>0" def:"150,200" desc:"SKCaR released calcium decay time constant"`
+ // SKCaR released calcium decay time constant
+ CaRDecayTau float32 `viewif:"Gbar>0" def:"150,200"`
- // [def: 0.01] [viewif: Gbar>0] level of time-integrated spiking activity (CaSpkD) below which CaIn intracelluar stores are replenished -- a low threshold can be used to require minimal activity to recharge -- set to a high value (e.g., 10) for constant recharge.
- CaInThr float32 `viewif:"Gbar>0" def:"0.01" desc:"level of time-integrated spiking activity (CaSpkD) below which CaIn intracelluar stores are replenished -- a low threshold can be used to require minimal activity to recharge -- set to a high value (e.g., 10) for constant recharge."`
+ // level of time-integrated spiking activity (CaSpkD) below which CaIn intracellular stores are replenished -- a low threshold can be used to require minimal activity to recharge -- set to a high value (e.g., 10) for constant recharge.
+ CaInThr float32 `viewif:"Gbar>0" def:"0.01"`
- // [def: 50] [viewif: Gbar>0] time constant in msec for storing CaIn when activity is below CaInThr
- CaInTau float32 `viewif:"Gbar>0" def:"50" desc:"time constant in msec for storing CaIn when activity is below CaInThr"`
+ // time constant in msec for storing CaIn when activity is below CaInThr
+ CaInTau float32 `viewif:"Gbar>0" def:"50"`
- // [view: -] rate = 1 / tau
- ActDt float32 `view:"-" json:"-" xml:"-" desc:"rate = 1 / tau"`
+ // rate = 1 / tau
+ ActDt float32 `view:"-" json:"-" xml:"-"`
- // [view: -] rate = 1 / tau
- DeDt float32 `view:"-" json:"-" xml:"-" desc:"rate = 1 / tau"`
+ // rate = 1 / tau
+ DeDt float32 `view:"-" json:"-" xml:"-"`
- // [view: -] rate = 1 / tau
- CaRDecayDt float32 `view:"-" json:"-" xml:"-" desc:"rate = 1 / tau"`
+ // rate = 1 / tau
+ CaRDecayDt float32 `view:"-" json:"-" xml:"-"`
- // [view: -] rate = 1 / tau
- CaInDt float32 `view:"-" json:"-" xml:"-" desc:"rate = 1 / tau"`
+ // rate = 1 / tau
+ CaInDt float32 `view:"-" json:"-" xml:"-"`
}
func (sp *SKCaParams) Defaults() {
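The SKCaParams doc comment above quotes the Hill activation function directly: X^h / (X^h + C50^h) with h ~= 4 (hard coded). Here is that gating value evaluated on its own; the activation/deactivation time dynamics (ActTau, DeTau) and the CaR/CaIn store bookkeeping are not included.

```go
// Direct illustration of the quoted Hill activation with h = 4.
package main

import (
	"fmt"
	"math"
)

// skCaInf returns the asymptotic SKCa gating factor for calcium level ca.
func skCaInf(ca, c50 float32) float32 {
	const h = 4.0
	x := math.Pow(float64(ca), h)
	return float32(x / (x + math.Pow(float64(c50), h)))
}

func main() {
	for _, ca := range []float32{0.2, 0.5, 0.8} {
		fmt.Printf("ca=%.1f gate=%.3f\n", ca, skCaInf(ca, 0.5))
	}
}
```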
diff --git a/chans/vgcc.go b/chans/vgcc.go
index dbcaa36ee..68e5e3d4d 100644
--- a/chans/vgcc.go
+++ b/chans/vgcc.go
@@ -5,7 +5,7 @@
package chans
import (
- "github.com/goki/mat32"
+ "goki.dev/mat32/v2"
)
//gosl: start chans
@@ -16,11 +16,11 @@ import (
// In particular look at the file MODEL/Poirazi_cell/CaL.g.
type VGCCParams struct {
- // [def: 0.02,0.12] strength of VGCC current -- 0.12 value is from Urakubo et al (2008) model -- best fits actual model behavior using axon equations (1.5 nominal in that model), 0.02 works better in practice for not getting stuck in high plateau firing
- Gbar float32 `def:"0.02,0.12" desc:"strength of VGCC current -- 0.12 value is from Urakubo et al (2008) model -- best fits actual model behavior using axon equations (1.5 nominal in that model), 0.02 works better in practice for not getting stuck in high plateau firing"`
+ // strength of VGCC current -- 0.12 value is from Urakubo et al (2008) model -- best fits actual model behavior using axon equations (1.5 nominal in that model), 0.02 works better in practice for not getting stuck in high plateau firing
+ Gbar float32 `def:"0.02,0.12"`
- // [def: 25] [viewif: Gbar>0] calcium from conductance factor -- important for learning contribution of VGCC
- Ca float32 `viewif:"Gbar>0" def:"25" desc:"calcium from conductance factor -- important for learning contribution of VGCC"`
+ // calcium from conductance factor -- important for learning contribution of VGCC
+ Ca float32 `viewif:"Gbar>0" def:"25"`
pad, pad1 int32
}
diff --git a/examples/attn_trn/attn.go b/examples/attn_trn/attn.go
index 06cf54a7f..7584b4596 100644
--- a/examples/attn_trn/attn.go
+++ b/examples/attn_trn/attn.go
@@ -18,24 +18,24 @@ import (
"strconv"
"github.com/emer/axon/axon"
- "github.com/emer/emergent/emer"
- "github.com/emer/emergent/env"
- "github.com/emer/emergent/evec"
- "github.com/emer/emergent/netview"
- "github.com/emer/emergent/params"
- "github.com/emer/emergent/prjn"
- "github.com/emer/emergent/relpos"
- "github.com/emer/etable/agg"
- "github.com/emer/etable/eplot"
- "github.com/emer/etable/etable"
- "github.com/emer/etable/etensor"
- "github.com/emer/etable/split"
- "github.com/goki/gi/gi"
- "github.com/goki/gi/gimain"
- "github.com/goki/gi/giv"
+ "github.com/emer/emergent/v2/emer"
+ "github.com/emer/emergent/v2/env"
+ "github.com/emer/emergent/v2/evec"
+ "github.com/emer/emergent/v2/netview"
+ "github.com/emer/emergent/v2/params"
+ "github.com/emer/emergent/v2/prjn"
+ "github.com/emer/emergent/v2/relpos"
"github.com/goki/ki/ki"
"github.com/goki/ki/kit"
- "github.com/goki/mat32"
+ "goki.dev/etable/v2/agg"
+ "goki.dev/etable/v2/eplot"
+ "goki.dev/etable/v2/etable"
+ "goki.dev/etable/v2/etensor"
+ "goki.dev/etable/v2/split"
+ "goki.dev/gi/v2/gi"
+ "goki.dev/gi/v2/gimain"
+ "goki.dev/gi/v2/giv"
+ "goki.dev/mat32/v2"
)
// this is the stub main for gogi that calls our actual mainrun function, at end of file
@@ -216,89 +216,89 @@ var ParamSets = params.Sets{
// for the fields which provide hints to how things should be displayed).
type Sim struct {
- // [def: 200] number of cycles per trial
- Cycles int `def:"200" desc:"number of cycles per trial"`
+ // number of cycles per trial
+ Cycles int `def:"200"`
- // [def: 10] number of runs to run to collect stats
- Runs int `def:"10" desc:"number of runs to run to collect stats"`
+ // number of runs to run to collect stats
+ Runs int `def:"10"`
- // [def: true] sodium (Na) gated potassium (K) channels that cause neurons to fatigue over time
- KNaAdapt bool `def:"true" desc:"sodium (Na) gated potassium (K) channels that cause neurons to fatigue over time"`
+ // sodium (Na) gated potassium (K) channels that cause neurons to fatigue over time
+ KNaAdapt bool `def:"true"`
- // [view: no-inline] the network -- click to view / edit parameters for layers, prjns, etc
- Net *axon.Network `view:"no-inline" desc:"the network -- click to view / edit parameters for layers, prjns, etc"`
+ // the network -- click to view / edit parameters for layers, prjns, etc
+ Net *axon.Network `view:"no-inline"`
- // [view: Standard same-to-same size topographic projection]
+ // Standard same-to-same size topographic projection
Prjn3x3Skp1 *prjn.PoolTile `view:"Standard same-to-same size topographic projection"`
- // [view: Standard same-to-same size topographic projection]
+ // Standard same-to-same size topographic projection
Prjn5x5Skp1 *prjn.PoolTile `view:"Standard same-to-same size topographic projection"`
// select which type of test (input patterns) to use
- Test TestType `desc:"select which type of test (input patterns) to use"`
+ Test TestType
- // [view: no-inline] testing trial-level log data -- click to see record of network's response to each input
- TstTrlLog *etable.Table `view:"no-inline" desc:"testing trial-level log data -- click to see record of network's response to each input"`
+ // testing trial-level log data -- click to see record of network's response to each input
+ TstTrlLog *etable.Table `view:"no-inline"`
- // [view: no-inline] aggregated testing data
- TstRunLog *etable.Table `view:"no-inline" desc:"aggregated testing data"`
+ // aggregated testing data
+ TstRunLog *etable.Table `view:"no-inline"`
- // [view: no-inline] aggregate stats on testing data
- TstStats *etable.Table `view:"no-inline" desc:"aggregate stats on testing data"`
+ // aggregate stats on testing data
+ TstStats *etable.Table `view:"no-inline"`
- // [view: no-inline] full collection of param sets -- not really interesting for this model
- Params params.Sets `view:"no-inline" desc:"full collection of param sets -- not really interesting for this model"`
+ // full collection of param sets -- not really interesting for this model
+ Params params.Sets `view:"no-inline"`
// Testing environment -- manages iterating over testing
- TestEnv AttnEnv `desc:"Testing environment -- manages iterating over testing"`
+ TestEnv AttnEnv
// axon timing parameters and state
- Context axon.Context `desc:"axon timing parameters and state"`
+ Context axon.Context
// whether to update the network view while running
- ViewOn bool `desc:"whether to update the network view while running"`
+ ViewOn bool
// at what time scale to update the display during testing? Change to AlphaCyc to make display updating go faster
- ViewUpdt axon.TimeScales `desc:"at what time scale to update the display during testing? Change to AlphaCyc to make display updating go faster"`
+ ViewUpdt axon.TimeScales
// layer to measure attentional effects on
- AttnLay string `desc:"layer to measure attentional effects on"`
+ AttnLay string
// names of layers to record activations etc of during testing
- TstRecLays []string `desc:"names of layers to record activations etc of during testing"`
+ TstRecLays []string
// max activation in center of stimulus 1 (attended, stronger)
- S1Act float32 `desc:"max activation in center of stimulus 1 (attended, stronger)"`
+ S1Act float32
// max activation in center of stimulus 2 (ignored, weaker)
- S2Act float32 `desc:"max activation in center of stimulus 2 (ignored, weaker)"`
+ S2Act float32
// percent modulation = (S1Act - S2Act) / S1Act
- PctMod float32 `desc:"percent modulation = (S1Act - S2Act) / S1Act"`
+ PctMod float32
- // [view: -] main GUI window
- Win *gi.Window `view:"-" desc:"main GUI window"`
+ // main GUI window
+ Win *gi.Window `view:"-"`
- // [view: -] the network viewer
- NetView *netview.NetView `view:"-" desc:"the network viewer"`
+ // the network viewer
+ NetView *netview.NetView `view:"-"`
- // [view: -] the master toolbar
- ToolBar *gi.ToolBar `view:"-" desc:"the master toolbar"`
+ // the master toolbar
+ ToolBar *gi.ToolBar `view:"-"`
- // [view: -] the test-trial plot
- TstTrlPlot *eplot.Plot2D `view:"-" desc:"the test-trial plot"`
+ // the test-trial plot
+ TstTrlPlot *eplot.Plot2D `view:"-"`
- // [view: -] the test-trial plot
- TstRunPlot *eplot.Plot2D `view:"-" desc:"the test-trial plot"`
+ // the test-run plot
+ TstRunPlot *eplot.Plot2D `view:"-"`
- // [view: -] for holding layer values
- ValsTsrs map[string]*etensor.Float32 `view:"-" desc:"for holding layer values"`
+ // for holding layer values
+ ValsTsrs map[string]*etensor.Float32 `view:"-"`
- // [view: -] true if sim is running
- IsRunning bool `view:"-" desc:"true if sim is running"`
+ // true if sim is running
+ IsRunning bool `view:"-"`
- // [view: -] flag to stop running
- StopNow bool `view:"-" desc:"flag to stop running"`
+ // flag to stop running
+ StopNow bool `view:"-"`
}
// this registers this Sim Type and gives it properties that e.g.,
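For completeness, the attentional modulation statistic defined in the PctMod comment above, (S1Act - S2Act) / S1Act, worked as a tiny example (the zero guard is an added assumption):

```go
// Worked example of PctMod = (S1Act - S2Act) / S1Act.
package main

import "fmt"

func pctMod(s1, s2 float32) float32 {
	if s1 == 0 { // guard against an inactive attended stimulus
		return 0
	}
	return (s1 - s2) / s1
}

func main() {
	fmt.Println(pctMod(0.8, 0.6)) // 0.25 = 25% attentional modulation
}
```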
diff --git a/examples/attn_trn/attn_env.go b/examples/attn_trn/attn_env.go
index aab48b93b..1ecd4201a 100644
--- a/examples/attn_trn/attn_env.go
+++ b/examples/attn_trn/attn_env.go
@@ -9,27 +9,27 @@ package main
import (
"fmt"
- "github.com/emer/emergent/efuns"
- "github.com/emer/emergent/env"
- "github.com/emer/emergent/evec"
- "github.com/emer/etable/etensor"
- "github.com/goki/mat32"
+ "github.com/emer/emergent/v2/efuns"
+ "github.com/emer/emergent/v2/env"
+ "github.com/emer/emergent/v2/evec"
+ "goki.dev/etable/v2/etensor"
+ "goki.dev/mat32/v2"
)
// Stim describes a single stimulus
type Stim struct {
// position in normalized coordinates
- Pos mat32.Vec2 `desc:"position in normalized coordintes"`
+ Pos mat32.Vec2
// feature number: 0-3 for V1 input, -1 for LIP attn
- Feat int `desc:"feature number: 0-3 for V1 input, -1 for LIP attn"`
+ Feat int
// normalized width
- Width float32 `desc:"normalized width"`
+ Width float32
// normalized contrast level
- Contrast float32 `desc:"normalized contrast level"`
+ Contrast float32
}
// PosXY returns XY position projected into size of grid
@@ -41,10 +41,10 @@ func (st *Stim) PosXY(size evec.Vec2i) mat32.Vec2 {
type StimSet struct {
// description of set
- Name string `desc:"description of set"`
+ Name string
// stims to present
- Stims []Stim `desc:"stims to present"`
+ Stims []Stim
}
// Stims is a list of a set of stimuli to present
@@ -57,52 +57,52 @@ type Stims []StimSet
type AttnEnv struct {
// name of this environment
- Nm string `desc:"name of this environment"`
+ Nm string
// description of this environment
- Dsc string `desc:"description of this environment"`
+ Dsc string
// multiplier on contrast function
- ContrastMult float32 `desc:"multiplier on contrast function"`
+ ContrastMult float32
// gain on contrast function inside exponential
- ContrastGain float32 `desc:"gain on contrast function inside exponential"`
+ ContrastGain float32
// offset on contrast function
- ContrastOff float32 `desc:"offset on contrast function"`
+ ContrastOff float32
// use gaussian for LIP -- otherwise fixed circle
- LIPGauss bool `desc:"use gaussian for LIP -- otherwise fixed circle"`
+ LIPGauss bool
// a list of stimuli to present
- Stims Stims `desc:"a list of stimuli to present"`
+ Stims Stims
// current stimuli presented
- CurStim *StimSet `inactive:"+" desc:"current stimuli presented"`
+ CurStim *StimSet `inactive:"+"`
// activation level (midpoint) -- feature is incremented, rest decremented relative to this
- Act float32 `desc:"activation level (midpoint) -- feature is incremented, rest decremented relative to this"`
+ Act float32
// size of V1 Pools
- V1Pools evec.Vec2i `desc:"size of V1 Pools"`
+ V1Pools evec.Vec2i
// size of V1 features per pool
- V1Feats evec.Vec2i `desc:"size of V1 features per pool"`
+ V1Feats evec.Vec2i
// V1 rendered input state, 4D Size x Size
- V1 etensor.Float32 `desc:"V1 rendered input state, 4D Size x Size"`
+ V1 etensor.Float32
// LIP top-down attention
- LIP etensor.Float32 `desc:"LIP top-down attention"`
+ LIP etensor.Float32
- // [view: inline] current run of model as provided during Init
- Run env.Ctr `view:"inline" desc:"current run of model as provided during Init"`
+ // current run of model as provided during Init
+ Run env.Ctr `view:"inline"`
- // [view: inline] number of times through Seq.Max number of sequences
- Epoch env.Ctr `view:"inline" desc:"number of times through Seq.Max number of sequences"`
+ // number of times through Seq.Max number of sequences
+ Epoch env.Ctr `view:"inline"`
- // [view: inline] trial increments over input states -- could add Event as a lower level
- Trial env.Ctr `view:"inline" desc:"trial increments over input states -- could add Event as a lower level"`
+ // trial increments over input states -- could add Event as a lower level
+ Trial env.Ctr `view:"inline"`
}
func (ev *AttnEnv) Name() string { return ev.Nm }
diff --git a/examples/attn_trn/stims.go b/examples/attn_trn/stims.go
index 6d815b6a7..8c8530b60 100644
--- a/examples/attn_trn/stims.go
+++ b/examples/attn_trn/stims.go
@@ -6,7 +6,7 @@
package main
-import "github.com/goki/mat32"
+import "goki.dev/mat32/v2"
// StimAttnSize is a list of stimuli manipulating the size of stimuli vs. attention
// it is the primary test of Reynolds & Heeger 2009 attentional dynamics.
diff --git a/examples/bench/bench.go b/examples/bench/bench.go
index 9d03236be..8a3a2a865 100644
--- a/examples/bench/bench.go
+++ b/examples/bench/bench.go
@@ -15,14 +15,14 @@ import (
"math/rand"
"github.com/emer/axon/axon"
- "github.com/emer/emergent/erand"
- "github.com/emer/emergent/etime"
- "github.com/emer/emergent/params"
- "github.com/emer/emergent/patgen"
- "github.com/emer/emergent/prjn"
- "github.com/emer/emergent/timer"
- "github.com/emer/etable/etable"
- "github.com/emer/etable/etensor"
+ "github.com/emer/emergent/v2/erand"
+ "github.com/emer/emergent/v2/etime"
+ "github.com/emer/emergent/v2/params"
+ "github.com/emer/emergent/v2/patgen"
+ "github.com/emer/emergent/v2/prjn"
+ "github.com/emer/emergent/v2/timer"
+ "goki.dev/etable/v2/etable"
+ "goki.dev/etable/v2/etensor"
)
// note: with 2 hidden layers, this simple test case converges to perfect performance:
diff --git a/examples/bench/bench_test.go b/examples/bench/bench_test.go
index 3a7421aeb..298d146a0 100644
--- a/examples/bench/bench_test.go
+++ b/examples/bench/bench_test.go
@@ -10,9 +10,9 @@ import (
"testing"
"github.com/emer/axon/axon"
- "github.com/emer/emergent/etime"
- "github.com/emer/etable/etable"
- "github.com/goki/gi/gi"
+ "github.com/emer/emergent/v2/etime"
+ "goki.dev/etable/v2/etable"
+ "goki.dev/gi/v2/gi"
)
func init() {
diff --git a/examples/bench_lvis/bench_lvis.go b/examples/bench_lvis/bench_lvis.go
index a56694129..345f3c08c 100644
--- a/examples/bench_lvis/bench_lvis.go
+++ b/examples/bench_lvis/bench_lvis.go
@@ -14,14 +14,14 @@ import (
"math/rand"
"github.com/emer/axon/axon"
- "github.com/emer/emergent/erand"
- "github.com/emer/emergent/etime"
- "github.com/emer/emergent/params"
- "github.com/emer/emergent/patgen"
- "github.com/emer/emergent/prjn"
- "github.com/emer/emergent/timer"
- "github.com/emer/etable/etable"
- "github.com/emer/etable/etensor"
+ "github.com/emer/emergent/v2/erand"
+ "github.com/emer/emergent/v2/etime"
+ "github.com/emer/emergent/v2/params"
+ "github.com/emer/emergent/v2/patgen"
+ "github.com/emer/emergent/v2/prjn"
+ "github.com/emer/emergent/v2/timer"
+ "goki.dev/etable/v2/etable"
+ "goki.dev/etable/v2/etensor"
)
var ParamSets = params.Sets{
diff --git a/examples/bench_lvis/bench_lvis_test.go b/examples/bench_lvis/bench_lvis_test.go
index e6daa6095..55b3c4830 100644
--- a/examples/bench_lvis/bench_lvis_test.go
+++ b/examples/bench_lvis/bench_lvis_test.go
@@ -10,9 +10,9 @@ import (
"testing"
"github.com/emer/axon/axon"
- "github.com/emer/etable/etable"
- "github.com/emer/etable/etensor"
"github.com/stretchr/testify/require"
+ "goki.dev/etable/v2/etable"
+ "goki.dev/etable/v2/etensor"
)
var gpu = flag.Bool("gpu", false, "whether to run gpu or not")
diff --git a/examples/bench_objrec/config.go b/examples/bench_objrec/config.go
index 7274052da..004ba3b57 100644
--- a/examples/bench_objrec/config.go
+++ b/examples/bench_objrec/config.go
@@ -4,7 +4,7 @@
package main
-import "github.com/emer/emergent/prjn"
+import "github.com/emer/emergent/v2/prjn"
// EnvConfig has config params for environment
// note: only adding fields for key Env params that matter for both Network and Env
@@ -12,37 +12,37 @@ import "github.com/emer/emergent/prjn"
type EnvConfig struct {
// env parameters -- can set any field/subfield on Env struct, using standard TOML formatting
- Env map[string]any `desc:"env parameters -- can set any field/subfield on Env struct, using standard TOML formatting"`
+ Env map[string]any
- // [def: 5] number of units per localist output unit
- NOutPer int `def:"5" desc:"number of units per localist output unit"`
+ // number of units per localist output unit
+ NOutPer int `def:"5"`
}
// ParamConfig has config parameters related to sim params
type ParamConfig struct {
// network parameters
- Network map[string]any `desc:"network parameters"`
+ Network map[string]any
// Extra Param Sheet name(s) to use (space separated if multiple) -- must be valid name as listed in compiled-in params or loaded params
- Sheet string `desc:"Extra Param Sheet name(s) to use (space separated if multiple) -- must be valid name as listed in compiled-in params or loaded params"`
+ Sheet string
// extra tag to add to file names and logs saved from this run
- Tag string `desc:"extra tag to add to file names and logs saved from this run"`
+ Tag string
// user note -- describe the run params etc -- like a git commit message for the run
- Note string `desc:"user note -- describe the run params etc -- like a git commit message for the run"`
+ Note string
// Name of the JSON file to input saved parameters from.
- File string `nest:"+" desc:"Name of the JSON file to input saved parameters from."`
+ File string `nest:"+"`
// Save a snapshot of all current param and config settings in a directory named params_