From 6659f5dc12e8074a8665a21d9878d030a3cb1cde Mon Sep 17 00:00:00 2001
From: "Randall C. O'Reilly"
Date: Fri, 8 Dec 2023 22:31:22 -0800
Subject: [PATCH] update to v2

---
 axon/act.go | 336 ++++++++++++-------------
 axon/act_prjn.go | 28 +--
 axon/act_test.go | 2 +-
 axon/axon.go | 2 +-
 axon/basic_test.go | 12 +-
 axon/context.go | 102 ++++----
 axon/deep_layers.go | 34 +--
 axon/deep_net.go | 6 +-
 axon/gpu.go | 92 +++----
 axon/helpers.go | 6 +-
 axon/hip_net.go | 82 +++---
 axon/inhib.go | 68 ++---
 axon/layer.go | 6 +-
 axon/layer_compute.go | 4 +-
 axon/layer_test.go | 4 +-
 axon/layerbase.go | 92 +++----
 axon/layerparams.go | 106 ++++----
 axon/learn.go | 220 ++++++++--------
 axon/logging.go | 32 +--
 axon/looper.go | 10 +-
 axon/network.go | 6 +-
 axon/network_test.go | 2 +-
 axon/networkbase.go | 168 ++++++-------
 axon/networkbase_test.go | 2 +-
 axon/neuromod.go | 32 +--
 axon/neuron.go | 2 +-
 axon/pcore_layers.go | 28 +--
 axon/pcore_net.go | 2 +-
 axon/pool.go | 68 ++---
 axon/pool_test.go | 4 +-
 axon/prjn.go | 8 +-
 axon/prjnbase.go | 86 +++----
 axon/prjnparams.go | 54 ++--
 axon/pvlv.go | 96 +++----
 axon/pvlv_layers.go | 50 ++--
 axon/pvlv_net.go | 6 +-
 axon/rl_layers.go | 18 +-
 axon/rl_net.go | 4 +-
 axon/threads.go | 2 +-
 axon/threads_test.go | 10 +-
 chans/ak.go | 44 ++--
 chans/gabab.go | 38 +--
 chans/mahp.go | 24 +-
 chans/nmda.go | 34 +--
 chans/sahp.go | 30 +--
 chans/skca.go | 50 ++--
 chans/vgcc.go | 10 +-
 examples/attn_trn/attn.go | 120 ++++-----
 examples/attn_trn/attn_env.go | 60 ++---
 examples/attn_trn/stims.go | 2 +-
 examples/bench/bench.go | 16 +-
 examples/bench/bench_test.go | 6 +-
 examples/bench_lvis/bench_lvis.go | 16 +-
 examples/bench_lvis/bench_lvis_test.go | 4 +-
 examples/bench_objrec/config.go | 108 ++++----
 examples/bench_objrec/led_env.go | 46 ++--
 examples/bench_objrec/leds.go | 30 +--
 examples/bench_objrec/objrec.go | 84 +++----
 examples/bench_objrec/params.go | 4 +-
 examples/bench_objrec/v1filter.go | 70 +++---
 examples/boa/boa.go | 94 +++----
 examples/boa/boa_test.go | 4 +-
 examples/boa/config.go | 114 ++++-----
 examples/boa/params.go | 4 +-
 examples/deep_fsa/deep_fsa.go | 78 +++---
 examples/deep_fsa/fsa_env.go | 50 ++--
 examples/deep_fsa/params.go | 4 +-
 examples/deep_move/deep_move.go | 78 +++---
 examples/deep_move/move_env.go | 70 +++---
 examples/deep_move/params.go | 4 +-
 examples/deep_music/deep_music.go | 76 +++---
 examples/deep_music/music_env.go | 52 ++--
 examples/deep_music/params.go | 4 +-
 examples/hip/def_params.go | 4 +-
 examples/hip/hip.go | 114 ++++-----
 examples/hip/orig_params.go | 2 +-
 examples/inhib/config.go | 68 ++---
 examples/inhib/inhib.go | 74 +++---
 examples/inhib/params.go | 4 +-
 examples/kinaseq/kinaseq.go | 110 ++++----
 examples/kinaseq/neuron.go | 24 +-
 examples/mpi/params.go | 4 +-
 examples/mpi/ra25.go | 196 +++++++--------
 examples/neuron/neuron.go | 96 +++----
 examples/neuron/neuron_test.go | 2 +-
 examples/pcore/gono_env.go | 72 +++---
 examples/pcore/params.go | 4 +-
 examples/pcore/pcore.go | 80 +++---
 examples/pcore/pcore_test.go | 2 +-
 examples/pvlv/effort_plot.go | 58 ++---
 examples/pvlv/params.go | 4 +-
 examples/pvlv/pvlv.go | 80 +++---
 examples/pvlv/pvlv_test.go | 2 +-
 examples/ra25/params.go | 4 +-
 examples/ra25/ra25.go | 201 ++++++++-------
 examples/ra25/weights_test.go | 4 +-
 examples/ra25x/config.go | 110 ++++----
 examples/ra25x/params.go | 4 +-
 examples/ra25x/ra25x.go | 82 +++---
 examples/rl/cond_env.go | 74 +++---
 examples/rl/params.go | 4 +-
 examples/rl/rl.go | 68 ++---
 fffb/inhib.go | 16 +-
 fsfffb/inhib.go | 42 ++--
 go.mod | 74 +-----
 go.sum | 4 +-
 interinhib/interinhib.go | 10 +-
 kinase/params.go | 52 ++--
 kinasex/contsyn.go | 6 +-
 nxx1/nxx1.go | 58 ++---
 nxx1/nxx1_test.go | 2 +-
 111 files changed, 2532 insertions(+), 2603 deletions(-)
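Note: most of the churn below is the v1 -> v2 module migration visible in go.mod: the emergent packages moved under github.com/emer/emergent/v2, while etable and mat32 moved to goki.dev hosting. As a quick orientation, here is a minimal hypothetical file using the new paths (the specific API calls are illustrative assumptions, not taken from this patch):

```go
package main

import (
	"fmt"

	"goki.dev/etable/v2/minmax" // was github.com/emer/etable/minmax
	"goki.dev/mat32/v2"         // was github.com/goki/mat32
)

func main() {
	// minmax.F32 is the range type used for ActParams.VmRange below.
	var vr minmax.F32
	vr.Set(0.1, 1.0)
	fmt.Println(vr.ClipVal(1.5), mat32.Sqrt(2)) // clipped to 1.0; float32 sqrt
}
```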
diff --git a/axon/act.go b/axon/act.go
index 803441deb..2f1228f7a 100644
--- a/axon/act.go
+++ b/axon/act.go
@@ -6,10 +6,10 @@ package axon
 
 import (
 	"github.com/emer/axon/chans"
-	"github.com/emer/emergent/erand"
-	"github.com/emer/etable/minmax"
+	"github.com/emer/emergent/v2/erand"
 	"github.com/goki/gosl/slbool"
-	"github.com/goki/mat32"
+	"goki.dev/etable/v2/minmax"
+	"goki.dev/mat32/v2"
 )
 
 ///////////////////////////////////////////////////////////////////////
@@ -31,38 +31,38 @@ import (
 // the AdEx adaptive exponential function (adapt is KNaAdapt)
 type SpikeParams struct {
 
-	// [def: 0.5] threshold value Theta (Q) for firing output activation (.5 is more accurate value based on AdEx biological parameters and normalization
-	Thr float32 `def:"0.5" desc:"threshold value Theta (Q) for firing output activation (.5 is more accurate value based on AdEx biological parameters and normalization"`
+	// threshold value Theta (Q) for firing output activation (.5 is a more accurate value based on AdEx biological parameters and normalization)
+	Thr float32 `def:"0.5"`
 
-	// [def: 0.3] post-spiking membrane potential to reset to, produces refractory effect if lower than VmInit -- 0.3 is apropriate biologically-based value for AdEx (Brette & Gurstner, 2005) parameters. See also RTau
-	VmR float32 `def:"0.3" desc:"post-spiking membrane potential to reset to, produces refractory effect if lower than VmInit -- 0.3 is apropriate biologically-based value for AdEx (Brette & Gurstner, 2005) parameters. See also RTau"`
+	// post-spiking membrane potential to reset to, produces refractory effect if lower than VmInit -- 0.3 is the appropriate biologically-based value for AdEx (Brette & Gerstner, 2005) parameters. See also RTau
+	VmR float32 `def:"0.3"`
 
-	// [def: 3] [min: 1] post-spiking explicit refractory period, in cycles -- prevents Vm updating for this number of cycles post firing -- Vm is reduced in exponential steps over this period according to RTau, being fixed at Tr to VmR exactly
-	Tr int32 `min:"1" def:"3" desc:"post-spiking explicit refractory period, in cycles -- prevents Vm updating for this number of cycles post firing -- Vm is reduced in exponential steps over this period according to RTau, being fixed at Tr to VmR exactly"`
+	// post-spiking explicit refractory period, in cycles -- prevents Vm updating for this number of cycles post firing -- Vm is reduced in exponential steps over this period according to RTau, being fixed at Tr to VmR exactly
+	Tr int32 `min:"1" def:"3"`
 
-	// [def: 1.6667] time constant for decaying Vm down to VmR -- at end of Tr it is set to VmR exactly -- this provides a more realistic shape of the post-spiking Vm which is only relevant for more realistic channels that key off of Vm -- does not otherwise affect standard computation
-	RTau float32 `def:"1.6667" desc:"time constant for decaying Vm down to VmR -- at end of Tr it is set to VmR exactly -- this provides a more realistic shape of the post-spiking Vm which is only relevant for more realistic channels that key off of Vm -- does not otherwise affect standard computation"`
+	// time constant for decaying Vm down to VmR -- at end of Tr it is set to VmR exactly -- this provides a more realistic shape of the post-spiking Vm which is only relevant for more realistic channels that key off of Vm -- does not otherwise affect standard computation
+	RTau float32 `def:"1.6667"`
 
-	// [def: true] if true, turn on exponential excitatory current that drives Vm rapidly upward for spiking as it gets past its nominal firing threshold (Thr) -- nicely captures the Hodgkin Huxley dynamics of Na and K channels -- uses Brette & Gurstner 2005 AdEx formulation
-	Exp slbool.Bool `def:"true" desc:"if true, turn on exponential excitatory current that drives Vm rapidly upward for spiking as it gets past its nominal firing threshold (Thr) -- nicely captures the Hodgkin Huxley dynamics of Na and K channels -- uses Brette & Gurstner 2005 AdEx formulation"`
+	// if true, turn on exponential excitatory current that drives Vm rapidly upward for spiking as it gets past its nominal firing threshold (Thr) -- nicely captures the Hodgkin-Huxley dynamics of Na and K channels -- uses Brette & Gerstner 2005 AdEx formulation
+	Exp slbool.Bool `def:"true"`
 
-	// [def: 0.02] [viewif: Exp] slope in Vm (2 mV = .02 in normalized units) for extra exponential excitatory current that drives Vm rapidly upward for spiking as it gets past its nominal firing threshold (Thr) -- nicely captures the Hodgkin Huxley dynamics of Na and K channels -- uses Brette & Gurstner 2005 AdEx formulation
-	ExpSlope float32 `viewif:"Exp" def:"0.02" desc:"slope in Vm (2 mV = .02 in normalized units) for extra exponential excitatory current that drives Vm rapidly upward for spiking as it gets past its nominal firing threshold (Thr) -- nicely captures the Hodgkin Huxley dynamics of Na and K channels -- uses Brette & Gurstner 2005 AdEx formulation"`
+	// slope in Vm (2 mV = .02 in normalized units) for extra exponential excitatory current that drives Vm rapidly upward for spiking as it gets past its nominal firing threshold (Thr) -- nicely captures the Hodgkin-Huxley dynamics of Na and K channels -- uses Brette & Gerstner 2005 AdEx formulation
+	ExpSlope float32 `viewif:"Exp" def:"0.02"`
 
-	// [def: 0.9] [viewif: Exp] membrane potential threshold for actually triggering a spike when using the exponential mechanism
-	ExpThr float32 `viewif:"Exp" def:"0.9" desc:"membrane potential threshold for actually triggering a spike when using the exponential mechanism"`
+	// membrane potential threshold for actually triggering a spike when using the exponential mechanism
+	ExpThr float32 `viewif:"Exp" def:"0.9"`
 
-	// [def: 180] [min: 1] for translating spiking interval (rate) into rate-code activation equivalent, what is the maximum firing rate associated with a maximum activation value of 1
-	MaxHz float32 `def:"180" min:"1" desc:"for translating spiking interval (rate) into rate-code activation equivalent, what is the maximum firing rate associated with a maximum activation value of 1"`
+	// for translating spiking interval (rate) into rate-code activation equivalent, what is the maximum firing rate associated with a maximum activation value of 1
+	MaxHz float32 `def:"180" min:"1"`
 
-	// [def: 5] [min: 1] constant for integrating the spiking interval in estimating spiking rate
-	ISITau float32 `def:"5" min:"1" desc:"constant for integrating the spiking interval in estimating spiking rate"`
+	// time constant for integrating the spiking interval in estimating spiking rate
+	ISITau float32 `def:"5" min:"1"`
 
-	// [view: -] rate = 1 / tau
-	ISIDt float32 `view:"-" desc:"rate = 1 / tau"`
+	// rate = 1 / tau
+	ISIDt float32 `view:"-"`
 
-	// [view: -] rate = 1 / tau
-	RDt float32 `view:"-" desc:"rate = 1 / tau"`
+	// rate = 1 / tau
+	RDt float32 `view:"-"`
 
 	pad int32
 }
@@ -124,23 +124,23 @@ func (sk *SpikeParams) AvgFmISI(avg float32, isi float32) float32 {
 // DendParams are the parameters for updating dendrite-specific dynamics
 type DendParams struct {
 
-	// [def: 0.2,0.5] dendrite-specific strength multiplier of the exponential spiking drive on Vm -- e.g., .5 makes it half as strong as at the soma (which uses Gbar.L as a strength multiplier per the AdEx standard model)
-	GbarExp float32 `def:"0.2,0.5" desc:"dendrite-specific strength multiplier of the exponential spiking drive on Vm -- e.g., .5 makes it half as strong as at the soma (which uses Gbar.L as a strength multiplier per the AdEx standard model)"`
+	// dendrite-specific strength multiplier of the exponential spiking drive on Vm -- e.g., .5 makes it half as strong as at the soma (which uses Gbar.L as a strength multiplier per the AdEx standard model)
+	GbarExp float32 `def:"0.2,0.5"`
 
-	// [def: 3,6] dendrite-specific conductance of Kdr delayed rectifier currents, used to reset membrane potential for dendrite -- applied for Tr msec
-	GbarR float32 `def:"3,6" desc:"dendrite-specific conductance of Kdr delayed rectifier currents, used to reset membrane potential for dendrite -- applied for Tr msec"`
+	// dendrite-specific conductance of Kdr delayed rectifier currents, used to reset membrane potential for dendrite -- applied for Tr msec
+	GbarR float32 `def:"3,6"`
 
-	// [def: 0,2] SST+ somatostatin positive slow spiking inhibition level specifically affecting dendritic Vm (VmDend) -- this is important for countering a positive feedback loop from NMDA getting stronger over the course of learning -- also typically requires SubMean = 1 for TrgAvgAct and learning to fully counter this feedback loop.
-	SSGi float32 `def:"0,2" desc:"SST+ somatostatin positive slow spiking inhibition level specifically affecting dendritic Vm (VmDend) -- this is important for countering a positive feedback loop from NMDA getting stronger over the course of learning -- also typically requires SubMean = 1 for TrgAvgAct and learning to fully counter this feedback loop."`
+	// SST+ somatostatin positive slow spiking inhibition level specifically affecting dendritic Vm (VmDend) -- this is important for countering a positive feedback loop from NMDA getting stronger over the course of learning -- also typically requires SubMean = 1 for TrgAvgAct and learning to fully counter this feedback loop.
+	SSGi float32 `def:"0,2"`
 
 	// set automatically based on whether this layer has any recv projections that have a GType conductance type of Modulatory -- if so, then multiply GeSyn etc by GModSyn
-	HasMod slbool.Bool `inactive:"+" desc:"set automatically based on whether this layer has any recv projections that have a GType conductance type of Modulatory -- if so, then multiply GeSyn etc by GModSyn"`
+	HasMod slbool.Bool `inactive:"+"`
 
 	// multiplicative gain factor on the total modulatory input -- this can also be controlled by the PrjnScale.Abs factor on ModulatoryG inputs, but it is convenient to be able to control on the layer as well.
-	ModGain float32 `desc:"multiplicative gain factor on the total modulatory input -- this can also be controlled by the PrjnScale.Abs factor on ModulatoryG inputs, but it is convenient to be able to control on the layer as well."`
+	ModGain float32
 
 	// baseline modulatory level for modulatory effects -- net modulation is ModBase + ModGain * GModSyn
-	ModBase float32 `desc:"baseline modulatory level for modulatory effects -- net modulation is ModBase + ModGain * GModSyn"`
+	ModBase float32
 
 	pad, pad1 int32
 }
@@ -163,23 +163,23 @@ func (dp *DendParams) Update() {
 // Initialized in InitActs called by InitWts, and provides target values for DecayState.
 type ActInitParams struct {
 
-	// [def: 0.3] initial membrane potential -- see Erev.L for the resting potential (typically .3)
-	Vm float32 `def:"0.3" desc:"initial membrane potential -- see Erev.L for the resting potential (typically .3)"`
+	// initial membrane potential -- see Erev.L for the resting potential (typically .3)
+	Vm float32 `def:"0.3"`
 
-	// [def: 0] initial activation value -- typically 0
-	Act float32 `def:"0" desc:"initial activation value -- typically 0"`
+	// initial activation value -- typically 0
+	Act float32 `def:"0"`
 
-	// [def: 0] baseline level of excitatory conductance (net input) -- Ge is initialized to this value, and it is added in as a constant background level of excitatory input -- captures all the other inputs not represented in the model, and intrinsic excitability, etc
-	GeBase float32 `def:"0" desc:"baseline level of excitatory conductance (net input) -- Ge is initialized to this value, and it is added in as a constant background level of excitatory input -- captures all the other inputs not represented in the model, and intrinsic excitability, etc"`
+	// baseline level of excitatory conductance (net input) -- Ge is initialized to this value, and it is added in as a constant background level of excitatory input -- captures all the other inputs not represented in the model, and intrinsic excitability, etc
+	GeBase float32 `def:"0"`
 
-	// [def: 0] baseline level of inhibitory conductance (net input) -- Gi is initialized to this value, and it is added in as a constant background level of inhibitory input -- captures all the other inputs not represented in the model
-	GiBase float32 `def:"0" desc:"baseline level of inhibitory conductance (net input) -- Gi is initialized to this value, and it is added in as a constant background level of inhibitory input -- captures all the other inputs not represented in the model"`
+	// baseline level of inhibitory conductance (net input) -- Gi is initialized to this value, and it is added in as a constant background level of inhibitory input -- captures all the other inputs not represented in the model
+	GiBase float32 `def:"0"`
 
-	// [def: 0] variance (sigma) of gaussian distribution around baseline Ge values, per unit, to establish variability in intrinsic excitability. value never goes < 0
-	GeVar float32 `def:"0" desc:"variance (sigma) of gaussian distribution around baseline Ge values, per unit, to establish variability in intrinsic excitability. value never goes < 0"`
+	// variance (sigma) of gaussian distribution around baseline Ge values, per unit, to establish variability in intrinsic excitability. value never goes < 0
+	GeVar float32 `def:"0"`
 
-	// [def: 0] variance (sigma) of gaussian distribution around baseline Gi values, per unit, to establish variability in intrinsic excitability. value never goes < 0
-	GiVar float32 `def:"0" desc:"variance (sigma) of gaussian distribution around baseline Gi values, per unit, to establish variability in intrinsic excitability. value never goes < 0"`
+	// variance (sigma) of gaussian distribution around baseline Gi values, per unit, to establish variability in intrinsic excitability. value never goes < 0
+	GiVar float32 `def:"0"`
 
 	pad, pad1 int32
 }
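Note: GeVar/GiVar add per-neuron gaussian variability around the GeBase/GiBase baselines, clipped so the conductance never goes below zero. A minimal sketch of that logic, using a hypothetical helper and math/rand as the gaussian source (the actual GetGeBase/GetGiBase methods, referenced in the next hunk header, use the erand.Rand interface):

```go
package main

import (
	"fmt"
	"math/rand"
)

// geBase returns a per-neuron baseline conductance: base plus gaussian
// noise with standard deviation sigma, never going below zero.
func geBase(base, sigma float32, rnd *rand.Rand) float32 {
	if sigma <= 0 {
		return base
	}
	g := base + sigma*float32(rnd.NormFloat64())
	if g < 0 {
		g = 0
	}
	return g
}

func main() {
	rnd := rand.New(rand.NewSource(42))
	for i := 0; i < 3; i++ {
		fmt.Println(geBase(0.2, 0.1, rnd)) // varies around 0.2, floored at 0
	}
}
```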
@@ -231,20 +231,20 @@ func (ai *ActInitParams) GetGiBase(rnd erand.Rand) float32 {
 // DecayParams control the decay of activation state in the NewState function
 // called in NewState when a new state is to be processed.
 type DecayParams struct {
 
-	// [def: 0,0.2,0.5,1] [min: 0] [max: 1] proportion to decay most activation state variables toward initial values at start of every ThetaCycle (except those controlled separately below) -- if 1 it is effectively equivalent to full clear, resetting other derived values. ISI is reset every AlphaCycle to get a fresh sample of activations (doesn't affect direct computation -- only readout).
-	Act float32 `def:"0,0.2,0.5,1" max:"1" min:"0" desc:"proportion to decay most activation state variables toward initial values at start of every ThetaCycle (except those controlled separately below) -- if 1 it is effectively equivalent to full clear, resetting other derived values. ISI is reset every AlphaCycle to get a fresh sample of activations (doesn't affect direct computation -- only readout)."`
+	// proportion to decay most activation state variables toward initial values at start of every ThetaCycle (except those controlled separately below) -- if 1 it is effectively equivalent to full clear, resetting other derived values. ISI is reset every AlphaCycle to get a fresh sample of activations (doesn't affect direct computation -- only readout).
+	Act float32 `def:"0,0.2,0.5,1" max:"1" min:"0"`
 
-	// [def: 0,0.6] [min: 0] [max: 1] proportion to decay long-lasting conductances, NMDA and GABA, and also the dendritic membrane potential -- when using random stimulus order, it is important to decay this significantly to allow a fresh start -- but set Act to 0 to enable ongoing activity to keep neurons in their sensitive regime.
-	Glong float32 `def:"0,0.6" max:"1" min:"0" desc:"proportion to decay long-lasting conductances, NMDA and GABA, and also the dendritic membrane potential -- when using random stimulus order, it is important to decay this significantly to allow a fresh start -- but set Act to 0 to enable ongoing activity to keep neurons in their sensitive regime."`
+	// proportion to decay long-lasting conductances, NMDA and GABA, and also the dendritic membrane potential -- when using random stimulus order, it is important to decay this significantly to allow a fresh start -- but set Act to 0 to enable ongoing activity to keep neurons in their sensitive regime.
+	Glong float32 `def:"0,0.6" max:"1" min:"0"`
 
-	// [def: 0] [min: 0] [max: 1] decay of afterhyperpolarization currents, including mAHP, sAHP, and KNa -- has a separate decay because often useful to have this not decay at all even if decay is on.
-	AHP float32 `def:"0" max:"1" min:"0" desc:"decay of afterhyperpolarization currents, including mAHP, sAHP, and KNa -- has a separate decay because often useful to have this not decay at all even if decay is on."`
+	// decay of afterhyperpolarization currents, including mAHP, sAHP, and KNa -- has a separate decay because often useful to have this not decay at all even if decay is on.
+	AHP float32 `def:"0" max:"1" min:"0"`
 
-	// [def: 0] [min: 0] [max: 1] decay of Ca variables driven by spiking activity used in learning: CaSpk* and Ca* variables. These are typically not decayed but may need to be in some situations.
-	LearnCa float32 `def:"0" max:"1" min:"0" desc:"decay of Ca variables driven by spiking activity used in learning: CaSpk* and Ca* variables. These are typically not decayed but may need to be in some situations."`
+	// decay of Ca variables driven by spiking activity used in learning: CaSpk* and Ca* variables. These are typically not decayed but may need to be in some situations.
+	LearnCa float32 `def:"0" max:"1" min:"0"`
 
 	// decay layer at end of ThetaCycle when there is a global reward -- true by default for PTPred, PTMaint and PFC Super layers
-	OnRew slbool.Bool `desc:"decay layer at end of ThetaCycle when there is a global reward -- true by default for PTPred, PTMaint and PFC Super layers"`
+	OnRew slbool.Bool
 
 	pad, pad1, pad2 float32
 }
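Note: each decay factor above is applied as a proportional step back toward the initial value, so 1 is a full reset and 0 leaves state untouched. A minimal sketch of that operation, with a hypothetical helper name (not the actual DecayState code):

```go
package main

import "fmt"

// decayToward moves state v proportionally back toward its init value:
// decay = 0 leaves v unchanged; decay = 1 resets it to init exactly.
func decayToward(v, init, decay float32) float32 {
	return v - decay*(v-init)
}

func main() {
	// Act decay of 0.2 moves activation 20% of the way back to its init of 0.3.
	fmt.Println(decayToward(0.8, 0.3, 0.2)) // 0.7
}
```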
@@ -265,53 +265,53 @@ func (dp *DecayParams) Defaults() {
 // DtParams are time and rate constants for temporal derivatives in Axon (Vm, G)
 type DtParams struct {
 
-	// [def: 1,0.5] [min: 0] overall rate constant for numerical integration, for all equations at the unit level -- all time constants are specified in millisecond units, with one cycle = 1 msec -- if you instead want to make one cycle = 2 msec, you can do this globally by setting this integ value to 2 (etc). However, stability issues will likely arise if you go too high. For improved numerical stability, you may even need to reduce this value to 0.5 or possibly even lower (typically however this is not necessary). MUST also coordinate this with network.time_inc variable to ensure that global network.time reflects simulated time accurately
-	Integ float32 `def:"1,0.5" min:"0" desc:"overall rate constant for numerical integration, for all equations at the unit level -- all time constants are specified in millisecond units, with one cycle = 1 msec -- if you instead want to make one cycle = 2 msec, you can do this globally by setting this integ value to 2 (etc). However, stability issues will likely arise if you go too high. For improved numerical stability, you may even need to reduce this value to 0.5 or possibly even lower (typically however this is not necessary). MUST also coordinate this with network.time_inc variable to ensure that global network.time reflects simulated time accurately"`
+	// overall rate constant for numerical integration, for all equations at the unit level -- all time constants are specified in millisecond units, with one cycle = 1 msec -- if you instead want to make one cycle = 2 msec, you can do this globally by setting this integ value to 2 (etc). However, stability issues will likely arise if you go too high. For improved numerical stability, you may even need to reduce this value to 0.5 or possibly even lower (typically however this is not necessary). MUST also coordinate this with network.time_inc variable to ensure that global network.time reflects simulated time accurately
+	Integ float32 `def:"1,0.5" min:"0"`
 
-	// [def: 2.81] [min: 1] membrane potential time constant in cycles, which should be milliseconds typically (tau is roughly how long it takes for value to change significantly -- 1.4x the half-life) -- reflects the capacitance of the neuron in principle -- biological default for AdEx spiking model C = 281 pF = 2.81 normalized
-	VmTau float32 `def:"2.81" min:"1" desc:"membrane potential time constant in cycles, which should be milliseconds typically (tau is roughly how long it takes for value to change significantly -- 1.4x the half-life) -- reflects the capacitance of the neuron in principle -- biological default for AdEx spiking model C = 281 pF = 2.81 normalized"`
+	// membrane potential time constant in cycles, which should be milliseconds typically (tau is roughly how long it takes for value to change significantly -- 1.4x the half-life) -- reflects the capacitance of the neuron in principle -- biological default for AdEx spiking model C = 281 pF = 2.81 normalized
+	VmTau float32 `def:"2.81" min:"1"`
 
-	// [def: 5] [min: 1] dendritic membrane potential time constant in cycles, which should be milliseconds typically (tau is roughly how long it takes for value to change significantly -- 1.4x the half-life) -- reflects the capacitance of the neuron in principle -- biological default for AdEx spiking model C = 281 pF = 2.81 normalized
-	VmDendTau float32 `def:"5" min:"1" desc:"dendritic membrane potential time constant in cycles, which should be milliseconds typically (tau is roughly how long it takes for value to change significantly -- 1.4x the half-life) -- reflects the capacitance of the neuron in principle -- biological default for AdEx spiking model C = 281 pF = 2.81 normalized"`
+	// dendritic membrane potential time constant in cycles, which should be milliseconds typically (tau is roughly how long it takes for value to change significantly -- 1.4x the half-life) -- reflects the capacitance of the neuron in principle -- biological default for AdEx spiking model C = 281 pF = 2.81 normalized
+	VmDendTau float32 `def:"5" min:"1"`
 
-	// [def: 2] [min: 1] number of integration steps to take in computing new Vm value -- this is the one computation that can be most numerically unstable so taking multiple steps with proportionally smaller dt is beneficial
-	VmSteps int32 `def:"2" min:"1" desc:"number of integration steps to take in computing new Vm value -- this is the one computation that can be most numerically unstable so taking multiple steps with proportionally smaller dt is beneficial"`
+	// number of integration steps to take in computing new Vm value -- this is the one computation that can be most numerically unstable so taking multiple steps with proportionally smaller dt is beneficial
+	VmSteps int32 `def:"2" min:"1"`
 
-	// [def: 5] [min: 1] time constant for decay of excitatory AMPA receptor conductance.
-	GeTau float32 `def:"5" min:"1" desc:"time constant for decay of excitatory AMPA receptor conductance."`
+	// time constant for decay of excitatory AMPA receptor conductance.
+	GeTau float32 `def:"5" min:"1"`
 
-	// [def: 7] [min: 1] time constant for decay of inhibitory GABAa receptor conductance.
-	GiTau float32 `def:"7" min:"1" desc:"time constant for decay of inhibitory GABAa receptor conductance."`
+	// time constant for decay of inhibitory GABAa receptor conductance.
+	GiTau float32 `def:"7" min:"1"`
 
-	// [def: 40] [min: 1] time constant for integrating values over timescale of an individual input state (e.g., roughly 200 msec -- theta cycle), used in computing ActInt, GeInt from Ge, and GiInt from GiSyn -- this is used for scoring performance, not for learning, in cycles, which should be milliseconds typically (tau is roughly how long it takes for value to change significantly -- 1.4x the half-life),
-	IntTau float32 `def:"40" min:"1" desc:"time constant for integrating values over timescale of an individual input state (e.g., roughly 200 msec -- theta cycle), used in computing ActInt, GeInt from Ge, and GiInt from GiSyn -- this is used for scoring performance, not for learning, in cycles, which should be milliseconds typically (tau is roughly how long it takes for value to change significantly -- 1.4x the half-life), "`
+	// time constant for integrating values over timescale of an individual input state (e.g., roughly 200 msec -- theta cycle), used in computing ActInt, GeInt from Ge, and GiInt from GiSyn -- this is used for scoring performance, not for learning, in cycles, which should be milliseconds typically (tau is roughly how long it takes for value to change significantly -- 1.4x the half-life)
+	IntTau float32 `def:"40" min:"1"`
 
-	// [def: 20] [min: 1] time constant for integrating slower long-time-scale averages, such as nrn.ActAvg, Pool.ActsMAvg, ActsPAvg -- computed in NewState when a new input state is present (i.e., not msec but in units of a theta cycle) (tau is roughly how long it takes for value to change significantly) -- set lower for smaller models
-	LongAvgTau float32 `def:"20" min:"1" desc:"time constant for integrating slower long-time-scale averages, such as nrn.ActAvg, Pool.ActsMAvg, ActsPAvg -- computed in NewState when a new input state is present (i.e., not msec but in units of a theta cycle) (tau is roughly how long it takes for value to change significantly) -- set lower for smaller models"`
+	// time constant for integrating slower long-time-scale averages, such as nrn.ActAvg, Pool.ActsMAvg, ActsPAvg -- computed in NewState when a new input state is present (i.e., not msec but in units of a theta cycle) (tau is roughly how long it takes for value to change significantly) -- set lower for smaller models
+	LongAvgTau float32 `def:"20" min:"1"`
 
-	// [def: 10] [min: 0] cycle to start updating the SpkMaxCa, SpkMax values within a theta cycle -- early cycles often reflect prior state
-	MaxCycStart int32 `def:"10" min:"0" desc:"cycle to start updating the SpkMaxCa, SpkMax values within a theta cycle -- early cycles often reflect prior state"`
+	// cycle to start updating the SpkMaxCa, SpkMax values within a theta cycle -- early cycles often reflect prior state
+	MaxCycStart int32 `def:"10" min:"0"`
 
-	// [view: -] nominal rate = Integ / tau
-	VmDt float32 `view:"-" json:"-" xml:"-" desc:"nominal rate = Integ / tau"`
+	// nominal rate = Integ / tau
+	VmDt float32 `view:"-" json:"-" xml:"-"`
 
-	// [view: -] nominal rate = Integ / tau
-	VmDendDt float32 `view:"-" json:"-" xml:"-" desc:"nominal rate = Integ / tau"`
+	// nominal rate = Integ / tau
+	VmDendDt float32 `view:"-" json:"-" xml:"-"`
 
-	// [view: -] 1 / VmSteps
-	DtStep float32 `view:"-" json:"-" xml:"-" desc:"1 / VmSteps"`
+	// 1 / VmSteps
+	DtStep float32 `view:"-" json:"-" xml:"-"`
 
-	// [view: -] rate = Integ / tau
-	GeDt float32 `view:"-" json:"-" xml:"-" desc:"rate = Integ / tau"`
+	// rate = Integ / tau
+	GeDt float32 `view:"-" json:"-" xml:"-"`
 
-	// [view: -] rate = Integ / tau
-	GiDt float32 `view:"-" json:"-" xml:"-" desc:"rate = Integ / tau"`
+	// rate = Integ / tau
+	GiDt float32 `view:"-" json:"-" xml:"-"`
 
-	// [view: -] rate = Integ / tau
-	IntDt float32 `view:"-" json:"-" xml:"-" desc:"rate = Integ / tau"`
+	// rate = Integ / tau
+	IntDt float32 `view:"-" json:"-" xml:"-"`
 
-	// [view: -] rate = 1 / tau
-	LongAvgDt float32 `view:"-" json:"-" xml:"-" desc:"rate = 1 / tau"`
+	// rate = 1 / tau
+	LongAvgDt float32 `view:"-" json:"-" xml:"-"`
 }
 
 func (dp *DtParams) Update() {
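Note: the view:"-" rate fields above are derived from the taus; Update() recomputes them so each integration step applies rate = Integ/tau (or 1/tau for the theta-cycle-level averages). A sketch of that derivation using a hypothetical mirror of the struct, not the verbatim method body:

```go
package main

import "fmt"

// dtParams mirrors the tau fields of DtParams that have cached rates.
type dtParams struct {
	Integ, VmTau, GeTau, GiTau, IntTau, LongAvgTau float32
	VmDt, GeDt, GiDt, IntDt, LongAvgDt             float32
}

// update recomputes the cached rate constants from the time constants.
func (dp *dtParams) update() {
	dp.VmDt = dp.Integ / dp.VmTau
	dp.GeDt = dp.Integ / dp.GeTau
	dp.GiDt = dp.Integ / dp.GiTau
	dp.IntDt = dp.Integ / dp.IntTau
	dp.LongAvgDt = 1 / dp.LongAvgTau // not scaled by Integ: theta-cycle units
}

func main() {
	dp := dtParams{Integ: 1, VmTau: 2.81, GeTau: 5, GiTau: 7, IntTau: 40, LongAvgTau: 20}
	dp.update()
	fmt.Printf("%+v\n", dp)
}
```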
@@ -393,25 +393,25 @@ func (dp *DtParams) AvgVarUpdt(avg, vr *float32, val float32) {
 type SpikeNoiseParams struct {
 
 	// add noise simulating background spiking levels
-	On slbool.Bool `desc:"add noise simulating background spiking levels"`
+	On slbool.Bool
 
-	// [def: 100] [viewif: On] mean frequency of excitatory spikes -- typically 50Hz but multiple inputs increase rate -- poisson lambda parameter, also the variance
-	GeHz float32 `viewif:"On" def:"100" desc:"mean frequency of excitatory spikes -- typically 50Hz but multiple inputs increase rate -- poisson lambda parameter, also the variance"`
+	// mean frequency of excitatory spikes -- typically 50Hz but multiple inputs increase rate -- poisson lambda parameter, also the variance
+	GeHz float32 `viewif:"On" def:"100"`
 
-	// [viewif: On] [min: 0] excitatory conductance per spike -- .001 has minimal impact, .01 can be strong, and .15 is needed to influence timing of clamped inputs
-	Ge float32 `viewif:"On" min:"0" desc:"excitatory conductance per spike -- .001 has minimal impact, .01 can be strong, and .15 is needed to influence timing of clamped inputs"`
+	// excitatory conductance per spike -- .001 has minimal impact, .01 can be strong, and .15 is needed to influence timing of clamped inputs
+	Ge float32 `viewif:"On" min:"0"`
 
-	// [def: 200] [viewif: On] mean frequency of inhibitory spikes -- typically 100Hz fast spiking but multiple inputs increase rate -- poisson lambda parameter, also the variance
-	GiHz float32 `viewif:"On" def:"200" desc:"mean frequency of inhibitory spikes -- typically 100Hz fast spiking but multiple inputs increase rate -- poisson lambda parameter, also the variance"`
+	// mean frequency of inhibitory spikes -- typically 100Hz fast spiking but multiple inputs increase rate -- poisson lambda parameter, also the variance
+	GiHz float32 `viewif:"On" def:"200"`
 
-	// [viewif: On] [min: 0] excitatory conductance per spike -- .001 has minimal impact, .01 can be strong, and .15 is needed to influence timing of clamped inputs
-	Gi float32 `viewif:"On" min:"0" desc:"excitatory conductance per spike -- .001 has minimal impact, .01 can be strong, and .15 is needed to influence timing of clamped inputs"`
+	// inhibitory conductance per spike -- .001 has minimal impact, .01 can be strong, and .15 is needed to influence timing of clamped inputs
+	Gi float32 `viewif:"On" min:"0"`
 
-	// [view: -] Exp(-Interval) which is the threshold for GeNoiseP as it is updated
-	GeExpInt float32 `view:"-" json:"-" xml:"-" desc:"Exp(-Interval) which is the threshold for GeNoiseP as it is updated"`
+	// Exp(-Interval) which is the threshold for GeNoiseP as it is updated
+	GeExpInt float32 `view:"-" json:"-" xml:"-"`
 
-	// [view: -] Exp(-Interval) which is the threshold for GiNoiseP as it is updated
-	GiExpInt float32 `view:"-" json:"-" xml:"-" desc:"Exp(-Interval) which is the threshold for GiNoiseP as it is updated"`
+	// Exp(-Interval) which is the threshold for GiNoiseP as it is updated
+	GiExpInt float32 `view:"-" json:"-" xml:"-"`
 
 	pad int32
 }
@@ -460,19 +460,19 @@ func (an *SpikeNoiseParams) PGi(ctx *Context, p *float32, ni uint32) float32 {
 type ClampParams struct {
 
 	// is this a clamped input layer? set automatically based on layer type at initialization
-	IsInput slbool.Bool `inactive:"+" desc:"is this a clamped input layer? set automatically based on layer type at initialization"`
+	IsInput slbool.Bool `inactive:"+"`
 
 	// is this a target layer? set automatically based on layer type at initialization
-	IsTarget slbool.Bool `inactive:"+" desc:"is this a target layer? set automatically based on layer type at initialization"`
+	IsTarget slbool.Bool `inactive:"+"`
 
-	// [def: 0.8,1.5] amount of Ge driven for clamping -- generally use 0.8 for Target layers, 1.5 for Input layers
-	Ge float32 `def:"0.8,1.5" desc:"amount of Ge driven for clamping -- generally use 0.8 for Target layers, 1.5 for Input layers"`
+	// amount of Ge driven for clamping -- generally use 0.8 for Target layers, 1.5 for Input layers
+	Ge float32 `def:"0.8,1.5"`
 
-	// [def: false] [view: add external conductance on top of any existing -- generally this is not a good idea for target layers (creates a main effect that learning can never match), but may be ok for input layers]
+	// add external conductance on top of any existing -- generally this is not a good idea for target layers (creates a main effect that learning can never match), but may be ok for input layers
 	Add slbool.Bool `def:"false" view:"add external conductance on top of any existing -- generally this is not a good idea for target layers (creates a main effect that learning can never match), but may be ok for input layers"`
 
-	// [def: 0.5] threshold on neuron Act activity to count as active for computing error relative to target in PctErr method
-	ErrThr float32 `def:"0.5" desc:"threshold on neuron Act activity to count as active for computing error relative to target in PctErr method"`
+	// threshold on neuron Act activity to count as active for computing error relative to target in PctErr method
+	ErrThr float32 `def:"0.5"`
 
 	pad, pad1, pad2 float32
 }
@@ -492,13 +492,13 @@ func (cp *ClampParams) Defaults() {
 type AttnParams struct {
 
 	// is attentional modulation active?
-	On slbool.Bool `desc:"is attentional modulation active?"`
+	On slbool.Bool
 
-	// [viewif: On] minimum act multiplier if attention is 0
-	Min float32 `viewif:"On" desc:"minimum act multiplier if attention is 0"`
+	// minimum act multiplier if attention is 0
+	Min float32 `viewif:"On"`
 
 	// threshold on CaSpkP for determining the reaction time for the Layer -- starts after MaxCycStart to ensure that prior trial activity has had a chance to dissipate.
-	RTThr float32 `desc:"threshold on CaSpkP for determining the reaction time for the Layer -- starts after MaxCycStart to ensure that prior trial activity has had a chance to dissipate."`
+	RTThr float32
 
 	pad int32
 }
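Note: the GeExpInt/GiExpInt fields above imply the standard iterative Poisson scheme: a running product p of uniform draws is a clock whose -ln(p) accumulates one unit of exponential "time" per cycle, and a spike fires once p crosses exp(-Interval), where Interval = 1000/Hz msec. A sketch of that mechanism under those assumptions (hypothetical helper; the actual PGe/PGi methods use the GPU-compatible slrand generator):

```go
package main

import (
	"fmt"
	"math"
	"math/rand"
)

// poissonSpike advances one noise channel by one 1-msec cycle: p accumulates
// a product of uniform draws, and a spike is emitted (resetting p to 1) once
// p falls to or below expInt = Exp(-Interval).
func poissonSpike(p *float32, expInt float32, rnd *rand.Rand) bool {
	*p *= rnd.Float32()
	if *p <= expInt {
		*p = 1
		return true
	}
	return false
}

func main() {
	geHz := float32(100)                               // mean spike frequency
	expInt := float32(math.Exp(float64(-1000 / geHz))) // interval = 10 msec
	rnd := rand.New(rand.NewSource(1))
	p, n := float32(1), 0
	for cyc := 0; cyc < 1000; cyc++ { // 1000 cycles = 1 simulated second
		if poissonSpike(&p, expInt, rnd) {
			n++
		}
	}
	fmt.Println("spikes in 1 sec:", n) // close to geHz on average
}
```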
@@ -535,28 +535,28 @@ func (at *AttnParams) ModVal(val float32, attn float32) float32 {
 type PopCodeParams struct {
 
 	// use popcode encoding of variable(s) that this layer represents
-	On slbool.Bool `desc:"use popcode encoding of variable(s) that this layer represents"`
+	On slbool.Bool
 
-	// [def: 0.1] [viewif: On] Ge multiplier for driving excitatory conductance based on PopCode -- multiplies normalized activation values
-	Ge float32 `viewif:"On" def:"0.1" desc:"Ge multiplier for driving excitatory conductance based on PopCode -- multiplies normalized activation values"`
+	// Ge multiplier for driving excitatory conductance based on PopCode -- multiplies normalized activation values
+	Ge float32 `viewif:"On" def:"0.1"`
 
-	// [def: -0.1] [viewif: On] minimum value representable -- for GaussBump, typically include extra to allow mean with activity on either side to represent the lowest value you want to encode
-	Min float32 `viewif:"On" def:"-0.1" desc:"minimum value representable -- for GaussBump, typically include extra to allow mean with activity on either side to represent the lowest value you want to encode"`
+	// minimum value representable -- for GaussBump, typically include extra to allow mean with activity on either side to represent the lowest value you want to encode
+	Min float32 `viewif:"On" def:"-0.1"`
 
-	// [def: 1.1] [viewif: On] maximum value representable -- for GaussBump, typically include extra to allow mean with activity on either side to represent the lowest value you want to encode
-	Max float32 `viewif:"On" def:"1.1" desc:"maximum value representable -- for GaussBump, typically include extra to allow mean with activity on either side to represent the lowest value you want to encode"`
+	// maximum value representable -- for GaussBump, typically include extra to allow mean with activity on either side to represent the highest value you want to encode
+	Max float32 `viewif:"On" def:"1.1"`
 
-	// [def: 1,0.5] [viewif: On] activation multiplier for values at Min end of range, where values at Max end have an activation of 1 -- if this is < 1, then there is a rate code proportional to the value in addition to the popcode pattern -- see also MinSigma, MaxSigma
-	MinAct float32 `viewif:"On" def:"1,0.5" desc:"activation multiplier for values at Min end of range, where values at Max end have an activation of 1 -- if this is < 1, then there is a rate code proportional to the value in addition to the popcode pattern -- see also MinSigma, MaxSigma"`
+	// activation multiplier for values at Min end of range, where values at Max end have an activation of 1 -- if this is < 1, then there is a rate code proportional to the value in addition to the popcode pattern -- see also MinSigma, MaxSigma
+	MinAct float32 `viewif:"On" def:"1,0.5"`
 
-	// [def: 0.1,0.08] [viewif: On] sigma parameter of a gaussian specifying the tuning width of the coarse-coded units, in normalized 0-1 range -- for Min value -- if MinSigma < MaxSigma then more units are activated for Max values vs. Min values, proportionally
-	MinSigma float32 `viewif:"On" def:"0.1,0.08" desc:"sigma parameter of a gaussian specifying the tuning width of the coarse-coded units, in normalized 0-1 range -- for Min value -- if MinSigma < MaxSigma then more units are activated for Max values vs. Min values, proportionally"`
+	// sigma parameter of a gaussian specifying the tuning width of the coarse-coded units, in normalized 0-1 range -- for Min value -- if MinSigma < MaxSigma then more units are activated for Max values vs. Min values, proportionally
+	MinSigma float32 `viewif:"On" def:"0.1,0.08"`
 
-	// [def: 0.1,0.12] [viewif: On] sigma parameter of a gaussian specifying the tuning width of the coarse-coded units, in normalized 0-1 range -- for Min value -- if MinSigma < MaxSigma then more units are activated for Max values vs. Min values, proportionally
-	MaxSigma float32 `viewif:"On" def:"0.1,0.12" desc:"sigma parameter of a gaussian specifying the tuning width of the coarse-coded units, in normalized 0-1 range -- for Min value -- if MinSigma < MaxSigma then more units are activated for Max values vs. Min values, proportionally"`
+	// sigma parameter of a gaussian specifying the tuning width of the coarse-coded units, in normalized 0-1 range -- for Max value -- if MinSigma < MaxSigma then more units are activated for Max values vs. Min values, proportionally
+	MaxSigma float32 `viewif:"On" def:"0.1,0.12"`
 
-	// [viewif: On] ensure that encoded and decoded value remains within specified range
-	Clip slbool.Bool `viewif:"On" desc:"ensure that encoded and decoded value remains within specified range"`
+	// ensure that encoded and decoded value remains within specified range
+	Clip slbool.Bool `viewif:"On"`
 }
 
 func (pc *PopCodeParams) Defaults() {
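Note: the sigma parameters define a gaussian tuning bump whose preferred value sweeps across the unit range [Min, Max]; with MinSigma < MaxSigma (and MinAct < 1) higher values recruit more and stronger activity. A minimal sketch of gaussian-bump encoding under those assumptions, simplified relative to the real EncodeGe (which also applies the Ge multiplier, MinAct rate code, and clipping):

```go
package main

import (
	"fmt"
	"math"
)

// encodeBump returns the normalized activation of unit i (of n) for value
// val, using a gaussian tuning curve whose center sweeps linearly across
// [min, max]; sigma is the tuning width as a fraction of the range.
func encodeBump(i, n int, val, min, max, sigma float32) float32 {
	rng := max - min
	ctr := min + rng*float32(i)/float32(n-1) // preferred value of unit i
	d := (val - ctr) / (sigma * rng)         // distance in tuning-width units
	return float32(math.Exp(float64(-d * d / 2)))
}

func main() {
	for i := 0; i < 8; i++ {
		fmt.Printf("%.2f ", encodeBump(i, 8, 0.5, -0.1, 1.1, 0.1))
	}
	fmt.Println() // a bump of activity centered on units tuned near 0.5
}
```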
@@ -635,68 +635,68 @@ func (pc *PopCodeParams) EncodeGe(i, n uint32, val float32) float32 {
 // This is included in axon.Layer to drive the computation.
 type ActParams struct {
 
-	// [view: inline] Spiking function parameters
-	Spikes SpikeParams `view:"inline" desc:"Spiking function parameters"`
+	// Spiking function parameters
+	Spikes SpikeParams `view:"inline"`
 
-	// [view: inline] dendrite-specific parameters
-	Dend DendParams `view:"inline" desc:"dendrite-specific parameters"`
+	// dendrite-specific parameters
+	Dend DendParams `view:"inline"`
 
-	// [view: inline] initial values for key network state variables -- initialized in InitActs called by InitWts, and provides target values for DecayState
-	Init ActInitParams `view:"inline" desc:"initial values for key network state variables -- initialized in InitActs called by InitWts, and provides target values for DecayState"`
+	// initial values for key network state variables -- initialized in InitActs called by InitWts, and provides target values for DecayState
+	Init ActInitParams `view:"inline"`
 
-	// [view: inline] amount to decay between AlphaCycles, simulating passage of time and effects of saccades etc, especially important for environments with random temporal structure (e.g., most standard neural net training corpora)
-	Decay DecayParams `view:"inline" desc:"amount to decay between AlphaCycles, simulating passage of time and effects of saccades etc, especially important for environments with random temporal structure (e.g., most standard neural net training corpora) "`
+	// amount to decay between AlphaCycles, simulating passage of time and effects of saccades etc, especially important for environments with random temporal structure (e.g., most standard neural net training corpora)
+	Decay DecayParams `view:"inline"`
 
-	// [view: inline] time and rate constants for temporal derivatives / updating of activation state
-	Dt DtParams `view:"inline" desc:"time and rate constants for temporal derivatives / updating of activation state"`
+	// time and rate constants for temporal derivatives / updating of activation state
+	Dt DtParams `view:"inline"`
 
-	// [view: inline] [Defaults: 1, .2, 1, 1] maximal conductances levels for channels
-	Gbar chans.Chans `view:"inline" desc:"[Defaults: 1, .2, 1, 1] maximal conductances levels for channels"`
+	// maximal conductance levels for channels (defaults: 1, .2, 1, 1)
+	Gbar chans.Chans `view:"inline"`
 
-	// [view: inline] [Defaults: 1, .3, .25, .1] reversal potentials for each channel
-	Erev chans.Chans `view:"inline" desc:"[Defaults: 1, .3, .25, .1] reversal potentials for each channel"`
+	// reversal potentials for each channel (defaults: 1, .3, .25, .1)
+	Erev chans.Chans `view:"inline"`
 
-	// [view: inline] how external inputs drive neural activations
-	Clamp ClampParams `view:"inline" desc:"how external inputs drive neural activations"`
+	// how external inputs drive neural activations
+	Clamp ClampParams `view:"inline"`
 
-	// [view: inline] how, where, when, and how much noise to add
-	Noise SpikeNoiseParams `view:"inline" desc:"how, where, when, and how much noise to add"`
+	// how, where, when, and how much noise to add
+	Noise SpikeNoiseParams `view:"inline"`
 
-	// [view: inline] range for Vm membrane potential -- [0.1, 1.0] -- important to keep just at extreme range of reversal potentials to prevent numerical instability
-	VmRange minmax.F32 `view:"inline" desc:"range for Vm membrane potential -- [0.1, 1.0] -- important to keep just at extreme range of reversal potentials to prevent numerical instability"`
+	// range for Vm membrane potential -- [0.1, 1.0] -- important to keep just at extreme range of reversal potentials to prevent numerical instability
+	VmRange minmax.F32 `view:"inline"`
 
-	// [view: inline] M-type medium time-scale afterhyperpolarization mAHP current -- this is the primary form of adaptation on the time scale of multiple sequences of spikes
-	Mahp chans.MahpParams `view:"inline" desc:"M-type medium time-scale afterhyperpolarization mAHP current -- this is the primary form of adaptation on the time scale of multiple sequences of spikes"`
+	// M-type medium time-scale afterhyperpolarization mAHP current -- this is the primary form of adaptation on the time scale of multiple sequences of spikes
+	Mahp chans.MahpParams `view:"inline"`
 
-	// [view: inline] slow time-scale afterhyperpolarization sAHP current -- integrates CaSpkD at theta cycle intervals and produces a hard cutoff on sustained activity for any neuron
-	Sahp chans.SahpParams `view:"inline" desc:"slow time-scale afterhyperpolarization sAHP current -- integrates CaSpkD at theta cycle intervals and produces a hard cutoff on sustained activity for any neuron"`
+	// slow time-scale afterhyperpolarization sAHP current -- integrates CaSpkD at theta cycle intervals and produces a hard cutoff on sustained activity for any neuron
+	Sahp chans.SahpParams `view:"inline"`
 
-	// [view: inline] sodium-gated potassium channel adaptation parameters -- activates a leak-like current as a function of neural activity (firing = Na influx) at two different time-scales (Slick = medium, Slack = slow)
-	KNa chans.KNaMedSlow `view:"inline" desc:"sodium-gated potassium channel adaptation parameters -- activates a leak-like current as a function of neural activity (firing = Na influx) at two different time-scales (Slick = medium, Slack = slow)"`
+	// sodium-gated potassium channel adaptation parameters -- activates a leak-like current as a function of neural activity (firing = Na influx) at two different time-scales (Slick = medium, Slack = slow)
+	KNa chans.KNaMedSlow `view:"inline"`
 
-	// [view: inline] NMDA channel parameters used in computing Gnmda conductance for bistability, and postsynaptic calcium flux used in learning. Note that Learn.Snmda has distinct parameters used in computing sending NMDA parameters used in learning.
-	NMDA chans.NMDAParams `view:"inline" desc:"NMDA channel parameters used in computing Gnmda conductance for bistability, and postsynaptic calcium flux used in learning. Note that Learn.Snmda has distinct parameters used in computing sending NMDA parameters used in learning."`
+	// NMDA channel parameters used in computing Gnmda conductance for bistability, and postsynaptic calcium flux used in learning. Note that Learn.Snmda has distinct parameters used in computing sending NMDA parameters used in learning.
+	NMDA chans.NMDAParams `view:"inline"`
 
-	// [view: inline] NMDA channel parameters used in computing Gnmda conductance for bistability, and postsynaptic calcium flux used in learning. Note that Learn.Snmda has distinct parameters used in computing sending NMDA parameters used in learning.
-	MaintNMDA chans.NMDAParams `view:"inline" desc:"NMDA channel parameters used in computing Gnmda conductance for bistability, and postsynaptic calcium flux used in learning. Note that Learn.Snmda has distinct parameters used in computing sending NMDA parameters used in learning."`
+	// NMDA channel parameters used in computing Gnmda conductance for bistability, and postsynaptic calcium flux used in learning. Note that Learn.Snmda has distinct parameters used in computing sending NMDA parameters used in learning.
+	MaintNMDA chans.NMDAParams `view:"inline"`
 
-	// [view: inline] GABA-B / GIRK channel parameters
-	GabaB chans.GABABParams `view:"inline" desc:"GABA-B / GIRK channel parameters"`
+	// GABA-B / GIRK channel parameters
+	GabaB chans.GABABParams `view:"inline"`
 
-	// [view: inline] voltage gated calcium channels -- provide a key additional source of Ca for learning and positive-feedback loop upstate for active neurons
-	VGCC chans.VGCCParams `view:"inline" desc:"voltage gated calcium channels -- provide a key additional source of Ca for learning and positive-feedback loop upstate for active neurons"`
+	// voltage gated calcium channels -- provide a key additional source of Ca for learning and positive-feedback loop upstate for active neurons
+	VGCC chans.VGCCParams `view:"inline"`
 
-	// [view: inline] A-type potassium (K) channel that is particularly important for limiting the runaway excitation from VGCC channels
-	AK chans.AKsParams `view:"inline" desc:"A-type potassium (K) channel that is particularly important for limiting the runaway excitation from VGCC channels"`
+	// A-type potassium (K) channel that is particularly important for limiting the runaway excitation from VGCC channels
+	AK chans.AKsParams `view:"inline"`
 
-	// [view: inline] small-conductance calcium-activated potassium channel produces the pausing function as a consequence of rapid bursting.
-	SKCa chans.SKCaParams `view:"inline" desc:"small-conductance calcium-activated potassium channel produces the pausing function as a consequence of rapid bursting."`
+	// small-conductance calcium-activated potassium channel produces the pausing function as a consequence of rapid bursting.
+	SKCa chans.SKCaParams `view:"inline"`
 
-	// [view: inline] Attentional modulation parameters: how Attn modulates Ge
-	AttnMod AttnParams `view:"inline" desc:"Attentional modulation parameters: how Attn modulates Ge"`
+	// Attentional modulation parameters: how Attn modulates Ge
+	AttnMod AttnParams `view:"inline"`
 
-	// [view: inline] provides encoding population codes, used to represent a single continuous (scalar) value, across a population of units / neurons (1 dimensional)
-	PopCode PopCodeParams `view:"inline" desc:"provides encoding population codes, used to represent a single continuous (scalar) value, across a population of units / neurons (1 dimensional)"`
+	// provides encoding population codes, used to represent a single continuous (scalar) value, across a population of units / neurons (1 dimensional)
+	PopCode PopCodeParams `view:"inline"`
 }
 
 func (ac *ActParams) Defaults() {
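Note: Gbar and Erev parameterize the standard conductance-based membrane equation: each channel drives Vm toward its reversal potential in proportion to its conductance. A minimal sketch of one integration step under that model, using the normalized default values noted in the comments above (the real code adds spiking, the exponential AdEx current, NMDA/VGCC channels, etc.):

```go
package main

import "fmt"

// vmStep performs one Euler step of the conductance-based membrane update:
// each channel pulls Vm toward its reversal potential in proportion to its
// conductance (all in normalized 0-1 units, per the defaults above).
func vmStep(vm, ge, gi, gl, vmDt float32) float32 {
	const erevE, erevL, erevI = 1.0, 0.3, 0.25 // excitatory, leak, inhibitory
	inet := ge*(erevE-vm) + gl*(erevL-vm) + gi*(erevI-vm)
	return vm + vmDt*inet
}

func main() {
	vm := float32(0.3) // resting potential = Erev.L
	for cyc := 0; cyc < 5; cyc++ {
		vm = vmStep(vm, 0.5, 0.2, 0.2, 1.0/2.81) // VmDt = Integ/VmTau
		fmt.Printf("cycle %d: Vm=%.3f\n", cyc, vm)
	}
}
```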
diff --git a/axon/act_prjn.go b/axon/act_prjn.go
index 41fe2bb2b..f7872ce7a 100644
--- a/axon/act_prjn.go
+++ b/axon/act_prjn.go
@@ -7,11 +7,11 @@ package axon
 
 import (
 	"log"
 
-	"github.com/emer/emergent/erand"
+	"github.com/emer/emergent/v2/erand"
 	"github.com/goki/gosl/slbool"
 	"github.com/goki/ki/ints"
 	"github.com/goki/ki/kit"
-	"github.com/goki/mat32"
+	"goki.dev/mat32/v2"
 )
 
 //go:generate stringer -type=PrjnGTypes
@@ -63,22 +63,22 @@ const (
 type SynComParams struct {
 
 	// type of conductance (G) communicated by this projection
-	GType PrjnGTypes `desc:"type of conductance (G) communicated by this projection"`
+	GType PrjnGTypes
 
-	// [def: 2] [min: 0] additional synaptic delay in msec for inputs arriving at this projection. Must be <= MaxDelay which is set during network building based on MaxDelay of any existing Prjn in the network. Delay = 0 means a spike reaches receivers in the next Cycle, which is the minimum time (1 msec). Biologically, subtract 1 from biological synaptic delay values to set corresponding Delay value.
-	Delay uint32 `min:"0" def:"2" desc:"additional synaptic delay in msec for inputs arriving at this projection. Must be <= MaxDelay which is set during network building based on MaxDelay of any existing Prjn in the network. Delay = 0 means a spike reaches receivers in the next Cycle, which is the minimum time (1 msec). Biologically, subtract 1 from biological synaptic delay values to set corresponding Delay value."`
+	// additional synaptic delay in msec for inputs arriving at this projection. Must be <= MaxDelay which is set during network building based on MaxDelay of any existing Prjn in the network. Delay = 0 means a spike reaches receivers in the next Cycle, which is the minimum time (1 msec). Biologically, subtract 1 from biological synaptic delay values to set corresponding Delay value.
+	Delay uint32 `min:"0" def:"2"`
 
 	// maximum value of Delay -- based on MaxDelay values when the BuildGBuf function was called when the network was built -- cannot set it longer than this, except by calling BuildGBuf on network after changing MaxDelay to a larger value in any projection in the network.
-	MaxDelay uint32 `inactive:"+" desc:"maximum value of Delay -- based on MaxDelay values when the BuildGBuf function was called when the network was built -- cannot set it longer than this, except by calling BuildGBuf on network after changing MaxDelay to a larger value in any projection in the network."`
+	MaxDelay uint32 `inactive:"+"`
 
 	// probability of synaptic transmission failure -- if > 0, then weights are turned off at random as a function of PFail (times 1-SWt if PFailSwt)
-	PFail float32 `desc:"probability of synaptic transmission failure -- if > 0, then weights are turned off at random as a function of PFail (times 1-SWt if PFailSwt)"`
+	PFail float32
 
 	// if true, then probability of failure is inversely proportional to SWt structural / slow weight value (i.e., multiply PFail * (1-SWt)))
-	PFailSWt slbool.Bool `desc:"if true, then probability of failure is inversely proportional to SWt structural / slow weight value (i.e., multiply PFail * (1-SWt)))"`
+	PFailSWt slbool.Bool
 
-	// [view: -] delay length = actual length of the GBuf buffer per neuron = Delay+1 -- just for speed
-	DelLen uint32 `view:"-" desc:"delay length = actual length of the GBuf buffer per neuron = Delay+1 -- just for speed"`
+	// delay length = actual length of the GBuf buffer per neuron = Delay+1 -- just for speed
+	DelLen uint32 `view:"-"`
 
 	pad, pad1 float32
 }
@@ -222,11 +222,11 @@ func (sc *SynComParams) Fail(ctx *Context, syni uint32, swt float32) {
 // using both absolute and relative factors.
 type PrjnScaleParams struct {
 
-	// [min: 0] [Defaults: Forward=1, Back=0.2] relative scaling that shifts balance between different projections -- this is subject to normalization across all other projections into receiving neuron, and determines the GScale.Target for adapting scaling
-	Rel float32 `min:"0" desc:"[Defaults: Forward=1, Back=0.2] relative scaling that shifts balance between different projections -- this is subject to normalization across all other projections into receiving neuron, and determines the GScale.Target for adapting scaling"`
+	// relative scaling that shifts balance between different projections (defaults: Forward=1, Back=0.2) -- this is subject to normalization across all other projections into receiving neuron, and determines the GScale.Target for adapting scaling
+	Rel float32 `min:"0"`
 
-	// [def: 1] [min: 0] absolute multiplier adjustment factor for the prjn scaling -- can be used to adjust for idiosyncrasies not accommodated by the standard scaling based on initial target activation level and relative scaling factors -- any adaptation operates by directly adjusting scaling factor from the initially computed value
-	Abs float32 `def:"1" min:"0" desc:"absolute multiplier adjustment factor for the prjn scaling -- can be used to adjust for idiosyncrasies not accommodated by the standard scaling based on initial target activation level and relative scaling factors -- any adaptation operates by directly adjusting scaling factor from the initially computed value"`
+	// absolute multiplier adjustment factor for the prjn scaling -- can be used to adjust for idiosyncrasies not accommodated by the standard scaling based on initial target activation level and relative scaling factors -- any adaptation operates by directly adjusting scaling factor from the initially computed value
+	Abs float32 `def:"1" min:"0"`
 
 	pad, pad1 float32
 }
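Note: Rel values are normalized across all projections into a receiving neuron and then multiplied by the per-projection Abs factor, so Forward=1 vs Back=0.2 yields roughly a 5:1 input balance regardless of how many projections there are. A sketch of that normalization with a hypothetical helper (the real GScale computation also folds in expected sending activity levels):

```go
package main

import "fmt"

// gScales converts per-projection Rel/Abs factors into effective scalings:
// each Rel is normalized by the sum of all Rels into the same neuron, then
// multiplied by that projection's Abs factor.
func gScales(rel, abs []float32) []float32 {
	var sum float32
	for _, r := range rel {
		sum += r
	}
	out := make([]float32, len(rel))
	for i := range rel {
		if sum > 0 {
			out[i] = abs[i] * rel[i] / sum
		}
	}
	return out
}

func main() {
	// one forward projection (Rel=1) and one back projection (Rel=0.2)
	fmt.Println(gScales([]float32{1, 0.2}, []float32{1, 1})) // [0.8333 0.1667]
}
```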
"goki.dev/mat32/v2" ) // TOLERANCE is the numerical difference tolerance for comparing vs. target values diff --git a/axon/axon.go b/axon/axon.go index d96aef570..e811b35da 100644 --- a/axon/axon.go +++ b/axon/axon.go @@ -5,7 +5,7 @@ package axon import ( - "github.com/emer/emergent/emer" + "github.com/emer/emergent/v2/emer" ) // AxonNetwork defines the essential algorithmic API for Axon, at the network level. diff --git a/axon/basic_test.go b/axon/basic_test.go index 1fab1b8c2..caa2a7495 100644 --- a/axon/basic_test.go +++ b/axon/basic_test.go @@ -17,13 +17,13 @@ import ( "strings" "testing" - "github.com/emer/emergent/erand" - "github.com/emer/emergent/etime" - "github.com/emer/emergent/params" - "github.com/emer/emergent/prjn" - "github.com/emer/etable/etensor" + "github.com/emer/emergent/v2/erand" + "github.com/emer/emergent/v2/etime" + "github.com/emer/emergent/v2/params" + "github.com/emer/emergent/v2/prjn" "github.com/goki/ki/kit" - "github.com/goki/mat32" + "goki.dev/etable/v2/etensor" + "goki.dev/mat32/v2" "golang.org/x/exp/maps" ) diff --git a/axon/context.go b/axon/context.go index 85e6ea448..4f5239e68 100644 --- a/axon/context.go +++ b/axon/context.go @@ -7,10 +7,10 @@ package axon import ( "math" - "github.com/emer/emergent/etime" + "github.com/emer/emergent/v2/etime" "github.com/goki/gosl/slbool" "github.com/goki/gosl/slrand" - "github.com/goki/ki/bools" + "goki.dev/glop/num" ) var ( @@ -244,50 +244,50 @@ func (ctx *Context) CopyNetStridesFrom(srcCtx *Context) { // NetIdxs are indexes and sizes for processing network type NetIdxs struct { - // [min: 1] number of data parallel items to process currently - NData uint32 `min:"1" desc:"number of data parallel items to process currently"` + // number of data parallel items to process currently + NData uint32 `min:"1"` // network index in global Networks list of networks -- needed for GPU shader kernel compatible network variable access functions (e.g., NrnV, SynV etc) in CPU mode - NetIdx uint32 `inactive:"+" desc:"network index in global Networks list of networks -- needed for GPU shader kernel compatible network variable access functions (e.g., NrnV, SynV etc) in CPU mode"` + NetIdx uint32 `inactive:"+"` // maximum amount of data parallel - MaxData uint32 `inactive:"+" desc:"maximum amount of data parallel"` + MaxData uint32 `inactive:"+"` // number of layers in the network - NLayers uint32 `inactive:"+" desc:"number of layers in the network"` + NLayers uint32 `inactive:"+"` // total number of neurons - NNeurons uint32 `inactive:"+" desc:"total number of neurons"` + NNeurons uint32 `inactive:"+"` // total number of pools excluding * MaxData factor - NPools uint32 `inactive:"+" desc:"total number of pools excluding * MaxData factor"` + NPools uint32 `inactive:"+"` // total number of synapses - NSyns uint32 `inactive:"+" desc:"total number of synapses"` + NSyns uint32 `inactive:"+"` // maximum size in float32 (4 bytes) of a GPU buffer -- needed for GPU access - GPUMaxBuffFloats uint32 `inactive:"+" desc:"maximum size in float32 (4 bytes) of a GPU buffer -- needed for GPU access"` + GPUMaxBuffFloats uint32 `inactive:"+"` // total number of SynCa banks of GPUMaxBufferBytes arrays in GPU - GPUSynCaBanks uint32 `inactive:"+" desc:"total number of SynCa banks of GPUMaxBufferBytes arrays in GPU"` + GPUSynCaBanks uint32 `inactive:"+"` // total number of PVLV Drives / positive USs - PVLVNPosUSs uint32 `inactive:"+" desc:"total number of PVLV Drives / positive USs"` + PVLVNPosUSs uint32 `inactive:"+"` // total number of PVLV Negative USs - 
PVLVNNegUSs uint32 `inactive:"+" desc:"total number of PVLV Negative USs"` + PVLVNNegUSs uint32 `inactive:"+"` // offset into GlobalVars for USneg values - GvUSnegOff uint32 `inactive:"+" desc:"offset into GlobalVars for USneg values"` + GvUSnegOff uint32 `inactive:"+"` // stride into GlobalVars for USneg values - GvUSnegStride uint32 `inactive:"+" desc:"stride into GlobalVars for USneg values"` + GvUSnegStride uint32 `inactive:"+"` // offset into GlobalVars for USpos, Drive, VSPatch values - GvUSposOff uint32 `inactive:"+" desc:"offset into GlobalVars for USpos, Drive, VSPatch values values"` + GvUSposOff uint32 `inactive:"+"` // stride into GlobalVars for USpos, Drive, VSPatch values - GvUSposStride uint32 `inactive:"+" desc:"stride into GlobalVars for USpos, Drive, VSPatch values"` + GvUSposStride uint32 `inactive:"+"` pad uint32 } @@ -349,72 +349,72 @@ func (ctx *NetIdxs) SynIdxIsValid(si uint32) bool { type Context struct { // current evaluation mode, e.g., Train, Test, etc - Mode etime.Modes `desc:"current evaluation mode, e.g., Train, Test, etc"` + Mode etime.Modes // if true, the model is being run in a testing mode, so no weight changes or other associated computations are needed. This flag should only affect learning-related behavior. It is automatically updated based on Mode != Train - Testing slbool.Bool `inactive:"+" desc:"if true, the model is being run in a testing mode, so no weight changes or other associated computations are needed. this flag should only affect learning-related behavior. Is automatically updated based on Mode != Train"` + Testing slbool.Bool `inactive:"+"` // phase counter: typically 0-1 for minus-plus but can be more phases for other algorithms - Phase int32 `desc:"phase counter: typicaly 0-1 for minus-plus but can be more phases for other algorithms"` + Phase int32 // true if this is the plus phase, when the outcome / bursting is occurring, driving positive learning -- else minus phase - PlusPhase slbool.Bool `desc:"true if this is the plus phase, when the outcome / bursting is occurring, driving positive learning -- else minus phase"` + PlusPhase slbool.Bool // cycle within current phase -- minus or plus - PhaseCycle int32 `desc:"cycle within current phase -- minus or plus"` + PhaseCycle int32 // cycle counter: number of iterations of activation updating (settling) on the current state -- this counts time sequentially until reset with NewState - Cycle int32 `desc:"cycle counter: number of iterations of activation updating (settling) on the current state -- this counts time sequentially until reset with NewState"` + Cycle int32 - // [def: 200] length of the theta cycle in terms of 1 msec Cycles -- some network update steps depend on doing something at the end of the theta cycle (e.g., CTCtxtPrjn). - ThetaCycles int32 `def:"200" desc:"length of the theta cycle in terms of 1 msec Cycles -- some network update steps depend on doing something at the end of the theta cycle (e.g., CTCtxtPrjn)."` + // length of the theta cycle in terms of 1 msec Cycles -- some network update steps depend on doing something at the end of the theta cycle (e.g., CTCtxtPrjn).
+ ThetaCycles int32 `def:"200"` // total cycle count -- increments continuously from whenever it was last reset -- typically this is number of milliseconds in simulation time -- is int32 and not uint32 b/c used with Synapse CaUpT which needs to have a -1 case for expired update time - CyclesTotal int32 `desc:"total cycle count -- increments continuously from whenever it was last reset -- typically this is number of milliseconds in simulation time -- is int32 and not uint32 b/c used with Synapse CaUpT which needs to have a -1 case for expired update time"` + CyclesTotal int32 // accumulated amount of time the network has been running, in simulation-time (not real world time), in seconds - Time float32 `desc:"accumulated amount of time the network has been running, in simulation-time (not real world time), in seconds"` + Time float32 // total trial count -- increments continuously in NewState call *only in Train mode* from whenever it was last reset -- can be used for synchronizing weight updates across nodes - TrialsTotal int32 `desc:"total trial count -- increments continuously in NewState call *only in Train mode* from whenever it was last reset -- can be used for synchronizing weight updates across nodes"` + TrialsTotal int32 - // [def: 0.001] amount of time to increment per cycle - TimePerCycle float32 `def:"0.001" desc:"amount of time to increment per cycle"` + // amount of time to increment per cycle + TimePerCycle float32 `def:"0.001"` - // [def: 100] how frequently to perform slow adaptive processes such as synaptic scaling, inhibition adaptation, associated in the brain with sleep, in the SlowAdapt method. This should be long enough for meaningful changes to accumulate -- 100 is default but could easily be longer in larger models. Because SlowCtr is incremented by NData, high NData cases (e.g. 16) likely need to increase this value -- e.g., 400 seems to produce overall consistent results in various models. - SlowInterval int32 `def:"100" desc:"how frequently to perform slow adaptive processes such as synaptic scaling, inhibition adaptation, associated in the brain with sleep, in the SlowAdapt method. This should be long enough for meaningful changes to accumulate -- 100 is default but could easily be longer in larger models. Because SlowCtr is incremented by NData, high NData cases (e.g. 16) likely need to increase this value -- e.g., 400 seems to produce overall consistent results in various models."` + // how frequently to perform slow adaptive processes such as synaptic scaling, inhibition adaptation, associated in the brain with sleep, in the SlowAdapt method. This should be long enough for meaningful changes to accumulate -- 100 is default but could easily be longer in larger models. Because SlowCtr is incremented by NData, high NData cases (e.g. 16) likely need to increase this value -- e.g., 400 seems to produce overall consistent results in various models. + SlowInterval int32 `def:"100"` // counter for how long it has been since last SlowAdapt step. Note that this is incremented by NData to maintain consistency across different values of this parameter. - SlowCtr int32 `inactive:"+" desc:"counter for how long it has been since last SlowAdapt step. Note that this is incremented by NData to maintain consistency across different values of this parameter."` + SlowCtr int32 `inactive:"+"` // synaptic calcium counter, which drives the CaUpT synaptic value to optimize updating of this computationally expensive factor. 
It is incremented by 1 for each cycle, and reset at the SlowInterval, at which point the synaptic calcium values are all reset. - SynCaCtr float32 `inactive:"+" desc:"synaptic calcium counter, which drives the CaUpT synaptic value to optimize updating of this computationally expensive factor. It is incremented by 1 for each cycle, and reset at the SlowInterval, at which point the synaptic calcium values are all reset."` + SynCaCtr float32 `inactive:"+"` pad, pad1 float32 - // [view: inline] indexes and sizes of current network - NetIdxs NetIdxs `view:"inline" desc:"indexes and sizes of current network"` + // indexes and sizes of current network + NetIdxs NetIdxs `view:"inline"` - // [view: -] stride offsets for accessing neuron variables - NeuronVars NeuronVarStrides `view:"-" desc:"stride offsets for accessing neuron variables"` + // stride offsets for accessing neuron variables + NeuronVars NeuronVarStrides `view:"-"` - // [view: -] stride offsets for accessing neuron average variables - NeuronAvgVars NeuronAvgVarStrides `view:"-" desc:"stride offsets for accessing neuron average variables"` + // stride offsets for accessing neuron average variables + NeuronAvgVars NeuronAvgVarStrides `view:"-"` - // [view: -] stride offsets for accessing neuron indexes - NeuronIdxs NeuronIdxStrides `view:"-" desc:"stride offsets for accessing neuron indexes"` + // stride offsets for accessing neuron indexes + NeuronIdxs NeuronIdxStrides `view:"-"` - // [view: -] stride offsets for accessing synapse variables - SynapseVars SynapseVarStrides `view:"-" desc:"stride offsets for accessing synapse variables"` + // stride offsets for accessing synapse variables + SynapseVars SynapseVarStrides `view:"-"` - // [view: -] stride offsets for accessing synapse Ca variables - SynapseCaVars SynapseCaStrides `view:"-" desc:"stride offsets for accessing synapse Ca variables"` + // stride offsets for accessing synapse Ca variables + SynapseCaVars SynapseCaStrides `view:"-"` - // [view: -] stride offsets for accessing synapse indexes - SynapseIdxs SynapseIdxStrides `view:"-" desc:"stride offsets for accessing synapse indexes"` + // stride offsets for accessing synapse indexes + SynapseIdxs SynapseIdxStrides `view:"-"` // random counter -- incremented by maximum number of possible random numbers generated per cycle, regardless of how many are actually used -- this is shared across all layers so must encompass all possible param settings. 
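Since SlowCtr is incremented by NData (per the SlowInterval and SlowCtr comments above), the spacing of SlowAdapt steps is defined in data-item units rather than trials. A minimal sketch of that bookkeeping, with hypothetical names rather than the actual network methods:

package main

import "fmt"

// slowAdaptDue adds the number of data-parallel items processed this
// trial (nData) to the slow counter and reports when SlowInterval is
// reached, resetting the counter -- so higher NData reaches the
// interval in proportionally fewer trials, keeping the effective
// spacing of SlowAdapt steps constant.
func slowAdaptDue(slowCtr *int32, nData, slowInterval int32) bool {
	*slowCtr += nData
	if *slowCtr < slowInterval {
		return false
	}
	*slowCtr = 0
	return true
}

func main() {
	var ctr int32
	for trial := 1; trial <= 10; trial++ {
		if slowAdaptDue(&ctr, 16, 100) { // NData = 16, SlowInterval = 100
			fmt.Println("SlowAdapt at trial", trial) // trial 7: 7*16 = 112 >= 100
		}
	}
}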
- RandCtr slrand.Counter `desc:"random counter -- incremented by maximum number of possible random numbers generated per cycle, regardless of how many are actually used -- this is shared across all layers so must encompass all possible param settings."` + RandCtr slrand.Counter } // Defaults sets default values @@ -742,7 +742,7 @@ func GlobalsReset(ctx *Context) { // GlobalSetRew is a convenience function for setting the external reward // state in Globals variables func GlobalSetRew(ctx *Context, di uint32, rew float32, hasRew bool) { - SetGlbV(ctx, di, GvHasRew, bools.ToFloat32(hasRew)) + SetGlbV(ctx, di, GvHasRew, num.FromBool[float32](hasRew)) if hasRew { SetGlbV(ctx, di, GvRew, rew) } else { diff --git a/axon/deep_layers.go b/axon/deep_layers.go index 93232381a..489efc93f 100644 --- a/axon/deep_layers.go +++ b/axon/deep_layers.go @@ -5,8 +5,8 @@ package axon import ( - "github.com/emer/emergent/params" - "github.com/goki/mat32" + "github.com/emer/emergent/v2/params" + "goki.dev/mat32/v2" ) //gosl: start deep_layers @@ -15,11 +15,11 @@ import ( // CaSpkP integrated spiking values in Super layers -- thresholded. type BurstParams struct { - // [def: 0.1] [max: 1] Relative component of threshold on superficial activation value, below which it does not drive Burst (and above which, Burst = CaSpkP). This is the distance between the average and maximum activation values within layer (e.g., 0 = average, 1 = max). Overall effective threshold is MAX of relative and absolute thresholds. - ThrRel float32 `max:"1" def:"0.1" desc:"Relative component of threshold on superficial activation value, below which it does not drive Burst (and above which, Burst = CaSpkP). This is the distance between the average and maximum activation values within layer (e.g., 0 = average, 1 = max). Overall effective threshold is MAX of relative and absolute thresholds."` + // Relative component of threshold on superficial activation value, below which it does not drive Burst (and above which, Burst = CaSpkP). This is the distance between the average and maximum activation values within layer (e.g., 0 = average, 1 = max). Overall effective threshold is MAX of relative and absolute thresholds. + ThrRel float32 `max:"1" def:"0.1"` - // [def: 0.1] [min: 0] [max: 1] Absolute component of threshold on superficial activation value, below which it does not drive Burst (and above which, Burst = CaSpkP). Overall effective threshold is MAX of relative and absolute thresholds. - ThrAbs float32 `min:"0" max:"1" def:"0.1" desc:"Absolute component of threshold on superficial activation value, below which it does not drive Burst (and above which, Burst = CaSpkP). Overall effective threshold is MAX of relative and absolute thresholds."` + // Absolute component of threshold on superficial activation value, below which it does not drive Burst (and above which, Burst = CaSpkP). Overall effective threshold is MAX of relative and absolute thresholds. + ThrAbs float32 `min:"0" max:"1" def:"0.1"` pad, pad1 float32 } @@ -42,14 +42,14 @@ func (bp *BurstParams) ThrFmAvgMax(avg, mx float32) float32 { // CTParams control the CT corticothalamic neuron special behavior type CTParams struct { - // [def: 0.05,0.1,1,2] gain factor for context excitatory input, which is constant as compared to the spiking input from other projections, so it must be downscaled accordingly. This can make a difference and may need to be scaled up or down. 
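The ThrRel/ThrAbs comments in the BurstParams hunk above pin down the threshold computation: the relative component interpolates between the layer average (0) and maximum (1) activation, and the effective threshold is the MAX of the relative and absolute components. A sketch of that formula (a plausible reading of BurstParams.ThrFmAvgMax, not the verbatim implementation):

package main

import "fmt"

// thrFmAvgMax computes the effective Burst threshold from the layer
// average and max CaSpkP: thrRel interpolates between avg (0) and max (1),
// and the result is the MAX of the relative and absolute components.
func thrFmAvgMax(avg, mx, thrRel, thrAbs float32) float32 {
	thr := avg + thrRel*(mx-avg)
	if thrAbs > thr {
		thr = thrAbs
	}
	return thr
}

func main() {
	// with the defaults ThrRel = 0.1, ThrAbs = 0.1:
	fmt.Println(thrFmAvgMax(0.05, 0.8, 0.1, 0.1)) // relative wins: 0.125
	fmt.Println(thrFmAvgMax(0.01, 0.2, 0.1, 0.1)) // absolute floor: 0.1
}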
- GeGain float32 `def:"0.05,0.1,1,2" desc:"gain factor for context excitatory input, which is constant as compared to the spiking input from other projections, so it must be downscaled accordingly. This can make a difference and may need to be scaled up or down."` + // gain factor for context excitatory input, which is constant as compared to the spiking input from other projections, so it must be downscaled accordingly. This can make a difference and may need to be scaled up or down. + GeGain float32 `def:"0.05,0.1,1,2"` - // [def: 0,50] decay time constant for context Ge input -- if > 0, decays over time so intrinsic circuit dynamics have to take over. For single-step copy-based cases, set to 0, while longer-time-scale dynamics should use 50 - DecayTau float32 `def:"0,50" desc:"decay time constant for context Ge input -- if > 0, decays over time so intrinsic circuit dynamics have to take over. For single-step copy-based cases, set to 0, while longer-time-scale dynamics should use 50"` + // decay time constant for context Ge input -- if > 0, decays over time so intrinsic circuit dynamics have to take over. For single-step copy-based cases, set to 0, while longer-time-scale dynamics should use 50 + DecayTau float32 `def:"0,50"` - // [view: -] 1 / tau - DecayDt float32 `view:"-" json:"-" xml:"-" desc:"1 / tau"` + // 1 / tau + DecayDt float32 `view:"-" json:"-" xml:"-"` pad float32 } @@ -73,14 +73,14 @@ func (cp *CTParams) Defaults() { // the corresponding driver neuron Burst activation (or CaSpkP if not Super) type PulvParams struct { - // [def: 0.1] [min: 0.0] multiplier on driver input strength, multiplies CaSpkP from driver layer to produce Ge excitatory input to Pulv unit. - DriveScale float32 `def:"0.1" min:"0.0" desc:"multiplier on driver input strength, multiplies CaSpkP from driver layer to produce Ge excitatory input to Pulv unit."` + // multiplier on driver input strength, multiplies CaSpkP from driver layer to produce Ge excitatory input to Pulv unit. + DriveScale float32 `def:"0.1" min:"0.0"` - // [def: 0.6] [min: 0.01] Level of Max driver layer CaSpkP at which the drivers fully drive the burst phase activation. If there is weaker driver input, then (Max/FullDriveAct) proportion of the non-driver inputs remain and this critically prevents the network from learning to turn activation off, which is difficult and severely degrades learning. - FullDriveAct float32 `def:"0.6" min:"0.01" desc:"Level of Max driver layer CaSpkP at which the drivers fully drive the burst phase activation. If there is weaker driver input, then (Max/FullDriveAct) proportion of the non-driver inputs remain and this critically prevents the network from learning to turn activation off, which is difficult and severely degrades learning."` + // Level of Max driver layer CaSpkP at which the drivers fully drive the burst phase activation. If there is weaker driver input, then (Max/FullDriveAct) proportion of the non-driver inputs remain and this critically prevents the network from learning to turn activation off, which is difficult and severely degrades learning. 
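One way to read the DriveScale and FullDriveAct comments above: driver-layer CaSpkP is scaled by DriveScale into Ge input, and as the driver layer's max CaSpkP approaches FullDriveAct, the proportion of remaining non-driver input goes to zero. The sketch below encodes that reading; the names and exact clamping are assumptions, not the actual PulvParams methods:

package main

import "fmt"

// nonDrivePct is one reading of the FullDriveAct comment: as the driver
// layer's max CaSpkP approaches fullDriveAct, the proportion of ordinary
// (non-driver) input that remains goes to zero, so weak drivers leave
// the regular inputs largely intact (preventing learning-to-turn-off).
func nonDrivePct(drvMax, fullDriveAct float32) float32 {
	frac := drvMax / fullDriveAct
	if frac > 1 {
		frac = 1
	}
	return 1 - frac
}

// driveGe scales a driver neuron's CaSpkP into excitatory input (DriveScale).
func driveGe(driveScale, drvAct float32) float32 {
	return driveScale * drvAct
}

func main() {
	// defaults: DriveScale = 0.1, FullDriveAct = 0.6
	fmt.Println(nonDrivePct(0.6, 0.6), driveGe(0.1, 0.6)) // 0 0.06
	fmt.Println(nonDrivePct(0.3, 0.6), driveGe(0.1, 0.3)) // 0.5 0.03
}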
+ FullDriveAct float32 `def:"0.6" min:"0.01"` // index of layer that generates the driving activity into this one -- set via SetBuildConfig(DriveLayName) setting - DriveLayIdx int32 `inactive:"+" desc:"index of layer that generates the driving activity into this one -- set via SetBuildConfig(DriveLayName) setting"` + DriveLayIdx int32 `inactive:"+"` pad float32 } diff --git a/axon/deep_net.go b/axon/deep_net.go index 6baa5f15c..cb81fd49e 100644 --- a/axon/deep_net.go +++ b/axon/deep_net.go @@ -8,9 +8,9 @@ import ( "fmt" "strings" - "github.com/emer/emergent/params" - "github.com/emer/emergent/prjn" - "github.com/emer/emergent/relpos" + "github.com/emer/emergent/v2/params" + "github.com/emer/emergent/v2/prjn" + "github.com/emer/emergent/v2/relpos" "golang.org/x/exp/maps" ) diff --git a/axon/gpu.go b/axon/gpu.go index cd599dda0..a9fa6e7a6 100644 --- a/axon/gpu.go +++ b/axon/gpu.go @@ -10,10 +10,10 @@ import ( "math" "unsafe" - "github.com/emer/empi/mpi" - "github.com/goki/gi/oswin" + "github.com/emer/empi/v2/mpi" "github.com/goki/vgpu/vgpu" vk "github.com/goki/vulkan" + "goki.dev/goosi" ) //go:embed shaders/*.spv @@ -116,7 +116,7 @@ const CyclesN = 10 type PushOff struct { // offset - Off uint32 `desc:"offset"` + Off uint32 pad, pad1, pad2 uint32 } @@ -126,71 +126,72 @@ type PushOff struct { type GPU struct { // if true, actually use the GPU - On bool `desc:"if true, actually use the GPU"` + On bool - RecFunTimes bool `desc:"if true, slower separate shader pipeline runs are used, with a CPU-sync Wait at the end, to enable timing information about each individual shader to be collected using the network FunTimer system. otherwise, only aggregate information is available about the entire Cycle call.` + // if true, slower separate shader pipeline runs are used, with a CPU-sync Wait at the end, to enable timing information about each individual shader to be collected using the network FunTimer system. Otherwise, only aggregate information is available about the entire Cycle call. + RecFunTimes bool // if true, process each cycle one at a time. Otherwise, 10 cycles at a time are processed in one batch. - CycleByCycle bool `desc:"if true, process each cycle one at a time.
Otherwise, 10 cycles at a time are processed in one batch."` + CycleByCycle bool - // [view: -] the network we operate on -- we live under this net - Net *Network `view:"-" desc:"the network we operate on -- we live under this net"` + // the network we operate on -- we live under this net + Net *Network `view:"-"` - // [view: -] the context we use - Ctx *Context `view:"-" desc:"the context we use"` + // the context we use + Ctx *Context `view:"-"` - // [view: -] the vgpu compute system - Sys *vgpu.System `view:"-" desc:"the vgpu compute system"` + // the vgpu compute system + Sys *vgpu.System `view:"-"` - // [view: -] VarSet = 0: the uniform LayerParams - Params *vgpu.VarSet `view:"-" desc:"VarSet = 0: the uniform LayerParams"` + // VarSet = 0: the uniform LayerParams + Params *vgpu.VarSet `view:"-"` - // [view: -] VarSet = 1: the storage indexes and PrjnParams - Idxs *vgpu.VarSet `view:"-" desc:"VarSet = 1: the storage indexes and PrjnParams"` + // VarSet = 1: the storage indexes and PrjnParams + Idxs *vgpu.VarSet `view:"-"` - // [view: -] VarSet = 2: the Storage buffer for RW state structs and neuron floats - Structs *vgpu.VarSet `view:"-" desc:"VarSet = 2: the Storage buffer for RW state structs and neuron floats"` + // VarSet = 2: the Storage buffer for RW state structs and neuron floats + Structs *vgpu.VarSet `view:"-"` - // [view: -] Varset = 3: the Storage buffer for synapses - Syns *vgpu.VarSet `view:"-" desc:"Varset = 3: the Storage buffer for synapses"` + // VarSet = 3: the Storage buffer for synapses + Syns *vgpu.VarSet `view:"-"` - // [view: -] Varset = 4: the Storage buffer for SynCa banks - SynCas *vgpu.VarSet `view:"-" desc:"Varset = 4: the Storage buffer for SynCa banks"` + // VarSet = 4: the Storage buffer for SynCa banks + SynCas *vgpu.VarSet `view:"-"` - // [view: -] for sequencing commands - Semaphores map[string]vk.Semaphore `view:"-" desc:"for sequencing commands"` + // for sequencing commands + Semaphores map[string]vk.Semaphore `view:"-"` - // [def: 64] [view: -] number of warp threads -- typically 64 -- must update all hlsl files if changed! - NThreads int `view:"-" inactive:"-" def:"64" desc:"number of warp threads -- typically 64 -- must update all hlsl files if changed!"` + // number of warp threads -- typically 64 -- must update all hlsl files if changed!
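The bank-of-buffers design in this GPU struct (the SynapseCas0..7 fields just below, sized against MaxBufferBytes, with NetIdxs.GPUMaxBuffFloats and GPUSynCaBanks describing the layout) implies that a flat synapse-variable index must be split into a bank number and an in-bank offset. A minimal sketch of that split, assuming GPUMaxBuffFloats is the per-bank capacity in float32 elements:

package main

import "fmt"

// bankOff splits a flat synapse-variable index into a SynapseCas bank
// number and the offset within that bank, given the per-buffer capacity
// in float32 elements (GPUMaxBuffFloats).
func bankOff(idx, maxBuffFloats uint32) (bank, off uint32) {
	return idx / maxBuffFloats, idx % maxBuffFloats
}

func main() {
	const maxFloats = 1 << 28 // e.g., a 1 GiB buffer holds 2^28 4-byte floats
	bank, off := bankOff(3*maxFloats+42, maxFloats)
	fmt.Println(bank, off) // 3 42
}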
+ NThreads int `view:"-" inactive:"-" def:"64"` - // [view: -] maximum number of bytes per individual storage buffer element, from GPUProps.Limits.MaxStorageBufferRange - MaxBufferBytes uint32 `view:"-" desc:"maximum number of bytes per individual storage buffer element, from GPUProps.Limits.MaxStorageBufferRange"` + // maximum number of bytes per individual storage buffer element, from GPUProps.Limits.MaxStorageBufferRange + MaxBufferBytes uint32 `view:"-"` - // [view: -] bank of floats for GPU access - SynapseCas0 []float32 `view:"-" desc:"bank of floats for GPU access"` + // bank of floats for GPU access + SynapseCas0 []float32 `view:"-"` - // [view: -] bank of floats for GPU access - SynapseCas1 []float32 `view:"-" desc:"bank of floats for GPU access"` + // bank of floats for GPU access + SynapseCas1 []float32 `view:"-"` - // [view: -] bank of floats for GPU access - SynapseCas2 []float32 `view:"-" desc:"bank of floats for GPU access"` + // bank of floats for GPU access + SynapseCas2 []float32 `view:"-"` - // [view: -] bank of floats for GPU access - SynapseCas3 []float32 `view:"-" desc:"bank of floats for GPU access"` + // bank of floats for GPU access + SynapseCas3 []float32 `view:"-"` - // [view: -] bank of floats for GPU access - SynapseCas4 []float32 `view:"-" desc:"bank of floats for GPU access"` + // bank of floats for GPU access + SynapseCas4 []float32 `view:"-"` - // [view: -] bank of floats for GPU access - SynapseCas5 []float32 `view:"-" desc:"bank of floats for GPU access"` + // bank of floats for GPU access + SynapseCas5 []float32 `view:"-"` - // [view: -] bank of floats for GPU access - SynapseCas6 []float32 `view:"-" desc:"bank of floats for GPU access"` + // bank of floats for GPU access + SynapseCas6 []float32 `view:"-"` - // [view: -] bank of floats for GPU access - SynapseCas7 []float32 `view:"-" desc:"bank of floats for GPU access"` + // bank of floats for GPU access + SynapseCas7 []float32 `view:"-"` - // [view: -] tracks var binding - DidBind map[string]bool `view:"-" desc:"tracks var binding"` + // tracks var binding + DidBind map[string]bool `view:"-"` } // ConfigGPUwithGUI turns on GPU mode in context of an active GUI where Vulkan @@ -198,7 +198,7 @@ type GPU struct { // Configures the GPU -- call after Network is Built, initialized, params are set, // and everything is ready to run. 
func (nt *Network) ConfigGPUwithGUI(ctx *Context) { - oswin.TheApp.RunOnMain(func() { + goosi.TheApp.RunOnMain(func() { nt.GPU.Config(ctx, nt) }) fmt.Printf("Running on GPU: %s\n", TheGPU.DeviceName) diff --git a/axon/helpers.go b/axon/helpers.go index 66d67394e..81e9a9584 100644 --- a/axon/helpers.go +++ b/axon/helpers.go @@ -7,9 +7,9 @@ package axon import ( "fmt" - "github.com/emer/emergent/ecmd" - "github.com/emer/empi/mpi" - "github.com/goki/gi/gi" + "github.com/emer/emergent/v2/ecmd" + "github.com/emer/empi/v2/mpi" + "goki.dev/gi/v2/gi" ) //////////////////////////////////////////////////// diff --git a/axon/hip_net.go b/axon/hip_net.go index 5d1d0e358..dfb40b37b 100644 --- a/axon/hip_net.go +++ b/axon/hip_net.go @@ -5,79 +5,79 @@ package axon import ( - "github.com/emer/emergent/emer" - "github.com/emer/emergent/etime" - "github.com/emer/emergent/evec" - "github.com/emer/emergent/looper" - "github.com/emer/emergent/prjn" - "github.com/emer/etable/norm" + "github.com/emer/emergent/v2/emer" + "github.com/emer/emergent/v2/etime" + "github.com/emer/emergent/v2/evec" + "github.com/emer/emergent/v2/looper" + "github.com/emer/emergent/v2/prjn" + "goki.dev/etable/v2/norm" ) // HipConfig have the hippocampus size and connectivity parameters type HipConfig struct { // size of EC2 - EC2Size evec.Vec2i `nest:"+" desc:"size of EC2"` + EC2Size evec.Vec2i `nest:"+"` // number of EC3 pools (outer dimension) - EC3NPool evec.Vec2i `nest:"+" desc:"number of EC3 pools (outer dimension)"` + EC3NPool evec.Vec2i `nest:"+"` // number of neurons in one EC3 pool - EC3NNrn evec.Vec2i `nest:"+" desc:"number of neurons in one EC3 pool"` + EC3NNrn evec.Vec2i `nest:"+"` // number of neurons in one CA1 pool - CA1NNrn evec.Vec2i `nest:"+" desc:"number of neurons in one CA1 pool"` + CA1NNrn evec.Vec2i `nest:"+"` // size of CA3 - CA3Size evec.Vec2i `nest:"+" desc:"size of CA3"` + CA3Size evec.Vec2i `nest:"+"` - // [def: 2.236] size of DG / CA3 - DGRatio float32 `def:"2.236" desc:"size of DG / CA3"` + // size of DG / CA3 + DGRatio float32 `def:"2.236"` - // [def: 0.1] percent connectivity from EC3 to EC2 - EC3ToEC2PCon float32 `def:"0.1" desc:"percent connectivity from EC3 to EC2"` + // percent connectivity from EC3 to EC2 + EC3ToEC2PCon float32 `def:"0.1"` - // [def: 0.25] percent connectivity from EC2 to DG - EC2ToDGPCon float32 `def:"0.25" desc:"percent connectivity from EC2 to DG"` + // percent connectivity from EC2 to DG + EC2ToDGPCon float32 `def:"0.25"` - // [def: 0.25] percent connectivity from EC2 to CA3 - EC2ToCA3PCon float32 `def:"0.25" desc:"percent connectivity from EC2 to CA3"` + // percent connectivity from EC2 to CA3 + EC2ToCA3PCon float32 `def:"0.25"` - // [def: 0.25] percent connectivity from CA3 to CA1 - CA3ToCA1PCon float32 `def:"0.25" desc:"percent connectivity from CA3 to CA1"` + // percent connectivity from CA3 to CA1 + CA3ToCA1PCon float32 `def:"0.25"` - // [def: 0.02] percent connectivity into CA3 from DG - DGToCA3PCon float32 `def:"0.02" desc:"percent connectivity into CA3 from DG"` + // percent connectivity into CA3 from DG + DGToCA3PCon float32 `def:"0.02"` // lateral radius of connectivity in EC2 - EC2LatRadius int `desc:"lateral radius of connectivity in EC2"` + EC2LatRadius int // lateral gaussian sigma in EC2 for how quickly weights fall off with distance - EC2LatSigma float32 `desc:"lateral gaussian sigma in EC2 for how quickly weights fall off with distance"` + EC2LatSigma float32 - // [def: 1] proportion of full mossy fiber strength (PrjnScale.Rel) for CA3 EDL in training, applied at 
the start of a trial to reduce DG -> CA3 strength. 1 = fully reduce strength, .5 = 50% reduction, etc - MossyDelta float32 `def:"1" desc:"proportion of full mossy fiber strength (PrjnScale.Rel) for CA3 EDL in training, applied at the start of a trial to reduce DG -> CA3 strength. 1 = fully reduce strength, .5 = 50% reduction, etc"` + // proportion of full mossy fiber strength (PrjnScale.Rel) for CA3 EDL in training, applied at the start of a trial to reduce DG -> CA3 strength. 1 = fully reduce strength, .5 = 50% reduction, etc + MossyDelta float32 `def:"1"` - // [def: 0.75] proportion of full mossy fiber strength (PrjnScale.Rel) for CA3 EDL in testing, applied during 2nd-3rd quarters to reduce DG -> CA3 strength. 1 = fully reduce strength, .5 = 50% reduction, etc - MossyDeltaTest float32 `def:"0.75" desc:"proportion of full mossy fiber strength (PrjnScale.Rel) for CA3 EDL in testing, applied during 2nd-3rd quarters to reduce DG -> CA3 strength. 1 = fully reduce strength, .5 = 50% reduction, etc"` + // proportion of full mossy fiber strength (PrjnScale.Rel) for CA3 EDL in testing, applied during 2nd-3rd quarters to reduce DG -> CA3 strength. 1 = fully reduce strength, .5 = 50% reduction, etc + MossyDeltaTest float32 `def:"0.75"` - // [def: 0.9] low theta modulation value for temporal difference EDL -- sets PrjnScale.Rel on CA1 <-> EC prjns consistent with Theta phase model - ThetaLow float32 `def:"0.9" desc:"low theta modulation value for temporal difference EDL -- sets PrjnScale.Rel on CA1 <-> EC prjns consistent with Theta phase model"` + // low theta modulation value for temporal difference EDL -- sets PrjnScale.Rel on CA1 <-> EC prjns consistent with Theta phase model + ThetaLow float32 `def:"0.9"` - // [def: 1] high theta modulation value for temporal difference EDL -- sets PrjnScale.Rel on CA1 <-> EC prjns consistent with Theta phase model - ThetaHigh float32 `def:"1" desc:"high theta modulation value for temporal difference EDL -- sets PrjnScale.Rel on CA1 <-> EC prjns consistent with Theta phase model"` + // high theta modulation value for temporal difference EDL -- sets PrjnScale.Rel on CA1 <-> EC prjns consistent with Theta phase model + ThetaHigh float32 `def:"1"` - // [def: true] flag for clamping the EC5 from EC5ClampSrc - EC5Clamp bool `def:"true" desc:"flag for clamping the EC5 from EC5ClampSrc"` + // flag for clamping the EC5 from EC5ClampSrc + EC5Clamp bool `def:"true"` - // [def: EC3] source layer for EC5 clamping activations in the plus phase -- biologically it is EC3 but can use an Input layer if available - EC5ClampSrc string `def:"EC3" desc:"source layer for EC5 clamping activations in the plus phase -- biologically it is EC3 but can use an Input layer if available"` + // source layer for EC5 clamping activations in the plus phase -- biologically it is EC3 but can use an Input layer if available + EC5ClampSrc string `def:"EC3"` - // [def: true] clamp the EC5 from EC5ClampSrc during testing as well as training -- this will overwrite any target values that might be used in stats (e.g., in the basic hip example), so it must be turned off there - EC5ClampTest bool `def:"true" desc:"clamp the EC5 from EC5ClampSrc during testing as well as training -- this will overwrite any target values that might be used in stats (e.g., in the basic hip example), so it must be turned off there"` + // clamp the EC5 from EC5ClampSrc during testing as well as training -- this will overwrite any target values that might be used in stats (e.g., in the basic hip example), so it must be turned 
off there + EC5ClampTest bool `def:"true"` - // [def: 0.1] threshold for binarizing EC5 clamp values -- any value above this is clamped to 1, else 0 -- helps produce a cleaner learning signal. Set to 0 to not perform any binarization. - EC5ClampThr float32 `def:"0.1" desc:"threshold for binarizing EC5 clamp values -- any value above this is clamped to 1, else 0 -- helps produce a cleaner learning signal. Set to 0 to not perform any binarization."` + // threshold for binarizing EC5 clamp values -- any value above this is clamped to 1, else 0 -- helps produce a cleaner learning signal. Set to 0 to not perform any binarization. + EC5ClampThr float32 `def:"0.1"` } func (hip *HipConfig) Defaults() { diff --git a/axon/inhib.go b/axon/inhib.go index 9e5707f80..4364298ff 100644 --- a/axon/inhib.go +++ b/axon/inhib.go @@ -7,7 +7,7 @@ package axon import ( "github.com/emer/axon/fsfffb" "github.com/goki/gosl/slbool" - "github.com/goki/mat32" + "goki.dev/mat32/v2" ) //gosl: hlsl inhib @@ -24,23 +24,23 @@ import ( // average activity within a target range. type ActAvgParams struct { - // [min: 0] [step: 0.01] [typically 0.01 - 0.2] nominal estimated average activity level in the layer, which is used in computing the scaling factor on sending projections from this layer. In general it should roughly match the layer ActAvg.ActMAvg value, which can be logged using the axon.LogAddDiagnosticItems function. If layers receiving from this layer are not getting enough Ge excitation, then this Nominal level can be lowered to increase projection strength (fewer active neurons means each one contributes more, so scaling factor goes as the inverse of activity level), or vice-versa if Ge is too high. It is also the basis for the target activity level used for the AdaptGi option -- see the Offset which is added to this value. - Nominal float32 `min:"0" step:"0.01" desc:"[typically 0.01 - 0.2] nominal estimated average activity level in the layer, which is used in computing the scaling factor on sending projections from this layer. In general it should roughly match the layer ActAvg.ActMAvg value, which can be logged using the axon.LogAddDiagnosticItems function. If layers receiving from this layer are not getting enough Ge excitation, then this Nominal level can be lowered to increase projection strength (fewer active neurons means each one contributes more, so scaling factor goes as the inverse of activity level), or vice-versa if Ge is too high. It is also the basis for the target activity level used for the AdaptGi option -- see the Offset which is added to this value."` + // nominal estimated average activity level in the layer (typically 0.01 - 0.2), which is used in computing the scaling factor on sending projections from this layer. In general it should roughly match the layer ActAvg.ActMAvg value, which can be logged using the axon.LogAddDiagnosticItems function. If layers receiving from this layer are not getting enough Ge excitation, then this Nominal level can be lowered to increase projection strength (fewer active neurons means each one contributes more, so scaling factor goes as the inverse of activity level), or vice-versa if Ge is too high. It is also the basis for the target activity level used for the AdaptGi option -- see the Offset which is added to this value. + Nominal float32 `min:"0" step:"0.01"` // enable adapting of layer inhibition Gi multiplier factor (stored in layer GiMult value) to maintain a Target layer level of ActAvg.ActMAvg. This generally works well and improves the long-term stability of the models.
It is not enabled by default because it depends on having established a reasonable Nominal + Offset target activity level. - AdaptGi slbool.Bool `desc:"enable adapting of layer inhibition Gi multiplier factor (stored in layer GiMult value) to maintain a Target layer level of ActAvg.ActMAvg. This generally works well and improves the long-term stability of the models. It is not enabled by default because it depends on having established a reasonable Nominal + Offset target activity level."` + AdaptGi slbool.Bool - // [def: 0] [viewif: AdaptGi] [min: 0] [step: 0.01] offset to add to Nominal for the target average activity that drives adaptation of Gi for this layer. Typically the Nominal level is good, but sometimes Nominal must be adjusted up or down to achieve desired Ge scaling, so this Offset can compensate accordingly. - Offset float32 `def:"0" min:"0" step:"0.01" viewif:"AdaptGi" desc:"offset to add to Nominal for the target average activity that drives adaptation of Gi for this layer. Typically the Nominal level is good, but sometimes Nominal must be adjusted up or down to achieve desired Ge scaling, so this Offset can compensate accordingly."` + // offset to add to Nominal for the target average activity that drives adaptation of Gi for this layer. Typically the Nominal level is good, but sometimes Nominal must be adjusted up or down to achieve desired Ge scaling, so this Offset can compensate accordingly. + Offset float32 `def:"0" min:"0" step:"0.01" viewif:"AdaptGi"` - // [def: 0] [viewif: AdaptGi] tolerance for higher than Target target average activation as a proportion of that target value (0 = exactly the target, 0.2 = 20% higher than target) -- only once activations move outside this tolerance are inhibitory values adapted. - HiTol float32 `def:"0" viewif:"AdaptGi" desc:"tolerance for higher than Target target average activation as a proportion of that target value (0 = exactly the target, 0.2 = 20% higher than target) -- only once activations move outside this tolerance are inhibitory values adapted."` + // tolerance for average activation higher than the Target value, as a proportion of that target value (0 = exactly the target, 0.2 = 20% higher than target) -- only once activations move outside this tolerance are inhibitory values adapted. + HiTol float32 `def:"0" viewif:"AdaptGi"` - // [def: 0.8] [viewif: AdaptGi] tolerance for lower than Target target average activation as a proportion of that target value (0 = exactly the target, 0.5 = 50% lower than target) -- only once activations move outside this tolerance are inhibitory values adapted. - LoTol float32 `def:"0.8" viewif:"AdaptGi" desc:"tolerance for lower than Target target average activation as a proportion of that target value (0 = exactly the target, 0.5 = 50% lower than target) -- only once activations move outside this tolerance are inhibitory values adapted."` + // tolerance for average activation lower than the Target value, as a proportion of that target value (0 = exactly the target, 0.5 = 50% lower than target) -- only once activations move outside this tolerance are inhibitory values adapted. + LoTol float32 `def:"0.8" viewif:"AdaptGi"`
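Putting the ActAvgParams fields together: adaptation only triggers once ActMAvg leaves the HiTol/LoTol tolerance band around the Nominal+Offset target, and then nudges the layer GiMult by AdaptRate times the normalized error. The sketch below is a plausible reconstruction of ActAvgParams.Adapt, not the verbatim code; in particular the sign convention (raising Gi when activity is above target) is an assumption:

package main

import "fmt"

// adaptGi is a plausible reconstruction of ActAvgParams.Adapt: only when
// the running average activity act falls outside the hiTol/loTol band
// around the target trg is the layer Gi multiplier adjusted, by
// rate * (act-trg)/trg (sign assumed: more activity -> more inhibition).
func adaptGi(gimult *float32, trg, act, hiTol, loTol, rate float32) bool {
	del := (act - trg) / trg
	if del < -loTol || del > hiTol {
		*gimult += rate * del
		return true
	}
	return false
}

func main() {
	gi := float32(1.0)
	// defaults from the hunk above: HiTol = 0, LoTol = 0.8, AdaptRate = 0.1
	changed := adaptGi(&gi, 0.1, 0.15, 0, 0.8, 0.1)
	fmt.Println(changed, gi) // true 1.05: activity 50% above target raises Gi
}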
- // [def: 0.1] [viewif: AdaptGi] rate of Gi adaptation as function of AdaptRate * (Target - ActMAvg) / Target -- occurs at spaced intervals determined by Network.SlowInterval value -- slower values such as 0.01 may be needed for large networks and sparse layers. - AdaptRate float32 `def:"0.1" viewif:"AdaptGi" desc:"rate of Gi adaptation as function of AdaptRate * (Target - ActMAvg) / Target -- occurs at spaced intervals determined by Network.SlowInterval value -- slower values such as 0.01 may be needed for large networks and sparse layers."` + // rate of Gi adaptation as function of AdaptRate * (Target - ActMAvg) / Target -- occurs at spaced intervals determined by Network.SlowInterval value -- slower values such as 0.01 may be needed for large networks and sparse layers. + AdaptRate float32 `def:"0.1" viewif:"AdaptGi"` pad, pad1 float32 } @@ -85,31 +85,31 @@ func (aa *ActAvgParams) Adapt(gimult *float32, act float32) bool { type TopoInhibParams struct { // use topographic inhibition - On slbool.Bool `desc:"use topographic inhibition"` + On slbool.Bool - // [viewif: On] half-width of topographic inhibition within layer - Width int32 `viewif:"On" desc:"half-width of topographic inhibition within layer"` + // half-width of topographic inhibition within layer + Width int32 `viewif:"On"` - // [viewif: On] normalized gaussian sigma as proportion of Width, for gaussian weighting - Sigma float32 `viewif:"On" desc:"normalized gaussian sigma as proportion of Width, for gaussian weighting"` + // normalized gaussian sigma as proportion of Width, for gaussian weighting + Sigma float32 `viewif:"On"` - // [viewif: On] half-width of topographic inhibition within layer - Wrap slbool.Bool `viewif:"On" desc:"half-width of topographic inhibition within layer"` + // if true, topographic inhibition wraps around the layer edges + Wrap slbool.Bool `viewif:"On"` - // [viewif: On] overall inhibition multiplier for topographic inhibition (generally <= 1) - Gi float32 `viewif:"On" desc:"overall inhibition multiplier for topographic inhibition (generally <= 1)"` + // overall inhibition multiplier for topographic inhibition (generally <= 1) + Gi float32 `viewif:"On"` - // [viewif: On] overall inhibitory contribution from feedforward inhibition -- multiplies average Ge from pools or Ge from neurons - FF float32 `viewif:"On" desc:"overall inhibitory contribution from feedforward inhibition -- multiplies average Ge from pools or Ge from neurons"` + // overall inhibitory contribution from feedforward inhibition -- multiplies average Ge from pools or Ge from neurons + FF float32 `viewif:"On"` - // [viewif: On] overall inhibitory contribution from feedback inhibition -- multiplies average activation from pools or Act from neurons - FB float32 `viewif:"On" desc:"overall inhibitory contribution from feedback inhibition -- multiplies average activation from pools or Act from neurons"` + // overall inhibitory contribution from feedback inhibition -- multiplies average activation from pools or Act from neurons + FB float32 `viewif:"On"` - // [viewif: On] feedforward zero point for Ge per neuron (summed Ge is compared to N * FF0) -- below this level, no FF inhibition is computed, above this it is FF * (Sum Ge - N * FF0) - FF0 float32 `viewif:"On" desc:"feedforward zero point for Ge per neuron (summed Ge is compared to N * FF0) -- below this level, no FF inhibition is computed, above this it is FF * (Sum Ge - N * FF0)"` + // feedforward zero point for Ge per neuron (summed Ge is compared to N * FF0) -- below this level, no FF inhibition is computed, above this it is FF * (Sum Ge - N * FF0) + FF0 float32 `viewif:"On"` // weight value at width -- to assess the value of Sigma - WidthWt float32 `inactive:"+" desc:"weight value at width -- to assess the value of Sigma"` + WidthWt float32 `inactive:"+"`
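The FF0 comment above gives the feedforward term an explicit form -- zero below the FF0 point, FF * (Ge - FF0) above it -- which combines with the FB and Gi factors roughly as follows. This is a plausible reconstruction of TopoInhibParams.GiFmGeAct (whose signature appears in the next hunk), not the verbatim code:

package main

import "fmt"

// giFmGeAct is a plausible reconstruction of the topographic inhibition
// computation: feedforward Ge contributes only above the ff0 zero point,
// feedback multiplies the average activation, and gi scales the total.
func giFmGeAct(gi, ff, fb, ge, act, ff0 float32) float32 {
	if ge < ff0 {
		ge = 0
	} else {
		ge -= ff0
	}
	return gi * (ff*ge + fb*act)
}

func main() {
	fmt.Println(giFmGeAct(1, 1, 1, 0.05, 0.1, 0.1)) // Ge below FF0: feedback only -> 0.1
	fmt.Println(giFmGeAct(1, 1, 1, 0.3, 0.1, 0.1))  // FF term adds 0.2 -> 0.3
}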
pad, pad1, pad2 float32 } @@ -146,14 +146,14 @@ func (ti *TopoInhibParams) GiFmGeAct(ge, act, ff0 float32) float32 { // which is used for Ge rescaling and potentially for adapting inhibition over time type InhibParams struct { - // [view: inline] layer-level and pool-level average activation initial values and updating / adaptation thereof -- initial values help determine initial scaling factors. - ActAvg ActAvgParams `view:"inline" desc:"layer-level and pool-level average activation initial values and updating / adaptation thereof -- initial values help determine initial scaling factors."` + // layer-level and pool-level average activation initial values and updating / adaptation thereof -- initial values help determine initial scaling factors. + ActAvg ActAvgParams `view:"inline"` - // [view: inline] inhibition across the entire layer -- inputs generally use Gi = 0.8 or 0.9, 1.3 or higher for sparse layers. If the layer has sub-pools (4D shape) then this is effectively between-pool inhibition. - Layer fsfffb.GiParams `view:"inline" desc:"inhibition across the entire layer -- inputs generally use Gi = 0.8 or 0.9, 1.3 or higher for sparse layers. If the layer has sub-pools (4D shape) then this is effectively between-pool inhibition."` + // inhibition across the entire layer -- inputs generally use Gi = 0.8 or 0.9, 1.3 or higher for sparse layers. If the layer has sub-pools (4D shape) then this is effectively between-pool inhibition. + Layer fsfffb.GiParams `view:"inline"` - // [view: inline] inhibition within sub-pools of units, for layers with 4D shape -- almost always need this if the layer has pools. - Pool fsfffb.GiParams `view:"inline" desc:"inhibition within sub-pools of units, for layers with 4D shape -- almost always need this if the layer has pools."` + // inhibition within sub-pools of units, for layers with 4D shape -- almost always need this if the layer has pools. 
+ Pool fsfffb.GiParams `view:"inline"` } func (ip *InhibParams) Update() { diff --git a/axon/layer.go b/axon/layer.go index 0357b2fb8..0e43aa24b 100644 --- a/axon/layer.go +++ b/axon/layer.go @@ -10,11 +10,11 @@ import ( "math/rand" "strings" - "github.com/emer/emergent/erand" - "github.com/emer/etable/etensor" + "github.com/emer/emergent/v2/erand" "github.com/goki/ki/ints" "github.com/goki/ki/ki" "github.com/goki/ki/kit" + "goki.dev/etable/v2/etensor" ) // index naming: @@ -27,7 +27,7 @@ type Layer struct { LayerBase // all layer-level parameters -- these must remain constant once configured - Params *LayerParams `desc:"all layer-level parameters -- these must remain constant once configured"` + Params *LayerParams } var KiT_Layer = kit.Types.AddType(&Layer{}, LayerProps) diff --git a/axon/layer_compute.go b/axon/layer_compute.go index bd0be08ab..a9af52336 100644 --- a/axon/layer_compute.go +++ b/axon/layer_compute.go @@ -8,8 +8,8 @@ import ( "fmt" "log" - "github.com/emer/etable/minmax" - "github.com/goki/mat32" + "goki.dev/etable/v2/minmax" + "goki.dev/mat32/v2" ) // index naming: diff --git a/axon/layer_test.go b/axon/layer_test.go index 526df87d9..22608730e 100644 --- a/axon/layer_test.go +++ b/axon/layer_test.go @@ -5,10 +5,10 @@ import ( "os" "testing" - "github.com/emer/emergent/prjn" - "github.com/emer/etable/etensor" + "github.com/emer/emergent/v2/prjn" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" + "goki.dev/etable/v2/etensor" ) func TestLayer(t *testing.T) { diff --git a/axon/layerbase.go b/axon/layerbase.go index e6e228771..d08e0119c 100644 --- a/axon/layerbase.go +++ b/axon/layerbase.go @@ -12,14 +12,14 @@ import ( "math" "strconv" - "github.com/emer/emergent/emer" - "github.com/emer/emergent/params" - "github.com/emer/emergent/relpos" - "github.com/emer/emergent/weights" - "github.com/emer/etable/etensor" - "github.com/goki/gi/giv" + "github.com/emer/emergent/v2/emer" + "github.com/emer/emergent/v2/params" + "github.com/emer/emergent/v2/relpos" + "github.com/emer/emergent/v2/weights" "github.com/goki/ki/indent" - "github.com/goki/mat32" + "goki.dev/etable/v2/etensor" + "goki.dev/gi/v2/giv" + "goki.dev/mat32/v2" ) // LayerBase manages the structural elements of the layer, which are common @@ -30,77 +30,77 @@ import ( // accessed via the AxonLay field. type LayerBase struct { - // [view: -] we need a pointer to ourselves as an AxonLayer (which subsumes emer.Layer), which can always be used to extract the true underlying type of object when layer is embedded in other structs -- function receivers do not have this ability so this is necessary. - AxonLay AxonLayer `copy:"-" json:"-" xml:"-" view:"-" desc:"we need a pointer to ourselves as an AxonLayer (which subsumes emer.Layer), which can always be used to extract the true underlying type of object when layer is embedded in other structs -- function receivers do not have this ability so this is necessary."` + // we need a pointer to ourselves as an AxonLayer (which subsumes emer.Layer), which can always be used to extract the true underlying type of object when layer is embedded in other structs -- function receivers do not have this ability so this is necessary. 
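The AxonLay field described above is the standard Go workaround for embedding-based "inheritance": a method defined on the embedded base type cannot recover the outermost type from its receiver, so the outer type registers itself in the base as an interface value. A minimal, generic illustration of the pattern (toy names, not the axon types):

package main

import "fmt"

type namer interface{ TypeName() string }

// base plays the role of LayerBase: it keeps an interface pointer to the
// outermost object so that base methods can reach overriding behavior.
type base struct{ self namer }

// describe is defined on the embedded base, but dispatches through the
// self interface, so the derived TypeName runs.
func (b *base) describe() string { return "layer of type " + b.self.TypeName() }

type super struct{ base }

func (s *super) TypeName() string { return "SuperLayer" }

func main() {
	s := &super{}
	s.self = s // the registration step that AxonLay performs in axon
	fmt.Println(s.describe()) // layer of type SuperLayer
}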
+ AxonLay AxonLayer `copy:"-" json:"-" xml:"-" view:"-"` - // [view: -] our parent network, in case we need to use it to find other layers etc -- set when added by network - Network *Network `copy:"-" json:"-" xml:"-" view:"-" desc:"our parent network, in case we need to use it to find other layers etc -- set when added by network"` + // our parent network, in case we need to use it to find other layers etc -- set when added by network + Network *Network `copy:"-" json:"-" xml:"-" view:"-"` // Name of the layer -- this must be unique within the network, which has a map for quick lookup and layers are typically accessed directly by name - Nm string `desc:"Name of the layer -- this must be unique within the network, which has a map for quick lookup and layers are typically accessed directly by name"` + Nm string // Class is for applying parameter styles, can be space-separated multiple tags - Cls string `desc:"Class is for applying parameter styles, can be space separated multple tags"` + Cls string // inactivate this layer -- allows for easy experimentation - Off bool `desc:"inactivate this layer -- allows for easy experimentation"` + Off bool // shape of the layer -- can be 2D for basic layers and 4D for layers with sub-groups (hypercolumns) -- order is outer-to-inner (row major) so Y then X for 2D and for 4D: Y-X unit pools then Y-X neurons within pools - Shp etensor.Shape `desc:"shape of the layer -- can be 2D for basic layers and 4D for layers with sub-groups (hypercolumns) -- order is outer-to-inner (row major) so Y then X for 2D and for 4D: Y-X unit pools then Y-X neurons within pools"` + Shp etensor.Shape // type of layer -- Hidden, Input, Target, Compare, or extended type in specialized algorithms -- matches against .Class parameter styles (e.g., .Hidden etc) - Typ LayerTypes `desc:"type of layer -- Hidden, Input, Target, Compare, or extended type in specialized algorithms -- matches against .Class parameter styles (e.g., .Hidden etc)"` + Typ LayerTypes - // [view: inline] [tableview: -] Spatial relationship to other layer, determines positioning - Rel relpos.Rel `tableview:"-" view:"inline" desc:"Spatial relationship to other layer, determines positioning"` + // Spatial relationship to other layer, determines positioning + Rel relpos.Rel `tableview:"-" view:"inline"` - // [tableview: -] position of lower-left-hand corner of layer in 3D space, computed from Rel. Layers are in X-Y width - height planes, stacked vertically in Z axis. - Ps mat32.Vec3 `tableview:"-" desc:"position of lower-left-hand corner of layer in 3D space, computed from Rel. Layers are in X-Y width - height planes, stacked vertically in Z axis."` + // position of lower-left-hand corner of layer in 3D space, computed from Rel. Layers are in X-Y width - height planes, stacked vertically in Z axis. + Ps mat32.Vec3 `tableview:"-"` - // [view: -] a 0..n-1 index of the position of the layer within list of layers in the network. For Axon networks, it only has significance in determining who gets which weights for enforcing initial weight symmetry -- higher layers get weights from lower layers. - Idx int `view:"-" inactive:"-" desc:"a 0..n-1 index of the position of the layer within list of layers in the network. For Axon networks, it only has significance in determining who gets which weights for enforcing initial weight symmetry -- higher layers get weights from lower layers."` + // a 0..n-1 index of the position of the layer within list of layers in the network.
For Axon networks, it only has significance in determining who gets which weights for enforcing initial weight symmetry -- higher layers get weights from lower layers. + Idx int `view:"-" inactive:"-"` - // [view: -] number of neurons in the layer - NNeurons uint32 `view:"-" desc:"number of neurons in the layer"` + // number of neurons in the layer + NNeurons uint32 `view:"-"` - // [view: -] starting index of neurons for this layer within the global Network list - NeurStIdx uint32 `view:"-" inactive:"-" desc:"starting index of neurons for this layer within the global Network list"` + // starting index of neurons for this layer within the global Network list + NeurStIdx uint32 `view:"-" inactive:"-"` - // [view: -] number of pools based on layer shape -- at least 1 for layer pool + 4D subpools - NPools uint32 `view:"-" desc:"number of pools based on layer shape -- at least 1 for layer pool + 4D subpools"` + // number of pools based on layer shape -- at least 1 for layer pool + 4D subpools + NPools uint32 `view:"-"` - // [view: -] maximum amount of input data that can be processed in parallel in one pass of the network. Neuron, Pool, Vals storage is allocated to hold this amount. - MaxData uint32 `view:"-" desc:"maximum amount of input data that can be processed in parallel in one pass of the network. Neuron, Pool, Vals storage is allocated to hold this amount."` + // maximum amount of input data that can be processed in parallel in one pass of the network. Neuron, Pool, Vals storage is allocated to hold this amount. + MaxData uint32 `view:"-"` - // [view: -] indexes of representative units in the layer, for computationally expensive stats or displays -- also set RepShp - RepIxs []int `view:"-" desc:"indexes of representative units in the layer, for computationally expensive stats or displays -- also set RepShp"` + // indexes of representative units in the layer, for computationally expensive stats or displays -- also set RepShp + RepIxs []int `view:"-"` - // [view: -] shape of representative units in the layer -- if RepIxs is empty or .Shp is nil, use overall layer shape - RepShp etensor.Shape `view:"-" desc:"shape of representative units in the layer -- if RepIxs is empty or .Shp is nil, use overall layer shape"` + // shape of representative units in the layer -- if RepIxs is empty or .Shp is nil, use overall layer shape + RepShp etensor.Shape `view:"-"` // list of receiving projections into this layer from other layers - RcvPrjns AxonPrjns `desc:"list of receiving projections into this layer from other layers"` + RcvPrjns AxonPrjns // list of sending projections from this layer to other layers - SndPrjns AxonPrjns `desc:"list of sending projections from this layer to other layers"` + SndPrjns AxonPrjns // layer-level state values that are updated during computation -- one for each data parallel -- is a sub-slice of network full set - Vals []LayerVals `desc:"layer-level state values that are updated during computation -- one for each data parallel -- is a sub-slice of network full set"` + Vals []LayerVals // computes FS-FFFB inhibition and other pooled, aggregate state variables -- has at least 1 for entire layer (lpl = layer pool), and one for each sub-pool if shape supports that (4D) * 1 per data parallel (inner loop). This is a sub-slice from overall Network Pools slice. You must iterate over index and use pointer to modify values. 
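The "iterate over index and use pointer" requirement in the Pools comment above follows from Pools being a sub-slice of one shared network allocation of large structs: a range-value copy would be modified and then thrown away. A toy illustration with a stand-in pool type:

package main

import "fmt"

// pool stands in for axon.Pool: a large struct whose storage is shared
// with the network-level Pools slice.
type pool struct{ GeAvg float32 }

func main() {
	pools := make([]pool, 4) // layer pool + sub-pools (x data parallel)
	for pi := range pools {
		pl := &pools[pi] // pointer into the shared backing array
		pl.GeAvg = float32(pi) * 0.1
	}
	// for _, pl := range pools { pl.GeAvg = ... } would mutate copies
	fmt.Println(pools[3].GeAvg) // 0.3
}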
- Pools []Pool `desc:"computes FS-FFFB inhibition and other pooled, aggregate state variables -- has at least 1 for entire layer (lpl = layer pool), and one for each sub-pool if shape supports that (4D) * 1 per data parallel (inner loop). This is a sub-slice from overall Network Pools slice. You must iterate over index and use pointer to modify values."` + Pools []Pool - // [view: -] [Neurons][Data] external input values for this layer, allocated from network global Exts slice - Exts []float32 `view:"-" desc:"[Neurons][Data] external input values for this layer, allocated from network global Exts slice"` + // [Neurons][Data] external input values for this layer, allocated from network global Exts slice + Exts []float32 `view:"-"` - // [tableview: -] configuration data set when the network is configured, that is used during the network Build() process via PostBuild method, after all the structure of the network has been fully constructed. In particular, the Params is nil until Build, so setting anything specific in there (e.g., an index to another layer) must be done as a second pass. Note that Params are all applied after Build and can set user-modifiable params, so this is for more special algorithm structural parameters set during ConfigNet() methods., - BuildConfig map[string]string `tableview:"-" desc:"configuration data set when the network is configured, that is used during the network Build() process via PostBuild method, after all the structure of the network has been fully constructed. In particular, the Params is nil until Build, so setting anything specific in there (e.g., an index to another layer) must be done as a second pass. Note that Params are all applied after Build and can set user-modifiable params, so this is for more special algorithm structural parameters set during ConfigNet() methods.,"` + // configuration data set when the network is configured, that is used during the network Build() process via PostBuild method, after all the structure of the network has been fully constructed. In particular, the Params is nil until Build, so setting anything specific in there (e.g., an index to another layer) must be done as a second pass. Note that Params are all applied after Build and can set user-modifiable params, so this is for more special algorithm structural parameters set during ConfigNet() methods. + BuildConfig map[string]string `tableview:"-"`
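The BuildConfig mechanism above is a two-phase pattern: string-keyed structural settings are recorded during ConfigNet (e.g., the SetBuildConfig(DriveLayName) usage noted for PulvParams earlier), then resolved into indexes at Build time via PostBuild, once all layers exist. A hypothetical sketch of that pattern with toy types, not the actual axon methods:

package main

import "fmt"

// layer carries a BuildConfig map that is set during ConfigNet and only
// consumed at Build time, mirroring the two-phase pattern above.
type layer struct {
	Name        string
	BuildConfig map[string]string
	DriveLayIdx int // resolved at Build time; -1 if not configured
}

func (ly *layer) setBuildConfig(key, val string) {
	if ly.BuildConfig == nil {
		ly.BuildConfig = map[string]string{}
	}
	ly.BuildConfig[key] = val
}

// postBuild resolves configured layer names into indexes, which is only
// possible once every layer exists.
func (ly *layer) postBuild(layIdxByName map[string]int) {
	ly.DriveLayIdx = -1
	if nm, ok := ly.BuildConfig["DriveLayName"]; ok {
		ly.DriveLayIdx = layIdxByName[nm]
	}
}

func main() {
	pulv := &layer{Name: "Pulv"}
	pulv.setBuildConfig("DriveLayName", "V1") // during ConfigNet
	pulv.postBuild(map[string]int{"V1": 0, "Pulv": 1})
	fmt.Println(pulv.DriveLayIdx) // 0
}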
- // [tableview: -] default parameters that are applied prior to user-set parameters -- these are useful for specific layer functionality in specialized brain areas (e.g., PVLV, BG etc) not associated with a layer type, which otherwise is used to hard-code initial default parameters -- typically just set to a literal map. - DefParams params.Params `tableview:"-" desc:"default parameters that are applied prior to user-set parameters -- these are useful for specific layer functionality in specialized brain areas (e.g., PVLV, BG etc) not associated with a layer type, which otherwise is used to hard-code initial default parameters -- typically just set to a literal map."` + // default parameters that are applied prior to user-set parameters -- these are useful for specific layer functionality in specialized brain areas (e.g., PVLV, BG etc) not associated with a layer type, which otherwise is used to hard-code initial default parameters -- typically just set to a literal map. + DefParams params.Params `tableview:"-"` - // [tableview: -] provides a history of parameters applied to the layer - ParamsHistory params.HistoryImpl `tableview:"-" desc:"provides a history of parameters applied to the layer"` + // provides a history of parameters applied to the layer + ParamsHistory params.HistoryImpl `tableview:"-"` } // emer.Layer interface methods diff --git a/axon/layerparams.go b/axon/layerparams.go index e2b126b2b..6849e3458 100644 --- a/axon/layerparams.go +++ b/axon/layerparams.go @@ -7,7 +7,7 @@ package axon import ( "encoding/json" - "github.com/goki/mat32" + "goki.dev/mat32/v2" ) //gosl: hlsl layerparams @@ -29,46 +29,46 @@ import ( type LayerIdxs struct { // layer index - LayIdx uint32 `inactive:"+" desc:"layer index"` + LayIdx uint32 `inactive:"+"` // maximum number of data parallel elements - MaxData uint32 `inactive:"+" desc:"maximum number of data parallel elements"` + MaxData uint32 `inactive:"+"` // start of pools for this layer -- first one is always the layer-wide pool - PoolSt uint32 `inactive:"+" desc:"start of pools for this layer -- first one is always the layer-wide pool"` + PoolSt uint32 `inactive:"+"` // start of neurons for this layer in global array (same as Layer.NeurStIdx) - NeurSt uint32 `inactive:"+" desc:"start of neurons for this layer in global array (same as Layer.NeurStIdx)"` + NeurSt uint32 `inactive:"+"` // number of neurons in layer - NeurN uint32 `inactive:"+" desc:"number of neurons in layer"` + NeurN uint32 `inactive:"+"` // start index into RecvPrjns global array - RecvSt uint32 `inactive:"+" desc:"start index into RecvPrjns global array"` + RecvSt uint32 `inactive:"+"` // number of recv projections - RecvN uint32 `inactive:"+" desc:"number of recv projections"` + RecvN uint32 `inactive:"+"` // start index into SendPrjns global array - SendSt uint32 `inactive:"+" desc:"start index into RecvPrjns global array"` + SendSt uint32 `inactive:"+"` // number of send projections - SendN uint32 `inactive:"+" desc:"number of recv projections"` + SendN uint32 `inactive:"+"` // starting index in network global Exts list of external input for this layer -- only for Input / Target / Compare layer types - ExtsSt uint32 `inactive:"+" desc:"starting index in network global Exts list of external input for this layer -- only for Input / Target / Compare layer types"` + ExtsSt uint32 `inactive:"+"` // layer shape Pools Y dimension -- 1 for 2D - ShpPlY int32 `inactive:"+" desc:"layer shape Pools Y dimension -- 1 for 2D"` + ShpPlY int32 `inactive:"+"` // layer shape Pools X dimension -- 1 for 2D - ShpPlX int32 `inactive:"+" desc:"layer shape Pools X dimension -- 1 for 2D"` + ShpPlX int32 `inactive:"+"` // layer shape Units Y dimension - ShpUnY int32 `inactive:"+" desc:"layer shape Units Y dimension"` + ShpUnY int32 `inactive:"+"` // layer shape Units X dimension - ShpUnX int32 `inactive:"+" desc:"layer shape Units X dimension"` + ShpUnX int32 `inactive:"+"` pad, pad1 uint32 } @@ -95,16 +95,16 @@ func (lx *LayerIdxs) ExtIdx(ni, di uint32) uint32 { type LayerInhibIdxs struct { // idx of Layer to get layer-level inhibition from -- set during Build from BuildConfig LayInhib1Name if present -- -1 if not used - Idx1 int32 `inactive:"+" desc:"idx of Layer to get layer-level inhibition from -- set during Build from BuildConfig LayInhib1Name if present -- -1 if not used"` + Idx1 int32 `inactive:"+"`
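The ExtIdx accessor named in the hunk header above presumably just applies the [Neurons][Data] striding that ExtsSt and MaxData describe; its body is outside this diff, so take the formula below as an assumption rather than a quote:

// ExtIdx maps (neuron, data-parallel) coordinates into the
// network-global Exts slice: ExtsSt is the layer's starting offset,
// and each neuron owns MaxData consecutive slots.
func (lx *LayerIdxs) ExtIdx(ni, di uint32) uint32 {
	return lx.ExtsSt + ni*lx.MaxData + di
}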
// idx of Layer to get layer-level inhibition from -- set during Build from BuildConfig LayInhib2Name if present -- -1 if not used - Idx2 int32 `inactive:"+" desc:"idx of Layer to get layer-level inhibition from -- set during Build from BuildConfig LayInhib2Name if present -- -1 if not used"` + Idx2 int32 `inactive:"+"` // idx of Layer to get layer-level inhibition from -- set during Build from BuildConfig LayInhib3Name if present -- -1 if not used - Idx3 int32 `inactive:"+" desc:"idx of Layer to get layer-level inhibition from -- set during Build from BuildConfig LayInhib3Name if present -- -1 if not used"` + Idx3 int32 `inactive:"+"` // idx of Layer to get layer-level inhibition from -- set during Build from BuildConfig LayInhib4Name if present -- -1 if not used - Idx4 int32 `inactive:"+" desc:"idx of Layer to geta layer-level inhibition from -- set during Build from BuildConfig LayInhib4Name if present -- -1 if not used"` + Idx4 int32 `inactive:"+"` } // note: the following must appear above LayerParams for GPU usage which is order sensitive @@ -134,60 +134,60 @@ func SetNeuronExtPosNeg(ctx *Context, ni, di uint32, val float32) { type LayerParams struct { // functional type of layer -- determines functional code path for specialized layer types, and is synchronized with the Layer.Typ value - LayType LayerTypes `desc:"functional type of layer -- determines functional code path for specialized layer types, and is synchronized with the Layer.Typ value"` + LayType LayerTypes pad, pad1, pad2 int32 - // [view: add-fields] Activation parameters and methods for computing activations - Acts ActParams `view:"add-fields" desc:"Activation parameters and methods for computing activations"` + // Activation parameters and methods for computing activations + Acts ActParams `view:"add-fields"` - // [view: add-fields] Inhibition parameters and methods for computing layer-level inhibition - Inhib InhibParams `view:"add-fields" desc:"Inhibition parameters and methods for computing layer-level inhibition"` + // Inhibition parameters and methods for computing layer-level inhibition + Inhib InhibParams `view:"add-fields"` - // [view: inline] indexes of layers that contribute between-layer inhibition to this layer -- set these indexes via BuildConfig LayInhibXName (X = 1, 2...) - LayInhib LayerInhibIdxs `view:"inline" desc:"indexes of layers that contribute between-layer inhibition to this layer -- set these indexes via BuildConfig LayInhibXName (X = 1, 2...)"` + // indexes of layers that contribute between-layer inhibition to this layer -- set these indexes via BuildConfig LayInhibXName (X = 1, 2...) + LayInhib LayerInhibIdxs `view:"inline"` - // [view: add-fields] Learning parameters and methods that operate at the neuron level - Learn LearnNeurParams `view:"add-fields" desc:"Learning parameters and methods that operate at the neuron level"` + // Learning parameters and methods that operate at the neuron level + Learn LearnNeurParams `view:"add-fields"` - // [view: inline] [viewif: LayType=SuperLayer] BurstParams determine how the 5IB Burst activation is computed from CaSpkP integrated spiking values in Super layers -- thresholded. - Bursts BurstParams `viewif:"LayType=SuperLayer" view:"inline" desc:"BurstParams determine how the 5IB Burst activation is computed from CaSpkP integrated spiking values in Super layers -- thresholded."` + // BurstParams determine how the 5IB Burst activation is computed from CaSpkP integrated spiking values in Super layers -- thresholded.
+ Bursts BurstParams `viewif:"LayType=SuperLayer" view:"inline"` - // [view: inline] [viewif: LayType=[CTLayer,PTPredLayer,PTNotMaintLayer,BLALayer]] params for the CT corticothalamic layer and PTPred layer that generates predictions over the Pulvinar using context -- uses the CtxtGe excitatory input plus stronger NMDA channels to maintain context trace - CT CTParams `viewif:"LayType=[CTLayer,PTPredLayer,PTNotMaintLayer,BLALayer]" view:"inline" desc:"params for the CT corticothalamic layer and PTPred layer that generates predictions over the Pulvinar using context -- uses the CtxtGe excitatory input plus stronger NMDA channels to maintain context trace"` + // params for the CT corticothalamic layer and PTPred layer that generates predictions over the Pulvinar using context -- uses the CtxtGe excitatory input plus stronger NMDA channels to maintain context trace + CT CTParams `viewif:"LayType=[CTLayer,PTPredLayer,PTNotMaintLayer,BLALayer]" view:"inline"` - // [view: inline] [viewif: LayType=PulvinarLayer] provides parameters for how the plus-phase (outcome) state of Pulvinar thalamic relay cell neurons is computed from the corresponding driver neuron Burst activation (or CaSpkP if not Super) - Pulv PulvParams `viewif:"LayType=PulvinarLayer" view:"inline" desc:"provides parameters for how the plus-phase (outcome) state of Pulvinar thalamic relay cell neurons is computed from the corresponding driver neuron Burst activation (or CaSpkP if not Super)"` + // provides parameters for how the plus-phase (outcome) state of Pulvinar thalamic relay cell neurons is computed from the corresponding driver neuron Burst activation (or CaSpkP if not Super) + Pulv PulvParams `viewif:"LayType=PulvinarLayer" view:"inline"` - // [view: inline] [viewif: LayType=MatrixLayer] parameters for BG Striatum Matrix MSN layers, which are the main Go / NoGo gating units in BG. - Matrix MatrixParams `viewif:"LayType=MatrixLayer" view:"inline" desc:"parameters for BG Striatum Matrix MSN layers, which are the main Go / NoGo gating units in BG."` + // parameters for BG Striatum Matrix MSN layers, which are the main Go / NoGo gating units in BG. + Matrix MatrixParams `viewif:"LayType=MatrixLayer" view:"inline"` - // [view: inline] [viewif: LayType=GPLayer] type of GP Layer. - GP GPParams `viewif:"LayType=GPLayer" view:"inline" desc:"type of GP Layer."` + // type of GP Layer.
+ GP GPParams `viewif:"LayType=GPLayer" view:"inline"` - // [view: inline] [viewif: LayType=VSPatchLayer] parameters for VSPatch learning - VSPatch VSPatchParams `viewif:"LayType=VSPatchLayer" view:"inline" desc:"parameters for VSPatch learning"` + // parameters for VSPatch learning + VSPatch VSPatchParams `viewif:"LayType=VSPatchLayer" view:"inline"` - // [view: inline] [viewif: LayType=LDTLayer] parameterizes laterodorsal tegmentum ACh salience neuromodulatory signal, driven by superior colliculus stimulus novelty, US input / absence, and OFC / ACC inhibition - LDT LDTParams `viewif:"LayType=LDTLayer" view:"inline" desc:"parameterizes laterodorsal tegmentum ACh salience neuromodulatory signal, driven by superior colliculus stimulus novelty, US input / absence, and OFC / ACC inhibition"` + // parameterizes laterodorsal tegmentum ACh salience neuromodulatory signal, driven by superior colliculus stimulus novelty, US input / absence, and OFC / ACC inhibition + LDT LDTParams `viewif:"LayType=LDTLayer" view:"inline"` - // [view: inline] [viewif: LayType=VTALayer] parameterizes computing overall VTA DA based on LHb PVDA (primary value -- at US time, computed at start of each trial and stored in LHbPVDA global value) and Amygdala (CeM) CS / learned value (LV) activations, which update every cycle. - VTA VTAParams `viewif:"LayType=VTALayer" view:"inline" desc:"parameterizes computing overall VTA DA based on LHb PVDA (primary value -- at US time, computed at start of each trial and stored in LHbPVDA global value) and Amygdala (CeM) CS / learned value (LV) activations, which update every cycle."` + // parameterizes computing overall VTA DA based on LHb PVDA (primary value -- at US time, computed at start of each trial and stored in LHbPVDA global value) and Amygdala (CeM) CS / learned value (LV) activations, which update every cycle. + VTA VTAParams `viewif:"LayType=VTALayer" view:"inline"` - // [view: inline] [viewif: LayType=RWPredLayer] parameterizes reward prediction for a simple Rescorla-Wagner learning dynamic (i.e., PV learning in the PVLV framework). - RWPred RWPredParams `viewif:"LayType=RWPredLayer" view:"inline" desc:"parameterizes reward prediction for a simple Rescorla-Wagner learning dynamic (i.e., PV learning in the PVLV framework)."` + // parameterizes reward prediction for a simple Rescorla-Wagner learning dynamic (i.e., PV learning in the PVLV framework). + RWPred RWPredParams `viewif:"LayType=RWPredLayer" view:"inline"` - // [view: inline] [viewif: LayType=RWDaLayer] parameterizes reward prediction dopamine for a simple Rescorla-Wagner learning dynamic (i.e., PV learning in the PVLV framework). - RWDa RWDaParams `viewif:"LayType=RWDaLayer" view:"inline" desc:"parameterizes reward prediction dopamine for a simple Rescorla-Wagner learning dynamic (i.e., PV learning in the PVLV framework)."` + // parameterizes reward prediction dopamine for a simple Rescorla-Wagner learning dynamic (i.e., PV learning in the PVLV framework). 
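One convention worth calling out across the specialized-layer fields in this struct: viewif only gates GUI visibility. Every sub-struct (Bursts, CT, Pulv, Matrix, GP, VSPatch, LDT, VTA, RWPred, RWDa, TDInteg, TDDa) is always physically present, keeping LayerParams fixed-size and GPU-compatible, and code dispatches on LayType instead. A minimal sketch with a hypothetical wrapper type:

// DemoParams illustrates the tag convention: the GP field is always
// allocated, but the GUI shows it only when LayType == GPLayer.
type DemoParams struct {
	// functional type of layer -- selects which sub-params are used
	LayType LayerTypes

	// type of GP Layer -- displayed only for GP layers
	GP GPParams `viewif:"LayType=GPLayer" view:"inline"`
}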
+ RWDa RWDaParams `viewif:"LayType=RWDaLayer" view:"inline"` - // [view: inline] [viewif: LayType=TDIntegLayer] parameterizes TD reward integration layer - TDInteg TDIntegParams `viewif:"LayType=TDIntegLayer" view:"inline" desc:"parameterizes TD reward integration layer"` + // parameterizes TD reward integration layer + TDInteg TDIntegParams `viewif:"LayType=TDIntegLayer" view:"inline"` - // [view: inline] [viewif: LayType=TDDaLayer] parameterizes dopamine (DA) signal as the temporal difference (TD) between the TDIntegLayer activations in the minus and plus phase. - TDDa TDDaParams `viewif:"LayType=TDDaLayer" view:"inline" desc:"parameterizes dopamine (DA) signal as the temporal difference (TD) between the TDIntegLayer activations in the minus and plus phase."` + // parameterizes dopamine (DA) signal as the temporal difference (TD) between the TDIntegLayer activations in the minus and plus phase. + TDDa TDDaParams `viewif:"LayType=TDDaLayer" view:"inline"` // recv and send projection array access info - Idxs LayerIdxs `desc:"recv and send projection array access info"` + Idxs LayerIdxs } func (ly *LayerParams) Update() { diff --git a/axon/learn.go b/axon/learn.go index f228f6a34..a434ef299 100644 --- a/axon/learn.go +++ b/axon/learn.go @@ -7,10 +7,10 @@ package axon import ( "github.com/emer/axon/chans" "github.com/emer/axon/kinase" - "github.com/emer/emergent/erand" - "github.com/emer/etable/minmax" + "github.com/emer/emergent/v2/erand" "github.com/goki/gosl/slbool" - "github.com/goki/mat32" + "goki.dev/etable/v2/minmax" + "goki.dev/mat32/v2" ) /////////////////////////////////////////////////////////////////////// @@ -30,29 +30,29 @@ import ( // CaM (as in calmodulin), CaP (ltP, CaMKII, plus phase), CaD (ltD, DAPK1, minus phase). type CaLrnParams struct { - // [def: 80] denomenator used for normalizing CaLrn, so the max is roughly 1 - 1.5 or so, which works best in terms of previous standard learning rules, and overall learning performance - Norm float32 `def:"80" desc:"denomenator used for normalizing CaLrn, so the max is roughly 1 - 1.5 or so, which works best in terms of previous standard learning rules, and overall learning performance"` + // denominator used for normalizing CaLrn, so the max is roughly 1 - 1.5 or so, which works best in terms of previous standard learning rules, and overall learning performance + Norm float32 `def:"80"` - // [def: true] use spikes to generate VGCC instead of actual VGCC current -- see SpkVGCCa for calcium contribution from each spike - SpkVGCC slbool.Bool `def:"true" desc:"use spikes to generate VGCC instead of actual VGCC current -- see SpkVGCCa for calcium contribution from each spike"` + // use spikes to generate VGCC instead of actual VGCC current -- see SpkVGCCa for calcium contribution from each spike + SpkVGCC slbool.Bool `def:"true"` - // [def: 35] multiplier on spike for computing Ca contribution to CaLrn in SpkVGCC mode - SpkVgccCa float32 `def:"35" desc:"multiplier on spike for computing Ca contribution to CaLrn in SpkVGCC mode"` + // multiplier on spike for computing Ca contribution to CaLrn in SpkVGCC mode + SpkVgccCa float32 `def:"35"` - // [def: 10] time constant of decay for VgccCa calcium -- it is highly transient around spikes, so decay and diffusion factors are more important than for long-lasting NMDA factor. VgccCa is integrated separately int VgccCaInt prior to adding into NMDA Ca in CaLrn - VgccTau float32 `def:"10" desc:"time constant of decay for VgccCa calcium -- it is highly transient around spikes, so decay and diffusion factors are more important than for long-lasting NMDA factor. VgccCa is integrated separately int VgccCaInt prior to adding into NMDA Ca in CaLrn"` + // time constant of decay for VgccCa calcium -- it is highly transient around spikes, so decay and diffusion factors are more important than for long-lasting NMDA factor. VgccCa is integrated separately into VgccCaInt prior to adding into NMDA Ca in CaLrn + VgccTau float32 `def:"10"` - // [view: inline] time constants for integrating CaLrn across M, P and D cascading levels - Dt kinase.CaDtParams `view:"inline" desc:"time constants for integrating CaLrn across M, P and D cascading levels"` + // time constants for integrating CaLrn across M, P and D cascading levels + Dt kinase.CaDtParams `view:"inline"` - // [def: 0.01,0.02,0.5] Threshold on CaSpkP CaSpkD value for updating synapse-level Ca values (SynCa) -- this is purely a performance optimization that excludes random infrequent spikes -- 0.05 works well on larger networks but not smaller, which require the .01 default. - UpdtThr float32 `def:"0.01,0.02,0.5" desc:"Threshold on CaSpkP CaSpkD value for updating synapse-level Ca values (SynCa) -- this is purely a performance optimization that excludes random infrequent spikes -- 0.05 works well on larger networks but not smaller, which require the .01 default."` + // Threshold on CaSpkP CaSpkD value for updating synapse-level Ca values (SynCa) -- this is purely a performance optimization that excludes random infrequent spikes -- 0.05 works well on larger networks but not smaller, which require the .01 default. + UpdtThr float32 `def:"0.01,0.02,0.5"` - // [view: -] rate = 1 / tau - VgccDt float32 `view:"-" json:"-" xml:"-" inactive:"+" desc:"rate = 1 / tau"` + // rate = 1 / tau + VgccDt float32 `view:"-" json:"-" xml:"-" inactive:"+"` - // [view: -] = 1 / Norm - NormInv float32 `view:"-" json:"-" xml:"-" inactive:"+" desc:"= 1 / Norm"` + // = 1 / Norm + NormInv float32 `view:"-" json:"-" xml:"-" inactive:"+"` pad int32 } @@ -104,19 +104,19 @@ func (np *CaLrnParams) CaLrns(ctx *Context, ni, di uint32) { // and RLRate as a proxy for the activation (spiking) based learning signal. type CaSpkParams struct { - // [def: 8,12] gain multiplier on spike for computing CaSpk: increasing this directly affects the magnitude of the trace values, learning rate in Target layers, and other factors that depend on CaSpk values: RLRate, UpdtThr. Prjn.KinaseCa.SpikeG provides an additional gain factor specific to the synapse-level trace factors, without affecting neuron-level CaSpk values. Larger networks require higher gain factors at the neuron level -- 12, vs 8 for smaller. - SpikeG float32 `def:"8,12" desc:"gain multiplier on spike for computing CaSpk: increasing this directly affects the magnitude of the trace values, learning rate in Target layers, and other factors that depend on CaSpk values: RLRate, UpdtThr. Prjn.KinaseCa.SpikeG provides an additional gain factor specific to the synapse-level trace factors, without affecting neuron-level CaSpk values.
Larger networks require higher gain factors at the neuron level -- 12, vs 8 for smaller."` + // gain multiplier on spike for computing CaSpk: increasing this directly affects the magnitude of the trace values, learning rate in Target layers, and other factors that depend on CaSpk values: RLRate, UpdtThr. Prjn.KinaseCa.SpikeG provides an additional gain factor specific to the synapse-level trace factors, without affecting neuron-level CaSpk values. Larger networks require higher gain factors at the neuron level -- 12, vs 8 for smaller. + SpikeG float32 `def:"8,12"` - // [def: 30] [min: 1] time constant for integrating spike-driven calcium trace at sender and recv neurons, CaSyn, which then drives synapse-level integration of the joint pre * post synapse-level activity, in cycles (msec). Note: if this param is changed, then there will be a change in effective learning rate that can be compensated for by multiplying PrjnParams.Learn.KinaseCa.SpikeG by sqrt(30 / sqrt(SynTau) - SynTau float32 `def:"30" min:"1" desc:"time constant for integrating spike-driven calcium trace at sender and recv neurons, CaSyn, which then drives synapse-level integration of the joint pre * post synapse-level activity, in cycles (msec). Note: if this param is changed, then there will be a change in effective learning rate that can be compensated for by multiplying PrjnParams.Learn.KinaseCa.SpikeG by sqrt(30 / sqrt(SynTau)"` + // time constant for integrating spike-driven calcium trace at sender and recv neurons, CaSyn, which then drives synapse-level integration of the joint pre * post synapse-level activity, in cycles (msec). Note: if this param is changed, then there will be a change in effective learning rate that can be compensated for by multiplying PrjnParams.Learn.KinaseCa.SpikeG by sqrt(30 / sqrt(SynTau)) + SynTau float32 `def:"30" min:"1"` - // [view: -] rate = 1 / tau - SynDt float32 `view:"-" json:"-" xml:"-" inactive:"+" desc:"rate = 1 / tau"` + // rate = 1 / tau + SynDt float32 `view:"-" json:"-" xml:"-" inactive:"+"` pad int32 - // [view: inline] time constants for integrating CaSpk across M, P and D cascading levels -- these are typically the same as in CaLrn and Prjn level for synaptic integration, except for the M factor. - Dt kinase.CaDtParams `view:"inline" desc:"time constants for integrating CaSpk across M, P and D cascading levels -- these are typically the same as in CaLrn and Prjn level for synaptic integration, except for the M factor."` + // time constants for integrating CaSpk across M, P and D cascading levels -- these are typically the same as in CaLrn and Prjn level for synaptic integration, except for the M factor.
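The rate fields above (VgccDt, SynDt) and NormInv = 1 / Norm all follow one pattern: Update() caches reciprocals once, so per-cycle code multiplies instead of divides. The actual Update bodies are not in these hunks; a sketch consistent with the comments:

func (np *CaLrnParams) Update() {
	np.Dt.Update() // kinase time constants also cache their rates
	np.VgccDt = 1 / np.VgccTau
	np.NormInv = 1 / np.Norm
}

func (np *CaSpkParams) Update() {
	np.Dt.Update()
	np.SynDt = 1 / np.SynTau
}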
+ Dt kinase.CaDtParams `view:"inline"` } func (np *CaSpkParams) Defaults() { @@ -149,30 +149,30 @@ func (np *CaSpkParams) CaFmSpike(ctx *Context, ni, di uint32) { type TrgAvgActParams struct { // whether to use target average activity mechanism to scale synaptic weights - On slbool.Bool `desc:"whether to use target average activity mechanism to scale synaptic weights"` + On slbool.Bool // if this is > 0, then each neuron's GiBase is initialized as this proportion of TrgRange.Max - TrgAvg -- gives neurons differences in intrinsic inhibition / leak as a starting bias - GiBaseInit float32 `desc:"if this is > 0, then each neuron's GiBase is initialized as this proportion of TrgRange.Max - TrgAvg -- gives neurons differences in intrinsic inhibition / leak as a starting bias"` + GiBaseInit float32 - // [def: 0.02] [viewif: On] learning rate for adjustments to Trg value based on unit-level error signal. Population TrgAvg values are renormalized to fixed overall average in TrgRange. Generally, deviating from the default doesn't make much difference. - ErrLRate float32 `viewif:"On" def:"0.02" desc:"learning rate for adjustments to Trg value based on unit-level error signal. Population TrgAvg values are renormalized to fixed overall average in TrgRange. Generally, deviating from the default doesn't make much difference."` + // learning rate for adjustments to Trg value based on unit-level error signal. Population TrgAvg values are renormalized to fixed overall average in TrgRange. Generally, deviating from the default doesn't make much difference. + ErrLRate float32 `viewif:"On" def:"0.02"` - // [def: 0.005,0.0002] [viewif: On] rate parameter for how much to scale synaptic weights in proportion to the AvgDif between target and actual proportion activity -- this determines the effective strength of the constraint, and larger models may need more than the weaker default value. - SynScaleRate float32 `viewif:"On" def:"0.005,0.0002" desc:"rate parameter for how much to scale synaptic weights in proportion to the AvgDif between target and actual proportion activity -- this determines the effective strength of the constraint, and larger models may need more than the weaker default value."` + // rate parameter for how much to scale synaptic weights in proportion to the AvgDif between target and actual proportion activity -- this determines the effective strength of the constraint, and larger models may need more than the weaker default value. + SynScaleRate float32 `viewif:"On" def:"0.005,0.0002"` - // [def: 0,1] [viewif: On] amount of mean trg change to subtract -- 1 = full zero sum. 1 works best in general -- but in some cases it may be better to start with 0 and then increase using network SetSubMean method at a later point. - SubMean float32 `viewif:"On" def:"0,1" desc:"amount of mean trg change to subtract -- 1 = full zero sum. 1 works best in general -- but in some cases it may be better to start with 0 and then increase using network SetSubMean method at a later point."` + // amount of mean trg change to subtract -- 1 = full zero sum. 1 works best in general -- but in some cases it may be better to start with 0 and then increase using network SetSubMean method at a later point. 
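The SubMean mechanism described above (and used again by SWtAdaptParams and TraceParams later in this file) is a zero-sum constraint: subtract SubMean times the mean from each delta, so SubMean = 1 removes the mean entirely. A standalone sketch of the arithmetic, not the library routine:

// applySubMean subtracts sub * mean(deltas) from every delta;
// sub = 1 makes the deltas exactly zero-sum, sub = 0 is a no-op.
func applySubMean(deltas []float32, sub float32) {
	if sub == 0 || len(deltas) == 0 {
		return
	}
	var mean float32
	for _, d := range deltas {
		mean += d
	}
	mean /= float32(len(deltas))
	for i := range deltas {
		deltas[i] -= sub * mean
	}
}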
+ SubMean float32 `viewif:"On" def:"0,1"` - // [def: true] [viewif: On] permute the order of TrgAvg values within layer -- otherwise they are just assigned in order from highest to lowest for easy visualization -- generally must be true if any topographic weights are being used - Permute slbool.Bool `viewif:"On" def:"true" desc:"permute the order of TrgAvg values within layer -- otherwise they are just assigned in order from highest to lowest for easy visualization -- generally must be true if any topographic weights are being used"` + // permute the order of TrgAvg values within layer -- otherwise they are just assigned in order from highest to lowest for easy visualization -- generally must be true if any topographic weights are being used + Permute slbool.Bool `viewif:"On" def:"true"` - // [viewif: On] use pool-level target values if pool-level inhibition and 4D pooled layers are present -- if pool sizes are relatively small, then may not be useful to distribute targets just within pool - Pool slbool.Bool `viewif:"On" desc:"use pool-level target values if pool-level inhibition and 4D pooled layers are present -- if pool sizes are relatively small, then may not be useful to distribute targets just within pool"` + // use pool-level target values if pool-level inhibition and 4D pooled layers are present -- if pool sizes are relatively small, then may not be useful to distribute targets just within pool + Pool slbool.Bool `viewif:"On"` pad int32 - // [def: {'Min':0.5,'Max':2}] [viewif: On] range of target normalized average activations -- individual neurons are assigned values within this range to TrgAvg, and clamped within this range. - TrgRange minmax.F32 `viewif:"On" def:"{'Min':0.5,'Max':2}" desc:"range of target normalized average activations -- individual neurons are assigned values within this range to TrgAvg, and clamped within this range."` + // range of target normalized average activations -- individual neurons are assigned values within this range to TrgAvg, and clamped within this range. + TrgRange minmax.F32 `viewif:"On" def:"{'Min':0.5,'Max':2}"` } func (ta *TrgAvgActParams) Update() { @@ -197,23 +197,23 @@ func (ta *TrgAvgActParams) Defaults() { // activity levels, and based on the phase-wise differences in activity (Diff). type RLRateParams struct { - // [def: true] use learning rate modulation - On slbool.Bool `def:"true" desc:"use learning rate modulation"` + // use learning rate modulation + On slbool.Bool `def:"true"` - // [def: 0.05,1] [viewif: On] minimum learning rate multiplier for sigmoidal act (1-act) factor -- prevents lrate from going too low for extreme values. Set to 1 to disable Sigmoid derivative factor, which is default for Target layers. - SigmoidMin float32 `viewif:"On" def:"0.05,1" desc:"minimum learning rate multiplier for sigmoidal act (1-act) factor -- prevents lrate from going too low for extreme values. Set to 1 to disable Sigmoid derivative factor, which is default for Target layers."` + // minimum learning rate multiplier for sigmoidal act (1-act) factor -- prevents lrate from going too low for extreme values. Set to 1 to disable Sigmoid derivative factor, which is default for Target layers. 
+ SigmoidMin float32 `viewif:"On" def:"0.05,1"` - // [viewif: On] modulate learning rate as a function of plus - minus differences - Diff slbool.Bool `viewif:"On" desc:"modulate learning rate as a function of plus - minus differences"` + // modulate learning rate as a function of plus - minus differences + Diff slbool.Bool `viewif:"On"` - // [def: 0.1] [viewif: On&&Diff] threshold on Max(CaSpkP, CaSpkD) below which Min lrate applies -- must be > 0 to prevent div by zero - SpkThr float32 `viewif:"On&&Diff" def:"0.1" desc:"threshold on Max(CaSpkP, CaSpkD) below which Min lrate applies -- must be > 0 to prevent div by zero"` + // threshold on Max(CaSpkP, CaSpkD) below which Min lrate applies -- must be > 0 to prevent div by zero + SpkThr float32 `viewif:"On&&Diff" def:"0.1"` - // [def: 0.02] [viewif: On&&Diff] threshold on recv neuron error delta, i.e., |CaSpkP - CaSpkD| below which lrate is at Min value - DiffThr float32 `viewif:"On&&Diff" def:"0.02" desc:"threshold on recv neuron error delta, i.e., |CaSpkP - CaSpkD| below which lrate is at Min value"` + // threshold on recv neuron error delta, i.e., |CaSpkP - CaSpkD| below which lrate is at Min value + DiffThr float32 `viewif:"On&&Diff" def:"0.02"` - // [def: 0.001] [viewif: On&&Diff] for Diff component, minimum learning rate value when below ActDiffThr - Min float32 `viewif:"On&&Diff" def:"0.001" desc:"for Diff component, minimum learning rate value when below ActDiffThr"` + // for Diff component, minimum learning rate value when below DiffThr + Min float32 `viewif:"On&&Diff" def:"0.001"` pad, pad1 int32 } @@ -270,23 +270,23 @@ func (rl *RLRateParams) RLRateDiff(scap, scad float32) float32 { // This is mainly the running average activations that drive learning type LearnNeurParams struct { - // [view: inline] parameterizes the neuron-level calcium signals driving learning: CaLrn = NMDA + VGCC Ca sources, where VGCC can be simulated from spiking or use the more complex and dynamic VGCC channel directly. CaLrn is then integrated in a cascading manner at multiple time scales: CaM (as in calmodulin), CaP (ltP, CaMKII, plus phase), CaD (ltD, DAPK1, minus phase). - CaLearn CaLrnParams `view:"inline" desc:"parameterizes the neuron-level calcium signals driving learning: CaLrn = NMDA + VGCC Ca sources, where VGCC can be simulated from spiking or use the more complex and dynamic VGCC channel directly. CaLrn is then integrated in a cascading manner at multiple time scales: CaM (as in calmodulin), CaP (ltP, CaMKII, plus phase), CaD (ltD, DAPK1, minus phase)."` + // parameterizes the neuron-level calcium signals driving learning: CaLrn = NMDA + VGCC Ca sources, where VGCC can be simulated from spiking or use the more complex and dynamic VGCC channel directly. CaLrn is then integrated in a cascading manner at multiple time scales: CaM (as in calmodulin), CaP (ltP, CaMKII, plus phase), CaD (ltD, DAPK1, minus phase). + CaLearn CaLrnParams `view:"inline"` - // [view: inline] parameterizes the neuron-level spike-driven calcium signals, starting with CaSyn that is integrated at the neuron level, and drives synapse-level, pre * post Ca integration, which provides the Tr trace that multiplies error signals, and drives learning directly for Target layers. CaSpk* values are integrated separately at the Neuron level and used for UpdtThr and RLRate as a proxy for the activation (spiking) based learning signal.
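The RLRateDiff signature in the hunk header above, together with the SpkThr / DiffThr / Min comments, implies roughly the following logic. The real body is outside this diff, so treat this as a sketch of the documented behavior, not the implementation (it assumes the slbool IsTrue/IsFalse methods and the mat32 float32 helpers imported above):

// rlRateDiff computes the Diff-based learning rate factor from the
// plus-phase (scap = CaSpkP) and minus-phase (scad = CaSpkD) values.
func rlRateDiff(rl *RLRateParams, scap, scad float32) float32 {
	if rl.On.IsFalse() || rl.Diff.IsFalse() {
		return 1
	}
	mx := mat32.Max(scap, scad)
	if mx <= rl.SpkThr { // SpkThr > 0 also guards the division below
		return 1
	}
	dif := mat32.Abs(scap - scad)
	if dif < rl.DiffThr {
		return rl.Min
	}
	return dif / mx
}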
- CaSpk CaSpkParams `view:"inline" desc:"parameterizes the neuron-level spike-driven calcium signals, starting with CaSyn that is integrated at the neuron level, and drives synapse-level, pre * post Ca integration, which provides the Tr trace that multiplies error signals, and drives learning directly for Target layers. CaSpk* values are integrated separately at the Neuron level and used for UpdtThr and RLRate as a proxy for the activation (spiking) based learning signal."` + // parameterizes the neuron-level spike-driven calcium signals, starting with CaSyn that is integrated at the neuron level, and drives synapse-level, pre * post Ca integration, which provides the Tr trace that multiplies error signals, and drives learning directly for Target layers. CaSpk* values are integrated separately at the Neuron level and used for UpdtThr and RLRate as a proxy for the activation (spiking) based learning signal. + CaSpk CaSpkParams `view:"inline"` - // [view: inline] NMDA channel parameters used for learning, vs. the ones driving activation -- allows exploration of learning parameters independent of their effects on active maintenance contributions of NMDA, and may be supported by different receptor subtypes - LrnNMDA chans.NMDAParams `view:"inline" desc:"NMDA channel parameters used for learning, vs. the ones driving activation -- allows exploration of learning parameters independent of their effects on active maintenance contributions of NMDA, and may be supported by different receptor subtypes"` + // NMDA channel parameters used for learning, vs. the ones driving activation -- allows exploration of learning parameters independent of their effects on active maintenance contributions of NMDA, and may be supported by different receptor subtypes + LrnNMDA chans.NMDAParams `view:"inline"` - // [view: inline] synaptic scaling parameters for regulating overall average activity compared to neuron's own target level - TrgAvgAct TrgAvgActParams `view:"inline" desc:"synaptic scaling parameters for regulating overall average activity compared to neuron's own target level"` + // synaptic scaling parameters for regulating overall average activity compared to neuron's own target level + TrgAvgAct TrgAvgActParams `view:"inline"` - // [view: inline] recv neuron learning rate modulation params -- an additional error-based modulation of learning for receiver side: RLRate = |CaSpkP - CaSpkD| / Max(CaSpkP, CaSpkD) - RLRate RLRateParams `view:"inline" desc:"recv neuron learning rate modulation params -- an additional error-based modulation of learning for receiver side: RLRate = |CaSpkP - CaSpkD| / Max(CaSpkP, CaSpkD)"` + // recv neuron learning rate modulation params -- an additional error-based modulation of learning for receiver side: RLRate = |CaSpkP - CaSpkD| / Max(CaSpkP, CaSpkD) + RLRate RLRateParams `view:"inline"` - // [view: inline] neuromodulation effects on learning rate and activity, as a function of layer-level DA and ACh values, which are updated from global Context values, and computed from reinforcement learning algorithms - NeuroMod NeuroModParams `view:"inline" desc:"neuromodulation effects on learning rate and activity, as a function of layer-level DA and ACh values, which are updated from global Context values, and computed from reinforcement learning algorithms"` + // neuromodulation effects on learning rate and activity, as a function of layer-level DA and ACh values, which are updated from global Context values, and computed from reinforcement learning algorithms + NeuroMod NeuroModParams 
`view:"inline"` } func (ln *LearnNeurParams) Update() { @@ -419,17 +419,17 @@ func SigInvFun61(w float32) float32 { // SWtInitParams for initial SWt values type SWtInitParams struct { - // [def: 0,1,0.5] [min: 0] [max: 1] how much of the initial random weights are captured in the SWt values -- rest goes into the LWt values. 1 gives the strongest initial biasing effect, for larger models that need more structural support. 0.5 should work for most models where stronger constraints are not needed. - SPct float32 `min:"0" max:"1" def:"0,1,0.5" desc:"how much of the initial random weights are captured in the SWt values -- rest goes into the LWt values. 1 gives the strongest initial biasing effect, for larger models that need more structural support. 0.5 should work for most models where stronger constraints are not needed."` + // how much of the initial random weights are captured in the SWt values -- rest goes into the LWt values. 1 gives the strongest initial biasing effect, for larger models that need more structural support. 0.5 should work for most models where stronger constraints are not needed. + SPct float32 `min:"0" max:"1" def:"0,1,0.5"` - // [def: 0.5,0.4] target mean weight values across receiving neuron's projection -- the mean SWt values are constrained to remain at this value. some projections may benefit from lower mean of .4 - Mean float32 `def:"0.5,0.4" desc:"target mean weight values across receiving neuron's projection -- the mean SWt values are constrained to remain at this value. some projections may benefit from lower mean of .4"` + // target mean weight values across receiving neuron's projection -- the mean SWt values are constrained to remain at this value. some projections may benefit from lower mean of .4 + Mean float32 `def:"0.5,0.4"` - // [def: 0.25] initial variance in weight values, prior to constraints. - Var float32 `def:"0.25" desc:"initial variance in weight values, prior to constraints."` + // initial variance in weight values, prior to constraints. + Var float32 `def:"0.25"` - // [def: true] symmetrize the initial weight values with those in reciprocal projection -- typically true for bidirectional excitatory connections - Sym slbool.Bool `def:"true" desc:"symmetrize the initial weight values with those in reciprocal projection -- typically true for bidirectional excitatory connections"` + // symmetrize the initial weight values with those in reciprocal projection -- typically true for bidirectional excitatory connections + Sym slbool.Bool `def:"true"` } func (sp *SWtInitParams) Defaults() { @@ -446,16 +446,16 @@ func (sp *SWtInitParams) Update() { type SWtAdaptParams struct { // if true, adaptation is active -- if false, SWt values are not updated, in which case it is generally good to have Init.SPct=0 too. - On slbool.Bool `desc:"if true, adaptation is active -- if false, SWt values are not updated, in which case it is generally good to have Init.SPct=0 too."` + On slbool.Bool - // [def: 0.1,0.01,0.001,0.0002] [viewif: On] learning rate multiplier on the accumulated DWt values (which already have fast LRate applied) to incorporate into SWt during slow outer loop updating -- lower values impose stronger constraints, for larger networks that need more structural support, e.g., 0.001 is better after 1,000 epochs in large models. 0.1 is fine for smaller models. 
- LRate float32 `viewif:"On" def:"0.1,0.01,0.001,0.0002" desc:"learning rate multiplier on the accumulated DWt values (which already have fast LRate applied) to incorporate into SWt during slow outer loop updating -- lower values impose stronger constraints, for larger networks that need more structural support, e.g., 0.001 is better after 1,000 epochs in large models. 0.1 is fine for smaller models."` + // learning rate multiplier on the accumulated DWt values (which already have fast LRate applied) to incorporate into SWt during slow outer loop updating -- lower values impose stronger constraints, for larger networks that need more structural support, e.g., 0.001 is better after 1,000 epochs in large models. 0.1 is fine for smaller models. + LRate float32 `viewif:"On" def:"0.1,0.01,0.001,0.0002"` - // [def: 1] [viewif: On] amount of mean to subtract from SWt delta when updating -- generally best to set to 1 - SubMean float32 `viewif:"On" def:"1" desc:"amount of mean to subtract from SWt delta when updating -- generally best to set to 1"` + // amount of mean to subtract from SWt delta when updating -- generally best to set to 1 + SubMean float32 `viewif:"On" def:"1"` - // [def: 6] [viewif: On] gain of sigmoidal constrast enhancement function used to transform learned, linear LWt values into Wt values - SigGain float32 `viewif:"On" def:"6" desc:"gain of sigmoidal constrast enhancement function used to transform learned, linear LWt values into Wt values"` + // gain of sigmoidal contrast enhancement function used to transform learned, linear LWt values into Wt values + SigGain float32 `viewif:"On" def:"6"` }
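SigGain = 6 feeds the SigFun61 / SigInvFun61 pair named in the hunk above. A sketch of the standard gain/offset sigmoid that contrast-enhances linear LWt values into effective Wt values; the offset-1 form is assumed here, so verify against the actual SigFun definition, which is outside this diff:

// sigFun maps a linear weight w in 0..1 through a sigmoid with the
// given gain and offset; gain = 6, off = 1 is the SigGain default.
func sigFun(w, gain, off float32) float32 {
	if w <= 0 {
		return 0
	}
	if w >= 1 {
		return 1
	}
	return 1 / (1 + mat32.Pow((off*(1-w))/w, gain))
}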
func (sp *SWtAdaptParams) Defaults() { @@ -492,14 +492,14 @@ func (sp *SWtInitParams) RndVar(rnd erand.Rand) float32 { // more dynamic and supported by the regular learned weights. type SWtParams struct { - // [view: inline] initialization of SWt values - Init SWtInitParams `view:"inline" desc:"initialization of SWt values"` + // initialization of SWt values + Init SWtInitParams `view:"inline"` - // [view: inline] adaptation of SWt values in response to LWt learning - Adapt SWtAdaptParams `view:"inline" desc:"adaptation of SWt values in response to LWt learning"` + // adaptation of SWt values in response to LWt learning + Adapt SWtAdaptParams `view:"inline"` - // [def: {'Min':0.2,'Max':0.8}] [view: inline] range limits for SWt values - Limit minmax.F32 `def:"{'Min':0.2,'Max':0.8}" view:"inline" desc:"range limits for SWt values"` + // range limits for SWt values + Limit minmax.F32 `def:"{'Min':0.2,'Max':0.8}" view:"inline"` } func (sp *SWtParams) Defaults() { @@ -633,17 +633,17 @@ func (sp *SWtParams) InitWtsSyn(ctx *Context, syni uint32, rnd erand.Rand, mean, // LRateParams manages learning rate parameters type LRateParams struct { - // [def: 0.04,0.1,0.2] base learning rate for this projection -- can be modulated by other factors below -- for larger networks, use slower rates such as 0.04, smaller networks can use faster 0.2. - Base float32 `def:"0.04,0.1,0.2" desc:"base learning rate for this projection -- can be modulated by other factors below -- for larger networks, use slower rates such as 0.04, smaller networks can use faster 0.2."` + // base learning rate for this projection -- can be modulated by other factors below -- for larger networks, use slower rates such as 0.04, smaller networks can use faster 0.2. + Base float32 `def:"0.04,0.1,0.2"` // scheduled learning rate multiplier, simulating reduction in plasticity over aging - Sched float32 `desc:"scheduled learning rate multiplier, simulating reduction in plasticity over aging"` + Sched float32 // dynamic learning rate modulation due to neuromodulatory or other such factors - Mod float32 `desc:"dynamic learning rate modulation due to neuromodulatory or other such factors"` + Mod float32 // effective actual learning rate multiplier used in computing DWt: Eff = Mod * Sched * Base - Eff float32 `inactive:"+" desc:"effective actual learning rate multiplier used in computing DWt: Eff = eMod * Sched * Base"` + Eff float32 `inactive:"+"` } func (ls *LRateParams) Defaults() { @@ -671,17 +671,17 @@ func (ls *LRateParams) Init() { // TraceParams manages the trace-based learning parameters type TraceParams struct { - // [def: 1,2,4] time constant for integrating trace over theta cycle timescales -- governs the decay rate of syanptic trace - Tau float32 `def:"1,2,4" desc:"time constant for integrating trace over theta cycle timescales -- governs the decay rate of syanptic trace"` + // time constant for integrating trace over theta cycle timescales -- governs the decay rate of synaptic trace + Tau float32 `def:"1,2,4"` - // [def: 0,1] amount of the mean dWt to subtract, producing a zero-sum effect -- 1.0 = full zero-sum dWt -- only on non-zero DWts. typically set to 0 for standard trace learning projections, although some require it for stability over the long haul. can use SetSubMean to set to 1 after significant early learning has occurred with 0.
Some special prjn types (e.g., Hebb) benefit from SubMean = 1 always + SubMean float32 `def:"0,1"` // threshold for learning, depending on different algorithms -- in Matrix and VSPatch it applies to normalized GeIntNorm value -- setting this relatively high encourages sparser representations - LearnThr float32 `desc:"threshold for learning, depending on different algorithms -- in Matrix and VSPatch it applies to normalized GeIntNorm value -- setting this relatively high encourages sparser representations"` + LearnThr float32 - // [view: -] rate = 1 / tau - Dt float32 `view:"-" json:"-" xml:"-" inactive:"+" desc:"rate = 1 / tau"` + // rate = 1 / tau + Dt float32 `view:"-" json:"-" xml:"-" inactive:"+"` } func (tp *TraceParams) Defaults() { @@ -712,15 +712,15 @@ func (tp *TraceParams) TrFmCa(tr float32, ca float32) float32 { type LRateMod struct { // toggle use of this modulation factor - On slbool.Bool `desc:"toggle use of this modulation factor"` + On slbool.Bool - // [viewif: On] [min: 0] [max: 1] baseline learning rate -- what you get for correct cases - Base float32 `viewif:"On" min:"0" max:"1" desc:"baseline learning rate -- what you get for correct cases"` + // baseline learning rate -- what you get for correct cases + Base float32 `viewif:"On" min:"0" max:"1"` pad, pad1 int32 - // [viewif: On] defines the range over which modulation occurs for the modulator factor -- Min and below get the Base level of learning rate modulation, Max and above get a modulation of 1 - Range minmax.F32 `viewif:"On" desc:"defines the range over which modulation occurs for the modulator factor -- Min and below get the Base level of learning rate modulation, Max and above get a modulation of 1"` + // defines the range over which modulation occurs for the modulator factor -- Min and below get the Base level of learning rate modulation, Max and above get a modulation of 1 + Range minmax.F32 `viewif:"On"` } func (lr *LRateMod) Defaults() { @@ -770,18 +770,18 @@ func (lr *LRateMod) LRateMod(net *Network, fact float32) float32 { type LearnSynParams struct { // enable learning for this projection - Learn slbool.Bool `desc:"enable learning for this projection"` + Learn slbool.Bool pad, pad1, pad2 int32 - // [viewif: Learn] learning rate parameters, supporting two levels of modulation on top of base learning rate. - LRate LRateParams `viewif:"Learn" desc:"learning rate parameters, supporting two levels of modulation on top of base learning rate."` + // learning rate parameters, supporting two levels of modulation on top of base learning rate. 
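Two one-liners implied by the hunks above, written out as hedged sketches (the actual bodies are elided by the diff): the effective learning rate is the product named in the Eff comment, and TrFmCa is the usual exponential approach of the trace toward the current Ca value at rate Dt = 1 / Tau.

func (ls *LRateParams) UpdateEff() { // hypothetical name for the updater
	ls.Eff = ls.Mod * ls.Sched * ls.Base
}

func trFmCa(tp *TraceParams, tr, ca float32) float32 {
	tr += tp.Dt * (ca - tr) // Tau = 1 fully replaces the trace each theta cycle
	return tr
}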
+ LRate LRateParams `viewif:"Learn"` - // [viewif: Learn] trace-based learning parameters - Trace TraceParams `viewif:"Learn" desc:"trace-based learning parameters"` + // trace-based learning parameters + Trace TraceParams `viewif:"Learn"` - // [view: inline] [viewif: Learn] kinase calcium Ca integration parameters - KinaseCa kinase.CaParams `viewif:"Learn" view:"inline" desc:"kinase calcium Ca integration parameters"` + // kinase calcium Ca integration parameters + KinaseCa kinase.CaParams `viewif:"Learn" view:"inline"` } func (ls *LearnSynParams) Update() { diff --git a/axon/logging.go b/axon/logging.go index 8dd13aaff..cff928567 100644 --- a/axon/logging.go +++ b/axon/logging.go @@ -7,19 +7,19 @@ package axon import ( "strconv" - "github.com/emer/emergent/egui" - "github.com/emer/emergent/elog" - "github.com/emer/emergent/estats" - "github.com/emer/emergent/etime" - "github.com/emer/etable/agg" - "github.com/emer/etable/eplot" - "github.com/emer/etable/etable" - "github.com/emer/etable/etensor" - "github.com/emer/etable/metric" - "github.com/emer/etable/minmax" - "github.com/emer/etable/norm" - "github.com/emer/etable/split" - "github.com/emer/etable/tsragg" + "github.com/emer/emergent/v2/egui" + "github.com/emer/emergent/v2/elog" + "github.com/emer/emergent/v2/estats" + "github.com/emer/emergent/v2/etime" + "goki.dev/etable/v2/agg" + "goki.dev/etable/v2/eplot" + "goki.dev/etable/v2/etable" + "goki.dev/etable/v2/etensor" + "goki.dev/etable/v2/metric" + "goki.dev/etable/v2/minmax" + "goki.dev/etable/v2/norm" + "goki.dev/etable/v2/split" + "goki.dev/etable/v2/tsragg" ) // LogTestErrors records all errors made across TestTrials, at Test Epoch scope @@ -627,11 +627,13 @@ func LayerActsLogRecReset(lg *elog.Logs) { // LayerActsLogConfigGUI configures GUI for LayerActsLog Plot and LayerActs Avg Plot func LayerActsLogConfigGUI(lg *elog.Logs, gui *egui.GUI) { - plt := gui.TabView.AddNewTab(eplot.KiT_Plot2D, "LayerActs Plot").(*eplot.Plot2D) + pt := gui.Tabs.NewTab("LayerActs Plot") + plt := eplot.NewPlot2D(pt) gui.Plots["LayerActs"] = plt plt.SetTable(lg.MiscTables["LayerActs"]) - plt = gui.TabView.AddNewTab(eplot.KiT_Plot2D, "LayerActs Avg Plot").(*eplot.Plot2D) + pt = gui.Tabs.NewTab("LayerActs Avg Plot") + plt = eplot.NewPlot2D(pt) gui.Plots["LayerActsAvg"] = plt plt.SetTable(lg.MiscTables["LayerActsAvg"]) } diff --git a/axon/looper.go b/axon/looper.go index e5f5b9557..91ddd9e03 100644 --- a/axon/looper.go +++ b/axon/looper.go @@ -5,11 +5,11 @@ package axon import ( - "github.com/emer/emergent/egui" - "github.com/emer/emergent/elog" - "github.com/emer/emergent/etime" - "github.com/emer/emergent/looper" - "github.com/emer/emergent/netview" + "github.com/emer/emergent/v2/egui" + "github.com/emer/emergent/v2/elog" + "github.com/emer/emergent/v2/etime" + "github.com/emer/emergent/v2/looper" + "github.com/emer/emergent/v2/netview" ) // LooperStdPhases adds the minus and plus phases of the theta cycle, diff --git a/axon/network.go b/axon/network.go index 776a2d32a..74253f8dc 100644 --- a/axon/network.go +++ b/axon/network.go @@ -9,11 +9,11 @@ import ( "strings" "github.com/c2h5oh/datasize" - "github.com/emer/emergent/emer" - "github.com/emer/emergent/prjn" - "github.com/emer/etable/etensor" + "github.com/emer/emergent/v2/emer" + "github.com/emer/emergent/v2/prjn" "github.com/goki/ki/ki" "github.com/goki/ki/kit" + "goki.dev/etable/v2/etensor" ) // axon.Network implements the Axon spiking model, diff --git a/axon/network_test.go b/axon/network_test.go index dfa7d38ef..1ebb36635 100644 --- 
a/axon/network_test.go +++ b/axon/network_test.go @@ -5,7 +5,7 @@ package axon import ( "testing" - "github.com/emer/emergent/emer" + "github.com/emer/emergent/v2/emer" "github.com/stretchr/testify/assert" ) diff --git a/axon/networkbase.go b/axon/networkbase.go index 792fd19dc..c021f120f 100644 --- a/axon/networkbase.go +++ b/axon/networkbase.go @@ -20,150 +20,150 @@ import ( "strings" "time" - "github.com/emer/emergent/econfig" - "github.com/emer/emergent/emer" - "github.com/emer/emergent/erand" - "github.com/emer/emergent/netparams" - "github.com/emer/emergent/params" - "github.com/emer/emergent/prjn" - "github.com/emer/emergent/relpos" - "github.com/emer/emergent/timer" - "github.com/emer/emergent/weights" - "github.com/goki/gi/gi" + "github.com/emer/emergent/v2/econfig" + "github.com/emer/emergent/v2/emer" + "github.com/emer/emergent/v2/erand" + "github.com/emer/emergent/v2/netparams" + "github.com/emer/emergent/v2/params" + "github.com/emer/emergent/v2/prjn" + "github.com/emer/emergent/v2/relpos" + "github.com/emer/emergent/v2/timer" + "github.com/emer/emergent/v2/weights" "github.com/goki/ki/indent" "github.com/goki/kigen/dedupe" - "github.com/goki/mat32" + "goki.dev/gi/v2/gi" + "goki.dev/mat32/v2" ) // NetworkBase manages the basic structural components of a network (layers). // The main Network then can just have the algorithm-specific code. type NetworkBase struct { - // [view: -] we need a pointer to ourselves as an emer.Network, which can always be used to extract the true underlying type of object when network is embedded in other structs -- function receivers do not have this ability so this is necessary. - EmerNet emer.Network `copy:"-" json:"-" xml:"-" view:"-" desc:"we need a pointer to ourselves as an emer.Network, which can always be used to extract the true underlying type of object when network is embedded in other structs -- function receivers do not have this ability so this is necessary."` + // we need a pointer to ourselves as an emer.Network, which can always be used to extract the true underlying type of object when network is embedded in other structs -- function receivers do not have this ability so this is necessary. + EmerNet emer.Network `copy:"-" json:"-" xml:"-" view:"-"` // overall name of network -- helps discriminate if there are multiple - Nm string `desc:"overall name of network -- helps discriminate if there are multiple"` + Nm string // filename of last weights file loaded or saved - WtsFile string `desc:"filename of last weights file loaded or saved"` + WtsFile string // PVLV system for phasic dopamine signaling, including internal drives, US outcomes. Core LHb (lateral habenula) and VTA (ventral tegmental area) dopamine are computed in equations using inputs from specialized network layers (LDTLayer driven by BLA, CeM layers, VSPatchLayer). Renders USLayer, PVLayer, DrivesLayer representations based on state updated here. - PVLV PVLV `desc:"PVLV system for phasic dopamine signaling, including internal drives, US outcomes. Core LHb (lateral habenula) and VTA (ventral tegmental area) dopamine are computed in equations using inputs from specialized network layers (LDTLayer driven by BLA, CeM layers, VSPatchLayer). 
Renders USLayer, PVLayer, DrivesLayer representations based on state updated here."` + PVLV PVLV - // [view: -] map of name to layers -- layer names must be unique - LayMap map[string]*Layer `view:"-" desc:"map of name to layers -- layer names must be unique"` + // map of name to layers -- layer names must be unique + LayMap map[string]*Layer `view:"-"` - // [view: -] map of layer classes -- made during Build - LayClassMap map[string][]string `view:"-" desc:"map of layer classes -- made during Build"` + // map of layer classes -- made during Build + LayClassMap map[string][]string `view:"-"` - // [view: -] minimum display position in network - MinPos mat32.Vec3 `view:"-" desc:"minimum display position in network"` + // minimum display position in network + MinPos mat32.Vec3 `view:"-"` - // [view: -] maximum display position in network - MaxPos mat32.Vec3 `view:"-" desc:"maximum display position in network"` + // maximum display position in network + MaxPos mat32.Vec3 `view:"-"` // optional metadata that is saved in network weights files -- e.g., can indicate number of epochs that were trained, or any other information about this network that would be useful to save - MetaData map[string]string `desc:"optional metadata that is saved in network weights files -- e.g., can indicate number of epochs that were trained, or any other information about this network that would be useful to save"` + MetaData map[string]string // if true, the neuron and synapse variables will be organized into a gpu-optimized memory order, otherwise cpu-optimized. This must be set before network Build() is called. - UseGPUOrder bool `inactive:"+" desc:"if true, the neuron and synapse variables will be organized into a gpu-optimized memory order, otherwise cpu-optimized. This must be set before network Build() is called."` + UseGPUOrder bool `inactive:"+"` - // [view: -] network index in global Networks list of networks -- needed for GPU shader kernel compatible network variable access functions (e.g., NrnV, SynV etc) in CPU mode - NetIdx uint32 `view:"-" desc:"network index in global Networks list of networks -- needed for GPU shader kernel compatible network variable access functions (e.g., NrnV, SynV etc) in CPU mode"` + // network index in global Networks list of networks -- needed for GPU shader kernel compatible network variable access functions (e.g., NrnV, SynV etc) in CPU mode + NetIdx uint32 `view:"-"` - // [view: -] maximum synaptic delay across any projection in the network -- used for sizing the GBuf accumulation buffer. - MaxDelay uint32 `inactive:"+" view:"-" desc:"maximum synaptic delay across any projection in the network -- used for sizing the GBuf accumulation buffer."` + // maximum synaptic delay across any projection in the network -- used for sizing the GBuf accumulation buffer. + MaxDelay uint32 `inactive:"+" view:"-"` // maximum number of data inputs that can be processed in parallel in one pass of the network. Neuron storage is allocated to hold this amount during Build process, and this value reflects that. - MaxData uint32 `inactive:"+" desc:"maximum number of data inputs that can be processed in parallel in one pass of the network. 
Neuron storage is allocated to hold this amount during Build process, and this value reflects that."` + MaxData uint32 `inactive:"+"` // total number of neurons - NNeurons uint32 `inactive:"+" desc:"total number of neurons"` + NNeurons uint32 `inactive:"+"` // total number of synapses - NSyns uint32 `inactive:"+" desc:"total number of synapses"` + NSyns uint32 `inactive:"+"` - // [view: -] storage for global vars - Globals []float32 `view:"-" desc:"storage for global vars"` + // storage for global vars + Globals []float32 `view:"-"` // array of layers - Layers []*Layer `desc:"array of layers"` + Layers []*Layer - // [view: -] [Layers] array of layer parameters, in 1-to-1 correspondence with Layers - LayParams []LayerParams `view:"-" desc:"[Layers] array of layer parameters, in 1-to-1 correspondence with Layers"` + // array of layer parameters, in 1-to-1 correspondence with Layers + LayParams []LayerParams `view:"-"` - // [view: -] [Layers][MaxData] array of layer values, with extra per data - LayVals []LayerVals `view:"-" desc:"[Layers][MaxData] array of layer values, with extra per data"` + // array of layer values, with extra per data + LayVals []LayerVals `view:"-"` - // [view: -] [Layers][Pools][MaxData] array of inhibitory pools for all layers. - Pools []Pool `view:"-" desc:"[Layers][Pools][MaxData] array of inhibitory pools for all layers."` + // array of inhibitory pools for all layers. + Pools []Pool `view:"-"` - // [view: -] [Layers][Neurons][MaxData] entire network's allocation of neuron variables, accessed via NrnV function with flexible striding - Neurons []float32 `view:"-" desc:"[Layers][Neurons][MaxData] entire network's allocation of neuron variables, accessed via NrnV function with flexible striding"` + // entire network's allocation of neuron variables, accessed via NrnV function with flexible striding + Neurons []float32 `view:"-"` - // [view: -] [Layers][Neurons][MaxData]] entire network's allocation of neuron average avariables, accessed via NrnAvgV function with flexible striding - NeuronAvgs []float32 `view:"-" desc:"[Layers][Neurons][MaxData]] entire network's allocation of neuron average avariables, accessed via NrnAvgV function with flexible striding"` + // entire network's allocation of neuron average variables, accessed via NrnAvgV function with flexible striding + NeuronAvgs []float32 `view:"-"` - // [view: -] [Layers][Neurons] entire network's allocation of neuron index variables, accessed via NrnI function with flexible striding - NeuronIxs []uint32 `view:"-" desc:"[Layers][Neurons] entire network's allocation of neuron index variables, accessed via NrnI function with flexible striding"` + // entire network's allocation of neuron index variables, accessed via NrnI function with flexible striding + NeuronIxs []uint32 `view:"-"` - // [view: -] [Layers][SendPrjns] pointers to all projections in the network, sender-based - Prjns []*Prjn `view:"-" desc:"[Layers][SendPrjns] pointers to all projections in the network, sender-based"` + // pointers to all projections in the network, sender-based + Prjns []*Prjn `view:"-"` - // [view: -] [Layers][SendPrjns] array of projection parameters, in 1-to-1 correspondence with Prjns, sender-based - PrjnParams []PrjnParams `view:"-" desc:"[Layers][SendPrjns] array of projection parameters, in 1-to-1 correspondence with Prjns, sender-based"` + // array of projection parameters, in 1-to-1 correspondence with Prjns, sender-based + PrjnParams []PrjnParams `view:"-"` - // [view: -] [Layers][SendPrjns][SendNeurons][RecvNeurons]
entire network's allocation of synapse idx vars, organized sender-based, with flexible striding, accessed via SynI function - SynapseIxs []uint32 `view:"-" desc:"[Layers][SendPrjns][SendNeurons][RecvNeurons] entire network's allocation of synapse idx vars, organized sender-based, with flexible striding, accessed via SynI function"` + // entire network's allocation of synapse idx vars, organized sender-based, with flexible striding, accessed via SynI function + SynapseIxs []uint32 `view:"-"` - // [view: -] [Layers][SendPrjns][SendNeurons][RecvNeurons] entire network's allocation of synapses, organized sender-based, with flexible striding, accessed via SynV function - Synapses []float32 `view:"-" desc:"[Layers][SendPrjns][SendNeurons][RecvNeurons] entire network's allocation of synapses, organized sender-based, with flexible striding, accessed via SynV function"` + // entire network's allocation of synapses, organized sender-based, with flexible striding, accessed via SynV function + Synapses []float32 `view:"-"` - // [view: -] [Layers][SendPrjns][SendNeurons][RecvNeurons][MaxData] entire network's allocation of synapse Ca vars, organized sender-based, with flexible striding, accessed via SynCaV function - SynapseCas []float32 `view:"-" desc:"[Layers][SendPrjns][SendNeurons][RecvNeurons][MaxData] entire network's allocation of synapse Ca vars, organized sender-based, with flexible striding, accessed via SynCaV function"` + // entire network's allocation of synapse Ca vars, organized sender-based, with flexible striding, accessed via SynCaV function + SynapseCas []float32 `view:"-"` - // [view: -] [Layers][SendPrjns][SendNeurons] starting offset and N cons for each sending neuron, for indexing into the Syns synapses, which are organized sender-based. - PrjnSendCon []StartN `view:"-" desc:"[Layers][SendPrjns][SendNeurons] starting offset and N cons for each sending neuron, for indexing into the Syns synapses, which are organized sender-based."` + // starting offset and N cons for each sending neuron, for indexing into the Syns synapses, which are organized sender-based. + PrjnSendCon []StartN `view:"-"` - // [view: -] [Layers][RecvPrjns][RecvNeurons] starting offset and N cons for each recv neuron, for indexing into the RecvSynIdx array of indexes into the Syns synapses, which are organized sender-based. - PrjnRecvCon []StartN `view:"-" desc:"[Layers][RecvPrjns][RecvNeurons] starting offset and N cons for each recv neuron, for indexing into the RecvSynIdx array of indexes into the Syns synapses, which are organized sender-based."` + // starting offset and N cons for each recv neuron, for indexing into the RecvSynIdx array of indexes into the Syns synapses, which are organized sender-based. 
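To make the sender-based StartN indexing described above concrete, here is a minimal, self-contained Go sketch; the data values, and the recvSynIdx indirection wiring, are illustrative stand-ins rather than actual axon build output:

package main

import "fmt"

// StartN indexes a contiguous range [Start:Start+N] of a flat array.
type StartN struct {
	Start, N uint32
}

func main() {
	// Flat synapse values, organized sender-based:
	// sending neuron 0 owns syns[0:2], sending neuron 1 owns syns[2:5].
	syns := []float32{0.1, 0.2, 0.3, 0.4, 0.5}
	sendCon := []StartN{{Start: 0, N: 2}, {Start: 2, N: 3}}

	// Receiver-based access goes through an index array into the same storage.
	recvCon := []StartN{{Start: 0, N: 2}} // recv neuron 0 has 2 incoming synapses
	recvSynIdx := []uint32{1, 3}          // recv-ordered indexes into syns

	sc := sendCon[1]
	for i := sc.Start; i < sc.Start+sc.N; i++ {
		fmt.Println("send-side syn:", syns[i])
	}

	rc := recvCon[0]
	for i := rc.Start; i < rc.Start+rc.N; i++ {
		fmt.Println("recv-side syn:", syns[recvSynIdx[i]])
	}
}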
+ PrjnRecvCon []StartN `view:"-"` - // [view: -] [Layers][RecvPrjns][RecvNeurons][MaxDelay][MaxData] conductance buffer for accumulating spikes -- subslices are allocated to each projection -- uses int-encoded float values for faster GPU atomic integration - PrjnGBuf []int32 `view:"-" desc:"[Layers][RecvPrjns][RecvNeurons][MaxDelay][MaxData] conductance buffer for accumulating spikes -- subslices are allocated to each projection -- uses int-encoded float values for faster GPU atomic integration"` + // conductance buffer for accumulating spikes -- subslices are allocated to each projection -- uses int-encoded float values for faster GPU atomic integration + PrjnGBuf []int32 `view:"-"` - // [view: -] [Layers][RecvPrjns][RecvNeurons][MaxData] synaptic conductance integrated over time per projection per recv neurons -- spikes come in via PrjnBuf -- subslices are allocated to each projection - PrjnGSyns []float32 `view:"-" desc:"[Layers][RecvPrjns][RecvNeurons][MaxData] synaptic conductance integrated over time per projection per recv neurons -- spikes come in via PrjnBuf -- subslices are allocated to each projection"` + // synaptic conductance integrated over time per projection per recv neurons -- spikes come in via PrjnBuf -- subslices are allocated to each projection + PrjnGSyns []float32 `view:"-"` - // [view: -] [Layers][RecvPrjns] indexes into Prjns (organized by SendPrjn) organized by recv projections -- needed for iterating through recv prjns efficiently on GPU. - RecvPrjnIdxs []uint32 `view:"-" desc:"[Layers][RecvPrjns] indexes into Prjns (organized by SendPrjn) organized by recv projections -- needed for iterating through recv prjns efficiently on GPU."` + // indexes into Prjns (organized by SendPrjn) organized by recv projections -- needed for iterating through recv prjns efficiently on GPU. + RecvPrjnIdxs []uint32 `view:"-"` - // [view: -] [Layers][RecvPrjns][RecvNeurons][Syns] indexes into Synapses for each recv neuron, organized into blocks according to PrjnRecvCon, for receiver-based access. - RecvSynIdxs []uint32 `view:"-" desc:"[Layers][RecvPrjns][RecvNeurons][Syns] indexes into Synapses for each recv neuron, organized into blocks according to PrjnRecvCon, for receiver-based access."` + // indexes into Synapses for each recv neuron, organized into blocks according to PrjnRecvCon, for receiver-based access. + RecvSynIdxs []uint32 `view:"-"` - // [In / Targ Layers][Neurons][Data] external input values for all Input / Target / Compare layers in the network -- the ApplyExt methods write to this per layer, and it is then actually applied in one consistent method. - Exts []float32 `desc:"[In / Targ Layers][Neurons][Data] external input values for all Input / Target / Compare layers in the network -- the ApplyExt methods write to this per layer, and it is then actually applied in one consistent method."` + // external input values for all Input / Target / Compare layers in the network -- the ApplyExt methods write to this per layer, and it is then actually applied in one consistent method. 
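The "int-encoded float values for faster GPU atomic integration" mentioned for PrjnGBuf can be sketched as a fixed-point encoding, so that concurrent senders can accumulate with integer atomics; the scale factor below is an assumption for illustration, not the constant axon actually uses:

package main

import (
	"fmt"
	"sync/atomic"
)

// Illustrative fixed-point scale: float conductance -> int32 and back.
const gScale = float32(1 << 24)

func encodeG(g float32) int32 { return int32(g * gScale) }
func decodeG(v int32) float32 { return float32(v) / gScale }

func main() {
	var buf int32 // one slot of a conductance ring buffer
	// Integer atomics let many senders accumulate into the same slot,
	// which float32 atomics on GPUs generally cannot do as cheaply.
	atomic.AddInt32(&buf, encodeG(0.25))
	atomic.AddInt32(&buf, encodeG(0.5))
	fmt.Println(decodeG(atomic.LoadInt32(&buf))) // 0.75
}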
+ Exts []float32 - // [view: -] context used only for accessing neurons for display -- NetIdxs.NData in here is copied from active context in NewState - Ctx Context `view:"-" desc:"context used only for accessing neurons for display -- NetIdxs.NData in here is copied from active context in NewState"` + // context used only for accessing neurons for display -- NetIdxs.NData in here is copied from active context in NewState + Ctx Context `view:"-"` - // [view: -] random number generator for the network -- all random calls must use this -- set seed here for weight initialization values - Rand erand.SysRand `view:"-" desc:"random number generator for the network -- all random calls must use this -- set seed here for weight initialization values"` + // random number generator for the network -- all random calls must use this -- set seed here for weight initialization values + Rand erand.SysRand `view:"-"` // random seed to be set at the start of configuring the network and initializing the weights -- set this to get a different set of weights - RndSeed int64 `inactive:"+" desc:"random seed to be set at the start of configuring the network and initializing the weights -- set this to get a different set of weights"` + RndSeed int64 `inactive:"+"` // number of threads to use for parallel processing - NThreads int `desc:"number of threads to use for parallel processing"` + NThreads int - // [view: inline] GPU implementation - GPU GPU `view:"inline" desc:"GPU implementation"` + // GPU implementation + GPU GPU `view:"inline"` - // [view: -] record function timer information - RecFunTimes bool `view:"-" desc:"record function timer information"` + // record function timer information + RecFunTimes bool `view:"-"` - // [view: -] timers for each major function (step of processing) - FunTimes map[string]*timer.Time `view:"-" desc:"timers for each major function (step of processing)"` + // timers for each major function (step of processing) + FunTimes map[string]*timer.Time `view:"-"` } // emer.Network interface methods: diff --git a/axon/networkbase_test.go b/axon/networkbase_test.go index d91cd0a4f..57eba9b0b 100644 --- a/axon/networkbase_test.go +++ b/axon/networkbase_test.go @@ -3,7 +3,7 @@ package axon import ( "testing" - "github.com/emer/emergent/prjn" + "github.com/emer/emergent/v2/prjn" "github.com/stretchr/testify/assert" ) diff --git a/axon/neuromod.go b/axon/neuromod.go index 2979b08c7..3a39f66b2 100644 --- a/axon/neuromod.go +++ b/axon/neuromod.go @@ -7,7 +7,7 @@ package axon import ( "github.com/goki/gosl/slbool" "github.com/goki/ki/kit" - "github.com/goki/mat32" + "goki.dev/mat32/v2" ) //go:generate stringer -type=DAModTypes @@ -63,31 +63,31 @@ const ( type NeuroModParams struct { // dopamine receptor-based effects of dopamine modulation on excitatory and inhibitory conductances: D1 is excitatory, D2 is inhibitory as a function of increasing dopamine - DAMod DAModTypes `desc:"dopamine receptor-based effects of dopamine modulation on excitatory and inhibitory conductances: D1 is excitatory, D2 is inhibitory as a function of increasing dopamine"` + DAMod DAModTypes // valence coding of this layer -- may affect specific layer types but does not directly affect neuromodulators currently - Valence ValenceTypes `desc:"valence coding of this layer -- may affect specific layer types but does not directly affect neuromodulators currently"` + Valence ValenceTypes - // [viewif: DAMod!=NoDAMod] multiplicative factor on overall DA modulation specified by DAMod -- resulting overall gain factor is: 1 + 
DAModGain * DA, where DA is appropriate DA-driven factor - DAModGain float32 `viewif:"DAMod!=NoDAMod" desc:"multiplicative factor on overall DA modulation specified by DAMod -- resulting overall gain factor is: 1 + DAModGain * DA, where DA is appropriate DA-driven factor"` + // multiplicative factor on overall DA modulation specified by DAMod -- resulting overall gain factor is: 1 + DAModGain * DA, where DA is appropriate DA-driven factor + DAModGain float32 `viewif:"DAMod!=NoDAMod"` // modulate the sign of the learning rate factor according to the DA sign, taking into account the DAMod sign reversal for D2Mod, also using BurstGain and DipGain to modulate DA value -- otherwise, only the magnitude of the learning rate is modulated as a function of raw DA magnitude according to DALRateMod (without additional gain factors) - DALRateSign slbool.Bool `desc:"modulate the sign of the learning rate factor according to the DA sign, taking into account the DAMod sign reversal for D2Mod, also using BurstGain and DipGain to modulate DA value -- otherwise, only the magnitude of the learning rate is modulated as a function of raw DA magnitude according to DALRateMod (without additional gain factors)"` + DALRateSign slbool.Bool - // [viewif: !DALRateSign] [min: 0] [max: 1] if not using DALRateSign, this is the proportion of maximum learning rate that Abs(DA) magnitude can modulate -- e.g., if 0.2, then DA = 0 = 80% of std learning rate, 1 = 100% - DALRateMod float32 `min:"0" max:"1" viewif:"!DALRateSign" desc:"if not using DALRateSign, this is the proportion of maximum learning rate that Abs(DA) magnitude can modulate -- e.g., if 0.2, then DA = 0 = 80% of std learning rate, 1 = 100%"` + // if not using DALRateSign, this is the proportion of maximum learning rate that Abs(DA) magnitude can modulate -- e.g., if 0.2, then DA = 0 = 80% of std learning rate, 1 = 100% + DALRateMod float32 `min:"0" max:"1" viewif:"!DALRateSign"` - // [min: 0] [max: 1] proportion of maximum learning rate that ACh can modulate -- e.g., if 0.2, then ACh = 0 = 80% of std learning rate, 1 = 100% - AChLRateMod float32 `min:"0" max:"1" desc:"proportion of maximum learning rate that ACh can modulate -- e.g., if 0.2, then ACh = 0 = 80% of std learning rate, 1 = 100%"` + // proportion of maximum learning rate that ACh can modulate -- e.g., if 0.2, then ACh = 0 = 80% of std learning rate, 1 = 100% + AChLRateMod float32 `min:"0" max:"1"` - // [def: 0,5] [min: 0] amount of extra Gi inhibition added in proportion to 1 - ACh level -- makes ACh disinhibitory - AChDisInhib float32 `min:"0" def:"0,5" desc:"amount of extra Gi inhibition added in proportion to 1 - ACh level -- makes ACh disinhibitory"` + // amount of extra Gi inhibition added in proportion to 1 - ACh level -- makes ACh disinhibitory + AChDisInhib float32 `min:"0" def:"0,5"` - // [def: 1] [min: 0] multiplicative gain factor applied to positive dopamine signals -- this operates on the raw dopamine signal prior to any effect of D2 receptors in reversing its sign! - BurstGain float32 `min:"0" def:"1" desc:"multiplicative gain factor applied to positive dopamine signals -- this operates on the raw dopamine signal prior to any effect of D2 receptors in reversing its sign!"` + // multiplicative gain factor applied to positive dopamine signals -- this operates on the raw dopamine signal prior to any effect of D2 receptors in reversing its sign! 
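As a concrete reading of the "1 + DAModGain * DA" factor described above, with the D2 sign reversal implied by the D1-excitatory / D2-inhibitory convention (a minimal sketch; the sign handling and clamping in the real axon code are more involved):

package main

import "fmt"

// daModFactor returns the multiplicative gain 1 + gain*DA, with DA's sign
// reversed for D2 receptors (assumed from the DAMod description above).
func daModFactor(da, gain float32, d2 bool) float32 {
	if d2 {
		da = -da
	}
	return 1 + gain*da
}

func main() {
	fmt.Println(daModFactor(0.5, 0.2, false)) // D1: 1.1 -- excitatory with DA
	fmt.Println(daModFactor(0.5, 0.2, true))  // D2: 0.9 -- inhibitory with DA
}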
+ BurstGain float32 `min:"0" def:"1"` - // [def: 1] [min: 0] multiplicative gain factor applied to negative dopamine signals -- this operates on the raw dopamine signal prior to any effect of D2 receptors in reversing its sign! should be small for acq, but roughly equal to burst for ext - DipGain float32 `min:"0" def:"1" desc:"multiplicative gain factor applied to negative dopamine signals -- this operates on the raw dopamine signal prior to any effect of D2 receptors in reversing its sign! should be small for acq, but roughly equal to burst for ext"` + // multiplicative gain factor applied to negative dopamine signals -- this operates on the raw dopamine signal prior to any effect of D2 receptors in reversing its sign! should be small for acq, but roughly equal to burst for ext + DipGain float32 `min:"0" def:"1"` pad, pad1, pad2 float32 } diff --git a/axon/neuron.go b/axon/neuron.go index 93bb153b8..bf0194121 100644 --- a/axon/neuron.go +++ b/axon/neuron.go @@ -7,7 +7,7 @@ package axon import ( "fmt" - "github.com/emer/emergent/netview" + "github.com/emer/emergent/v2/netview" "github.com/goki/ki/kit" ) diff --git a/axon/pcore_layers.go b/axon/pcore_layers.go index 601610139..56bba03c0 100644 --- a/axon/pcore_layers.go +++ b/axon/pcore_layers.go @@ -9,8 +9,8 @@ import ( "strings" "github.com/goki/gosl/slbool" - "github.com/goki/ki/bools" "github.com/goki/ki/kit" + "goki.dev/glop/num" ) //gosl: start pcore_layers @@ -23,32 +23,32 @@ import ( // Must set Learn.NeuroMod.DAMod = D1Mod or D2Mod via SetBuildConfig("DAMod"). type MatrixParams struct { - // [def: 0.05] threshold on layer Avg SpkMax for Matrix Go and VThal layers to count as having gated - GateThr float32 `def:"0.05" desc:"threshold on layer Avg SpkMax for Matrix Go and VThal layers to count as having gated"` + // threshold on layer Avg SpkMax for Matrix Go and VThal layers to count as having gated + GateThr float32 `def:"0.05"` // is this a ventral striatum (VS) matrix layer? if true, the gating status of this layer is recorded in the Global state, and used for updating effort and other factors. - IsVS slbool.Bool `desc:"is this a ventral striatum (VS) matrix layer? if true, the gating status of this layer is recorded in the Global state, and used for updating effort and other factors."` + IsVS slbool.Bool // index of other matrix (Go if we are NoGo and vice-versa). Set during Build from BuildConfig OtherMatrixName - OtherMatrixIdx int32 `inactive:"+" desc:"index of other matrix (Go if we are NoGo and vice-versa). Set during Build from BuildConfig OtherMatrixName"` + OtherMatrixIdx int32 `inactive:"+"` // index of thalamus layer that we gate. needed to get gating information. Set during Build from BuildConfig ThalLay1Name if present -- -1 if not used - ThalLay1Idx int32 `inactive:"+" desc:"index of thalamus layer that we gate. needed to get gating information. Set during Build from BuildConfig ThalLay1Name if present -- -1 if not used"` + ThalLay1Idx int32 `inactive:"+"` // index of thalamus layer that we gate. needed to get gating information. Set during Build from BuildConfig ThalLay2Name if present -- -1 if not used - ThalLay2Idx int32 `inactive:"+" desc:"index of thalamus layer that we gate. needed to get gating information. Set during Build from BuildConfig ThalLay2Name if present -- -1 if not used"` + ThalLay2Idx int32 `inactive:"+"` // index of thalamus layer that we gate. needed to get gating information. 
Set during Build from BuildConfig ThalLay3Name if present -- -1 if not used - ThalLay3Idx int32 `inactive:"+" desc:"index of thalamus layer that we gate. needed to get gating information. Set during Build from BuildConfig ThalLay3Name if present -- -1 if not used"` + ThalLay3Idx int32 `inactive:"+"` // index of thalamus layer that we gate. needed to get gating information. Set during Build from BuildConfig ThalLay4Name if present -- -1 if not used - ThalLay4Idx int32 `inactive:"+" desc:"index of thalamus layer that we gate. needed to get gating information. Set during Build from BuildConfig ThalLay4Name if present -- -1 if not used"` + ThalLay4Idx int32 `inactive:"+"` // index of thalamus layer that we gate. needed to get gating information. Set during Build from BuildConfig ThalLay5Name if present -- -1 if not used - ThalLay5Idx int32 `inactive:"+" desc:"index of thalamus layer that we gate. needed to get gating information. Set during Build from BuildConfig ThalLay5Name if present -- -1 if not used"` + ThalLay5Idx int32 `inactive:"+"` // index of thalamus layer that we gate. needed to get gating information. Set during Build from BuildConfig ThalLay6Name if present -- -1 if not used - ThalLay6Idx int32 `inactive:"+" desc:"index of thalamus layer that we gate. needed to get gating information. Set during Build from BuildConfig ThalLay6Name if present -- -1 if not used"` + ThalLay6Idx int32 `inactive:"+"` pad, pad1, pad2 int32 } @@ -87,8 +87,8 @@ const ( // Typically just a single unit per Pool representing a given stripe. type GPParams struct { - // [view: inline] [viewif: LayType=GPLayer] type of GP Layer -- must set during config using SetBuildConfig of GPType. - GPType GPLayerTypes `viewif:"LayType=GPLayer" view:"inline" desc:"type of GP Layer -- must set during config using SetBuildConfig of GPType."` + // type of GP Layer -- must set during config using SetBuildConfig of GPType. + GPType GPLayerTypes `viewif:"LayType=GPLayer" view:"inline"` pad, pad1, pad2 uint32 } @@ -164,7 +164,7 @@ func (ly *Layer) MatrixGated(ctx *Context) { } } if ctx.PlusPhase.IsTrue() && ly.Params.Matrix.IsVS.IsTrue() { - SetGlbV(ctx, di, GvVSMatrixJustGated, bools.ToFloat32(mtxGated)) + SetGlbV(ctx, di, GvVSMatrixJustGated, num.FromBool[float32](mtxGated)) if mtxGated { SetGlbUSposV(ctx, di, GvVSMatrixPoolGated, uint32(poolIdx), 1) } diff --git a/axon/pcore_net.go b/axon/pcore_net.go index 6567d068c..545b02aae 100644 --- a/axon/pcore_net.go +++ b/axon/pcore_net.go @@ -5,7 +5,7 @@ package axon import ( - "github.com/emer/emergent/prjn" + "github.com/emer/emergent/v2/prjn" ) // AddBG adds MtxGo, MtxNo, GPeOut, GPeIn, GPeTA, STNp, STNs, GPi layers, diff --git a/axon/pool.go b/axon/pool.go index 14abedc62..672c2914e 100644 --- a/axon/pool.go +++ b/axon/pool.go @@ -7,7 +7,7 @@ package axon import ( "github.com/emer/axon/fsfffb" "github.com/goki/gosl/slbool" - "github.com/goki/mat32" + "goki.dev/mat32/v2" ) //gosl: hlsl pool @@ -24,17 +24,17 @@ import ( // based on values from the prior cycle -- thus are 1 cycle behind in general. 
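The bools -> num change visible in MatrixGated above replaces bools.ToFloat32 with the generic num.FromBool[float32]. Local stand-ins with the semantics implied by those call sites (true -> 1, false -> 0; assumed, since only the usage is shown in this patch):

package main

import "fmt"

// FromBool and ToBool mirror the goki.dev/glop/num helpers as used above.
func FromBool[T int32 | float32 | float64](b bool) T {
	if b {
		return 1
	}
	return 0
}

func ToBool[T int32 | float32 | float64](v T) bool { return v != 0 }

func main() {
	mtxGated := true
	fmt.Println(FromBool[float32](mtxGated)) // 1 -- the value SetGlbV stores
	fmt.Println(ToBool(float32(0)))          // false
}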
type AvgMaxPhases struct { - // [view: inline] updated every cycle -- this is the source of all subsequent time scales - Cycle AvgMaxI32 `view:"inline" desc:"updated every cycle -- this is the source of all subsequent time scales"` + // updated every cycle -- this is the source of all subsequent time scales + Cycle AvgMaxI32 `view:"inline"` - // [view: inline] at the end of the minus phase - Minus AvgMaxI32 `view:"inline" desc:"at the end of the minus phase"` + // at the end of the minus phase + Minus AvgMaxI32 `view:"inline"` - // [view: inline] at the end of the plus phase - Plus AvgMaxI32 `view:"inline" desc:"at the end of the plus phase"` + // at the end of the plus phase + Plus AvgMaxI32 `view:"inline"` - // [view: inline] at the end of the previous plus phase - Prev AvgMaxI32 `view:"inline" desc:"at the end of the previous plus phase"` + // at the end of the previous plus phase + Prev AvgMaxI32 `view:"inline"` } // CycleToMinus grabs current Cycle values into the Minus phase values @@ -67,23 +67,23 @@ func (am *AvgMaxPhases) Zero() { // based on values from the prior cycle -- thus are 1 cycle behind in general. type PoolAvgMax struct { - // [view: inline] avg and maximum CaSpkP (continuously updated at roughly 40 msec integration window timescale, ends up capturing potentiation, plus-phase signal) -- this is the primary variable to use for tracking overall pool activity - CaSpkP AvgMaxPhases `inactive:"+" view:"inline" desc:"avg and maximum CaSpkP (continuously updated at roughly 40 msec integration window timescale, ends up capturing potentiation, plus-phase signal) -- this is the primary variable to use for tracking overall pool activity"` + // avg and maximum CaSpkP (continuously updated at roughly 40 msec integration window timescale, ends up capturing potentiation, plus-phase signal) -- this is the primary variable to use for tracking overall pool activity + CaSpkP AvgMaxPhases `inactive:"+" view:"inline"` - // [view: inline] avg and maximum CaSpkD longer-term depression / DAPK1 signal in layer - CaSpkD AvgMaxPhases `inactive:"+" view:"inline" desc:"avg and maximum CaSpkD longer-term depression / DAPK1 signal in layer"` + // avg and maximum CaSpkD longer-term depression / DAPK1 signal in layer + CaSpkD AvgMaxPhases `inactive:"+" view:"inline"` - // [view: inline] avg and maximum SpkMax value (based on CaSpkP) -- reflects peak activity at any point across the cycle - SpkMax AvgMaxPhases `inactive:"+" view:"inline" desc:"avg and maximum SpkMax value (based on CaSpkP) -- reflects peak activity at any point across the cycle"` + // avg and maximum SpkMax value (based on CaSpkP) -- reflects peak activity at any point across the cycle + SpkMax AvgMaxPhases `inactive:"+" view:"inline"` - // [view: inline] avg and maximum Act firing rate value - Act AvgMaxPhases `inactive:"+" view:"inline" desc:"avg and maximum Act firing rate value"` + // avg and maximum Act firing rate value + Act AvgMaxPhases `inactive:"+" view:"inline"` - // [view: inline] avg and maximum GeInt integrated running-average excitatory conductance value - GeInt AvgMaxPhases `inactive:"+" view:"inline" desc:"avg and maximum GeInt integrated running-average excitatory conductance value"` + // avg and maximum GeInt integrated running-average excitatory conductance value + GeInt AvgMaxPhases `inactive:"+" view:"inline"` - // [view: inline] avg and maximum GiInt integrated running-average inhibitory conductance value - GiInt AvgMaxPhases `inactive:"+" view:"inline" desc:"avg and maximum GiInt integrated running-average 
inhibitory conductance value"` + // avg and maximum GiInt integrated running-average inhibitory conductance value + GiInt AvgMaxPhases `inactive:"+" view:"inline"` } // SetN sets the N for aggregation @@ -177,33 +177,33 @@ func (am *PoolAvgMax) Calc(refIdx int32) { type Pool struct { // starting and ending (exclusive) layer-wise indexes for the list of neurons in this pool - StIdx, EdIdx uint32 `inactive:"+" desc:"starting and ending (exlusive) layer-wise indexes for the list of neurons in this pool"` + StIdx, EdIdx uint32 `inactive:"+"` - // [view: -] layer index in global layer list - LayIdx uint32 `view:"-" desc:"layer index in global layer list"` + // layer index in global layer list + LayIdx uint32 `view:"-"` - // [view: -] data parallel index (innermost index per layer) - DataIdx uint32 `view:"-" desc:"data parallel index (innermost index per layer)"` + // data parallel index (innermost index per layer) + DataIdx uint32 `view:"-"` - // [view: -] pool index in global pool list: [Layer][Pool][Data] - PoolIdx uint32 `view:"-" desc:"pool index in global pool list: [Layer][Pool][Data]"` + // pool index in global pool list: [Layer][Pool][Data] + PoolIdx uint32 `view:"-"` // is this a layer-wide pool? if not, it represents a sub-pool of units within a 4D layer - IsLayPool slbool.Bool `inactive:"+" desc:"is this a layer-wide pool? if not, it represents a sub-pool of units within a 4D layer"` + IsLayPool slbool.Bool `inactive:"+"` // for special types where relevant (e.g., MatrixLayer, BGThalLayer), indicates if the pool was gated - Gated slbool.Bool `inactive:"+" desc:"for special types where relevant (e.g., MatrixLayer, BGThalLayer), indicates if the pool was gated"` + Gated slbool.Bool `inactive:"+"` pad uint32 // fast-slow FFFB inhibition values - Inhib fsfffb.Inhib `inactive:"+" desc:"fast-slow FFFB inhibition values"` + Inhib fsfffb.Inhib `inactive:"+"` // average and max values for relevant variables in this pool, at different time scales - AvgMax PoolAvgMax `desc:"average and max values for relevant variables in this pool, at different time scales"` + AvgMax PoolAvgMax - // [view: inline] absolute value of AvgDif differences from actual neuron ActPct relative to TrgAvg - AvgDif AvgMaxI32 `inactive:"+" view:"inline" desc:"absolute value of AvgDif differences from actual neuron ActPct relative to TrgAvg"` + // absolute value of AvgDif differences from actual neuron ActPct relative to TrgAvg + AvgDif AvgMaxI32 `inactive:"+" view:"inline"` } // Init is called during InitActs diff --git a/axon/pool_test.go b/axon/pool_test.go index 6beba9bc2..b77a1e32c 100644 --- a/axon/pool_test.go +++ b/axon/pool_test.go @@ -10,8 +10,8 @@ import ( "os" "testing" - "github.com/emer/emergent/params" - "github.com/emer/emergent/prjn" + "github.com/emer/emergent/v2/params" + "github.com/emer/emergent/v2/prjn" ) // Note: subsequent params applied after Base diff --git a/axon/prjn.go b/axon/prjn.go index 5a936cb6e..f8e1cd076 100644 --- a/axon/prjn.go +++ b/axon/prjn.go @@ -9,12 +9,12 @@ import ( "io" "strconv" - "github.com/emer/emergent/erand" - "github.com/emer/emergent/weights" - "github.com/emer/etable/etensor" + "github.com/emer/emergent/v2/erand" + "github.com/emer/emergent/v2/weights" "github.com/goki/ki/indent" "github.com/goki/ki/ki" "github.com/goki/ki/kit" + "goki.dev/etable/v2/etensor" ) // https://github.com/kisvegabor/abbreviations-in-code suggests Buf instead of Buff @@ -28,7 +28,7 @@ type Prjn struct { PrjnBase // all prjn-level parameters -- these must remain constant once configured - Params *PrjnParams
`desc:"all prjn-level parameters -- these must remain constant once configured"` + Params *PrjnParams } var KiT_Prjn = kit.Types.AddType(&Prjn{}, PrjnProps) diff --git a/axon/prjnbase.go b/axon/prjnbase.go index cec124f93..a6ef5dabd 100644 --- a/axon/prjnbase.go +++ b/axon/prjnbase.go @@ -8,13 +8,13 @@ import ( "errors" "log" - "github.com/emer/emergent/emer" - "github.com/emer/emergent/params" - "github.com/emer/emergent/prjn" - "github.com/emer/etable/etensor" - "github.com/emer/etable/minmax" - "github.com/goki/gi/giv" - "github.com/goki/mat32" + "github.com/emer/emergent/v2/emer" + "github.com/emer/emergent/v2/params" + "github.com/emer/emergent/v2/prjn" + "goki.dev/etable/v2/etensor" + "goki.dev/etable/v2/minmax" + "goki.dev/gi/v2/giv" + "goki.dev/mat32/v2" ) // index naming: @@ -31,68 +31,68 @@ import ( // accessed via the AxonPrj field. type PrjnBase struct { - // [view: -] we need a pointer to ourselves as an AxonPrjn, which can always be used to extract the true underlying type of object when prjn is embedded in other structs -- function receivers do not have this ability so this is necessary. - AxonPrj AxonPrjn `copy:"-" json:"-" xml:"-" view:"-" desc:"we need a pointer to ourselves as an AxonPrjn, which can always be used to extract the true underlying type of object when prjn is embedded in other structs -- function receivers do not have this ability so this is necessary."` + // we need a pointer to ourselves as an AxonPrjn, which can always be used to extract the true underlying type of object when prjn is embedded in other structs -- function receivers do not have this ability so this is necessary. + AxonPrj AxonPrjn `copy:"-" json:"-" xml:"-" view:"-"` // inactivate this projection -- allows for easy experimentation - Off bool `desc:"inactivate this projection -- allows for easy experimentation"` + Off bool // Class is for applying parameter styles, can be space separated multiple tags - Cls string `desc:"Class is for applying parameter styles, can be space separated multple tags"` + Cls string // can record notes about this projection here - Notes string `desc:"can record notes about this projection here"` + Notes string // sending layer for this projection - Send *Layer `desc:"sending layer for this projection"` + Send *Layer // receiving layer for this projection - Recv *Layer `desc:"receiving layer for this projection"` + Recv *Layer - // [tableview: -] pattern of connectivity - Pat prjn.Pattern `tableview:"-" desc:"pattern of connectivity"` + // pattern of connectivity + Pat prjn.Pattern `tableview:"-"` // type of projection -- Forward, Back, Lateral, or extended type in specialized algorithms -- matches against .Cls parameter styles (e.g., .Back etc) - Typ PrjnTypes `desc:"type of projection -- Forward, Back, Lateral, or extended type in specialized algorithms -- matches against .Cls parameter styles (e.g., .Back etc)"` + Typ PrjnTypes - // [tableview: -] default parameters that are applied prior to user-set parameters -- these are useful for specific functionality in specialized brain areas (e.g., PVLV, BG etc) not associated with a prjn type, which otherwise is used to hard-code initial default parameters -- typically just set to a literal map.
- DefParams params.Params `tableview:"-" desc:"default parameters that are applied prior to user-set parameters -- these are useful for specific functionality in specialized brain areas (e.g., PVLV, BG etc) not associated with a prjn type, which otherwise is used to hard-code initial default parameters -- typically just set to a literal map."` + // default parameters that are applied prior to user-set parameters -- these are useful for specific functionality in specialized brain areas (e.g., PVLV, BG etc) not associated with a prjn type, which otherwise is used to hard-code initial default parameters -- typically just set to a literal map. + DefParams params.Params `tableview:"-"` - // [tableview: -] provides a history of parameters applied to the layer - ParamsHistory params.HistoryImpl `tableview:"-" desc:"provides a history of parameters applied to the layer"` + // provides a history of parameters applied to the layer + ParamsHistory params.HistoryImpl `tableview:"-"` - // [view: inline] [tableview: -] average and maximum number of recv connections in the receiving layer - RecvConNAvgMax minmax.AvgMax32 `tableview:"-" inactive:"+" view:"inline" desc:"average and maximum number of recv connections in the receiving layer"` + // average and maximum number of recv connections in the receiving layer + RecvConNAvgMax minmax.AvgMax32 `tableview:"-" inactive:"+" view:"inline"` - // [view: inline] [tableview: -] average and maximum number of sending connections in the sending layer - SendConNAvgMax minmax.AvgMax32 `tableview:"-" inactive:"+" view:"inline" desc:"average and maximum number of sending connections in the sending layer"` + // average and maximum number of sending connections in the sending layer + SendConNAvgMax minmax.AvgMax32 `tableview:"-" inactive:"+" view:"inline"` - // [view: -] start index into global Synapse array: [Layer][SendPrjns][Synapses] - SynStIdx uint32 `view:"-" desc:"start index into global Synapse array: [Layer][SendPrjns][Synapses]"` + // start index into global Synapse array: [Layer][SendPrjns][Synapses] + SynStIdx uint32 `view:"-"` - // [view: -] number of synapses in this projection - NSyns uint32 `view:"-" desc:"number of synapses in this projection"` + // number of synapses in this projection + NSyns uint32 `view:"-"` - // [view: -] [RecvNeurons] starting offset and N cons for each recv neuron, for indexing into the RecvSynIdx array of indexes into the Syns synapses, which are organized sender-based. This is locally-managed during build process, but also copied to network global PrjnRecvCons slice for GPU usage. - RecvCon []StartN `view:"-" desc:"[RecvNeurons] starting offset and N cons for each recv neuron, for indexing into the RecvSynIdx array of indexes into the Syns synapses, which are organized sender-based. This is locally-managed during build process, but also copied to network global PrjnRecvCons slice for GPU usage."` + // starting offset and N cons for each recv neuron, for indexing into the RecvSynIdx array of indexes into the Syns synapses, which are organized sender-based. This is locally-managed during build process, but also copied to network global PrjnRecvCons slice for GPU usage.
+ RecvCon []StartN `view:"-"` - // [view: -] [SendNeurons][SendCon.N RecvNeurons] index into Syns synaptic state for each sending unit and connection within that, for the sending projection which does not own the synapses, and instead indexes into recv-ordered list - RecvSynIdx []uint32 `view:"-" desc:"[SendNeurons][SendCon.N RecvNeurons] index into Syns synaptic state for each sending unit and connection within that, for the sending projection which does not own the synapses, and instead indexes into recv-ordered list"` + // index into Syns synaptic state for each sending unit and connection within that, for the sending projection which does not own the synapses, and instead indexes into recv-ordered list + RecvSynIdx []uint32 `view:"-"` - // [view: -] [RecvNeurons][RecvCon.N SendingNeurons] for each recv synapse, this is index of *sending* neuron It is generally preferable to use the Synapse SendIdx where needed, instead of this slice, because then the memory access will be close by other values on the synapse. - RecvConIdx []uint32 `view:"-" desc:"[RecvNeurons][RecvCon.N SendingNeurons] for each recv synapse, this is index of *sending* neuron It is generally preferable to use the Synapse SendIdx where needed, instead of this slice, because then the memory access will be close by other values on the synapse."` + // for each recv synapse, this is the index of the *sending* neuron. It is generally preferable to use the Synapse SendIdx where needed, instead of this slice, because then the memory access will be close by other values on the synapse. + RecvConIdx []uint32 `view:"-"` - // [view: -] [SendNeurons] starting offset and N cons for each sending neuron, for indexing into the Syns synapses, which are organized sender-based. This is locally-managed during build process, but also copied to network global PrjnSendCons slice for GPU usage. - SendCon []StartN `view:"-" desc:"[SendNeurons] starting offset and N cons for each sending neuron, for indexing into the Syns synapses, which are organized sender-based. This is locally-managed during build process, but also copied to network global PrjnSendCons slice for GPU usage."` + // starting offset and N cons for each sending neuron, for indexing into the Syns synapses, which are organized sender-based. This is locally-managed during build process, but also copied to network global PrjnSendCons slice for GPU usage. + SendCon []StartN `view:"-"` - // [view: -] [SendNeurons[[SendCon.N RecvNeurons] index of other neuron that receives the sender's synaptic input, ordered by the sending layer's order of units as the outer loop, and SendCon.N receiving units within that. It is generally preferable to use the Synapse RecvIdx where needed, instead of this slice, because then the memory access will be close by other values on the synapse. - SendConIdx []uint32 `view:"-" desc:"[SendNeurons[[SendCon.N RecvNeurons] index of other neuron that receives the sender's synaptic input, ordered by the sending layer's order of units as the outer loop, and SendCon.N receiving units within that. It is generally preferable to use the Synapse RecvIdx where needed, instead of this slice, because then the memory access will be close by other values on the synapse."` + // index of other neuron that receives the sender's synaptic input, ordered by the sending layer's order of units as the outer loop, and SendCon.N receiving units within that.
It is generally preferable to use the Synapse RecvIdx where needed, instead of this slice, because then the memory access will be close by other values on the synapse. + SendConIdx []uint32 `view:"-"` - // [view: -] [RecvNeurons][Params.Com.MaxDelay][MaxData] Ge or Gi conductance ring buffer for each neuron, accessed through Params.Com.ReadIdx, WriteIdx -- scale * weight is added with Com delay offset -- a subslice from network PrjnGBuf. Uses int-encoded float values for faster GPU atomic integration - GBuf []int32 `view:"-" desc:"[RecvNeurons][Params.Com.MaxDelay][MaxData] Ge or Gi conductance ring buffer for each neuron, accessed through Params.Com.ReadIdx, WriteIdx -- scale * weight is added with Com delay offset -- a subslice from network PrjnGBuf. Uses int-encoded float values for faster GPU atomic integration"` + // Ge or Gi conductance ring buffer for each neuron, accessed through Params.Com.ReadIdx, WriteIdx -- scale * weight is added with Com delay offset -- a subslice from network PrjnGBuf. Uses int-encoded float values for faster GPU atomic integration + GBuf []int32 `view:"-"` - // [view: -] [RecvNeurons][MaxData] projection-level synaptic conductance values, integrated by prjn before being integrated at the neuron level, which enables the neuron to perform non-linear integration as needed -- a subslice from network PrjnGSyn. - GSyns []float32 `view:"-" desc:"[RecvNeurons][MaxData] projection-level synaptic conductance values, integrated by prjn before being integrated at the neuron level, which enables the neuron to perform non-linear integration as needed -- a subslice from network PrjnGSyn."` + // projection-level synaptic conductance values, integrated by prjn before being integrated at the neuron level, which enables the neuron to perform non-linear integration as needed -- a subslice from network PrjnGSyn. + GSyns []float32 `view:"-"` } // emer.Prjn interface diff --git a/axon/prjnparams.go b/axon/prjnparams.go index 6994bbdc4..69096b7c6 100644 --- a/axon/prjnparams.go +++ b/axon/prjnparams.go @@ -8,7 +8,7 @@ import ( "encoding/json" "strings" - "github.com/goki/mat32" + "goki.dev/mat32/v2" ) //gosl: hlsl prjnparams @@ -31,10 +31,10 @@ import ( type StartN struct { // starting offset - Start uint32 `desc:"starting offset"` + Start uint32 - // number of items -- [Start:Start+N] - N uint32 `desc:"number of items -- [Start:Start+N]"` + // number of items -- + N uint32 pad, pad1 uint32 // todo: see if we can do without these? 
} @@ -77,10 +77,10 @@ func (pi *PrjnIdxs) SendNIdxToLayIdx(ni uint32) uint32 { type GScaleVals struct { // scaling factor for integrating synaptic input conductances (G's), originally computed as a function of sending layer activity and number of connections, and typically adapted from there -- see Prjn.PrjnScale adapt params - Scale float32 `inactive:"+" desc:"scaling factor for integrating synaptic input conductances (G's), originally computed as a function of sending layer activity and number of connections, and typically adapted from there -- see Prjn.PrjnScale adapt params"` + Scale float32 `inactive:"+"` // normalized relative proportion of total receiving conductance for this projection: PrjnScale.Rel / sum(PrjnScale.Rel across relevant prjns) - Rel float32 `inactive:"+" desc:"normalized relative proportion of total receiving conductance for this projection: PrjnScale.Rel / sum(PrjnScale.Rel across relevant prjns)"` + Rel float32 `inactive:"+"` pad, pad1 float32 } @@ -91,39 +91,39 @@ type GScaleVals struct { type PrjnParams struct { // functional type of prjn -- determines functional code path for specialized layer types, and is synchronized with the Prjn.Typ value - PrjnType PrjnTypes `desc:"functional type of prjn -- determines functional code path for specialized layer types, and is synchronized with the Prjn.Typ value"` + PrjnType PrjnTypes pad, pad1, pad2 int32 - // [view: -] recv and send neuron-level projection index array access info - Idxs PrjnIdxs `view:"-" desc:"recv and send neuron-level projection index array access info"` + // recv and send neuron-level projection index array access info + Idxs PrjnIdxs `view:"-"` - // [view: inline] synaptic communication parameters: delay, probability of failure - Com SynComParams `view:"inline" desc:"synaptic communication parameters: delay, probability of failure"` + // synaptic communication parameters: delay, probability of failure + Com SynComParams `view:"inline"` - // [view: inline] projection scaling parameters for computing GScale: modulates overall strength of projection, using both absolute and relative factors, with adaptation option to maintain target max conductances - PrjnScale PrjnScaleParams `view:"inline" desc:"projection scaling parameters for computing GScale: modulates overall strength of projection, using both absolute and relative factors, with adaptation option to maintain target max conductances"` + // projection scaling parameters for computing GScale: modulates overall strength of projection, using both absolute and relative factors, with adaptation option to maintain target max conductances + PrjnScale PrjnScaleParams `view:"inline"` - // [view: add-fields] slowly adapting, structural weight value parameters, which control initial weight values and slower outer-loop adjustments - SWts SWtParams `view:"add-fields" desc:"slowly adapting, structural weight value parameters, which control initial weight values and slower outer-loop adjustments"` + // slowly adapting, structural weight value parameters, which control initial weight values and slower outer-loop adjustments + SWts SWtParams `view:"add-fields"` - // [view: add-fields] synaptic-level learning parameters for learning in the fast LWt values. - Learn LearnSynParams `view:"add-fields" desc:"synaptic-level learning parameters for learning in the fast LWt values."` + // synaptic-level learning parameters for learning in the fast LWt values. 
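The GScaleVals.Rel normalization just described -- PrjnScale.Rel divided by the sum of Rel across a layer's relevant projections -- in a minimal runnable form (relScales and its inputs are illustrative stand-ins, not axon API):

package main

import "fmt"

// relScales normalizes per-projection relative scales so they sum to 1.
func relScales(rels []float32) []float32 {
	var sum float32
	for _, r := range rels {
		sum += r
	}
	out := make([]float32, len(rels))
	if sum == 0 {
		return out
	}
	for i, r := range rels {
		out[i] = r / sum
	}
	return out
}

func main() {
	fmt.Println(relScales([]float32{1, 1, 0.2})) // ~[0.455 0.455 0.091]
}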
+ Learn LearnSynParams `view:"add-fields"` - // [view: inline] conductance scaling values - GScale GScaleVals `view:"inline" desc:"conductance scaling values"` + // conductance scaling values + GScale GScaleVals `view:"inline"` - // [view: inline] [viewif: PrjnType=[RWPrjn,TDPredPrjn]] Params for RWPrjn and TDPredPrjn for doing dopamine-modulated learning for reward prediction: Da * Send activity. Use in RWPredLayer or TDPredLayer typically to generate reward predictions. If the Da sign is positive, the first recv unit learns fully; for negative, second one learns fully. Lower lrate applies for opposite cases. Weights are positive-only. - RLPred RLPredPrjnParams `viewif:"PrjnType=[RWPrjn,TDPredPrjn]" view:"inline" desc:"Params for RWPrjn and TDPredPrjn for doing dopamine-modulated learning for reward prediction: Da * Send activity. Use in RWPredLayer or TDPredLayer typically to generate reward predictions. If the Da sign is positive, the first recv unit learns fully; for negative, second one learns fully. Lower lrate applies for opposite cases. Weights are positive-only."` + // Params for RWPrjn and TDPredPrjn for doing dopamine-modulated learning for reward prediction: Da * Send activity. Use in RWPredLayer or TDPredLayer typically to generate reward predictions. If the Da sign is positive, the first recv unit learns fully; for negative, second one learns fully. Lower lrate applies for opposite cases. Weights are positive-only. + RLPred RLPredPrjnParams `viewif:"PrjnType=[RWPrjn,TDPredPrjn]" view:"inline"` - // [view: inline] [viewif: PrjnType=MatrixPrjn] for trace-based learning in the MatrixPrjn. A trace of synaptic co-activity is formed, and then modulated by dopamine whenever it occurs. This bridges the temporal gap between gating activity and subsequent activity, and is based biologically on synaptic tags. Trace is reset at time of reward based on ACh level from CINs. - Matrix MatrixPrjnParams `viewif:"PrjnType=MatrixPrjn" view:"inline" desc:"for trace-based learning in the MatrixPrjn. A trace of synaptic co-activity is formed, and then modulated by dopamine whenever it occurs. This bridges the temporal gap between gating activity and subsequent activity, and is based biologically on synaptic tags. Trace is reset at time of reward based on ACh level from CINs."` + // for trace-based learning in the MatrixPrjn. A trace of synaptic co-activity is formed, and then modulated by dopamine whenever it occurs. This bridges the temporal gap between gating activity and subsequent activity, and is based biologically on synaptic tags. Trace is reset at time of reward based on ACh level from CINs. + Matrix MatrixPrjnParams `viewif:"PrjnType=MatrixPrjn" view:"inline"` - // [view: inline] [viewif: PrjnType=BLAPrjn] Basolateral Amygdala projection parameters. - BLA BLAPrjnParams `viewif:"PrjnType=BLAPrjn" view:"inline" desc:"Basolateral Amygdala projection parameters."` + // Basolateral Amygdala projection parameters. + BLA BLAPrjnParams `viewif:"PrjnType=BLAPrjn" view:"inline"` - // [view: inline] [viewif: PrjnType=HipPrjn] Hip bench parameters. - Hip HipPrjnParams `viewif:"PrjnType=HipPrjn" view:"inline" desc:"Hip bench parameters."` + // Hip bench parameters.
+ Hip HipPrjnParams `viewif:"PrjnType=HipPrjn" view:"inline"` } func (pj *PrjnParams) Defaults() { diff --git a/axon/pvlv.go b/axon/pvlv.go index 9a8e6cf48..da0c9c8ad 100644 --- a/axon/pvlv.go +++ b/axon/pvlv.go @@ -5,9 +5,9 @@ package axon import ( - "github.com/emer/emergent/erand" - "github.com/goki/ki/bools" - "github.com/goki/mat32" + "github.com/emer/emergent/v2/erand" + "goki.dev/glop/num" + "goki.dev/mat32/v2" ) // DriveParams manages the drive parameters for computing and updating drive state. @@ -17,19 +17,19 @@ import ( type DriveParams struct { // minimum effective drive value -- this is an automatic baseline ensuring that a positive US results in at least some minimal level of reward. Unlike Base values, this is not reflected in the activity of the drive values -- applies at the time of reward calculation as a minimum baseline. - DriveMin float32 `desc:"minimum effective drive value -- this is an automatic baseline ensuring that a positive US results in at least some minimal level of reward. Unlike Base values, this is not reflected in the activity of the drive values -- applies at the time of reward calculation as a minimum baseline."` + DriveMin float32 // baseline levels for each drive -- what they naturally trend toward in the absence of any input. Set inactive drives to 0 baseline, active ones typically elevated baseline (0-1 range). - Base []float32 `desc:"baseline levels for each drive -- what they naturally trend toward in the absence of any input. Set inactive drives to 0 baseline, active ones typically elevated baseline (0-1 range)."` + Base []float32 // time constants in ThetaCycle (trial) units for natural update toward Base values -- 0 values means no natural update. - Tau []float32 `desc:"time constants in ThetaCycle (trial) units for natural update toward Base values -- 0 values means no natural update."` + Tau []float32 // decrement in drive value when US is consumed, thus partially satisfying the drive -- positive values are subtracted from current Drive value. - Satisfaction []float32 `desc:"decrement in drive value when US is consumed, thus partially satisfying the drive -- positive values are subtracted from current Drive value."` + Satisfaction []float32 - // [view: -] 1/Tau - Dt []float32 `view:"-" desc:"1/Tau"` + // 1/Tau + Dt []float32 `view:"-"` } func (dp *DriveParams) Alloc(nDrives int) { @@ -151,13 +151,13 @@ func (dp *DriveParams) EffectiveDrive(ctx *Context, di uint32, i uint32) float32 type UrgencyParams struct { // value of raw urgency where the urgency activation level is 50% - U50 float32 `desc:"value of raw urgency where the urgency activation level is 50%"` + U50 float32 - // [def: 4] exponent on the urge factor -- valid numbers are 1,2,4,6 - Power int32 `def:"4" desc:"exponent on the urge factor -- valid numbers are 1,2,4,6"` + // exponent on the urge factor -- valid numbers are 1,2,4,6 + Power int32 `def:"4"` - // [def: 0.2] threshold for urge -- cuts off small baseline values - Thr float32 `def:"0.2" desc:"threshold for urge -- cuts off small baseline values"` + // threshold for urge -- cuts off small baseline values + Thr float32 `def:"0.2"` } func (ur *UrgencyParams) Defaults() { @@ -220,26 +220,26 @@ func PVLVNormFun(raw float32) float32 { // weighted and integrated to compute an overall PV primary value. 
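The "1/(1+x) normalization" referenced throughout these US parameters can be read as x/(1+x) (equivalently 1 - 1/(1+x)), mapping [0,inf) into [0,1). A sketch of the PVpos computation as described -- weighted, drive-scaled USs summed, multiplied by PVposGain, then normalized -- with all inputs illustrative:

package main

import "fmt"

// normFun squashes a non-negative raw value into [0,1) -- the assumed form
// of the 1/(1+x) normalization mentioned in the comments here.
func normFun(x float32) float32 { return x / (1 + x) }

// pvPos: weighted, drive-scaled sum of positive USs, gain-multiplied
// prior to normalization, per the USParams description.
func pvPos(usPos, wts, drives []float32, gain float32) float32 {
	var sum float32
	for i := range usPos {
		sum += wts[i] * drives[i] * usPos[i]
	}
	return normFun(gain * sum)
}

func main() {
	fmt.Println(pvPos([]float32{1, 0}, []float32{1, 1}, []float32{0.5, 0.5}, 2)) // 0.5
}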
type USParams struct { - // [def: 0.5] threshold for a negative US increment, _after_ multiplying by the USnegGains factor for that US (to allow for normalized input magnitudes that may translate into different magnitude of effects), to drive a phasic ACh response and associated VSMatrix gating and dopamine firing -- i.e., a full negative US outcome event (global NegUSOutcome flag is set) - NegUSOutcomeThr float32 `def:"0.5" desc:"threshold for a negative US increment, _after_ multiplying by the USnegGains factor for that US (to allow for normalized input magnitudes that may translate into different magnitude of effects), to drive a phasic ACh response and associated VSMatrix gating and dopamine firing -- i.e., a full negative US outcome event (global NegUSOutcome flag is set)"` + // threshold for a negative US increment, _after_ multiplying by the USnegGains factor for that US (to allow for normalized input magnitudes that may translate into different magnitude of effects), to drive a phasic ACh response and associated VSMatrix gating and dopamine firing -- i.e., a full negative US outcome event (global NegUSOutcome flag is set) + NegUSOutcomeThr float32 `def:"0.5"` - // [def: 2] gain factor applied to sum of weighted, drive-scaled positive USs to compute PVpos primary value summary -- multiplied prior to 1/(1+x) normalization. Use this to adjust the overall scaling of PVpos reward within 0-1 normalized range (see also PVnegGain). Each USpos is assumed to be in 0-1 range, default 1. - PVposGain float32 `def:"2" desc:"gain factor applied to sum of weighted, drive-scaled positive USs to compute PVpos primary value summary -- multiplied prior to 1/(1+x) normalization. Use this to adjust the overall scaling of PVpos reward within 0-1 normalized range (see also PVnegGain). Each USpos is assumed to be in 0-1 range, default 1."` + // gain factor applied to sum of weighted, drive-scaled positive USs to compute PVpos primary value summary -- multiplied prior to 1/(1+x) normalization. Use this to adjust the overall scaling of PVpos reward within 0-1 normalized range (see also PVnegGain). Each USpos is assumed to be in 0-1 range, default 1. + PVposGain float32 `def:"2"` - // [def: 1] gain factor applied to sum of weighted negative USs to compute PVneg primary value summary -- multiplied prior to 1/(1+x) normalization. Use this to adjust overall scaling of PVneg within 0-1 normalized range (see also PVposGain). - PVnegGain float32 `def:"1" desc:"gain factor applied to sum of weighted negative USs to compute PVneg primary value summary -- multiplied prior to 1/(1+x) normalization. Use this to adjust overall scaling of PVneg within 0-1 normalized range (see also PVposGain)."` + // gain factor applied to sum of weighted negative USs to compute PVneg primary value summary -- multiplied prior to 1/(1+x) normalization. Use this to adjust overall scaling of PVneg within 0-1 normalized range (see also PVposGain). + PVnegGain float32 `def:"1"` // gain factor for each individual negative US, multiplied prior to 1/(1+x) normalization of each term for activating the OFCnegUS pools. These gains are _not_ applied in computing summary PVneg value (see PVnegWts), and generally must be larger than the weights to leverage the dynamic range within each US pool. - USnegGains []float32 `desc:"gain factor for each individual negative US, multiplied prior to 1/(1+x) normalization of each term for activating the OFCnegUS pools. 
These gains are _not_ applied in computing summary PVneg value (see PVnegWts), and generally must be larger than the weights to leverage the dynamic range within each US pool."` + USnegGains []float32 // weight factor applied to each separate positive US on the way to computing the overall PVpos summary value, to control the weighting of each US relative to the others. Each pos US is also multiplied by its dynamic Drive factor as well. Use PVposGain to control the overall scaling of the PVpos value. - PVposWts []float32 `desc:"weight factor applied to each separate positive US on the way to computing the overall PVpos summary value, to control the weighting of each US relative to the others. Each pos US is also multiplied by its dynamic Drive factor as well. Use PVposGain to control the overall scaling of the PVpos value."` + PVposWts []float32 // weight factor applied to each separate negative US on the way to computing the overall PVneg summary value, to control the weighting of each US relative to the others. The first pool is Time, second is Effort, and these are typically weighted lower (.02) than salient simulation-specific USs (1). - PVnegWts []float32 `desc:"weight factor applied to each separate negative US on the way to computing the overall PVneg summary value, to control the weighting of each US relative to the others. The first pool is Time, second is Effort, and these are typically weighted lower (.02) than salient simulation-specific USs (1)."` + PVnegWts []float32 // computed estimated US values, based on OFCposUSPT and VSMatrix gating, in PVposEst - USposEst []float32 `inactive:"+" desc:"computed estimated US values, based on OFCposUSPT and VSMatrix gating, in PVposEst"` + USposEst []float32 `inactive:"+"` } func (us *USParams) Alloc(nPos, nNeg int) { @@ -323,14 +323,14 @@ func (us *USParams) NegUSOutcome(ctx *Context, di uint32, usIdx int, mag float32 // or "relief" burst when actual neg < predicted. 
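The NegThr-style thresholding just described -- integrated pvNeg scaled by a threshold factor, against which pvPos must win to yield net positive reward -- can be sketched as follows; the burst/dip shaping is an illustrative reading of the BurstGain/DipGain comments, not the actual LHb code:

package main

import "fmt"

// lhbDA: net dopamine-like signal from integrated pvPos vs. thresholded pvNeg.
func lhbDA(pvPos, pvNeg, negThr, burstGain, dipGain float32) float32 {
	thr := negThr * pvNeg
	if pvPos > thr {
		return burstGain * (pvPos - thr) // net positive -> burst
	}
	return -dipGain * (thr - pvPos) // net negative -> dip / pause
}

func main() {
	fmt.Println(lhbDA(0.6, 0.4, 1, 1, 1)) // ~0.2 burst
	fmt.Println(lhbDA(0.2, 0.4, 1, 1, 1)) // ~-0.2 dip
}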
type LHbParams struct { - // [def: 1] threshold factor that multiplies integrated pvNeg value to establish a threshold for whether the integrated pvPos value is good enough to drive overall net positive reward - NegThr float32 `def:"1" desc:"threshold factor that multiplies integrated pvNeg value to establish a threshold for whether the integrated pvPos value is good enough to drive overall net positive reward"` + // threshold factor that multiplies integrated pvNeg value to establish a threshold for whether the integrated pvPos value is good enough to drive overall net positive reward + NegThr float32 `def:"1"` - // [def: 1] gain multiplier on PVpos for purposes of generating bursts (not for discounting negative dips) -- 4 renormalizes for typical ~.5 values (.5 * .5 = .25) - BurstGain float32 `def:"1" desc:"gain multiplier on PVpos for purposes of generating bursts (not for discounting negative dips) -- 4 renormalizes for typical ~.5 values (.5 * .5 = .25)"` + // gain multiplier on PVpos for purposes of generating bursts (not for discounting negative dips) -- 4 renormalizes for typical ~.5 values (.5 * .5 = .25) + BurstGain float32 `def:"1"` - // [def: 1] gain multiplier on PVneg for purposes of generating dips (not for discounting positive bursts) -- 4 renormalizes for typical ~.5 values (.5 * .5 = .25) - DipGain float32 `def:"1" desc:"gain multiplier on PVneg for purposes of generating dips (not for discounting positive bursts) -- 4 renormalizes for typical ~.5 values (.5 * .5 = .25)"` + // gain multiplier on PVneg for purposes of generating dips (not for discounting positive bursts) -- 4 renormalizes for typical ~.5 values (.5 * .5 = .25) + DipGain float32 `def:"1"` } func (lh *LHbParams) Defaults() { @@ -405,14 +405,14 @@ func (lh *LHbParams) DAforNoUS(ctx *Context, di uint32, vsPatchPos float32) floa // GiveUpParams are parameters for computing when to give up type GiveUpParams struct { - // [def: 1] threshold factor that multiplies integrated pvNeg value to establish a threshold for whether the integrated pvPos value is good enough to drive overall net positive reward - NegThr float32 `def:"1" desc:"threshold factor that multiplies integrated pvNeg value to establish a threshold for whether the integrated pvPos value is good enough to drive overall net positive reward"` + // threshold factor that multiplies integrated pvNeg value to establish a threshold for whether the integrated pvPos value is good enough to drive overall net positive reward + NegThr float32 `def:"1"` - // [def: 10] multiplier on pos - neg for logistic probability function -- higher gain values produce more binary give up behavior and lower values produce more graded stochastic behavior around the threshold - Gain float32 `def:"10" desc:"multiplier on pos - neg for logistic probability function -- higher gain values produce more binary give up behavior and lower values produce more graded stochastic behavior around the threshold"` + // multiplier on pos - neg for logistic probability function -- higher gain values produce more binary give up behavior and lower values produce more graded stochastic behavior around the threshold + Gain float32 `def:"10"` // minimum estimated PVpos value -- deals with any errors in the estimation process to make sure that erroneous GiveUp doesn't happen. 
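The Gain comment above describes a logistic probability on the pos - neg difference; a sketch under the assumption of a standard logistic (only the Prob(pvDiff, rnd) signature is visible in this patch, so the argument and sign conventions here are illustrative):

package main

import (
	"fmt"
	"math"
	"math/rand"
)

// giveUpProb: logistic of gain * pvDiff -- higher gain approaches a hard
// threshold; lower gain gives graded, stochastic give-up behavior.
func giveUpProb(pvDiff, gain float32) float32 {
	return float32(1 / (1 + math.Exp(-float64(gain*pvDiff))))
}

func main() {
	p := giveUpProb(0.3, 10)
	giveUp := rand.Float32() < p // stochastic decision, as described
	fmt.Println(p, giveUp)
}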
-	MinPVposEst float32 `desc:"minimum estimated PVpos value -- deals with any errors in the estimation process to make sure that erroneous GiveUp doesn't happen."`
+	MinPVposEst float32
 }
 
 func (gp *GiveUpParams) Defaults() {
@@ -450,25 +450,25 @@ func (gp *GiveUpParams) Prob(pvDiff float32, rnd erand.Rand) (float32, bool) {
 type PVLV struct {
 
 	// number of possible positive US states and corresponding drives -- the first is always reserved for novelty / curiosity. Must be set programmatically via SetNUSs method, which allocates corresponding parameters.
-	NPosUSs uint32 `inactive:"+" desc:"number of possible positive US states and corresponding drives -- the first is always reserved for novelty / curiosity. Must be set programmatically via SetNUSs method, which allocates corresponding parameters."`
+	NPosUSs uint32 `inactive:"+"`
 
-	// number of possible negative US states -- [0] is reserved for accumulated time, [1] the accumulated effort cost. Must be set programmatically via SetNUSs method, which allocates corresponding parameters.
-	NNegUSs uint32 `inactive:"+" desc:"number of possible negative US states -- [0] is reserved for accumulated time, [1] the accumulated effort cost. Must be set programmatically via SetNUSs method, which allocates corresponding parameters."`
+	// number of possible negative US states -- [0] is reserved for accumulated time, [1] the accumulated effort cost. Must be set programmatically via SetNUSs method, which allocates corresponding parameters.
+	NNegUSs uint32 `inactive:"+"`
 
 	// parameters and state for built-in drives that form the core motivations of agent, controlled by lateral hypothalamus and associated body state monitoring such as glucose levels and thirst.
-	Drive DriveParams `desc:"parameters and state for built-in drives that form the core motivations of agent, controlled by lateral hypothalamus and associated body state monitoring such as glucose levels and thirst."`
+	Drive DriveParams
 
-	// [view: inline] urgency (increasing pressure to do something) and parameters for updating it. Raw urgency is incremented by same units as effort, but is only reset with a positive US.
-	Urgency UrgencyParams `view:"inline" desc:"urgency (increasing pressure to do something) and parameters for updating it. Raw urgency is incremented by same units as effort, but is only reset with a positive US."`
+	// urgency (increasing pressure to do something) and parameters for updating it. Raw urgency is incremented by same units as effort, but is only reset with a positive US.
+	Urgency UrgencyParams `view:"inline"`
 
 	// controls how positive and negative USs are weighted and integrated to compute an overall PV primary value.
-	USs USParams `desc:"controls how positive and negative USs are weighted and integrated to compute an overall PV primary value."`
+	USs USParams
 
-	// [view: inline] lateral habenula (LHb) parameters and state, which drives dipping / pausing in dopamine when the predicted positive outcome > actual, or actual negative outcome > predicted. Can also drive bursting for the converse, and via matrix phasic firing
-	LHb LHbParams `view:"inline" desc:"lateral habenula (LHb) parameters and state, which drives dipping / pausing in dopamine when the predicted positive outcome > actual, or actual negative outcome > predicted. Can also drive bursting for the converse, and via matrix phasic firing"`
+	// lateral habenula (LHb) parameters and state, which drives dipping / pausing in dopamine when the predicted positive outcome > actual, or actual negative outcome > predicted. 
Can also drive bursting for the converse, and via matrix phasic firing + LHb LHbParams `view:"inline"` // parameters for giving up based on PV pos - neg difference - GiveUp GiveUpParams `desc:"parameters for giving up based on PV pos - neg difference"` + GiveUp GiveUpParams } func (pp *PVLV) Defaults() { @@ -682,7 +682,7 @@ func (pp *PVLV) ResetGiveUp(ctx *Context, di uint32) { // after reward. func (pp *PVLV) NewState(ctx *Context, di uint32, rnd erand.Rand) { hadRewF := GlbV(ctx, di, GvHasRew) - hadRew := bools.FromFloat32(hadRewF) + hadRew := num.ToBool(hadRewF) SetGlbV(ctx, di, GvHadRew, hadRewF) SetGlbV(ctx, di, GvHadPosUS, GlbV(ctx, di, GvHasPosUS)) SetGlbV(ctx, di, GvHadNegUSOutcome, GlbV(ctx, di, GvNegUSOutcome)) @@ -760,7 +760,7 @@ func (pp *PVLV) PVsFmUSs(ctx *Context, di uint32) { pvPosSum, pvPos := pp.PVpos(ctx, di) SetGlbV(ctx, di, GvPVposSum, pvPosSum) SetGlbV(ctx, di, GvPVpos, pvPos) - SetGlbV(ctx, di, GvHasPosUS, bools.ToFloat32(pp.HasPosUS(ctx, di))) + SetGlbV(ctx, di, GvHasPosUS, num.FromBool[float32](pp.HasPosUS(ctx, di))) pvNegSum, pvNeg := pp.PVneg(ctx, di) SetGlbV(ctx, di, GvPVnegSum, pvNegSum) @@ -876,7 +876,7 @@ func (pp *PVLV) GiveUpFmPV(ctx *Context, di uint32, pvNeg float32, rnd erand.Ran SetGlbV(ctx, di, GvPVposEstDisc, posDisc) SetGlbV(ctx, di, GvGiveUpDiff, diff) SetGlbV(ctx, di, GvGiveUpProb, prob) - SetGlbV(ctx, di, GvGiveUp, bools.ToFloat32(giveUp)) + SetGlbV(ctx, di, GvGiveUp, num.FromBool[float32](giveUp)) return giveUp } diff --git a/axon/pvlv_layers.go b/axon/pvlv_layers.go index 09b32df27..e48d16d2a 100644 --- a/axon/pvlv_layers.go +++ b/axon/pvlv_layers.go @@ -9,7 +9,7 @@ import ( "strings" "github.com/goki/gosl/slbool" - "github.com/goki/mat32" + "goki.dev/mat32/v2" ) //gosl: start pvlv_layers @@ -18,29 +18,29 @@ import ( // as a function of the MAX activation of its inputs. type LDTParams struct { - // [def: 0.05] threshold per input source, on absolute value (magnitude), to count as a significant reward event, which then drives maximal ACh -- set to 0 to disable this nonlinear behavior - SrcThr float32 `def:"0.05" desc:"threshold per input source, on absolute value (magnitude), to count as a significant reward event, which then drives maximal ACh -- set to 0 to disable this nonlinear behavior"` + // threshold per input source, on absolute value (magnitude), to count as a significant reward event, which then drives maximal ACh -- set to 0 to disable this nonlinear behavior + SrcThr float32 `def:"0.05"` - // [def: true] use the global Context.NeuroMod.HasRew flag -- if there is some kind of external reward being given, then ACh goes to 1, else 0 for this component - Rew slbool.Bool `def:"true" desc:"use the global Context.NeuroMod.HasRew flag -- if there is some kind of external reward being given, then ACh goes to 1, else 0 for this component"` + // use the global Context.NeuroMod.HasRew flag -- if there is some kind of external reward being given, then ACh goes to 1, else 0 for this component + Rew slbool.Bool `def:"true"` - // [def: 2] extent to which active maintenance (via Context.NeuroMod.NotMaint PTNotMaintLayer activity) inhibits ACh signals -- when goal engaged, distractability is lower. 
- MaintInhib float32 `def:"2" desc:"extent to which active maintenance (via Context.NeuroMod.NotMaint PTNotMaintLayer activity) inhibits ACh signals -- when goal engaged, distractability is lower."` + // extent to which active maintenance (via Context.NeuroMod.NotMaint PTNotMaintLayer activity) inhibits ACh signals -- when goal engaged, distractability is lower. + MaintInhib float32 `def:"2"` - // [def: 0.4] maximum NeuroMod.NotMaint activity for computing Maint as 1-NotMaint -- when NotMaint is >= NotMaintMax, then Maint = 0. - NotMaintMax float32 `def:"0.4" desc:"maximum NeuroMod.NotMaint activity for computing Maint as 1-NotMaint -- when NotMaint is >= NotMaintMax, then Maint = 0."` + // maximum NeuroMod.NotMaint activity for computing Maint as 1-NotMaint -- when NotMaint is >= NotMaintMax, then Maint = 0. + NotMaintMax float32 `def:"0.4"` // idx of Layer to get max activity from -- set during Build from BuildConfig SrcLay1Name if present -- -1 if not used - SrcLay1Idx int32 `inactive:"+" desc:"idx of Layer to get max activity from -- set during Build from BuildConfig SrcLay1Name if present -- -1 if not used"` + SrcLay1Idx int32 `inactive:"+"` // idx of Layer to get max activity from -- set during Build from BuildConfig SrcLay2Name if present -- -1 if not used - SrcLay2Idx int32 `inactive:"+" desc:"idx of Layer to get max activity from -- set during Build from BuildConfig SrcLay2Name if present -- -1 if not used"` + SrcLay2Idx int32 `inactive:"+"` // idx of Layer to get max activity from -- set during Build from BuildConfig SrcLay3Name if present -- -1 if not used - SrcLay3Idx int32 `inactive:"+" desc:"idx of Layer to get max activity from -- set during Build from BuildConfig SrcLay3Name if present -- -1 if not used"` + SrcLay3Idx int32 `inactive:"+"` // idx of Layer to get max activity from -- set during Build from BuildConfig SrcLay4Name if present -- -1 if not used - SrcLay4Idx int32 `inactive:"+" desc:"idx of Layer to get max activity from -- set during Build from BuildConfig SrcLay4Name if present -- -1 if not used"` + SrcLay4Idx int32 `inactive:"+"` } func (lp *LDTParams) Defaults() { @@ -109,17 +109,17 @@ func (lp *LDTParams) ACh(ctx *Context, di uint32, srcLay1Act, srcLay2Act, srcLay // VSPatchParams parameters for VSPatch learning type VSPatchParams struct { - // [def: 3] multiplier applied after Thr threshold - Gain float32 `def:"3" desc:"multiplier applied after Thr threshold"` + // multiplier applied after Thr threshold + Gain float32 `def:"3"` - // [def: 0.15] initial value for overall threshold, which adapts over time -- stored in LayerVals.ActAvgVals.AdaptThr - ThrInit float32 `def:"0.15" desc:"initial value for overall threshold, which adapts over time -- stored in LayerVals.ActAvgVals.AdaptThr"` + // initial value for overall threshold, which adapts over time -- stored in LayerVals.ActAvgVals.AdaptThr + ThrInit float32 `def:"0.15"` - // [def: 0,0.002] learning rate for the threshold -- moves in proportion to same predictive error signal that drives synaptic learning - ThrLRate float32 `def:"0,0.002" desc:"learning rate for the threshold -- moves in proportion to same predictive error signal that drives synaptic learning"` + // learning rate for the threshold -- moves in proportion to same predictive error signal that drives synaptic learning + ThrLRate float32 `def:"0,0.002"` - // [def: 10] extra gain factor for non-reward trials, which is the most critical - ThrNonRew float32 `def:"10" desc:"extra gain factor for non-reward trials, which is the most critical"` + // 
extra gain factor for non-reward trials, which is the most critical + ThrNonRew float32 `def:"10"` } func (vp *VSPatchParams) Defaults() { @@ -149,11 +149,11 @@ func (vp *VSPatchParams) ThrVal(act, thr float32) float32 { // every cycle. type VTAParams struct { - // [def: 0.75] gain on CeM activity difference (CeMPos - CeMNeg) for generating LV CS-driven dopamine values - CeMGain float32 `def:"0.75" desc:"gain on CeM activity difference (CeMPos - CeMNeg) for generating LV CS-driven dopamine values"` + // gain on CeM activity difference (CeMPos - CeMNeg) for generating LV CS-driven dopamine values + CeMGain float32 `def:"0.75"` - // [def: 1.25] gain on computed LHb DA (Burst - Dip) -- for controlling DA levels - LHbGain float32 `def:"1.25" desc:"gain on computed LHb DA (Burst - Dip) -- for controlling DA levels"` + // gain on computed LHb DA (Burst - Dip) -- for controlling DA levels + LHbGain float32 `def:"1.25"` pad, pad1 float32 } diff --git a/axon/pvlv_net.go b/axon/pvlv_net.go index 9dc8bbf5e..a58395dda 100644 --- a/axon/pvlv_net.go +++ b/axon/pvlv_net.go @@ -5,9 +5,9 @@ package axon import ( - "github.com/emer/emergent/params" - "github.com/emer/emergent/prjn" - "github.com/emer/emergent/relpos" + "github.com/emer/emergent/v2/params" + "github.com/emer/emergent/v2/prjn" + "github.com/emer/emergent/v2/relpos" ) // AddLDTLayer adds a LDTLayer diff --git a/axon/rl_layers.go b/axon/rl_layers.go index da5a61d96..8fe1076db 100644 --- a/axon/rl_layers.go +++ b/axon/rl_layers.go @@ -5,7 +5,7 @@ package axon import ( - "github.com/emer/etable/minmax" + "goki.dev/etable/v2/minmax" ) //gosl: start rl_layers @@ -15,7 +15,7 @@ import ( type RWPredParams struct { // default 0.1..0.99 range of predictions that can be represented -- having a truncated range preserves some sensitivity in dopamine at the extremes of good or poor performance - PredRange minmax.F32 `desc:"default 0.1..0.99 range of predictions that can be represented -- having a truncated range preserves some sensitivity in dopamine at the extremes of good or poor performance"` + PredRange minmax.F32 } func (rp *RWPredParams) Defaults() { @@ -30,10 +30,10 @@ func (rp *RWPredParams) Update() { type RWDaParams struct { // tonic baseline Ge level for DA = 0 -- +/- are between 0 and 2*TonicGe -- just for spiking display of computed DA value - TonicGe float32 `desc:"tonic baseline Ge level for DA = 0 -- +/- are between 0 and 2*TonicGe -- just for spiking display of computed DA value"` + TonicGe float32 // idx of RWPredLayer to get reward prediction from -- set during Build from BuildConfig RWPredLayName - RWPredLayIdx int32 `inactive:"+" desc:"idx of RWPredLayer to get reward prediction from -- set during Build from BuildConfig RWPredLayName"` + RWPredLayIdx int32 `inactive:"+"` pad, pad1 uint32 } @@ -58,13 +58,13 @@ func (rp *RWDaParams) GeFmDA(da float32) float32 { type TDIntegParams struct { // discount factor -- how much to discount the future prediction from TDPred - Discount float32 `desc:"discount factor -- how much to discount the future prediction from TDPred"` + Discount float32 // gain factor on TD rew pred activations - PredGain float32 `desc:"gain factor on TD rew pred activations"` + PredGain float32 // idx of TDPredLayer to get reward prediction from -- set during Build from BuildConfig TDPredLayName - TDPredLayIdx int32 `inactive:"+" desc:"idx of TDPredLayer to get reward prediction from -- set during Build from BuildConfig TDPredLayName"` + TDPredLayIdx int32 `inactive:"+"` pad uint32 } @@ -82,10 +82,10 @@ func (tp 
*TDIntegParams) Update() { type TDDaParams struct { // tonic baseline Ge level for DA = 0 -- +/- are between 0 and 2*TonicGe -- just for spiking display of computed DA value - TonicGe float32 `desc:"tonic baseline Ge level for DA = 0 -- +/- are between 0 and 2*TonicGe -- just for spiking display of computed DA value"` + TonicGe float32 // idx of TDIntegLayer to get reward prediction from -- set during Build from BuildConfig TDIntegLayName - TDIntegLayIdx int32 `inactive:"+" desc:"idx of TDIntegLayer to get reward prediction from -- set during Build from BuildConfig TDIntegLayName"` + TDIntegLayIdx int32 `inactive:"+"` pad, pad1 uint32 } diff --git a/axon/rl_net.go b/axon/rl_net.go index a5c5c2377..d5e1591bf 100644 --- a/axon/rl_net.go +++ b/axon/rl_net.go @@ -5,8 +5,8 @@ package axon import ( - "github.com/emer/emergent/prjn" - "github.com/emer/emergent/relpos" + "github.com/emer/emergent/v2/prjn" + "github.com/emer/emergent/v2/relpos" ) // AddRewLayer adds a RewLayer of given name diff --git a/axon/threads.go b/axon/threads.go index 41535a85f..ffcdceae0 100644 --- a/axon/threads.go +++ b/axon/threads.go @@ -11,7 +11,7 @@ import ( "sort" "sync" - "github.com/emer/emergent/timer" + "github.com/emer/emergent/v2/timer" "github.com/goki/ki/atomctr" "github.com/goki/ki/ints" ) diff --git a/axon/threads_test.go b/axon/threads_test.go index 46ef3794f..e55339326 100644 --- a/axon/threads_test.go +++ b/axon/threads_test.go @@ -11,14 +11,14 @@ import ( "math/rand" "testing" - "github.com/emer/emergent/etime" - "github.com/emer/emergent/patgen" - "github.com/emer/emergent/prjn" - "github.com/emer/etable/etable" - "github.com/emer/etable/etensor" + "github.com/emer/emergent/v2/etime" + "github.com/emer/emergent/v2/patgen" + "github.com/emer/emergent/v2/prjn" "github.com/goki/ki/ints" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" + "goki.dev/etable/v2/etable" + "goki.dev/etable/v2/etensor" ) const ( diff --git a/chans/ak.go b/chans/ak.go index 824d100d8..6d3f157b2 100644 --- a/chans/ak.go +++ b/chans/ak.go @@ -4,7 +4,7 @@ package chans -import "github.com/goki/mat32" +import "goki.dev/mat32/v2" // AKParams control an A-type K channel, which is voltage gated with maximal // activation around -37 mV. It has two state variables, M (v-gated opening) @@ -17,23 +17,23 @@ import "github.com/goki/mat32" // not simulated, as in our standard axon models. 
 type AKParams struct {
 
-	// [def: 1,0.1,0.01] strength of AK current
-	Gbar float32 `def:"1,0.1,0.01" desc:"strength of AK current"`
+	// strength of AK current
+	Gbar float32 `def:"1,0.1,0.01"`
 
-	// [def: 0.01446,02039] [viewif: Gbar>0] multiplier for the beta term; 0.01446 for distal, 0.02039 for proximal dendrites
-	Beta float32 `viewif:"Gbar>0" def:"0.01446,02039" desc:"multiplier for the beta term; 0.01446 for distal, 0.02039 for proximal dendrites"`
+	// multiplier for the beta term; 0.01446 for distal, 0.02039 for proximal dendrites
+	Beta float32 `viewif:"Gbar>0" def:"0.01446,0.02039"`
 
-	// [def: 0.5,0.25] [viewif: Gbar>0] Dm factor: 0.5 for distal, 0.25 for proximal
-	Dm float32 `viewif:"Gbar>0" def:"0.5,0.25" desc:"Dm factor: 0.5 for distal, 0.25 for proximal"`
+	// Dm factor: 0.5 for distal, 0.25 for proximal
+	Dm float32 `viewif:"Gbar>0" def:"0.5,0.25"`
 
-	// [def: 1.8,1.5] [viewif: Gbar>0] offset for K, 1.8 for distal, 1.5 for proximal
-	Koff float32 `viewif:"Gbar>0" def:"1.8,1.5" desc:"offset for K, 1.8 for distal, 1.5 for proximal"`
+	// offset for K, 1.8 for distal, 1.5 for proximal
+	Koff float32 `viewif:"Gbar>0" def:"1.8,1.5"`
 
-	// [def: 1,11] [viewif: Gbar>0] voltage offset for alpha and beta functions: 1 for distal, 11 for proximal
-	Voff float32 `viewif:"Gbar>0" def:"1,11" desc:"voltage offset for alpha and beta functions: 1 for distal, 11 for proximal"`
+	// voltage offset for alpha and beta functions: 1 for distal, 11 for proximal
+	Voff float32 `viewif:"Gbar>0" def:"1,11"`
 
-	// [def: 0.1133,0.1112] [viewif: Gbar>0] h multiplier factor, 0.1133 for distal, 0.1112 for proximal
-	Hf float32 `viewif:"Gbar>0" def:"0.1133,0.1112" desc:"h multiplier factor, 0.1133 for distal, 0.1112 for proximal"`
+	// h multiplier factor, 0.1133 for distal, 0.1112 for proximal
+	Hf float32 `viewif:"Gbar>0" def:"0.1133,0.1112"`
 
 	pad, pad1 float32
 }
@@ -139,19 +139,19 @@ func (ap *AKParams) Gak(m, h float32) float32 {
 // voltage gated calcium channels which can otherwise drive runaway excitatory currents.
 type AKsParams struct {
 
-	// [def: 2,0.1,0.01] strength of AK current
-	Gbar float32 `def:"2,0.1,0.01" desc:"strength of AK current"`
+	// strength of AK current
+	Gbar float32 `def:"2,0.1,0.01"`
 
-	// [def: 0.076] [viewif: Gbar>0] H factor as a constant multiplier on overall M factor result -- rescales M to level consistent with H being present at full strength
-	Hf float32 `viewif:"Gbar>0" def:"0.076" desc:"H factor as a constant multiplier on overall M factor result -- rescales M to level consistent with H being present at full strength"`
+	// H factor as a constant multiplier on overall M factor result -- rescales M to level consistent with H being present at full strength
+	Hf float32 `viewif:"Gbar>0" def:"0.076"`
 
-	// [def: 0.075] [viewif: Gbar>0] multiplier for M -- determines slope of function
-	Mf float32 `viewif:"Gbar>0" def:"0.075" desc:"multiplier for M -- determines slope of function"`
+	// multiplier for M -- determines slope of function
+	Mf float32 `viewif:"Gbar>0" def:"0.075"`
 
-	// [def: 2] [viewif: Gbar>0] voltage offset in biological units for M function
-	Voff float32 `viewif:"Gbar>0" def:"2" desc:"voltage offset in biological units for M function"`
+	// voltage offset in biological units for M function
+	Voff float32 `viewif:"Gbar>0" def:"2"`
 
-	// [viewif: Gbar>0]
-	Vmax float32 `viewif:"Gbar>0" def:-37" desc:"voltage level of maximum channel opening -- stays flat above that"`
+	// voltage level of maximum channel opening -- stays flat above that
+	Vmax float32 `viewif:"Gbar>0" def:"-37"`
 
 	pad, pad1, pad2 int32
diff --git a/chans/gabab.go b/chans/gabab.go
index 626a2a1eb..37cf70e53 100644
--- a/chans/gabab.go
+++ b/chans/gabab.go
@@ -5,7 +5,7 @@ package chans
 
 import (
-	"github.com/goki/mat32"
+	"goki.dev/mat32/v2"
 )
 
 //gosl: start chans
@@ -14,32 +14,32 @@ import (
 // based on Brunel & Wang (2001) parameters.
 type GABABParams struct {
 
-	// [def: 0,0.012,0.015] overall strength multiplier of GABA-B current. The 0.015 default is a high value that works well in smaller networks -- larger networks may benefit from lower levels (e.g., 0.012).
-	Gbar float32 `def:"0,0.012,0.015" desc:"overall strength multiplier of GABA-B current. The 0.015 default is a high value that works well in smaller networks -- larger networks may benefit from lower levels (e.g., 0.012)."`
+	// overall strength multiplier of GABA-B current. The 0.015 default is a high value that works well in smaller networks -- larger networks may benefit from lower levels (e.g., 0.012).
+	Gbar float32 `def:"0,0.012,0.015"`
 
-	// [def: 45] [viewif: Gbar>0] rise time for bi-exponential time dynamics of GABA-B
-	RiseTau float32 `viewif:"Gbar>0" def:"45" desc:"rise time for bi-exponential time dynamics of GABA-B"`
+	// rise time for bi-exponential time dynamics of GABA-B
+	RiseTau float32 `viewif:"Gbar>0" def:"45"`
 
-	// [def: 50] [viewif: Gbar>0] decay time for bi-exponential time dynamics of GABA-B
-	DecayTau float32 `viewif:"Gbar>0" def:"50" desc:"decay time for bi-exponential time dynamics of GABA-B"`
+	// decay time for bi-exponential time dynamics of GABA-B
+	DecayTau float32 `viewif:"Gbar>0" def:"50"`
 
-	// [def: 0.2] [viewif: Gbar>0] baseline level of GABA-B channels open independent of inhibitory input (is added to spiking-produced conductance)
-	Gbase float32 `viewif:"Gbar>0" def:"0.2" desc:"baseline level of GABA-B channels open independent of inhibitory input (is added to spiking-produced conductance)"`
+	// baseline level of GABA-B channels open independent of inhibitory input (is added to spiking-produced conductance)
+	Gbase float32 `viewif:"Gbar>0" def:"0.2"`
 
-	// [def: 10] [viewif: Gbar>0] multiplier for converting Gi to equivalent GABA spikes
-	GiSpike float32 `viewif:"Gbar>0" def:"10" desc:"multiplier for converting Gi to equivalent GABA spikes"`
+	// multiplier for converting Gi to equivalent GABA spikes
+	GiSpike float32 `viewif:"Gbar>0" def:"10"`
 
-	// [viewif: Gbar>0] time offset when peak conductance occurs, in msec, computed from RiseTau and DecayTau
-	MaxTime float32 `viewif:"Gbar>0" inactive:"+" desc:"time offset when peak conductance occurs, in msec, computed from RiseTau and DecayTau"`
+	// time offset when peak conductance occurs, in msec, computed from RiseTau and DecayTau
+	MaxTime float32 `viewif:"Gbar>0" inactive:"+"`
 
-	// [view: -] time constant factor used in integration: (Decay / Rise) ^ (Rise / (Decay - Rise))
-	TauFact float32 `view:"-" desc:"time constant factor used in integration: (Decay / Rise) ^ (Rise / (Decay - Rise))"`
+	// time constant factor used in integration: (Decay / Rise) ^ (Rise / (Decay - Rise))
+	TauFact float32 `view:"-"`
 
-	// [view: -] 1/Tau
-	RiseDt float32 `view:"-" inactive:"+" desc:"1/Tau"`
+	// 1/Tau
+	RiseDt float32 `view:"-" inactive:"+"`
 
-	// [view: -] 1/Tau
-	DecayDt float32 `view:"-" inactive:"+" desc:"1/Tau"`
+	// 1/Tau
+	DecayDt float32 `view:"-" inactive:"+"`
 
 	pad, pad1, pad2 float32
 }
diff --git a/chans/mahp.go b/chans/mahp.go
index b7560211c..06d0da11d 100644
--- a/chans/mahp.go
+++ b/chans/mahp.go
@@ -4,7 +4,7 @@ package chans
 
-import "github.com/goki/mat32"
+import "goki.dev/mat32/v2"
 
 //gosl: start chans
@@ -18,22 +18,22 @@ import "goki.dev/mat32/v2"
 type MahpParams struct {
 
 	// strength of mAHP current
-	Gbar float32 `desc:"strength of mAHP current"`
+	Gbar float32
 
-	// [def: -30] [viewif: Gbar>0] voltage offset (threshold) in biological units for infinite time N gating function -- where the gate is at 50% strength
-	Voff float32 `viewif:"Gbar>0" def:"-30" desc:"voltage offset (threshold) in biological units for infinite time N gating function -- where the gate is at 50% strength"`
+	// voltage offset (threshold) in biological units for infinite time N gating function -- where the gate is at 50% strength
+	Voff float32 `viewif:"Gbar>0" def:"-30"`
 
-	// [def: 9] [viewif: Gbar>0] slope of the arget (infinite time) gating function
-	Vslope float32 `viewif:"Gbar>0" def:"9" desc:"slope of the arget (infinite time) gating function"`
+	// slope of the target (infinite time) gating function
+	Vslope float32 `viewif:"Gbar>0" def:"9"`
 
-	// [def: 1000] [viewif: Gbar>0] maximum slow rate time constant in msec for activation / deactivation. The effective Tau is much slower -- 1/20th in original temp, and 1/60th in standard 37 C temp
-	TauMax float32 `viewif:"Gbar>0" def:"1000" desc:"maximum slow rate time constant in msec for activation / deactivation. The effective Tau is much slower -- 1/20th in original temp, and 1/60th in standard 37 C temp"`
+	// maximum slow rate time constant in msec for activation / deactivation. The effective Tau is much slower -- 1/20th in original temp, and 1/60th in standard 37 C temp
+	TauMax float32 `viewif:"Gbar>0" def:"1000"`
 
-	// [view: -] [viewif: Gbar>0] temperature adjustment factor: assume temp = 37 C, whereas original units were at 23 C
-	Tadj float32 `viewif:"Gbar>0" view:"-" inactive:"+" desc:"temperature adjustment factor: assume temp = 37 C, whereas original units were at 23 C"`
+	// temperature adjustment factor: assume temp = 37 C, whereas original units were at 23 C
+	Tadj float32 `viewif:"Gbar>0" view:"-" inactive:"+"`
 
-	// [view: -] 1/Tau
-	DtMax float32 `view:"-" inactive:"+" desc:"1/Tau"`
+	// 1/Tau
+	DtMax float32 `view:"-" inactive:"+"`
 
 	pad, pad2 int32
 }
diff --git a/chans/nmda.go b/chans/nmda.go
index dcb93f520..160391721 100644
--- a/chans/nmda.go
+++ b/chans/nmda.go
@@ -4,7 +4,7 @@ package chans
 
-import "github.com/goki/mat32"
+import "goki.dev/mat32/v2"
 
 //gosl: start chans
@@ -15,29 +15,29 @@
 // increments
 type NMDAParams struct {
 
-	// [def: 0,0.006,0.007] overall multiplier for strength of NMDA current -- multiplies GnmdaSyn to get net conductance.
-	Gbar float32 `def:"0,0.006,0.007" desc:"overall multiplier for strength of NMDA current -- multiplies GnmdaSyn to get net conductance."`
+	// overall multiplier for strength of NMDA current -- multiplies GnmdaSyn to get net conductance.
+	Gbar float32 `def:"0,0.006,0.007"`
 
-	// [def: 30,50,100,200,300] [viewif: Gbar>0] decay time constant for NMDA channel activation -- rise time is 2 msec and not worth extra effort for biexponential. 30 fits the Urakubo et al (2008) model with ITau = 100, but 100 works better in practice is small networks so far.
-	Tau float32 `viewif:"Gbar>0" def:"30,50,100,200,300" desc:"decay time constant for NMDA channel activation -- rise time is 2 msec and not worth extra effort for biexponential. 30 fits the Urakubo et al (2008) model with ITau = 100, but 100 works better in practice is small networks so far."`
+	// decay time constant for NMDA channel activation -- rise time is 2 msec and not worth extra effort for biexponential. 30 fits the Urakubo et al (2008) model with ITau = 100, but 100 works better in practice in small networks so far.
+	Tau float32 `viewif:"Gbar>0" def:"30,50,100,200,300"`
 
-	// [def: 1,100] [viewif: Gbar>0] decay time constant for NMDA channel inhibition, which captures the Urakubo et al (2008) allosteric dynamics (100 fits their model well) -- set to 1 to eliminate that mechanism.
-	ITau float32 `viewif:"Gbar>0" def:"1,100" desc:"decay time constant for NMDA channel inhibition, which captures the Urakubo et al (2008) allosteric dynamics (100 fits their model well) -- set to 1 to eliminate that mechanism."`
+	// decay time constant for NMDA channel inhibition, which captures the Urakubo et al (2008) allosteric dynamics (100 fits their model well) -- set to 1 to eliminate that mechanism. 
+ ITau float32 `viewif:"Gbar>0" def:"1,100"` - // [def: 1:1.5] [viewif: Gbar>0] magnesium ion concentration: Brunel & Wang (2001) and Sanders et al (2013) use 1 mM, based on Jahr & Stevens (1990). Urakubo et al (2008) use 1.5 mM. 1.4 with Voff = 5 works best so far in large models, 1.2, Voff = 0 best in smaller nets. - MgC float32 `viewif:"Gbar>0" def:"1:1.5" desc:"magnesium ion concentration: Brunel & Wang (2001) and Sanders et al (2013) use 1 mM, based on Jahr & Stevens (1990). Urakubo et al (2008) use 1.5 mM. 1.4 with Voff = 5 works best so far in large models, 1.2, Voff = 0 best in smaller nets."` + // magnesium ion concentration: Brunel & Wang (2001) and Sanders et al (2013) use 1 mM, based on Jahr & Stevens (1990). Urakubo et al (2008) use 1.5 mM. 1.4 with Voff = 5 works best so far in large models, 1.2, Voff = 0 best in smaller nets. + MgC float32 `viewif:"Gbar>0" def:"1:1.5"` - // [def: 0] [viewif: Gbar>0] offset in membrane potential in biological units for voltage-dependent functions. 5 corresponds to the -65 mV rest, -45 threshold of the Urakubo et al (2008) model. 5 was used before in a buggy version of NMDA equation -- 0 is new default. - Voff float32 `viewif:"Gbar>0" def:"0" desc:"offset in membrane potential in biological units for voltage-dependent functions. 5 corresponds to the -65 mV rest, -45 threshold of the Urakubo et al (2008) model. 5 was used before in a buggy version of NMDA equation -- 0 is new default."` + // offset in membrane potential in biological units for voltage-dependent functions. 5 corresponds to the -65 mV rest, -45 threshold of the Urakubo et al (2008) model. 5 was used before in a buggy version of NMDA equation -- 0 is new default. + Voff float32 `viewif:"Gbar>0" def:"0"` - // [view: -] rate = 1 / tau - Dt float32 `view:"-" json:"-" xml:"-" desc:"rate = 1 / tau"` + // rate = 1 / tau + Dt float32 `view:"-" json:"-" xml:"-"` - // [view: -] rate = 1 / tau - IDt float32 `view:"-" json:"-" xml:"-" desc:"rate = 1 / tau"` + // rate = 1 / tau + IDt float32 `view:"-" json:"-" xml:"-"` - // [view: -] MgFact = MgC / 3.57 - MgFact float32 `view:"-" json:"-" xml:"-" desc:"MgFact = MgC / 3.57"` + // MgFact = MgC / 3.57 + MgFact float32 `view:"-" json:"-" xml:"-"` } func (np *NMDAParams) Defaults() { diff --git a/chans/sahp.go b/chans/sahp.go index 763ef19de..b768314b9 100644 --- a/chans/sahp.go +++ b/chans/sahp.go @@ -4,7 +4,7 @@ package chans -import "github.com/goki/mat32" +import "goki.dev/mat32/v2" //gosl: start chans @@ -18,26 +18,26 @@ import "github.com/goki/mat32" // of the n gating value, but tau is computed in any case. 
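//
// A minimal sketch (not the exact library code) of the infinite-time gating
// function implied by the Off and Slope parameters below -- a logistic that
// sits at 50% gating when the integrated Ca equals Off:
//
//	func sahpNinf(ca, off, slope float32) float32 {
//		return 1.0 / (1.0 + mat32.Exp(-(ca-off)/slope))
//	}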
type SahpParams struct { - // [def: 0.05,0.1] strength of sAHP current - Gbar float32 `def:"0.05,0.1" desc:"strength of sAHP current"` + // strength of sAHP current + Gbar float32 `def:"0.05,0.1"` - // [def: 5,10] [viewif: Gbar>0] time constant for integrating Ca across theta cycles - CaTau float32 `viewif:"Gbar>0" def:"5,10" desc:"time constant for integrating Ca across theta cycles"` + // time constant for integrating Ca across theta cycles + CaTau float32 `viewif:"Gbar>0" def:"5,10"` - // [def: 0.8] [viewif: Gbar>0] integrated Ca offset (threshold) for infinite time N gating function -- where the gate is at 50% strength - Off float32 `viewif:"Gbar>0" def:"0.8" desc:"integrated Ca offset (threshold) for infinite time N gating function -- where the gate is at 50% strength"` + // integrated Ca offset (threshold) for infinite time N gating function -- where the gate is at 50% strength + Off float32 `viewif:"Gbar>0" def:"0.8"` - // [def: 0.02] [viewif: Gbar>0] slope of the infinite time logistic gating function - Slope float32 `viewif:"Gbar>0" def:"0.02" desc:"slope of the infinite time logistic gating function"` + // slope of the infinite time logistic gating function + Slope float32 `viewif:"Gbar>0" def:"0.02"` - // [def: 1] [viewif: Gbar>0] maximum slow rate time constant in msec for activation / deactivation. The effective Tau is much slower -- 1/20th in original temp, and 1/60th in standard 37 C temp - TauMax float32 `viewif:"Gbar>0" def:"1" desc:"maximum slow rate time constant in msec for activation / deactivation. The effective Tau is much slower -- 1/20th in original temp, and 1/60th in standard 37 C temp"` + // maximum slow rate time constant in msec for activation / deactivation. The effective Tau is much slower -- 1/20th in original temp, and 1/60th in standard 37 C temp + TauMax float32 `viewif:"Gbar>0" def:"1"` - // [view: -] 1/Tau - CaDt float32 `view:"-" inactive:"+" desc:"1/Tau"` + // 1/Tau + CaDt float32 `view:"-" inactive:"+"` - // [view: -] 1/Tau - DtMax float32 `view:"-" inactive:"+" desc:"1/Tau"` + // 1/Tau + DtMax float32 `view:"-" inactive:"+"` pad int32 } diff --git a/chans/skca.go b/chans/skca.go index e9da40727..98423cb7f 100644 --- a/chans/skca.go +++ b/chans/skca.go @@ -5,7 +5,7 @@ package chans import ( - "github.com/goki/mat32" + "goki.dev/mat32/v2" ) //gosl: start chans @@ -23,41 +23,41 @@ import ( // (also Muddapu & Chakravarthy, 2021): X^h / (X^h + C50^h) where h ~= 4 (hard coded) type SKCaParams struct { - // [def: 0,2,3] overall strength of sKCa current -- inactive if 0 - Gbar float32 `def:"0,2,3" desc:"overall strength of sKCa current -- inactive if 0"` + // overall strength of sKCa current -- inactive if 0 + Gbar float32 `def:"0,2,3"` - // [def: 0.4,0.5] [viewif: Gbar>0] 50% Ca concentration baseline value in Hill equation -- set this to level that activates at reasonable levels of SKCaR - C50 float32 `viewif:"Gbar>0" def:"0.4,0.5" desc:"50% Ca concentration baseline value in Hill equation -- set this to level that activates at reasonable levels of SKCaR"` + // 50% Ca concentration baseline value in Hill equation -- set this to level that activates at reasonable levels of SKCaR + C50 float32 `viewif:"Gbar>0" def:"0.4,0.5"` - // [def: 15] [viewif: Gbar>0] K channel gating factor activation time constant -- roughly 5-15 msec in literature - ActTau float32 `viewif:"Gbar>0" def:"15" desc:"K channel gating factor activation time constant -- roughly 5-15 msec in literature"` + // K channel gating factor activation time constant -- roughly 5-15 msec in 
literature
+	ActTau float32 `viewif:"Gbar>0" def:"15"`
 
-	// [def: 30] [viewif: Gbar>0] K channel gating factor deactivation time constant -- roughly 30-50 msec in literature
-	DeTau float32 `viewif:"Gbar>0" def:"30" desc:"K channel gating factor deactivation time constant -- roughly 30-50 msec in literature"`
+	// K channel gating factor deactivation time constant -- roughly 30-50 msec in literature
+	DeTau float32 `viewif:"Gbar>0" def:"30"`
 
-	// [def: 0.4,0.8] [viewif: Gbar>0] proportion of CaIn intracellular stores that are released per spike, going into CaR
-	KCaR float32 `viewif:"Gbar>0" def:"0.4,0.8" desc:"proportion of CaIn intracellular stores that are released per spike, going into CaR"`
+	// proportion of CaIn intracellular stores that are released per spike, going into CaR
+	KCaR float32 `viewif:"Gbar>0" def:"0.4,0.8"`
 
-	// [def: 150,200] [viewif: Gbar>0] SKCaR released calcium decay time constant
-	CaRDecayTau float32 `viewif:"Gbar>0" def:"150,200" desc:"SKCaR released calcium decay time constant"`
+	// SKCaR released calcium decay time constant
+	CaRDecayTau float32 `viewif:"Gbar>0" def:"150,200"`
 
-	// [def: 0.01] [viewif: Gbar>0] level of time-integrated spiking activity (CaSpkD) below which CaIn intracelluar stores are replenished -- a low threshold can be used to require minimal activity to recharge -- set to a high value (e.g., 10) for constant recharge.
-	CaInThr float32 `viewif:"Gbar>0" def:"0.01" desc:"level of time-integrated spiking activity (CaSpkD) below which CaIn intracelluar stores are replenished -- a low threshold can be used to require minimal activity to recharge -- set to a high value (e.g., 10) for constant recharge."`
+	// level of time-integrated spiking activity (CaSpkD) below which CaIn intracellular stores are replenished -- a low threshold can be used to require minimal activity to recharge -- set to a high value (e.g., 10) for constant recharge.
+	CaInThr float32 `viewif:"Gbar>0" def:"0.01"`
 
-	// [def: 50] [viewif: Gbar>0] time constant in msec for storing CaIn when activity is below CaInThr
-	CaInTau float32 `viewif:"Gbar>0" def:"50" desc:"time constant in msec for storing CaIn when activity is below CaInThr"`
+	// time constant in msec for storing CaIn when activity is below CaInThr
+	CaInTau float32 `viewif:"Gbar>0" def:"50"`
 
-	// [view: -] rate = 1 / tau
-	ActDt float32 `view:"-" json:"-" xml:"-" desc:"rate = 1 / tau"`
+	// rate = 1 / tau
+	ActDt float32 `view:"-" json:"-" xml:"-"`
 
-	// [view: -] rate = 1 / tau
-	DeDt float32 `view:"-" json:"-" xml:"-" desc:"rate = 1 / tau"`
+	// rate = 1 / tau
+	DeDt float32 `view:"-" json:"-" xml:"-"`
 
-	// [view: -] rate = 1 / tau
-	CaRDecayDt float32 `view:"-" json:"-" xml:"-" desc:"rate = 1 / tau"`
+	// rate = 1 / tau
+	CaRDecayDt float32 `view:"-" json:"-" xml:"-"`
 
-	// [view: -] rate = 1 / tau
-	CaInDt float32 `view:"-" json:"-" xml:"-" desc:"rate = 1 / tau"`
+	// rate = 1 / tau
+	CaInDt float32 `view:"-" json:"-" xml:"-"`
 }
 
 func (sp *SKCaParams) Defaults() {
diff --git a/chans/vgcc.go b/chans/vgcc.go
index dbcaa36ee..68e5e3d4d 100644
--- a/chans/vgcc.go
+++ b/chans/vgcc.go
@@ -5,7 +5,7 @@ package chans
 
 import (
-	"github.com/goki/mat32"
+	"goki.dev/mat32/v2"
 )
 
 //gosl: start chans
@@ -16,11 +16,11 @@ import (
 // In particular look at the file MODEL/Poirazi_cell/CaL.g.
type VGCCParams struct { - // [def: 0.02,0.12] strength of VGCC current -- 0.12 value is from Urakubo et al (2008) model -- best fits actual model behavior using axon equations (1.5 nominal in that model), 0.02 works better in practice for not getting stuck in high plateau firing - Gbar float32 `def:"0.02,0.12" desc:"strength of VGCC current -- 0.12 value is from Urakubo et al (2008) model -- best fits actual model behavior using axon equations (1.5 nominal in that model), 0.02 works better in practice for not getting stuck in high plateau firing"` + // strength of VGCC current -- 0.12 value is from Urakubo et al (2008) model -- best fits actual model behavior using axon equations (1.5 nominal in that model), 0.02 works better in practice for not getting stuck in high plateau firing + Gbar float32 `def:"0.02,0.12"` - // [def: 25] [viewif: Gbar>0] calcium from conductance factor -- important for learning contribution of VGCC - Ca float32 `viewif:"Gbar>0" def:"25" desc:"calcium from conductance factor -- important for learning contribution of VGCC"` + // calcium from conductance factor -- important for learning contribution of VGCC + Ca float32 `viewif:"Gbar>0" def:"25"` pad, pad1 int32 } diff --git a/examples/attn_trn/attn.go b/examples/attn_trn/attn.go index 06cf54a7f..7584b4596 100644 --- a/examples/attn_trn/attn.go +++ b/examples/attn_trn/attn.go @@ -18,24 +18,24 @@ import ( "strconv" "github.com/emer/axon/axon" - "github.com/emer/emergent/emer" - "github.com/emer/emergent/env" - "github.com/emer/emergent/evec" - "github.com/emer/emergent/netview" - "github.com/emer/emergent/params" - "github.com/emer/emergent/prjn" - "github.com/emer/emergent/relpos" - "github.com/emer/etable/agg" - "github.com/emer/etable/eplot" - "github.com/emer/etable/etable" - "github.com/emer/etable/etensor" - "github.com/emer/etable/split" - "github.com/goki/gi/gi" - "github.com/goki/gi/gimain" - "github.com/goki/gi/giv" + "github.com/emer/emergent/v2/emer" + "github.com/emer/emergent/v2/env" + "github.com/emer/emergent/v2/evec" + "github.com/emer/emergent/v2/netview" + "github.com/emer/emergent/v2/params" + "github.com/emer/emergent/v2/prjn" + "github.com/emer/emergent/v2/relpos" "github.com/goki/ki/ki" "github.com/goki/ki/kit" - "github.com/goki/mat32" + "goki.dev/etable/v2/agg" + "goki.dev/etable/v2/eplot" + "goki.dev/etable/v2/etable" + "goki.dev/etable/v2/etensor" + "goki.dev/etable/v2/split" + "goki.dev/gi/v2/gi" + "goki.dev/gi/v2/gimain" + "goki.dev/gi/v2/giv" + "goki.dev/mat32/v2" ) // this is the stub main for gogi that calls our actual mainrun function, at end of file @@ -216,89 +216,89 @@ var ParamSets = params.Sets{ // for the fields which provide hints to how things should be displayed). 
 type Sim struct {
 
-	// [def: 200] number of cycles per trial
-	Cycles int `def:"200" desc:"number of cycles per trial"`
+	// number of cycles per trial
+	Cycles int `def:"200"`
 
-	// [def: 10] number of runs to run to collect stats
-	Runs int `def:"10" desc:"number of runs to run to collect stats"`
+	// number of runs to run to collect stats
+	Runs int `def:"10"`
 
-	// [def: true] sodium (Na) gated potassium (K) channels that cause neurons to fatigue over time
-	KNaAdapt bool `def:"true" desc:"sodium (Na) gated potassium (K) channels that cause neurons to fatigue over time"`
+	// sodium (Na) gated potassium (K) channels that cause neurons to fatigue over time
+	KNaAdapt bool `def:"true"`
 
-	// [view: no-inline] the network -- click to view / edit parameters for layers, prjns, etc
-	Net *axon.Network `view:"no-inline" desc:"the network -- click to view / edit parameters for layers, prjns, etc"`
+	// the network -- click to view / edit parameters for layers, prjns, etc
+	Net *axon.Network `view:"no-inline"`
 
-	// [view: Standard same-to-same size topographic projection]
+	// Standard same-to-same size topographic projection
 	Prjn3x3Skp1 *prjn.PoolTile `view:"Standard same-to-same size topographic projection"`
 
-	// [view: Standard same-to-same size topographic projection]
+	// Standard same-to-same size topographic projection
 	Prjn5x5Skp1 *prjn.PoolTile `view:"Standard same-to-same size topographic projection"`
 
 	// select which type of test (input patterns) to use
-	Test TestType `desc:"select which type of test (input patterns) to use"`
+	Test TestType
 
-	// [view: no-inline] testing trial-level log data -- click to see record of network's response to each input
-	TstTrlLog *etable.Table `view:"no-inline" desc:"testing trial-level log data -- click to see record of network's response to each input"`
+	// testing trial-level log data -- click to see record of network's response to each input
+	TstTrlLog *etable.Table `view:"no-inline"`
 
-	// [view: no-inline] aggregated testing data
-	TstRunLog *etable.Table `view:"no-inline" desc:"aggregated testing data"`
+	// aggregated testing data
+	TstRunLog *etable.Table `view:"no-inline"`
 
-	// [view: no-inline] aggregate stats on testing data
-	TstStats *etable.Table `view:"no-inline" desc:"aggregate stats on testing data"`
+	// aggregate stats on testing data
+	TstStats *etable.Table `view:"no-inline"`
 
-	// [view: no-inline] full collection of param sets -- not really interesting for this model
-	Params params.Sets `view:"no-inline" desc:"full collection of param sets -- not really interesting for this model"`
+	// full collection of param sets -- not really interesting for this model
+	Params params.Sets `view:"no-inline"`
 
 	// Testing environment -- manages iterating over testing
-	TestEnv AttnEnv `desc:"Testing environment -- manages iterating over testing"`
+	TestEnv AttnEnv
 
 	// axon timing parameters and state
-	Context axon.Context `desc:"axon timing parameters and state"`
+	Context axon.Context
 
 	// whether to update the network view while running
-	ViewOn bool `desc:"whether to update the network view while running"`
+	ViewOn bool
 
 	// at what time scale to update the display during testing? Change to AlphaCyc to make display updating go faster
-	ViewUpdt axon.TimeScales `desc:"at what time scale to update the display during testing? 
Change to AlphaCyc to make display updating go faster"` + ViewUpdt axon.TimeScales // layer to measure attentional effects on - AttnLay string `desc:"layer to measure attentional effects on"` + AttnLay string // names of layers to record activations etc of during testing - TstRecLays []string `desc:"names of layers to record activations etc of during testing"` + TstRecLays []string // max activation in center of stimulus 1 (attended, stronger) - S1Act float32 `desc:"max activation in center of stimulus 1 (attended, stronger)"` + S1Act float32 // max activation in center of stimulus 2 (ignored, weaker) - S2Act float32 `desc:"max activation in center of stimulus 2 (ignored, weaker)"` + S2Act float32 // percent modulation = (S1Act - S2Act) / S1Act - PctMod float32 `desc:"percent modulation = (S1Act - S2Act) / S1Act"` + PctMod float32 - // [view: -] main GUI window - Win *gi.Window `view:"-" desc:"main GUI window"` + // main GUI window + Win *gi.Window `view:"-"` - // [view: -] the network viewer - NetView *netview.NetView `view:"-" desc:"the network viewer"` + // the network viewer + NetView *netview.NetView `view:"-"` - // [view: -] the master toolbar - ToolBar *gi.ToolBar `view:"-" desc:"the master toolbar"` + // the master toolbar + ToolBar *gi.ToolBar `view:"-"` - // [view: -] the test-trial plot - TstTrlPlot *eplot.Plot2D `view:"-" desc:"the test-trial plot"` + // the test-trial plot + TstTrlPlot *eplot.Plot2D `view:"-"` - // [view: -] the test-trial plot - TstRunPlot *eplot.Plot2D `view:"-" desc:"the test-trial plot"` + // the test-trial plot + TstRunPlot *eplot.Plot2D `view:"-"` - // [view: -] for holding layer values - ValsTsrs map[string]*etensor.Float32 `view:"-" desc:"for holding layer values"` + // for holding layer values + ValsTsrs map[string]*etensor.Float32 `view:"-"` - // [view: -] true if sim is running - IsRunning bool `view:"-" desc:"true if sim is running"` + // true if sim is running + IsRunning bool `view:"-"` - // [view: -] flag to stop running - StopNow bool `view:"-" desc:"flag to stop running"` + // flag to stop running + StopNow bool `view:"-"` } // this registers this Sim Type and gives it properties that e.g., diff --git a/examples/attn_trn/attn_env.go b/examples/attn_trn/attn_env.go index aab48b93b..1ecd4201a 100644 --- a/examples/attn_trn/attn_env.go +++ b/examples/attn_trn/attn_env.go @@ -9,27 +9,27 @@ package main import ( "fmt" - "github.com/emer/emergent/efuns" - "github.com/emer/emergent/env" - "github.com/emer/emergent/evec" - "github.com/emer/etable/etensor" - "github.com/goki/mat32" + "github.com/emer/emergent/v2/efuns" + "github.com/emer/emergent/v2/env" + "github.com/emer/emergent/v2/evec" + "goki.dev/etable/v2/etensor" + "goki.dev/mat32/v2" ) // Stim describes a single stimulus type Stim struct { // position in normalized coordintes - Pos mat32.Vec2 `desc:"position in normalized coordintes"` + Pos mat32.Vec2 // feature number: 0-3 for V1 input, -1 for LIP attn - Feat int `desc:"feature number: 0-3 for V1 input, -1 for LIP attn"` + Feat int // normalized width - Width float32 `desc:"normalized width"` + Width float32 // normalized contrast level - Contrast float32 `desc:"normalized contrast level"` + Contrast float32 } // PosXY returns XY position projected into size of grid @@ -41,10 +41,10 @@ func (st *Stim) PosXY(size evec.Vec2i) mat32.Vec2 { type StimSet struct { // description of set - Name string `desc:"description of set"` + Name string // stims to present - Stims []Stim `desc:"stims to present"` + Stims []Stim } // Stims is a list of a set of 
stimuli to present @@ -57,52 +57,52 @@ type Stims []StimSet type AttnEnv struct { // name of this environment - Nm string `desc:"name of this environment"` + Nm string // description of this environment - Dsc string `desc:"description of this environment"` + Dsc string // multiplier on contrast function - ContrastMult float32 `desc:"multiplier on contrast function"` + ContrastMult float32 // gain on contrast function inside exponential - ContrastGain float32 `desc:"gain on contrast function inside exponential"` + ContrastGain float32 // offset on contrast function - ContrastOff float32 `desc:"offset on contrast function"` + ContrastOff float32 // use gaussian for LIP -- otherwise fixed circle - LIPGauss bool `desc:"use gaussian for LIP -- otherwise fixed circle"` + LIPGauss bool // a list of stimuli to present - Stims Stims `desc:"a list of stimuli to present"` + Stims Stims // current stimuli presented - CurStim *StimSet `inactive:"+" desc:"current stimuli presented"` + CurStim *StimSet `inactive:"+"` // activation level (midpoint) -- feature is incremented, rest decremented relative to this - Act float32 `desc:"activation level (midpoint) -- feature is incremented, rest decremented relative to this"` + Act float32 // size of V1 Pools - V1Pools evec.Vec2i `desc:"size of V1 Pools"` + V1Pools evec.Vec2i // size of V1 features per pool - V1Feats evec.Vec2i `desc:"size of V1 features per pool"` + V1Feats evec.Vec2i // V1 rendered input state, 4D Size x Size - V1 etensor.Float32 `desc:"V1 rendered input state, 4D Size x Size"` + V1 etensor.Float32 // LIP top-down attention - LIP etensor.Float32 `desc:"LIP top-down attention"` + LIP etensor.Float32 - // [view: inline] current run of model as provided during Init - Run env.Ctr `view:"inline" desc:"current run of model as provided during Init"` + // current run of model as provided during Init + Run env.Ctr `view:"inline"` - // [view: inline] number of times through Seq.Max number of sequences - Epoch env.Ctr `view:"inline" desc:"number of times through Seq.Max number of sequences"` + // number of times through Seq.Max number of sequences + Epoch env.Ctr `view:"inline"` - // [view: inline] trial increments over input states -- could add Event as a lower level - Trial env.Ctr `view:"inline" desc:"trial increments over input states -- could add Event as a lower level"` + // trial increments over input states -- could add Event as a lower level + Trial env.Ctr `view:"inline"` } func (ev *AttnEnv) Name() string { return ev.Nm } diff --git a/examples/attn_trn/stims.go b/examples/attn_trn/stims.go index 6d815b6a7..8c8530b60 100644 --- a/examples/attn_trn/stims.go +++ b/examples/attn_trn/stims.go @@ -6,7 +6,7 @@ package main -import "github.com/goki/mat32" +import "goki.dev/mat32/v2" // StimAttnSize is a list of stimuli manipulating the size of stimuli vs. attention // it is the primary test of Reynolds & Heeger 2009 attentional dynamics. 
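The hunks above all apply the same mechanical migration: the `desc:"..."` struct
tag moves into a standard Go doc comment above the field, bracketed annotations
such as [def: x] and [view: -] are dropped from the comment text, and the
functional `def`/`view`/`viewif` tags are retained. A minimal before/after
sketch (hypothetical Gain field, not from this diff):

	// v1 style: doc text in the desc tag, annotations duplicated in the comment
	// [def: 0.5] gain on the input signal
	Gain float32 `def:"0.5" desc:"gain on the input signal"`

	// v2 style: the doc comment alone carries the text; only functional tags remain
	// gain on the input signal
	Gain float32 `def:"0.5"`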
diff --git a/examples/bench/bench.go b/examples/bench/bench.go index 9d03236be..8a3a2a865 100644 --- a/examples/bench/bench.go +++ b/examples/bench/bench.go @@ -15,14 +15,14 @@ import ( "math/rand" "github.com/emer/axon/axon" - "github.com/emer/emergent/erand" - "github.com/emer/emergent/etime" - "github.com/emer/emergent/params" - "github.com/emer/emergent/patgen" - "github.com/emer/emergent/prjn" - "github.com/emer/emergent/timer" - "github.com/emer/etable/etable" - "github.com/emer/etable/etensor" + "github.com/emer/emergent/v2/erand" + "github.com/emer/emergent/v2/etime" + "github.com/emer/emergent/v2/params" + "github.com/emer/emergent/v2/patgen" + "github.com/emer/emergent/v2/prjn" + "github.com/emer/emergent/v2/timer" + "goki.dev/etable/v2/etable" + "goki.dev/etable/v2/etensor" ) // note: with 2 hidden layers, this simple test case converges to perfect performance: diff --git a/examples/bench/bench_test.go b/examples/bench/bench_test.go index 3a7421aeb..298d146a0 100644 --- a/examples/bench/bench_test.go +++ b/examples/bench/bench_test.go @@ -10,9 +10,9 @@ import ( "testing" "github.com/emer/axon/axon" - "github.com/emer/emergent/etime" - "github.com/emer/etable/etable" - "github.com/goki/gi/gi" + "github.com/emer/emergent/v2/etime" + "goki.dev/etable/v2/etable" + "goki.dev/gi/v2/gi" ) func init() { diff --git a/examples/bench_lvis/bench_lvis.go b/examples/bench_lvis/bench_lvis.go index a56694129..345f3c08c 100644 --- a/examples/bench_lvis/bench_lvis.go +++ b/examples/bench_lvis/bench_lvis.go @@ -14,14 +14,14 @@ import ( "math/rand" "github.com/emer/axon/axon" - "github.com/emer/emergent/erand" - "github.com/emer/emergent/etime" - "github.com/emer/emergent/params" - "github.com/emer/emergent/patgen" - "github.com/emer/emergent/prjn" - "github.com/emer/emergent/timer" - "github.com/emer/etable/etable" - "github.com/emer/etable/etensor" + "github.com/emer/emergent/v2/erand" + "github.com/emer/emergent/v2/etime" + "github.com/emer/emergent/v2/params" + "github.com/emer/emergent/v2/patgen" + "github.com/emer/emergent/v2/prjn" + "github.com/emer/emergent/v2/timer" + "goki.dev/etable/v2/etable" + "goki.dev/etable/v2/etensor" ) var ParamSets = params.Sets{ diff --git a/examples/bench_lvis/bench_lvis_test.go b/examples/bench_lvis/bench_lvis_test.go index e6daa6095..55b3c4830 100644 --- a/examples/bench_lvis/bench_lvis_test.go +++ b/examples/bench_lvis/bench_lvis_test.go @@ -10,9 +10,9 @@ import ( "testing" "github.com/emer/axon/axon" - "github.com/emer/etable/etable" - "github.com/emer/etable/etensor" "github.com/stretchr/testify/require" + "goki.dev/etable/v2/etable" + "goki.dev/etable/v2/etensor" ) var gpu = flag.Bool("gpu", false, "whether to run gpu or not") diff --git a/examples/bench_objrec/config.go b/examples/bench_objrec/config.go index 7274052da..004ba3b57 100644 --- a/examples/bench_objrec/config.go +++ b/examples/bench_objrec/config.go @@ -4,7 +4,7 @@ package main -import "github.com/emer/emergent/prjn" +import "github.com/emer/emergent/v2/prjn" // EnvConfig has config params for environment // note: only adding fields for key Env params that matter for both Network and Env @@ -12,37 +12,37 @@ import "github.com/emer/emergent/prjn" type EnvConfig struct { // env parameters -- can set any field/subfield on Env struct, using standard TOML formatting - Env map[string]any `desc:"env parameters -- can set any field/subfield on Env struct, using standard TOML formatting"` + Env map[string]any - // [def: 5] number of units per localist output unit - NOutPer int `def:"5" desc:"number of 
units per localist output unit"`
+	// number of units per localist output unit
+	NOutPer int `def:"5"`
 }
 
 // ParamConfig has config parameters related to sim params
 type ParamConfig struct {
 
 	// network parameters
-	Network map[string]any `desc:"network parameters"`
+	Network map[string]any
 
 	// Extra Param Sheet name(s) to use (space separated if multiple) -- must be valid name as listed in compiled-in params or loaded params
-	Sheet string `desc:"Extra Param Sheet name(s) to use (space separated if multiple) -- must be valid name as listed in compiled-in params or loaded params"`
+	Sheet string
 
 	// extra tag to add to file names and logs saved from this run
-	Tag string `desc:"extra tag to add to file names and logs saved from this run"`
+	Tag string
 
 	// user note -- describe the run params etc -- like a git commit message for the run
-	Note string `desc:"user note -- describe the run params etc -- like a git commit message for the run"`
+	Note string
 
 	// Name of the JSON file to input saved parameters from.
-	File string `nest:"+" desc:"Name of the JSON file to input saved parameters from."`
+	File string `nest:"+"`
 
 	// Save a snapshot of all current param and config settings in a directory named params_ (or _good if Good is true), then quit -- useful for comparing to later changes and seeing multiple views of current params
-	SaveAll bool `nest:"+" desc:"Save a snapshot of all current param and config settings in a directory named params_ (or _good if Good is true), then quit -- useful for comparing to later changes and seeing multiple views of current params"`
+	SaveAll bool `nest:"+"`
 
 	// for SaveAll, save to params_good for a known good params state. This can be done prior to making a new release after all tests are passing -- add results to git to provide a full diff record of all params over time.
-	Good bool `nest:"+" desc:"for SaveAll, save to params_good for a known good params state. This can be done prior to making a new release after all tests are passing -- add results to git to provide a full diff record of all params over time."`
+	Good bool `nest:"+"`
 
-	// [view: projection from V1 to V4 which is tiled 4x4 skip 2 with topo scale values]
+	// projection from V1 to V4 which is tiled 4x4 skip 2 with topo scale values
 	V1V4Prjn *prjn.PoolTile `nest:"+" view:"projection from V1 to V4 which is tiled 4x4 skip 2 with topo scale values"`
 }
@@ -61,82 +61,82 @@ func (cfg *ParamConfig) Defaults() {
 // RunConfig has config parameters related to running the sim
 type RunConfig struct {
 
-	// [def: true] use the GPU for computation -- generally faster even for small models if NData ~16
-	GPU bool `def:"true" desc:"use the GPU for computation -- generally faster even for small models if NData ~16"`
+	// use the GPU for computation -- generally faster even for small models if NData ~16
+	GPU bool `def:"true"`
 
-	// [def: 16] [min: 1] number of data-parallel items to process in parallel per trial -- works (and is significantly faster) for both CPU and GPU. Results in an effective mini-batch of learning.
-	NData int `def:"16" min:"1" desc:"number of data-parallel items to process in parallel per trial -- works (and is significantly faster) for both CPU and GPU. Results in an effective mini-batch of learning."`
+	// number of data-parallel items to process in parallel per trial -- works (and is significantly faster) for both CPU and GPU. Results in an effective mini-batch of learning. 
+ NData int `def:"16" min:"1"` - // [def: 0] number of parallel threads for CPU computation -- 0 = use default - NThreads int `def:"0" desc:"number of parallel threads for CPU computation -- 0 = use default"` + // number of parallel threads for CPU computation -- 0 = use default + NThreads int `def:"0"` - // [def: 0] starting run number -- determines the random seed -- runs counts from there -- can do all runs in parallel by launching separate jobs with each run, runs = 1 - Run int `def:"0" desc:"starting run number -- determines the random seed -- runs counts from there -- can do all runs in parallel by launching separate jobs with each run, runs = 1"` + // starting run number -- determines the random seed -- runs counts from there -- can do all runs in parallel by launching separate jobs with each run, runs = 1 + Run int `def:"0"` - // [def: 1] [min: 1] total number of runs to do when running Train - NRuns int `def:"1" min:"1" desc:"total number of runs to do when running Train"` + // total number of runs to do when running Train + NRuns int `def:"1" min:"1"` - // [def: 200] total number of epochs per run - NEpochs int `def:"200" desc:"total number of epochs per run"` + // total number of epochs per run + NEpochs int `def:"200"` - // [def: 128] total number of trials per epoch. Should be an even multiple of NData. - NTrials int `def:"128" desc:"total number of trials per epoch. Should be an even multiple of NData."` + // total number of trials per epoch. Should be an even multiple of NData. + NTrials int `def:"128"` - // [def: 5] how frequently (in epochs) to compute PCA on hidden representations to measure variance? - PCAInterval int `def:"5" desc:"how frequently (in epochs) to compute PCA on hidden representations to measure variance?"` + // how frequently (in epochs) to compute PCA on hidden representations to measure variance? + PCAInterval int `def:"5"` - // [def: -1] how often to run through all the test patterns, in terms of training epochs -- can use 0 or -1 for no testing - TestInterval int `def:"-1" desc:"how often to run through all the test patterns, in terms of training epochs -- can use 0 or -1 for no testing"` + // how often to run through all the test patterns, in terms of training epochs -- can use 0 or -1 for no testing + TestInterval int `def:"-1"` } // LogConfig has config parameters related to logging data type LogConfig struct { // if true, save final weights after each run - SaveWts bool `desc:"if true, save final weights after each run"` + SaveWts bool - // [def: true] if true, save train epoch log to file, as .epc.tsv typically - Epoch bool `def:"true" nest:"+" desc:"if true, save train epoch log to file, as .epc.tsv typically"` + // if true, save train epoch log to file, as .epc.tsv typically + Epoch bool `def:"true" nest:"+"` - // [def: true] if true, save run log to file, as .run.tsv typically - Run bool `def:"true" nest:"+" desc:"if true, save run log to file, as .run.tsv typically"` + // if true, save run log to file, as .run.tsv typically + Run bool `def:"true" nest:"+"` - // [def: false] if true, save train trial log to file, as .trl.tsv typically. May be large. - Trial bool `def:"false" nest:"+" desc:"if true, save train trial log to file, as .trl.tsv typically. May be large."` + // if true, save train trial log to file, as .trl.tsv typically. May be large. + Trial bool `def:"false" nest:"+"` - // [def: false] if true, save testing epoch log to file, as .tst_epc.tsv typically. 
In general it is better to copy testing items over to the training epoch log and record there. - TestEpoch bool `def:"false" nest:"+" desc:"if true, save testing epoch log to file, as .tst_epc.tsv typically. In general it is better to copy testing items over to the training epoch log and record there."` + // if true, save testing epoch log to file, as .tst_epc.tsv typically. In general it is better to copy testing items over to the training epoch log and record there. + TestEpoch bool `def:"false" nest:"+"` - // [def: false] if true, save testing trial log to file, as .tst_trl.tsv typically. May be large. - TestTrial bool `def:"false" nest:"+" desc:"if true, save testing trial log to file, as .tst_trl.tsv typically. May be large."` + // if true, save testing trial log to file, as .tst_trl.tsv typically. May be large. + TestTrial bool `def:"false" nest:"+"` // if true, save network activation etc data from testing trials, for later viewing in netview - NetData bool `desc:"if true, save network activation etc data from testing trials, for later viewing in netview"` + NetData bool } // Config is a standard Sim config -- use as a starting point. type Config struct { // specify include files here, and after configuration, it contains list of include files added - Includes []string `desc:"specify include files here, and after configuration, it contains list of include files added"` + Includes []string - // [def: true] open the GUI -- does not automatically run -- if false, then runs automatically and quits - GUI bool `def:"true" desc:"open the GUI -- does not automatically run -- if false, then runs automatically and quits"` + // open the GUI -- does not automatically run -- if false, then runs automatically and quits + GUI bool `def:"true"` // log debugging information - Debug bool `desc:"log debugging information"` + Debug bool - // [view: add-fields] environment configuration options - Env EnvConfig `view:"add-fields" desc:"environment configuration options"` + // environment configuration options + Env EnvConfig `view:"add-fields"` - // [view: add-fields] parameter related configuration options - Params ParamConfig `view:"add-fields" desc:"parameter related configuration options"` + // parameter related configuration options + Params ParamConfig `view:"add-fields"` - // [view: add-fields] sim running related configuration options - Run RunConfig `view:"add-fields" desc:"sim running related configuration options"` + // sim running related configuration options + Run RunConfig `view:"add-fields"` - // [view: add-fields] data logging related configuration options - Log LogConfig `view:"add-fields" desc:"data logging related configuration options"` + // data logging related configuration options + Log LogConfig `view:"add-fields"` } func (cfg *Config) IncludesPtr() *[]string { return &cfg.Includes } diff --git a/examples/bench_objrec/led_env.go b/examples/bench_objrec/led_env.go index f36d67d48..dc360ce2a 100644 --- a/examples/bench_objrec/led_env.go +++ b/examples/bench_objrec/led_env.go @@ -8,10 +8,10 @@ import ( "fmt" "math/rand" - "github.com/emer/emergent/env" - "github.com/emer/etable/etensor" + "github.com/emer/emergent/v2/env" "github.com/emer/vision/vfilter" "github.com/emer/vision/vxform" + "goki.dev/etable/v2/etensor" ) // LEDEnv generates images of old-school "LED" style "letters" composed of a set of horizontal @@ -20,52 +20,52 @@ import ( type LEDEnv struct { // name of this environment - Nm string `desc:"name of this environment"` + Nm string // description of this environment - 
Dsc string `desc:"description of this environment"` + Dsc string // draws LEDs onto image - Draw LEDraw `desc:"draws LEDs onto image"` + Draw LEDraw // visual processing params - Vis Vis `desc:"visual processing params"` + Vis Vis // number of output units per LED item -- spiking benefits from replication - NOutPer int `desc:"number of output units per LED item -- spiking benefits from replication"` + NOutPer int - // [min: 0] [max: 19] minimum LED number to draw (0-19) - MinLED int `min:"0" max:"19" desc:"minimum LED number to draw (0-19)"` + // minimum LED number to draw (0-19) + MinLED int `min:"0" max:"19"` - // [min: 0] [max: 19] maximum LED number to draw (0-19) - MaxLED int `min:"0" max:"19" desc:"maximum LED number to draw (0-19)"` + // maximum LED number to draw (0-19) + MaxLED int `min:"0" max:"19"` // current LED number that was drawn - CurLED int `inactive:"+" desc:"current LED number that was drawn"` + CurLED int `inactive:"+"` // previous LED number that was drawn - PrvLED int `inactive:"+" desc:"previous LED number that was drawn"` + PrvLED int `inactive:"+"` // random transform parameters - XFormRand vxform.Rand `desc:"random transform parameters"` + XFormRand vxform.Rand // current -- prev transforms - XForm vxform.XForm `desc:"current -- prev transforms"` + XForm vxform.XForm - // [view: inline] current run of model as provided during Init - Run env.Ctr `view:"inline" desc:"current run of model as provided during Init"` + // current run of model as provided during Init + Run env.Ctr `view:"inline"` - // [view: inline] number of times through Seq.Max number of sequences - Epoch env.Ctr `view:"inline" desc:"number of times through Seq.Max number of sequences"` + // number of times through Seq.Max number of sequences + Epoch env.Ctr `view:"inline"` - // [view: inline] trial is the step counter within epoch - Trial env.Ctr `view:"inline" desc:"trial is the step counter within epoch"` + // trial is the step counter within epoch + Trial env.Ctr `view:"inline"` // original image prior to random transforms - OrigImg etensor.Float32 `desc:"original image prior to random transforms"` + OrigImg etensor.Float32 // CurLED one-hot output tensor - Output etensor.Float32 `desc:"CurLED one-hot output tensor"` + Output etensor.Float32 } func (ev *LEDEnv) Name() string { return ev.Nm } diff --git a/examples/bench_objrec/leds.go b/examples/bench_objrec/leds.go index 092e4fd17..9032cedd2 100644 --- a/examples/bench_objrec/leds.go +++ b/examples/bench_objrec/leds.go @@ -7,8 +7,8 @@ package main import ( "image" - "github.com/goki/gi/gi" - "github.com/goki/gi/girl" + "goki.dev/gi/v2/gi" + "goki.dev/gi/v2/girl" ) // LEDraw renders old-school "LED" style "letters" composed of a set of horizontal @@ -16,29 +16,29 @@ import ( // Renders using SVG. 
type LEDraw struct { - // [def: 4] line width of LEDraw as percent of display size - Width float32 `def:"4" desc:"line width of LEDraw as percent of display size"` + // line width of LEDraw as percent of display size + Width float32 `def:"4"` - // [def: 0.6] size of overall LED as proportion of overall image size - Size float32 `def:"0.6" desc:"size of overall LED as proportion of overall image size"` + // size of overall LED as proportion of overall image size + Size float32 `def:"0.6"` // color name for drawing lines - LineColor gi.ColorName `desc:"color name for drawing lines"` + LineColor gi.ColorName // color name for background - BgColor gi.ColorName `desc:"color name for background"` + BgColor gi.ColorName // size of image to render - ImgSize image.Point `desc:"size of image to render"` + ImgSize image.Point - // [view: -] rendered image - Image *image.RGBA `view:"-" desc:"rendered image"` + // rendered image + Image *image.RGBA `view:"-"` - // [view: +] painter object - Paint girl.Paint `view:"+" desc:"painter object"` + // painter object + Paint girl.Paint `view:"+"` - // [view: -] rendering state - Render girl.State `view:"-" desc:"rendering state"` + // rendering state + Render girl.State `view:"-"` } func (ld *LEDraw) Defaults() { diff --git a/examples/bench_objrec/objrec.go b/examples/bench_objrec/objrec.go index 98f1875b1..d9ace9b13 100644 --- a/examples/bench_objrec/objrec.go +++ b/examples/bench_objrec/objrec.go @@ -15,31 +15,31 @@ import ( "os" "github.com/emer/axon/axon" - "github.com/emer/emergent/econfig" - "github.com/emer/emergent/egui" - "github.com/emer/emergent/elog" - "github.com/emer/emergent/emer" - "github.com/emer/emergent/env" - "github.com/emer/emergent/erand" - "github.com/emer/emergent/estats" - "github.com/emer/emergent/etime" - "github.com/emer/emergent/looper" - "github.com/emer/emergent/netview" - "github.com/emer/emergent/params" - "github.com/emer/emergent/prjn" - "github.com/emer/emergent/relpos" - "github.com/emer/empi/mpi" - "github.com/emer/etable/agg" - "github.com/emer/etable/etable" - "github.com/emer/etable/etensor" - "github.com/emer/etable/etview" - "github.com/emer/etable/minmax" - "github.com/emer/etable/split" - "github.com/emer/etable/tsragg" - "github.com/goki/gi/gi" - "github.com/goki/gi/gimain" - "github.com/goki/mat32" + "github.com/emer/emergent/v2/econfig" + "github.com/emer/emergent/v2/egui" + "github.com/emer/emergent/v2/elog" + "github.com/emer/emergent/v2/emer" + "github.com/emer/emergent/v2/env" + "github.com/emer/emergent/v2/erand" + "github.com/emer/emergent/v2/estats" + "github.com/emer/emergent/v2/etime" + "github.com/emer/emergent/v2/looper" + "github.com/emer/emergent/v2/netview" + "github.com/emer/emergent/v2/params" + "github.com/emer/emergent/v2/prjn" + "github.com/emer/emergent/v2/relpos" + "github.com/emer/empi/v2/mpi" "github.com/goki/vgpu/vgpu" + "goki.dev/etable/v2/agg" + "goki.dev/etable/v2/etable" + "goki.dev/etable/v2/etensor" + "goki.dev/etable/v2/etview" + "goki.dev/etable/v2/minmax" + "goki.dev/etable/v2/split" + "goki.dev/etable/v2/tsragg" + "goki.dev/gi/v2/gi" + "goki.dev/gi/v2/gimain" + "goki.dev/mat32/v2" ) func main() { @@ -63,37 +63,37 @@ func main() { type Sim struct { // simulation configuration parameters -- set by .toml config file and / or args - Config Config `desc:"simulation configuration parameters -- set by .toml config file and / or args"` + Config Config - // [view: no-inline] the network -- click to view / edit parameters for layers, prjns, etc - Net *axon.Network `view:"no-inline" 
desc:"the network -- click to view / edit parameters for layers, prjns, etc"` + // the network -- click to view / edit parameters for layers, prjns, etc + Net *axon.Network `view:"no-inline"` - // [view: inline] all parameter management - Params emer.NetParams `view:"inline" desc:"all parameter management"` + // all parameter management + Params emer.NetParams `view:"inline"` - // [view: no-inline] contains looper control loops for running sim - Loops *looper.Manager `view:"no-inline" desc:"contains looper control loops for running sim"` + // contains looper control loops for running sim + Loops *looper.Manager `view:"no-inline"` // contains computed statistic values - Stats estats.Stats `desc:"contains computed statistic values"` + Stats estats.Stats // Contains all the logs and information about the logs.' - Logs elog.Logs `desc:"Contains all the logs and information about the logs.'"` + Logs elog.Logs - // [view: no-inline] Environments - Envs env.Envs `view:"no-inline" desc:"Environments"` + // Environments + Envs env.Envs `view:"no-inline"` // axon timing parameters and state - Context axon.Context `desc:"axon timing parameters and state"` + Context axon.Context - // [view: inline] netview update parameters - ViewUpdt netview.ViewUpdt `view:"inline" desc:"netview update parameters"` + // netview update parameters + ViewUpdt netview.ViewUpdt `view:"inline"` - // [view: -] manages all the gui elements - GUI egui.GUI `view:"-" desc:"manages all the gui elements"` + // manages all the gui elements + GUI egui.GUI `view:"-"` - // [view: -] a list of random seeds to use for each run - RndSeeds erand.Seeds `view:"-" desc:"a list of random seeds to use for each run"` + // a list of random seeds to use for each run + RndSeeds erand.Seeds `view:"-"` } // New creates new blank elements and initializes defaults diff --git a/examples/bench_objrec/params.go b/examples/bench_objrec/params.go index 0288f6060..abe58e51d 100644 --- a/examples/bench_objrec/params.go +++ b/examples/bench_objrec/params.go @@ -1,8 +1,8 @@ package main import ( - "github.com/emer/emergent/netparams" - "github.com/emer/emergent/params" + "github.com/emer/emergent/v2/netparams" + "github.com/emer/emergent/v2/params" ) // ParamSets is the default set of parameters -- Base is always applied, and others can be optionally diff --git a/examples/bench_objrec/v1filter.go b/examples/bench_objrec/v1filter.go index c64561066..13cf0e78b 100644 --- a/examples/bench_objrec/v1filter.go +++ b/examples/bench_objrec/v1filter.go @@ -8,74 +8,74 @@ import ( "image" "github.com/anthonynsimon/bild/transform" - "github.com/emer/etable/etensor" "github.com/emer/leabra/fffb" "github.com/emer/vision/gabor" "github.com/emer/vision/kwta" "github.com/emer/vision/v1complex" "github.com/emer/vision/vfilter" "github.com/goki/ki/kit" + "goki.dev/etable/v2/etensor" ) // Vis encapsulates specific visual processing pipeline for V1 filtering type Vis struct { // V1 simple gabor filter parameters - V1sGabor gabor.Filter `desc:"V1 simple gabor filter parameters"` + V1sGabor gabor.Filter - // [view: inline] geometry of input, output for V1 simple-cell processing - V1sGeom vfilter.Geom `inactive:"+" view:"inline" desc:"geometry of input, output for V1 simple-cell processing"` + // geometry of input, output for V1 simple-cell processing + V1sGeom vfilter.Geom `inactive:"+" view:"inline"` // neighborhood inhibition for V1s -- each unit gets inhibition from same feature in nearest orthogonal neighbors -- reduces redundancy of feature code - V1sNeighInhib kwta.NeighInhib 
`desc:"neighborhood inhibition for V1s -- each unit gets inhibition from same feature in nearest orthogonal neighbors -- reduces redundancy of feature code"` + V1sNeighInhib kwta.NeighInhib // kwta parameters for V1s - V1sKWTA kwta.KWTA `desc:"kwta parameters for V1s"` + V1sKWTA kwta.KWTA // target image size to use -- images will be rescaled to this size - ImgSize image.Point `desc:"target image size to use -- images will be rescaled to this size"` + ImgSize image.Point - // [view: no-inline] V1 simple gabor filter tensor - V1sGaborTsr etensor.Float32 `view:"no-inline" desc:"V1 simple gabor filter tensor"` + // V1 simple gabor filter tensor + V1sGaborTsr etensor.Float32 `view:"no-inline"` - // [view: no-inline] input image as tensor - ImgTsr etensor.Float32 `view:"no-inline" desc:"input image as tensor"` + // input image as tensor + ImgTsr etensor.Float32 `view:"no-inline"` - // [view: -] current input image - Img image.Image `view:"-" desc:"current input image"` + // current input image + Img image.Image `view:"-"` - // [view: no-inline] V1 simple gabor filter output tensor - V1sTsr etensor.Float32 `view:"no-inline" desc:"V1 simple gabor filter output tensor"` + // V1 simple gabor filter output tensor + V1sTsr etensor.Float32 `view:"no-inline"` - // [view: no-inline] V1 simple extra Gi from neighbor inhibition tensor - V1sExtGiTsr etensor.Float32 `view:"no-inline" desc:"V1 simple extra Gi from neighbor inhibition tensor"` + // V1 simple extra Gi from neighbor inhibition tensor + V1sExtGiTsr etensor.Float32 `view:"no-inline"` - // [view: no-inline] V1 simple gabor filter output, kwta output tensor - V1sKwtaTsr etensor.Float32 `view:"no-inline" desc:"V1 simple gabor filter output, kwta output tensor"` + // V1 simple gabor filter output, kwta output tensor + V1sKwtaTsr etensor.Float32 `view:"no-inline"` - // [view: no-inline] V1 simple gabor filter output, max-pooled 2x2 of V1sKwta tensor - V1sPoolTsr etensor.Float32 `view:"no-inline" desc:"V1 simple gabor filter output, max-pooled 2x2 of V1sKwta tensor"` + // V1 simple gabor filter output, max-pooled 2x2 of V1sKwta tensor + V1sPoolTsr etensor.Float32 `view:"no-inline"` - // [view: no-inline] V1 simple gabor filter output, un-max-pooled 2x2 of V1sPool tensor - V1sUnPoolTsr etensor.Float32 `view:"no-inline" desc:"V1 simple gabor filter output, un-max-pooled 2x2 of V1sPool tensor"` + // V1 simple gabor filter output, un-max-pooled 2x2 of V1sPool tensor + V1sUnPoolTsr etensor.Float32 `view:"no-inline"` - // [view: no-inline] V1 simple gabor filter output, angle-only features tensor - V1sAngOnlyTsr etensor.Float32 `view:"no-inline" desc:"V1 simple gabor filter output, angle-only features tensor"` + // V1 simple gabor filter output, angle-only features tensor + V1sAngOnlyTsr etensor.Float32 `view:"no-inline"` - // [view: no-inline] V1 simple gabor filter output, max-pooled 2x2 of AngOnly tensor - V1sAngPoolTsr etensor.Float32 `view:"no-inline" desc:"V1 simple gabor filter output, max-pooled 2x2 of AngOnly tensor"` + // V1 simple gabor filter output, max-pooled 2x2 of AngOnly tensor + V1sAngPoolTsr etensor.Float32 `view:"no-inline"` - // [view: no-inline] V1 complex length sum filter output tensor - V1cLenSumTsr etensor.Float32 `view:"no-inline" desc:"V1 complex length sum filter output tensor"` + // V1 complex length sum filter output tensor + V1cLenSumTsr etensor.Float32 `view:"no-inline"` - // [view: no-inline] V1 complex end stop filter output tensor - V1cEndStopTsr etensor.Float32 `view:"no-inline" desc:"V1 complex end stop filter output 
tensor"` + // V1 complex end stop filter output tensor + V1cEndStopTsr etensor.Float32 `view:"no-inline"` - // [view: no-inline] Combined V1 output tensor with V1s simple as first two rows, then length sum, then end stops = 5 rows total - V1AllTsr etensor.Float32 `view:"no-inline" desc:"Combined V1 output tensor with V1s simple as first two rows, then length sum, then end stops = 5 rows total"` + // Combined V1 output tensor with V1s simple as first two rows, then length sum, then end stops = 5 rows total + V1AllTsr etensor.Float32 `view:"no-inline"` - // [view: no-inline] inhibition values for V1s KWTA - V1sInhibs fffb.Inhibs `view:"no-inline" desc:"inhibition values for V1s KWTA"` + // inhibition values for V1s KWTA + V1sInhibs fffb.Inhibs `view:"no-inline"` } var KiT_Vis = kit.Types.AddType(&Vis{}, nil) diff --git a/examples/boa/boa.go b/examples/boa/boa.go index 4dcdf6958..a27cc2af4 100644 --- a/examples/boa/boa.go +++ b/examples/boa/boa.go @@ -15,29 +15,29 @@ import ( "github.com/emer/axon/axon" "github.com/emer/axon/examples/boa/armaze" - "github.com/emer/emergent/econfig" - "github.com/emer/emergent/egui" - "github.com/emer/emergent/elog" - "github.com/emer/emergent/emer" - "github.com/emer/emergent/env" - "github.com/emer/emergent/erand" - "github.com/emer/emergent/estats" - "github.com/emer/emergent/etime" - "github.com/emer/emergent/looper" - "github.com/emer/emergent/netview" - "github.com/emer/emergent/params" - "github.com/emer/emergent/prjn" - "github.com/emer/emergent/timer" - "github.com/emer/empi/mpi" - "github.com/emer/etable/agg" - "github.com/emer/etable/etable" - "github.com/emer/etable/etensor" - "github.com/emer/etable/minmax" - "github.com/emer/etable/split" - "github.com/goki/gi/gi" - "github.com/goki/gi/gimain" - "github.com/goki/ki/bools" - "github.com/goki/mat32" + "github.com/emer/emergent/v2/econfig" + "github.com/emer/emergent/v2/egui" + "github.com/emer/emergent/v2/elog" + "github.com/emer/emergent/v2/emer" + "github.com/emer/emergent/v2/env" + "github.com/emer/emergent/v2/erand" + "github.com/emer/emergent/v2/estats" + "github.com/emer/emergent/v2/etime" + "github.com/emer/emergent/v2/looper" + "github.com/emer/emergent/v2/netview" + "github.com/emer/emergent/v2/params" + "github.com/emer/emergent/v2/prjn" + "github.com/emer/emergent/v2/timer" + "github.com/emer/empi/v2/mpi" + "goki.dev/etable/v2/agg" + "goki.dev/etable/v2/etable" + "goki.dev/etable/v2/etensor" + "goki.dev/etable/v2/minmax" + "goki.dev/etable/v2/split" + "goki.dev/gi/v2/gi" + "goki.dev/gi/v2/gimain" + "goki.dev/glop/bools" + "goki.dev/mat32/v2" ) func main() { @@ -61,49 +61,49 @@ func main() { type Sim struct { // simulation configuration parameters -- set by .toml config file and / or args - Config Config `desc:"simulation configuration parameters -- set by .toml config file and / or args"` + Config Config - // [view: no-inline] the network -- click to view / edit parameters for layers, prjns, etc - Net *axon.Network `view:"no-inline" desc:"the network -- click to view / edit parameters for layers, prjns, etc"` + // the network -- click to view / edit parameters for layers, prjns, etc + Net *axon.Network `view:"no-inline"` // if true, stop running at end of a sequence (for NetView Di data parallel index) - StopOnSeq bool `desc:"if true, stop running at end of a sequence (for NetView Di data parallel index)"` + StopOnSeq bool // if true, stop running when an error programmed into the code occurs - StopOnErr bool `desc:"if true, stop running when an error programmed into the code occurs"` + 
StopOnErr bool - // [view: inline] network parameter management - Params emer.NetParams `view:"inline" desc:"network parameter management"` + // network parameter management + Params emer.NetParams `view:"inline"` - // [view: no-inline] contains looper control loops for running sim - Loops *looper.Manager `view:"no-inline" desc:"contains looper control loops for running sim"` + // contains looper control loops for running sim + Loops *looper.Manager `view:"no-inline"` // contains computed statistic values - Stats estats.Stats `desc:"contains computed statistic values"` + Stats estats.Stats // Contains all the logs and information about the logs.' - Logs elog.Logs `desc:"Contains all the logs and information about the logs.'"` + Logs elog.Logs - // [view: no-inline] Environments - Envs env.Envs `view:"no-inline" desc:"Environments"` + // Environments + Envs env.Envs `view:"no-inline"` // axon timing parameters and state - Context axon.Context `desc:"axon timing parameters and state"` + Context axon.Context - // [view: inline] netview update parameters - ViewUpdt netview.ViewUpdt `view:"inline" desc:"netview update parameters"` + // netview update parameters + ViewUpdt netview.ViewUpdt `view:"inline"` - // [view: -] manages all the gui elements - GUI egui.GUI `view:"-" desc:"manages all the gui elements"` + // manages all the gui elements + GUI egui.GUI `view:"-"` - // [view: -] gui for viewing env - EnvGUI *armaze.GUI `view:"-" desc:"gui for viewing env"` + // gui for viewing env + EnvGUI *armaze.GUI `view:"-"` - // [view: -] a list of random seeds to use for each run - RndSeeds erand.Seeds `view:"-" desc:"a list of random seeds to use for each run"` + // a list of random seeds to use for each run + RndSeeds erand.Seeds `view:"-"` - // [view: -] testing data, from -test arg - TestData map[string]float32 `view:"-" desc:"testing data, from -test arg"` + // testing data, from -test arg + TestData map[string]float32 `view:"-"` } // New creates new blank elements and initializes defaults diff --git a/examples/boa/boa_test.go b/examples/boa/boa_test.go index 3ba4d7691..26b6ac80f 100644 --- a/examples/boa/boa_test.go +++ b/examples/boa/boa_test.go @@ -13,8 +13,8 @@ import ( "testing" "github.com/alecthomas/assert/v2" - "github.com/emer/emergent/etime" - "github.com/goki/mat32" + "github.com/emer/emergent/v2/etime" + "goki.dev/mat32/v2" "golang.org/x/exp/maps" ) diff --git a/examples/boa/config.go b/examples/boa/config.go index 5424f346f..5f9351eac 100644 --- a/examples/boa/config.go +++ b/examples/boa/config.go @@ -4,30 +4,30 @@ package main -import "github.com/emer/empi/mpi" +import "github.com/emer/empi/v2/mpi" // EnvConfig has config params for environment // note: only adding fields for key Env params that matter for both Network and Env // other params are set via the Env map data mechanism. 
type EnvConfig struct { - // name of config file that loads into Env.Config for setting environment parameters directly - Config string `desc:"name of config file that loads into Env.Config for setting environment parameters directly"` + // env parameters -- can set any field/subfield on Env struct, using standard TOML formatting + Env map[string]any - // [def: 4] number of different drive-like body states (hunger, thirst, etc), that are satisfied by a corresponding US outcome - NDrives int `def:"4" desc:"number of different drive-like body states (hunger, thirst, etc), that are satisfied by a corresponding US outcome"` + // number of different drive-like body states (hunger, thirst, etc), that are satisfied by a corresponding US outcome + NDrives int `def:"4"` - // [def: 10] epoch when PctCortex starts increasing - PctCortexStEpc int `def:"10" desc:"epoch when PctCortex starts increasing"` + // epoch when PctCortex starts increasing + PctCortexStEpc int `def:"10"` - // [def: 1] number of epochs over which PctCortexMax is reached - PctCortexNEpc int `def:"1" desc:"number of epochs over which PctCortexMax is reached"` + // number of epochs over which PctCortexMax is reached + PctCortexNEpc int `def:"1"` // proportion of behavioral approach sequences driven by the cortex vs. hard-coded reflexive subcortical - PctCortex float32 `inactive:"+" desc:"proportion of behavioral approach sequences driven by the cortex vs. hard-coded reflexive subcortical"` + PctCortex float32 `inactive:"+"` // for testing, force each env to use same seed - SameSeed bool `desc:"for testing, force each env to use same seed"` + SameSeed bool } // CurPctCortex returns current PctCortex and updates field, based on epoch counter @@ -47,106 +47,106 @@ func (cfg *EnvConfig) CurPctCortex(epc int) float32 { type ParamConfig struct { // PVLV parameters -- can set any field/subfield on Net.PVLV params, using standard TOML formatting - PVLV map[string]any `desc:"PVLV parameters -- can set any field/subfield on Net.PVLV params, using standard TOML formatting"` + PVLV map[string]any // network parameters - Network map[string]any `desc:"network parameters"` + Network map[string]any // Extra Param Sheet name(s) to use (space separated if multiple) -- must be valid name as listed in compiled-in params or loaded params - Sheet string `desc:"Extra Param Sheet name(s) to use (space separated if multiple) -- must be valid name as listed in compiled-in params or loaded params"` + Sheet string // extra tag to add to file names and logs saved from this run - Tag string `desc:"extra tag to add to file names and logs saved from this run"` + Tag string // user note -- describe the run params etc -- like a git commit message for the run - Note string `desc:"user note -- describe the run params etc -- like a git commit message for the run"` + Note string // Name of the JSON file to input saved parameters from. 
- File string `nest:"+" desc:"Name of the JSON file to input saved parameters from."` + File string `nest:"+"` // Save a snapshot of all current param and config settings in a directory named params_ (or _good if Good is true), then quit -- useful for comparing to later changes and seeing multiple views of current params - SaveAll bool `nest:"+" desc:"Save a snapshot of all current param and config settings in a directory named params_ (or _good if Good is true), then quit -- useful for comparing to later changes and seeing multiple views of current params"` + SaveAll bool `nest:"+"` // for SaveAll, save to params_good for a known good params state. This can be done prior to making a new release after all tests are passing -- add results to git to provide a full diff record of all params over time. - Good bool `nest:"+" desc:"for SaveAll, save to params_good for a known good params state. This can be done prior to making a new release after all tests are passing -- add results to git to provide a full diff record of all params over time."` + Good bool `nest:"+"` } // RunConfig has config parameters related to running the sim type RunConfig struct { - // [def: true] use the GPU for computation -- generally faster even for small models if NData ~16 - GPU bool `def:"true" desc:"use the GPU for computation -- generally faster even for small models if NData ~16"` + // use the GPU for computation -- generally faster even for small models if NData ~16 + GPU bool `def:"true"` - // [def: 16] [min: 1] number of data-parallel items to process in parallel per trial -- works (and is significantly faster) for both CPU and GPU. Results in an effective mini-batch of learning. - NData int `def:"16" min:"1" desc:"number of data-parallel items to process in parallel per trial -- works (and is significantly faster) for both CPU and GPU. Results in an effective mini-batch of learning."` + // number of data-parallel items to process in parallel per trial -- works (and is significantly faster) for both CPU and GPU. Results in an effective mini-batch of learning. + NData int `def:"16" min:"1"` - // [def: 0] number of parallel threads for CPU computation -- 0 = use default - NThreads int `def:"0" desc:"number of parallel threads for CPU computation -- 0 = use default"` + // number of parallel threads for CPU computation -- 0 = use default + NThreads int `def:"0"` - // [def: 0] starting run number -- determines the random seed -- runs counts from there -- can do all runs in parallel by launching separate jobs with each run, runs = 1 - Run int `def:"0" desc:"starting run number -- determines the random seed -- runs counts from there -- can do all runs in parallel by launching separate jobs with each run, runs = 1"` + // starting run number -- determines the random seed -- runs counts from there -- can do all runs in parallel by launching separate jobs with each run, runs = 1 + Run int `def:"0"` - // [def: 5] [min: 1] total number of runs to do when running Train - NRuns int `def:"5" min:"1" desc:"total number of runs to do when running Train"` + // total number of runs to do when running Train + NRuns int `def:"5" min:"1"` - // [def: 100] total number of epochs per run - NEpochs int `def:"100" desc:"total number of epochs per run"` + // total number of epochs per run + NEpochs int `def:"100"` - // [def: 128] total number of trials per epoch. Should be an even multiple of NData. - NTrials int `def:"128" desc:"total number of trials per epoch. 
Should be an even multiple of NData."` + // total number of trials per epoch. Should be an even multiple of NData. + NTrials int `def:"128"` - // [def: 10] how frequently (in epochs) to compute PCA on hidden representations to measure variance? - PCAInterval int `def:"10" desc:"how frequently (in epochs) to compute PCA on hidden representations to measure variance?"` + // how frequently (in epochs) to compute PCA on hidden representations to measure variance? + PCAInterval int `def:"10"` } // LogConfig has config parameters related to logging data type LogConfig struct { // if true, save final weights after each run - SaveWts bool `desc:"if true, save final weights after each run"` + SaveWts bool - // [def: true] if true, save train epoch log to file, as .epc.tsv typically - Epoch bool `def:"true" nest:"+" desc:"if true, save train epoch log to file, as .epc.tsv typically"` + // if true, save train epoch log to file, as .epc.tsv typically + Epoch bool `def:"true" nest:"+"` - // [def: true] if true, save run log to file, as .run.tsv typically - Run bool `def:"true" nest:"+" desc:"if true, save run log to file, as .run.tsv typically"` + // if true, save run log to file, as .run.tsv typically + Run bool `def:"true" nest:"+"` - // [def: false] if true, save train trial log to file, as .trl.tsv typically. May be large. - Trial bool `def:"false" nest:"+" desc:"if true, save train trial log to file, as .trl.tsv typically. May be large."` + // if true, save train trial log to file, as .trl.tsv typically. May be large. + Trial bool `def:"false" nest:"+"` // if true, save network activation etc data from testing trials, for later viewing in netview - NetData bool `desc:"if true, save network activation etc data from testing trials, for later viewing in netview"` + NetData bool // activates testing mode -- records detailed data for Go CI tests (not the same as running test mode on network, via Looper) - Testing bool `desc:"activates testing mode -- records detailed data for Go CI tests (not the same as running test mode on network, via Looper)"` + Testing bool } // Config is a standard Sim config -- use as a starting point. 
type Config struct { // specify include files here, and after configuration, it contains list of include files added - Includes []string `desc:"specify include files here, and after configuration, it contains list of include files added"` + Includes []string - // [def: true] open the GUI -- does not automatically run -- if false, then runs automatically and quits - GUI bool `def:"true" desc:"open the GUI -- does not automatically run -- if false, then runs automatically and quits"` + // open the GUI -- does not automatically run -- if false, then runs automatically and quits + GUI bool `def:"true"` // log debugging information - Debug bool `desc:"log debugging information"` + Debug bool // if set, open given weights file at start of training - OpenWts string `desc:"if set, open given weights file at start of training"` + OpenWts string - // [view: add-fields] environment configuration options - Env EnvConfig `view:"add-fields" desc:"environment configuration options"` + // environment configuration options + Env EnvConfig `view:"add-fields"` - // [view: add-fields] parameter related configuration options - Params ParamConfig `view:"add-fields" desc:"parameter related configuration options"` + // parameter related configuration options + Params ParamConfig `view:"add-fields"` - // [view: add-fields] sim running related configuration options - Run RunConfig `view:"add-fields" desc:"sim running related configuration options"` + // sim running related configuration options + Run RunConfig `view:"add-fields"` - // [view: add-fields] data logging related configuration options - Log LogConfig `view:"add-fields" desc:"data logging related configuration options"` + // data logging related configuration options + Log LogConfig `view:"add-fields"` } func (cfg *Config) IncludesPtr() *[]string { return &cfg.Includes } diff --git a/examples/boa/params.go b/examples/boa/params.go index acb226181..d8f772a5a 100644 --- a/examples/boa/params.go +++ b/examples/boa/params.go @@ -5,8 +5,8 @@ package main import ( - "github.com/emer/emergent/netparams" - "github.com/emer/emergent/params" + "github.com/emer/emergent/v2/netparams" + "github.com/emer/emergent/v2/params" ) // ParamSets is the active set of parameters -- Base is always applied, diff --git a/examples/deep_fsa/deep_fsa.go b/examples/deep_fsa/deep_fsa.go index 94f849d4b..e216698d1 100644 --- a/examples/deep_fsa/deep_fsa.go +++ b/examples/deep_fsa/deep_fsa.go @@ -11,27 +11,27 @@ import ( "os" "github.com/emer/axon/axon" - "github.com/emer/emergent/econfig" - "github.com/emer/emergent/egui" - "github.com/emer/emergent/elog" - "github.com/emer/emergent/emer" - "github.com/emer/emergent/env" - "github.com/emer/emergent/erand" - "github.com/emer/emergent/estats" - "github.com/emer/emergent/etime" - "github.com/emer/emergent/looper" - "github.com/emer/emergent/netview" - "github.com/emer/emergent/params" - "github.com/emer/emergent/prjn" - "github.com/emer/emergent/relpos" - "github.com/emer/empi/mpi" - "github.com/emer/etable/agg" - "github.com/emer/etable/etensor" - "github.com/emer/etable/minmax" - "github.com/emer/etable/tsragg" - "github.com/goki/gi/gi" - "github.com/goki/gi/gimain" - "github.com/goki/mat32" + "github.com/emer/emergent/v2/econfig" + "github.com/emer/emergent/v2/egui" + "github.com/emer/emergent/v2/elog" + "github.com/emer/emergent/v2/emer" + "github.com/emer/emergent/v2/env" + "github.com/emer/emergent/v2/erand" + "github.com/emer/emergent/v2/estats" + "github.com/emer/emergent/v2/etime" + "github.com/emer/emergent/v2/looper" + 
"github.com/emer/emergent/v2/netview" + "github.com/emer/emergent/v2/params" + "github.com/emer/emergent/v2/prjn" + "github.com/emer/emergent/v2/relpos" + "github.com/emer/empi/v2/mpi" + "goki.dev/etable/v2/agg" + "goki.dev/etable/v2/etensor" + "goki.dev/etable/v2/minmax" + "goki.dev/etable/v2/tsragg" + "goki.dev/gi/v2/gi" + "goki.dev/gi/v2/gimain" + "goki.dev/mat32/v2" ) func main() { @@ -53,37 +53,37 @@ func main() { type Sim struct { // simulation configuration parameters -- set by .toml config file and / or args - Config Config `desc:"simulation configuration parameters -- set by .toml config file and / or args"` + Config Config - // [view: no-inline] the network -- click to view / edit parameters for layers, prjns, etc - Net *axon.Network `view:"no-inline" desc:"the network -- click to view / edit parameters for layers, prjns, etc"` + // the network -- click to view / edit parameters for layers, prjns, etc + Net *axon.Network `view:"no-inline"` - // [view: inline] all parameter management - Params emer.NetParams `view:"inline" desc:"all parameter management"` + // all parameter management + Params emer.NetParams `view:"inline"` - // [view: no-inline] contains looper control loops for running sim - Loops *looper.Manager `view:"no-inline" desc:"contains looper control loops for running sim"` + // contains looper control loops for running sim + Loops *looper.Manager `view:"no-inline"` // contains computed statistic values - Stats estats.Stats `desc:"contains computed statistic values"` + Stats estats.Stats // Contains all the logs and information about the logs.' - Logs elog.Logs `desc:"Contains all the logs and information about the logs.'"` + Logs elog.Logs - // [view: no-inline] Environments - Envs env.Envs `view:"no-inline" desc:"Environments"` + // Environments + Envs env.Envs `view:"no-inline"` // axon timing parameters and state - Context axon.Context `desc:"axon timing parameters and state"` + Context axon.Context - // [view: inline] netview update parameters - ViewUpdt netview.ViewUpdt `view:"inline" desc:"netview update parameters"` + // netview update parameters + ViewUpdt netview.ViewUpdt `view:"inline"` - // [view: -] manages all the gui elements - GUI egui.GUI `view:"-" desc:"manages all the gui elements"` + // manages all the gui elements + GUI egui.GUI `view:"-"` - // [view: -] a list of random seeds to use for each run - RndSeeds erand.Seeds `view:"-" desc:"a list of random seeds to use for each run"` + // a list of random seeds to use for each run + RndSeeds erand.Seeds `view:"-"` } // New creates new blank elements and initializes defaults diff --git a/examples/deep_fsa/fsa_env.go b/examples/deep_fsa/fsa_env.go index 079385931..123f22eaf 100644 --- a/examples/deep_fsa/fsa_env.go +++ b/examples/deep_fsa/fsa_env.go @@ -7,9 +7,9 @@ package main import ( "fmt" - "github.com/emer/emergent/env" - "github.com/emer/emergent/erand" - "github.com/emer/etable/etensor" + "github.com/emer/emergent/v2/env" + "github.com/emer/emergent/v2/erand" + "goki.dev/etable/v2/etensor" ) // FSAEnv generates states in a finite state automaton (FSA) which is a @@ -18,49 +18,49 @@ import ( type FSAEnv struct { // name of this environment - Nm string `desc:"name of this environment"` + Nm string // description of this environment - Dsc string `desc:"description of this environment"` + Dsc string - // [view: no-inline] transition matrix, which is a square NxN tensor with outer dim being current state and inner dim having probability of transitioning to that state - TMat etensor.Float64 
`view:"no-inline" desc:"transition matrix, which is a square NxN tensor with outer dim being current state and inner dim having probability of transitioning to that state"` + // transition matrix, which is a square NxN tensor with outer dim being current state and inner dim having probability of transitioning to that state + TMat etensor.Float64 `view:"no-inline"` // transition labels, one for each transition cell in TMat matrix - Labels etensor.String `desc:"transition labels, one for each transition cell in TMat matrix"` + Labels etensor.String // automaton state within FSA that we're in - AState env.CurPrvInt `desc:"automaton state within FSA that we're in"` + AState env.CurPrvInt // number of next states in current state output (scalar) - NNext etensor.Int `desc:"number of next states in current state output (scalar)"` + NNext etensor.Int // next states that have non-zero probability, with actual randomly chosen next state at start - NextStates etensor.Int `desc:"next states that have non-zero probability, with actual randomly chosen next state at start"` + NextStates etensor.Int // transition labels for next states that have non-zero probability, with actual randomly chosen one for next state at start - NextLabels etensor.String `desc:"transition labels for next states that have non-zero probability, with actual randomly chosen one for next state at start"` + NextLabels etensor.String - // [view: inline] current run of model as provided during Init - Run env.Ctr `view:"inline" desc:"current run of model as provided during Init"` + // current run of model as provided during Init + Run env.Ctr `view:"inline"` - // [view: inline] number of times through Seq.Max number of sequences - Epoch env.Ctr `view:"inline" desc:"number of times through Seq.Max number of sequences"` + // number of times through Seq.Max number of sequences + Epoch env.Ctr `view:"inline"` - // [view: inline] sequence counter within epoch - Seq env.Ctr `view:"inline" desc:"sequence counter within epoch"` + // sequence counter within epoch + Seq env.Ctr `view:"inline"` - // [view: inline] tick counter within sequence - Tick env.Ctr `view:"inline" desc:"tick counter within sequence"` + // tick counter within sequence + Tick env.Ctr `view:"inline"` - // [view: inline] trial is the step counter within sequence - how many steps taken within current sequence -- it resets to 0 at start of each sequence - Trial env.Ctr `view:"inline" desc:"trial is the step counter within sequence - how many steps taken within current sequence -- it resets to 0 at start of each sequence"` + // trial is the step counter within sequence - how many steps taken within current sequence -- it resets to 0 at start of each sequence + Trial env.Ctr `view:"inline"` - // [view: -] random number generator for the env -- all random calls must use this -- set seed here for weight initialization values - Rand erand.SysRand `view:"-" desc:"random number generator for the env -- all random calls must use this -- set seed here for weight initialization values"` + // random number generator for the env -- all random calls must use this -- set seed here for weight initialization values + Rand erand.SysRand `view:"-"` // random seed - RndSeed int64 `inactive:"+" desc:"random seed"` + RndSeed int64 `inactive:"+"` } func (ev *FSAEnv) Name() string { return ev.Nm } diff --git a/examples/deep_fsa/params.go b/examples/deep_fsa/params.go index 690c2846b..5f67b61d5 100644 --- a/examples/deep_fsa/params.go +++ b/examples/deep_fsa/params.go @@ -5,8 +5,8 @@ package main 
import ( - "github.com/emer/emergent/netparams" - "github.com/emer/emergent/params" + "github.com/emer/emergent/v2/netparams" + "github.com/emer/emergent/v2/params" ) // ParamSets is the default set of parameters -- Base is always applied, and others can be optionally diff --git a/examples/deep_move/deep_move.go b/examples/deep_move/deep_move.go index 780537b1e..0451655a1 100644 --- a/examples/deep_move/deep_move.go +++ b/examples/deep_move/deep_move.go @@ -10,27 +10,27 @@ import ( "os" "github.com/emer/axon/axon" - "github.com/emer/emergent/econfig" - "github.com/emer/emergent/egui" - "github.com/emer/emergent/elog" - "github.com/emer/emergent/emer" - "github.com/emer/emergent/env" - "github.com/emer/emergent/erand" - "github.com/emer/emergent/estats" - "github.com/emer/emergent/etime" - "github.com/emer/emergent/evec" - "github.com/emer/emergent/looper" - "github.com/emer/emergent/netview" - "github.com/emer/emergent/params" - "github.com/emer/emergent/prjn" - "github.com/emer/emergent/relpos" - "github.com/emer/empi/mpi" - "github.com/emer/etable/etable" - _ "github.com/emer/etable/etview" // _ = include to get gui views - "github.com/emer/etable/metric" - "github.com/goki/gi/gi" - "github.com/goki/gi/gimain" - "github.com/goki/mat32" + "github.com/emer/emergent/v2/econfig" + "github.com/emer/emergent/v2/egui" + "github.com/emer/emergent/v2/elog" + "github.com/emer/emergent/v2/emer" + "github.com/emer/emergent/v2/env" + "github.com/emer/emergent/v2/erand" + "github.com/emer/emergent/v2/estats" + "github.com/emer/emergent/v2/etime" + "github.com/emer/emergent/v2/evec" + "github.com/emer/emergent/v2/looper" + "github.com/emer/emergent/v2/netview" + "github.com/emer/emergent/v2/params" + "github.com/emer/emergent/v2/prjn" + "github.com/emer/emergent/v2/relpos" + "github.com/emer/empi/v2/mpi" + "goki.dev/etable/v2/etable" + _ "goki.dev/etable/v2/etview" // _ = include to get gui views + "goki.dev/etable/v2/metric" + "goki.dev/gi/v2/gi" + "goki.dev/gi/v2/gimain" + "goki.dev/mat32/v2" ) func main() { @@ -52,37 +52,37 @@ func main() { type Sim struct { // simulation configuration parameters -- set by .toml config file and / or args - Config Config `desc:"simulation configuration parameters -- set by .toml config file and / or args"` + Config Config - // [view: no-inline] the network -- click to view / edit parameters for layers, prjns, etc - Net *axon.Network `view:"no-inline" desc:"the network -- click to view / edit parameters for layers, prjns, etc"` + // the network -- click to view / edit parameters for layers, prjns, etc + Net *axon.Network `view:"no-inline"` - // [view: inline] all parameter management - Params emer.NetParams `view:"inline" desc:"all parameter management"` + // all parameter management + Params emer.NetParams `view:"inline"` - // [view: no-inline] contains looper control loops for running sim - Loops *looper.Manager `view:"no-inline" desc:"contains looper control loops for running sim"` + // contains looper control loops for running sim + Loops *looper.Manager `view:"no-inline"` // contains computed statistic values - Stats estats.Stats `desc:"contains computed statistic values"` + Stats estats.Stats // Contains all the logs and information about the logs.' 
- Logs elog.Logs `desc:"Contains all the logs and information about the logs.'"` + Logs elog.Logs - // [view: no-inline] Environments - Envs env.Envs `view:"no-inline" desc:"Environments"` + // Environments + Envs env.Envs `view:"no-inline"` // axon timing parameters and state - Context axon.Context `desc:"axon timing parameters and state"` + Context axon.Context - // [view: inline] netview update parameters - ViewUpdt netview.ViewUpdt `view:"inline" desc:"netview update parameters"` + // netview update parameters + ViewUpdt netview.ViewUpdt `view:"inline"` - // [view: -] manages all the gui elements - GUI egui.GUI `view:"-" desc:"manages all the gui elements"` + // manages all the gui elements + GUI egui.GUI `view:"-"` - // [view: -] a list of random seeds to use for each run - RndSeeds erand.Seeds `view:"-" desc:"a list of random seeds to use for each run"` + // a list of random seeds to use for each run + RndSeeds erand.Seeds `view:"-"` } // New creates new blank elements and initializes defaults diff --git a/examples/deep_move/move_env.go b/examples/deep_move/move_env.go index 1e3162a65..48fad53cc 100644 --- a/examples/deep_move/move_env.go +++ b/examples/deep_move/move_env.go @@ -8,99 +8,99 @@ import ( "fmt" "math/rand" - "github.com/emer/emergent/env" - "github.com/emer/emergent/erand" - "github.com/emer/emergent/evec" - "github.com/emer/emergent/popcode" - "github.com/emer/etable/etensor" + "github.com/emer/emergent/v2/env" + "github.com/emer/emergent/v2/erand" + "github.com/emer/emergent/v2/evec" + "github.com/emer/emergent/v2/popcode" "github.com/goki/ki/ints" "github.com/goki/ki/kit" - "github.com/goki/mat32" + "goki.dev/etable/v2/etensor" + "goki.dev/mat32/v2" ) // MoveEnv is a flat-world grid-based environment type MoveEnv struct { // name of this environment - Nm string `desc:"name of this environment"` + Nm string // update display -- turn off to make it faster - Disp bool `desc:"update display -- turn off to make it faster"` + Disp bool // size of 2D world - Size evec.Vec2i `desc:"size of 2D world"` + Size evec.Vec2i - // [view: no-inline] 2D grid world, each cell is a material (mat) - World *etensor.Int `view:"no-inline" desc:"2D grid world, each cell is a material (mat)"` + // 2D grid world, each cell is a material (mat) + World *etensor.Int `view:"no-inline"` // list of actions: starts with: Stay, Left, Right, Forward, Back, then extensible - Acts []string `desc:"list of actions: starts with: Stay, Left, Right, Forward, Back, then extensible"` + Acts []string // action map of action names to indexes - ActMap map[string]int `desc:"action map of action names to indexes"` + ActMap map[string]int // field of view in degrees, e.g., 180, must be even multiple of AngInc - FOV int `desc:"field of view in degrees, e.g., 180, must be even multiple of AngInc"` + FOV int // angle increment for rotation, in degrees -- defaults to 15 - AngInc int `desc:"angle increment for rotation, in degrees -- defaults to 15"` + AngInc int // total number of rotation angles in a circle - NRotAngles int `inactive:"+" desc:"total number of rotation angles in a circle"` + NRotAngles int `inactive:"+"` // total number of FOV rays that are traced - NFOVRays int `inactive:"+" desc:"total number of FOV rays that are traced"` + NFOVRays int `inactive:"+"` // number of units in depth population codes - DepthSize int `inactive:"+" desc:"number of units in depth population codes"` + DepthSize int `inactive:"+"` // population code for depth, in normalized units - DepthCode popcode.OneD `desc:"population code 
for depth, in normalized units"` + DepthCode popcode.OneD // angle population code values, in normalized units - AngCode popcode.Ring `desc:"angle population code values, in normalized units"` + AngCode popcode.Ring // number of units per localist value - UnitsPer int `desc:"number of units per localist value"` + UnitsPer int // print debug messages - Debug bool `desc:"print debug messages"` + Debug bool // proportion of times that a blank input is generated -- for testing pulvinar behavior with blank inputs - PctBlank float32 `desc:"proportion of times that a blank input is generated -- for testing pulvinar behavior with blank inputs"` + PctBlank float32 // current location of agent, floating point - PosF mat32.Vec2 `inactive:"+" desc:"current location of agent, floating point"` + PosF mat32.Vec2 `inactive:"+"` // current location of agent, integer - PosI evec.Vec2i `inactive:"+" desc:"current location of agent, integer"` + PosI evec.Vec2i `inactive:"+"` // current angle, in degrees - Angle int `inactive:"+" desc:"current angle, in degrees"` + Angle int `inactive:"+"` // angle that we just rotated -- drives vestibular - RotAng int `inactive:"+" desc:"angle that we just rotated -- drives vestibular"` + RotAng int `inactive:"+"` // last action taken - Act int `inactive:"+" desc:"last action taken"` + Act int `inactive:"+"` // depth for each angle (NFOVRays), raw - Depths []float32 `desc:"depth for each angle (NFOVRays), raw"` + Depths []float32 // depth for each angle (NFOVRays), normalized log - DepthLogs []float32 `desc:"depth for each angle (NFOVRays), normalized log"` + DepthLogs []float32 // current rendered state tensors -- extensible map - CurStates map[string]*etensor.Float32 `desc:"current rendered state tensors -- extensible map"` + CurStates map[string]*etensor.Float32 // next rendered state tensors -- updated from actions - NextStates map[string]*etensor.Float32 `desc:"next rendered state tensors -- updated from actions"` + NextStates map[string]*etensor.Float32 - // [view: -] random number generator for the env -- all random calls must use this -- set seed here for weight initialization values - Rand erand.SysRand `view:"-" desc:"random number generator for the env -- all random calls must use this -- set seed here for weight initialization values"` + // random number generator for the env -- all random calls must use this -- set seed here for weight initialization values + Rand erand.SysRand `view:"-"` // random seed - RndSeed int64 `inactive:"+" desc:"random seed"` + RndSeed int64 `inactive:"+"` } var KiT_MoveEnv = kit.Types.AddType(&MoveEnv{}, nil) diff --git a/examples/deep_move/params.go b/examples/deep_move/params.go index 8777dc6be..0086e0903 100644 --- a/examples/deep_move/params.go +++ b/examples/deep_move/params.go @@ -5,8 +5,8 @@ package main import ( - "github.com/emer/emergent/netparams" - "github.com/emer/emergent/params" + "github.com/emer/emergent/v2/netparams" + "github.com/emer/emergent/v2/params" ) // ParamSets is the default set of parameters -- Base is always applied, and others can be optionally diff --git a/examples/deep_music/deep_music.go b/examples/deep_music/deep_music.go index 4c33e4c4c..7fc2ec8dd 100644 --- a/examples/deep_music/deep_music.go +++ b/examples/deep_music/deep_music.go @@ -11,26 +11,26 @@ import ( "os" "github.com/emer/axon/axon" - "github.com/emer/emergent/econfig" - "github.com/emer/emergent/egui" - "github.com/emer/emergent/elog" - "github.com/emer/emergent/emer" - "github.com/emer/emergent/env" - "github.com/emer/emergent/erand" - 
"github.com/emer/emergent/estats" - "github.com/emer/emergent/etime" - "github.com/emer/emergent/looper" - "github.com/emer/emergent/netview" - "github.com/emer/emergent/params" - "github.com/emer/emergent/prjn" - "github.com/emer/emergent/relpos" - "github.com/emer/empi/mpi" - "github.com/emer/etable/etable" - _ "github.com/emer/etable/etview" // _ = include to get gui views - "github.com/emer/etable/metric" - "github.com/goki/gi/gi" - "github.com/goki/gi/gimain" - "github.com/goki/mat32" + "github.com/emer/emergent/v2/econfig" + "github.com/emer/emergent/v2/egui" + "github.com/emer/emergent/v2/elog" + "github.com/emer/emergent/v2/emer" + "github.com/emer/emergent/v2/env" + "github.com/emer/emergent/v2/erand" + "github.com/emer/emergent/v2/estats" + "github.com/emer/emergent/v2/etime" + "github.com/emer/emergent/v2/looper" + "github.com/emer/emergent/v2/netview" + "github.com/emer/emergent/v2/params" + "github.com/emer/emergent/v2/prjn" + "github.com/emer/emergent/v2/relpos" + "github.com/emer/empi/v2/mpi" + "goki.dev/etable/v2/etable" + _ "goki.dev/etable/v2/etview" // _ = include to get gui views + "goki.dev/etable/v2/metric" + "goki.dev/gi/v2/gi" + "goki.dev/gi/v2/gimain" + "goki.dev/mat32/v2" ) func main() { @@ -52,37 +52,37 @@ func main() { type Sim struct { // simulation configuration parameters -- set by .toml config file and / or args - Config Config `desc:"simulation configuration parameters -- set by .toml config file and / or args"` + Config Config - // [view: no-inline] the network -- click to view / edit parameters for layers, prjns, etc - Net *axon.Network `view:"no-inline" desc:"the network -- click to view / edit parameters for layers, prjns, etc"` + // the network -- click to view / edit parameters for layers, prjns, etc + Net *axon.Network `view:"no-inline"` - // [view: inline] all parameter management - Params emer.NetParams `view:"inline" desc:"all parameter management"` + // all parameter management + Params emer.NetParams `view:"inline"` - // [view: no-inline] contains looper control loops for running sim - Loops *looper.Manager `view:"no-inline" desc:"contains looper control loops for running sim"` + // contains looper control loops for running sim + Loops *looper.Manager `view:"no-inline"` // contains computed statistic values - Stats estats.Stats `desc:"contains computed statistic values"` + Stats estats.Stats // Contains all the logs and information about the logs.' 
- Logs elog.Logs `desc:"Contains all the logs and information about the logs.'"` + Logs elog.Logs - // [view: no-inline] Environments - Envs env.Envs `view:"no-inline" desc:"Environments"` + // Environments + Envs env.Envs `view:"no-inline"` // axon timing parameters and state - Context axon.Context `desc:"axon timing parameters and state"` + Context axon.Context - // [view: inline] netview update parameters - ViewUpdt netview.ViewUpdt `view:"inline" desc:"netview update parameters"` + // netview update parameters + ViewUpdt netview.ViewUpdt `view:"inline"` - // [view: -] manages all the gui elements - GUI egui.GUI `view:"-" desc:"manages all the gui elements"` + // manages all the gui elements + GUI egui.GUI `view:"-"` - // [view: -] a list of random seeds to use for each run - RndSeeds erand.Seeds `view:"-" desc:"a list of random seeds to use for each run"` + // a list of random seeds to use for each run + RndSeeds erand.Seeds `view:"-"` } // New creates new blank elements and initializes defaults diff --git a/examples/deep_music/music_env.go b/examples/deep_music/music_env.go index eacdf3d2d..3f060adb3 100644 --- a/examples/deep_music/music_env.go +++ b/examples/deep_music/music_env.go @@ -10,14 +10,14 @@ import ( "os" "time" - "github.com/emer/emergent/env" - "github.com/emer/etable/etable" - "github.com/emer/etable/etensor" - "github.com/emer/etable/minmax" + "github.com/emer/emergent/v2/env" "github.com/goki/ki/ints" "gitlab.com/gomidi/midi/v2" "gitlab.com/gomidi/midi/v2/gm" "gitlab.com/gomidi/midi/v2/smf" + "goki.dev/etable/v2/etable" + "goki.dev/etable/v2/etensor" + "goki.dev/etable/v2/minmax" ) // MusicEnv reads in a midi SMF file and presents it as a sequence of notes. @@ -26,55 +26,55 @@ import ( type MusicEnv struct { // name of this environment - Nm string `desc:"name of this environment"` + Nm string // emit debugging messages about the music file - Debug bool `desc:"emit debugging messages about the music file"` + Debug bool // use only 1 octave of 12 notes for everything -- keeps it consistent - WrapNotes bool `desc:"use only 1 octave of 12 notes for everything -- keeps it consistent"` + WrapNotes bool - // [def: 120] number of time ticks per row in table -- note transitions that are faster than this will be lost - TicksPer int `def:"120" desc:"number of time ticks per row in table -- note transitions that are faster than this will be lost"` + // number of time ticks per row in table -- note transitions that are faster than this will be lost + TicksPer int `def:"120"` // which track to process - Track int `desc:"which track to process"` + Track int // play output as it steps - Play bool `desc:"play output as it steps"` + Play bool // limit song length to given number of steps, if > 0 - MaxSteps int `desc:"limit song length to given number of steps, if > 0"` + MaxSteps int // time offset for data parallel = Song.Rows / (NData+1) - DiOffset int `inactive:"+" desc:"time offset for data parallel = Song.Rows / (NData+1)"` + DiOffset int `inactive:"+"` // number of units per localist note value - UnitsPer int `desc:"number of units per localist note value"` + UnitsPer int // range of notes in given track - NoteRange minmax.Int `desc:"range of notes in given track"` + NoteRange minmax.Int // number of notes - NNotes int `desc:"number of notes"` + NNotes int // the song encoded into 200 msec increments, with columns as tracks - Song etable.Table `desc:"the song encoded into 200 msec increments, with columns as tracks"` + Song etable.Table - // [view: inline] current time step - 
Time env.Ctr `view:"inline" desc:"current time step"` + // current time step + Time env.Ctr `view:"inline"` - // current note, rendered as a 4D tensor with shape: [1, NNotes, UnitsPer, 1] - Note etensor.Float32 `desc:"current note, rendered as a 4D tensor with shape: [1, NNotes, UnitsPer, 1]"` + // current note, rendered as a 4D tensor with shape: + Note etensor.Float32 // current note index - NoteIdx int `desc:"current note index"` + NoteIdx int - // [view: -] the function for playing midi - Player func(msg midi.Message) error `view:"-" desc:"the function for playing midi"` + // the function for playing midi + Player func(msg midi.Message) error `view:"-"` - // [view: -] for playing notes - LastNotePlayed int `view:"-" desc:"for playing notes"` + // for playing notes + LastNotePlayed int `view:"-"` } func (ev *MusicEnv) Name() string { return ev.Nm } diff --git a/examples/deep_music/params.go b/examples/deep_music/params.go index 1d50f1500..4af393112 100644 --- a/examples/deep_music/params.go +++ b/examples/deep_music/params.go @@ -5,8 +5,8 @@ package main import ( - "github.com/emer/emergent/netparams" - "github.com/emer/emergent/params" + "github.com/emer/emergent/v2/netparams" + "github.com/emer/emergent/v2/params" ) // ParamSets is the default set of parameters -- Base is always applied, and others can be optionally diff --git a/examples/hip/def_params.go b/examples/hip/def_params.go index 623bbc335..887699005 100644 --- a/examples/hip/def_params.go +++ b/examples/hip/def_params.go @@ -5,8 +5,8 @@ package main import ( - "github.com/emer/emergent/netparams" - "github.com/emer/emergent/params" + "github.com/emer/emergent/v2/netparams" + "github.com/emer/emergent/v2/params" ) // ParamSets is the default set of parameters -- Base is always applied, and others can be optionally diff --git a/examples/hip/hip.go b/examples/hip/hip.go index 11d4a8397..5a5ed5553 100644 --- a/examples/hip/hip.go +++ b/examples/hip/hip.go @@ -15,26 +15,26 @@ import ( "strings" "github.com/emer/axon/axon" - "github.com/emer/emergent/econfig" - "github.com/emer/emergent/egui" - "github.com/emer/emergent/elog" - "github.com/emer/emergent/emer" - "github.com/emer/emergent/env" - "github.com/emer/emergent/erand" - "github.com/emer/emergent/estats" - "github.com/emer/emergent/etime" - "github.com/emer/emergent/looper" - "github.com/emer/emergent/netview" - "github.com/emer/emergent/patgen" - "github.com/emer/emergent/prjn" - "github.com/emer/empi/mpi" - "github.com/emer/etable/etable" - "github.com/emer/etable/etensor" - "github.com/emer/etable/metric" - "github.com/goki/gi/gi" - "github.com/goki/gi/gimain" - "github.com/goki/ki/bools" - "github.com/goki/mat32" + "github.com/emer/emergent/v2/econfig" + "github.com/emer/emergent/v2/egui" + "github.com/emer/emergent/v2/elog" + "github.com/emer/emergent/v2/emer" + "github.com/emer/emergent/v2/env" + "github.com/emer/emergent/v2/erand" + "github.com/emer/emergent/v2/estats" + "github.com/emer/emergent/v2/etime" + "github.com/emer/emergent/v2/looper" + "github.com/emer/emergent/v2/netview" + "github.com/emer/emergent/v2/patgen" + "github.com/emer/emergent/v2/prjn" + "github.com/emer/empi/v2/mpi" + "goki.dev/etable/v2/etable" + "goki.dev/etable/v2/etensor" + "goki.dev/etable/v2/metric" + "goki.dev/gi/v2/gi" + "goki.dev/gi/v2/gimain" + "goki.dev/glop/bools" + "goki.dev/mat32/v2" ) func main() { @@ -60,67 +60,67 @@ func main() { type Sim struct { // simulation configuration parameters -- set by .toml config file and / or args - Config Config `desc:"simulation configuration 
parameters -- set by .toml config file and / or args"` + Config Config - // [view: no-inline] the network -- click to view / edit parameters for layers, prjns, etc - Net *axon.Network `view:"no-inline" desc:"the network -- click to view / edit parameters for layers, prjns, etc"` + // the network -- click to view / edit parameters for layers, prjns, etc + Net *axon.Network `view:"no-inline"` - // [view: inline] all parameter management - Params emer.NetParams `view:"inline" desc:"all parameter management"` + // all parameter management + Params emer.NetParams `view:"inline"` - // [view: no-inline] contains looper control loops for running sim - Loops *looper.Manager `view:"no-inline" desc:"contains looper control loops for running sim"` + // contains looper control loops for running sim + Loops *looper.Manager `view:"no-inline"` // contains computed statistic values - Stats estats.Stats `desc:"contains computed statistic values"` + Stats estats.Stats // Contains all the logs and information about the logs.' - Logs elog.Logs `desc:"Contains all the logs and information about the logs.'"` + Logs elog.Logs // if true, run in pretrain mode - PretrainMode bool `desc:"if true, run in pretrain mode"` + PretrainMode bool - // [view: no-inline] pool patterns vocabulary - PoolVocab patgen.Vocab `view:"no-inline" desc:"pool patterns vocabulary"` + // pool patterns vocabulary + PoolVocab patgen.Vocab `view:"no-inline"` - // [view: no-inline] AB training patterns to use - TrainAB *etable.Table `view:"no-inline" desc:"AB training patterns to use"` + // AB training patterns to use + TrainAB *etable.Table `view:"no-inline"` - // [view: no-inline] AC training patterns to use - TrainAC *etable.Table `view:"no-inline" desc:"AC training patterns to use"` + // AC training patterns to use + TrainAC *etable.Table `view:"no-inline"` - // [view: no-inline] AB testing patterns to use - TestAB *etable.Table `view:"no-inline" desc:"AB testing patterns to use"` + // AB testing patterns to use + TestAB *etable.Table `view:"no-inline"` - // [view: no-inline] AC testing patterns to use - TestAC *etable.Table `view:"no-inline" desc:"AC testing patterns to use"` + // AC testing patterns to use + TestAC *etable.Table `view:"no-inline"` - // [view: no-inline] Lure pretrain patterns to use - PreTrainLure *etable.Table `view:"no-inline" desc:"Lure pretrain patterns to use"` + // Lure pretrain patterns to use + PreTrainLure *etable.Table `view:"no-inline"` - // [view: no-inline] Lure testing patterns to use - TestLure *etable.Table `view:"no-inline" desc:"Lure testing patterns to use"` + // Lure testing patterns to use + TestLure *etable.Table `view:"no-inline"` - // [view: no-inline] all training patterns -- for pretrain - TrainAll *etable.Table `view:"no-inline" desc:"all training patterns -- for pretrain"` + // all training patterns -- for pretrain + TrainAll *etable.Table `view:"no-inline"` - // [view: no-inline] TestAB + TestAC - TestABAC *etable.Table `view:"no-inline" desc:"TestAB + TestAC"` + // TestAB + TestAC + TestABAC *etable.Table `view:"no-inline"` - // [view: no-inline] Environments - Envs env.Envs `view:"no-inline" desc:"Environments"` + // Environments + Envs env.Envs `view:"no-inline"` // axon timing parameters and state - Context axon.Context `desc:"axon timing parameters and state"` + Context axon.Context - // [view: inline] netview update parameters - ViewUpdt netview.ViewUpdt `view:"inline" desc:"netview update parameters"` + // netview update parameters + ViewUpdt netview.ViewUpdt `view:"inline"` - // 
[view: -] manages all the gui elements - GUI egui.GUI `view:"-" desc:"manages all the gui elements"` + // manages all the gui elements + GUI egui.GUI `view:"-"` - // [view: -] a list of random seeds to use for each run - RndSeeds erand.Seeds `view:"-" desc:"a list of random seeds to use for each run"` + // a list of random seeds to use for each run + RndSeeds erand.Seeds `view:"-"` } // New creates new blank elements and initializes defaults diff --git a/examples/hip/orig_params.go b/examples/hip/orig_params.go index 7cc9c0115..7faf2a05a 100644 --- a/examples/hip/orig_params.go +++ b/examples/hip/orig_params.go @@ -6,7 +6,7 @@ package main -import "github.com/emer/emergent/params" +import "github.com/emer/emergent/v2/params" // OrigParamSets is the original hip model params, prior to optimization in 2/2020 var OrigParamSets = params.Sets{ diff --git a/examples/inhib/config.go b/examples/inhib/config.go index 0afe00107..b1a76aea4 100644 --- a/examples/inhib/config.go +++ b/examples/inhib/config.go @@ -4,7 +4,7 @@ package main -import "github.com/emer/emergent/evec" +import "github.com/emer/emergent/v2/evec" // EnvConfig has config params for environment // note: only adding fields for key Env params that matter for both Network and Env @@ -12,86 +12,86 @@ import "github.com/emer/emergent/evec" type EnvConfig struct { // env parameters -- can set any field/subfield on Env struct, using standard TOML formatting - Env map[string]any `desc:"env parameters -- can set any field/subfield on Env struct, using standard TOML formatting"` + Env map[string]any - // [def: 15] [min: 5] [max: 50] [step: 1] percent of active units in input layer (literally number of active units, because input has 100 units total) - InputPct float32 `def:"15" min:"5" max:"50" step:"1" desc:"percent of active units in input layer (literally number of active units, because input has 100 units total)"` + // percent of active units in input layer (literally number of active units, because input has 100 units total) + InputPct float32 `def:"15" min:"5" max:"50" step:"1"` } // ParamConfig has config parameters related to sim params type ParamConfig struct { // network parameters - Network map[string]any `desc:"network parameters"` + Network map[string]any - // [def: 2] [min: 1] number of hidden layers to add - NLayers int `def:"2" min:"1" desc:"number of hidden layers to add"` + // number of hidden layers to add + NLayers int `def:"2" min:"1"` - // [def: {'X':10,'Y':10}] size of hidden layers - HidSize evec.Vec2i `def:"{'X':10,'Y':10}" desc:"size of hidden layers"` + // size of hidden layers + HidSize evec.Vec2i `def:"{'X':10,'Y':10}"` // Extra Param Sheet name(s) to use (space separated if multiple) -- must be valid name as listed in compiled-in params or loaded params - Sheet string `desc:"Extra Param Sheet name(s) to use (space separated if multiple) -- must be valid name as listed in compiled-in params or loaded params"` + Sheet string // extra tag to add to file names and logs saved from this run - Tag string `desc:"extra tag to add to file names and logs saved from this run"` + Tag string // user note -- describe the run params etc -- like a git commit message for the run - Note string `desc:"user note -- describe the run params etc -- like a git commit message for the run"` + Note string // Name of the JSON file to input saved parameters from. 
- File string `nest:"+" desc:"Name of the JSON file to input saved parameters from."` + File string `nest:"+"` // Save a snapshot of all current param and config settings in a directory named params_ (or _good if Good is true), then quit -- useful for comparing to later changes and seeing multiple views of current params - SaveAll bool `nest:"+" desc:"Save a snapshot of all current param and config settings in a directory named params_ (or _good if Good is true), then quit -- useful for comparing to later changes and seeing multiple views of current params"` + SaveAll bool `nest:"+"` // for SaveAll, save to params_good for a known good params state. This can be done prior to making a new release after all tests are passing -- add results to git to provide a full diff record of all params over time. - Good bool `nest:"+" desc:"for SaveAll, save to params_good for a known good params state. This can be done prior to making a new release after all tests are passing -- add results to git to provide a full diff record of all params over time."` + Good bool `nest:"+"` } // RunConfig has config parameters related to running the sim type RunConfig struct { - // [def: true] use the GPU for computation -- generally faster even for small models if NData ~16 - GPU bool `def:"true" desc:"use the GPU for computation -- generally faster even for small models if NData ~16"` + // use the GPU for computation -- generally faster even for small models if NData ~16 + GPU bool `def:"true"` } // LogConfig has config parameters related to logging data type LogConfig struct { - // [def: true] if true, save train epoch log to file, as .epc.tsv typically - Epoch bool `def:"true" nest:"+" desc:"if true, save train epoch log to file, as .epc.tsv typically"` + // if true, save train epoch log to file, as .epc.tsv typically + Epoch bool `def:"true" nest:"+"` - // [def: false] if true, save train trial log to file, as .trl.tsv typically. May be large. - Trial bool `def:"false" nest:"+" desc:"if true, save train trial log to file, as .trl.tsv typically. May be large."` + // if true, save train trial log to file, as .trl.tsv typically. May be large. + Trial bool `def:"false" nest:"+"` // if true, save network activation etc data from testing trials, for later viewing in netview - NetData bool `desc:"if true, save network activation etc data from testing trials, for later viewing in netview"` + NetData bool } // Config is a standard Sim config -- use as a starting point. 
type Config struct { // specify include files here, and after configuration, it contains list of include files added - Includes []string `desc:"specify include files here, and after configuration, it contains list of include files added"` + Includes []string - // [def: true] open the GUI -- does not automatically run -- if false, then runs automatically and quits - GUI bool `def:"true" desc:"open the GUI -- does not automatically run -- if false, then runs automatically and quits"` + // open the GUI -- does not automatically run -- if false, then runs automatically and quits + GUI bool `def:"true"` // log debugging information - Debug bool `desc:"log debugging information"` + Debug bool - // [view: add-fields] environment configuration options - Env EnvConfig `view:"add-fields" desc:"environment configuration options"` + // environment configuration options + Env EnvConfig `view:"add-fields"` - // [view: add-fields] parameter related configuration options - Params ParamConfig `view:"add-fields" desc:"parameter related configuration options"` + // parameter related configuration options + Params ParamConfig `view:"add-fields"` - // [view: add-fields] sim running related configuration options - Run RunConfig `view:"add-fields" desc:"sim running related configuration options"` + // sim running related configuration options + Run RunConfig `view:"add-fields"` - // [view: add-fields] data logging related configuration options - Log LogConfig `view:"add-fields" desc:"data logging related configuration options"` + // data logging related configuration options + Log LogConfig `view:"add-fields"` } func (cfg *Config) IncludesPtr() *[]string { return &cfg.Includes } diff --git a/examples/inhib/inhib.go b/examples/inhib/inhib.go index 6c4d35290..3f8f19547 100644 --- a/examples/inhib/inhib.go +++ b/examples/inhib/inhib.go @@ -15,25 +15,25 @@ import ( "os" "github.com/emer/axon/axon" - "github.com/emer/emergent/econfig" - "github.com/emer/emergent/egui" - "github.com/emer/emergent/elog" - "github.com/emer/emergent/emer" - "github.com/emer/emergent/erand" - "github.com/emer/emergent/estats" - "github.com/emer/emergent/etime" - "github.com/emer/emergent/looper" - "github.com/emer/emergent/netview" - "github.com/emer/emergent/patgen" - "github.com/emer/emergent/prjn" - "github.com/emer/emergent/relpos" - "github.com/emer/empi/mpi" - "github.com/emer/etable/etable" - "github.com/emer/etable/etensor" - _ "github.com/emer/etable/etview" // include to get gui views - "github.com/goki/gi/gi" - "github.com/goki/gi/gimain" - "github.com/goki/mat32" + "github.com/emer/emergent/v2/econfig" + "github.com/emer/emergent/v2/egui" + "github.com/emer/emergent/v2/elog" + "github.com/emer/emergent/v2/emer" + "github.com/emer/emergent/v2/erand" + "github.com/emer/emergent/v2/estats" + "github.com/emer/emergent/v2/etime" + "github.com/emer/emergent/v2/looper" + "github.com/emer/emergent/v2/netview" + "github.com/emer/emergent/v2/patgen" + "github.com/emer/emergent/v2/prjn" + "github.com/emer/emergent/v2/relpos" + "github.com/emer/empi/v2/mpi" + "goki.dev/etable/v2/etable" + "goki.dev/etable/v2/etensor" + _ "goki.dev/etable/v2/etview" // include to get gui views + "goki.dev/gi/v2/gi" + "goki.dev/gi/v2/gimain" + "goki.dev/mat32/v2" ) func main() { @@ -57,37 +57,37 @@ func main() { type Sim struct { // simulation configuration parameters -- set by .toml config file and / or args - Config Config `desc:"simulation configuration parameters -- set by .toml config file and / or args"` + Config Config - // [view: no-inline] the 
network -- click to view / edit parameters for layers, prjns, etc - Net *axon.Network `view:"no-inline" desc:"the network -- click to view / edit parameters for layers, prjns, etc"` + // the network -- click to view / edit parameters for layers, prjns, etc + Net *axon.Network `view:"no-inline"` - // [view: inline] all parameter management - Params emer.NetParams `view:"inline" desc:"all parameter management"` + // all parameter management + Params emer.NetParams `view:"inline"` - // [view: no-inline] contains looper control loops for running sim - Loops *looper.Manager `view:"no-inline" desc:"contains looper control loops for running sim"` + // contains looper control loops for running sim + Loops *looper.Manager `view:"no-inline"` // contains computed statistic values - Stats estats.Stats `desc:"contains computed statistic values"` + Stats estats.Stats // Contains all the logs and information about the logs.' - Logs elog.Logs `desc:"Contains all the logs and information about the logs.'"` + Logs elog.Logs - // [view: no-inline] the training patterns to use - Pats *etable.Table `view:"no-inline" desc:"the training patterns to use"` + // the training patterns to use + Pats *etable.Table `view:"no-inline"` // axon timing parameters and state - Context axon.Context `desc:"axon timing parameters and state"` + Context axon.Context - // [view: inline] netview update parameters - ViewUpdt netview.ViewUpdt `view:"inline" desc:"netview update parameters"` + // netview update parameters + ViewUpdt netview.ViewUpdt `view:"inline"` - // [view: -] manages all the gui elements - GUI egui.GUI `view:"-" desc:"manages all the gui elements"` + // manages all the gui elements + GUI egui.GUI `view:"-"` - // [view: -] a list of random seeds to use for each run - RndSeeds erand.Seeds `view:"-" desc:"a list of random seeds to use for each run"` + // a list of random seeds to use for each run + RndSeeds erand.Seeds `view:"-"` } // New creates new blank elements and initializes defaults diff --git a/examples/inhib/params.go b/examples/inhib/params.go index 8c7103991..b2cef968c 100644 --- a/examples/inhib/params.go +++ b/examples/inhib/params.go @@ -5,8 +5,8 @@ package main import ( - "github.com/emer/emergent/netparams" - "github.com/emer/emergent/params" + "github.com/emer/emergent/v2/netparams" + "github.com/emer/emergent/v2/params" ) // ParamSets is the default set of parameters -- Base is always applied, and others can be optionally diff --git a/examples/kinaseq/kinaseq.go b/examples/kinaseq/kinaseq.go index 99af23e91..bf2e53e1e 100644 --- a/examples/kinaseq/kinaseq.go +++ b/examples/kinaseq/kinaseq.go @@ -12,16 +12,16 @@ import ( "math/rand" "github.com/emer/axon/axon" - "github.com/emer/emergent/emer" - "github.com/emer/etable/agg" - "github.com/emer/etable/eplot" - "github.com/emer/etable/etable" - _ "github.com/emer/etable/etview" // include to get gui views - "github.com/goki/gi/gi" - "github.com/goki/gi/gimain" - "github.com/goki/gi/giv" + "github.com/emer/emergent/v2/emer" "github.com/goki/ki/ki" - "github.com/goki/mat32" + "goki.dev/etable/v2/agg" + "goki.dev/etable/v2/eplot" + "goki.dev/etable/v2/etable" + _ "goki.dev/etable/v2/etview" // include to get gui views + "goki.dev/gi/v2/gi" + "goki.dev/gi/v2/gimain" + "goki.dev/gi/v2/giv" + "goki.dev/mat32/v2" ) func main() { @@ -42,95 +42,95 @@ const LogPrec = 4 // Sim holds the params, table, etc type Sim struct { - // [view: no-inline] the network -- click to view / edit parameters for layers, prjns, etc - Net *axon.Network `view:"no-inline" desc:"the 
network -- click to view / edit parameters for layers, prjns, etc"` + // the network -- click to view / edit parameters for layers, prjns, etc + Net *axon.Network `view:"no-inline"` - // [view: no-inline] the sending neuron - SendNeur *axon.Neuron `view:"no-inline" desc:"the sending neuron"` + // the sending neuron + SendNeur *axon.Neuron `view:"no-inline"` - // [view: no-inline] the receiving neuron - RecvNeur *axon.Neuron `view:"no-inline" desc:"the receiving neuron"` + // the receiving neuron + RecvNeur *axon.Neuron `view:"no-inline"` - // [view: no-inline] prjn-level parameters -- for intializing synapse -- other params not used - Prjn *axon.Prjn `view:"no-inline" desc:"prjn-level parameters -- for intializing synapse -- other params not used"` + // prjn-level parameters -- for intializing synapse -- other params not used + Prjn *axon.Prjn `view:"no-inline"` - // [view: no-inline] extra neuron state - NeuronEx NeuronEx `view:"no-inline" desc:"extra neuron state"` + // extra neuron state + NeuronEx NeuronEx `view:"no-inline"` - // [view: inline] all parameter management - Params emer.Params `view:"inline" desc:"all parameter management"` + // all parameter management + Params emer.Params `view:"inline"` // multiplier on product factor to equate to SynC - PGain float32 `desc:"multiplier on product factor to equate to SynC"` + PGain float32 // spike multiplier for display purposes - SpikeDisp float32 `desc:"spike multiplier for display purposes"` + SpikeDisp float32 // use current Ge clamping for recv neuron -- otherwise spikes driven externally - RGeClamp bool `desc:"use current Ge clamping for recv neuron -- otherwise spikes driven externally"` + RGeClamp bool // gain multiplier for RGe clamp - RGeGain float32 `desc:"gain multiplier for RGe clamp"` + RGeGain float32 // baseline recv Ge level - RGeBase float32 `desc:"baseline recv Ge level"` + RGeBase float32 // baseline recv Gi level - RGiBase float32 `desc:"baseline recv Gi level"` + RGiBase float32 // number of repetitions -- if > 1 then only final @ end of Dur shown - NTrials int `desc:"number of repetitions -- if > 1 then only final @ end of Dur shown"` + NTrials int // number of msec in minus phase - MinusMsec int `desc:"number of msec in minus phase"` + MinusMsec int // number of msec in plus phase - PlusMsec int `desc:"number of msec in plus phase"` + PlusMsec int // quiet space between spiking - ISIMsec int `desc:"quiet space between spiking"` + ISIMsec int - // [view: -] total trial msec: minus, plus isi - TrialMsec int `view:"-" desc:"total trial msec: minus, plus isi"` + // total trial msec: minus, plus isi + TrialMsec int `view:"-"` // minus phase firing frequency - MinusHz int `desc:"minus phase firing frequency"` + MinusHz int // plus phase firing frequency - PlusHz int `desc:"plus phase firing frequency"` + PlusHz int // additive difference in sending firing frequency relative to recv (recv has basic minus, plus) - SendDiffHz int `desc:"additive difference in sending firing frequency relative to recv (recv has basic minus, plus)"` + SendDiffHz int - // [view: no-inline] synapse state values, NST_ in log - SynNeurTheta axon.Synapse `view:"no-inline" desc:"synapse state values, NST_ in log"` + // synapse state values, NST_ in log + SynNeurTheta axon.Synapse `view:"no-inline"` - // [view: no-inline] synapse state values, SST_ in log - SynSpkTheta axon.Synapse `view:"no-inline" desc:"synapse state values, SST_ in log"` + // synapse state values, SST_ in log + SynSpkTheta axon.Synapse `view:"no-inline"` - // [view: no-inline] 
synapse state values, SSC_ in log - SynSpkCont axon.Synapse `view:"no-inline" desc:"synapse state values, SSC_ in log"` + // synapse state values, SSC_ in log + SynSpkCont axon.Synapse `view:"no-inline"` - // [view: no-inline] synapse state values, SNC_ in log - SynNMDACont axon.Synapse `view:"no-inline" desc:"synapse state values, SNC_ in log"` + // synapse state values, SNC_ in log + SynNMDACont axon.Synapse `view:"no-inline"` // axon time recording - Context axon.Context `desc:"axon time recording"` + Context axon.Context - // [view: no-inline] all logs - Logs map[string]*etable.Table `view:"no-inline" desc:"all logs"` + // all logs + Logs map[string]*etable.Table `view:"no-inline"` - // [view: -] all plots - Plots map[string]*eplot.Plot2D `view:"-" desc:"all plots"` + // all plots + Plots map[string]*eplot.Plot2D `view:"-"` - // [view: -] main GUI window - Win *gi.Window `view:"-" desc:"main GUI window"` + // main GUI window + Win *gi.Window `view:"-"` - // [view: -] the master toolbar - ToolBar *gi.ToolBar `view:"-" desc:"the master toolbar"` + // the master toolbar + ToolBar *gi.ToolBar `view:"-"` - // [view: -] stop button - StopNow bool `view:"-" desc:"stop button"` + // stop button + StopNow bool `view:"-"` } // TheSim is the overall state for this simulation diff --git a/examples/kinaseq/neuron.go b/examples/kinaseq/neuron.go index c46bc5138..04df2f73f 100644 --- a/examples/kinaseq/neuron.go +++ b/examples/kinaseq/neuron.go @@ -12,12 +12,12 @@ import ( "strings" "github.com/emer/axon/axon" - "github.com/emer/emergent/emer" - "github.com/emer/emergent/params" - "github.com/emer/emergent/prjn" - "github.com/emer/etable/eplot" - "github.com/emer/etable/etable" - "github.com/emer/etable/etensor" + "github.com/emer/emergent/v2/emer" + "github.com/emer/emergent/v2/params" + "github.com/emer/emergent/v2/prjn" + "goki.dev/etable/v2/eplot" + "goki.dev/etable/v2/etable" + "goki.dev/etable/v2/etensor" ) // ParamSets for basic parameters @@ -81,22 +81,22 @@ var ParamSets = params.Sets{ type NeuronEx struct { // time of last sending spike - SCaUpT int `desc:"time of last sending spike"` + SCaUpT int // time of last recv spike - RCaUpT int `desc:"time of last recv spike"` + RCaUpT int // sending poisson firing probability accumulator - Sp float32 `desc:"sending poisson firing probability accumulator"` + Sp float32 // recv poisson firing probability accumulator - Rp float32 `desc:"recv poisson firing probability accumulator"` + Rp float32 // NMDA mg-based blocking conductance - NMDAGmg float32 `desc:"NMDA mg-based blocking conductance"` + NMDAGmg float32 // when 0, it is time to learn according to theta cycle, otherwise increments up unless still -1 from init - LearnNow float32 `desc:"when 0, it is time to learn according to theta cycle, otherwise increments up unless still -1 from init"` + LearnNow float32 } func (nex *NeuronEx) Init() { diff --git a/examples/mpi/params.go b/examples/mpi/params.go index 6d80d1043..ce366033a 100644 --- a/examples/mpi/params.go +++ b/examples/mpi/params.go @@ -5,8 +5,8 @@ package main import ( - "github.com/emer/emergent/netparams" - "github.com/emer/emergent/params" + "github.com/emer/emergent/v2/netparams" + "github.com/emer/emergent/v2/params" ) // ParamSets sets the minimal non-default params diff --git a/examples/mpi/ra25.go b/examples/mpi/ra25.go index 76b4e9b3d..f70186ee7 100644 --- a/examples/mpi/ra25.go +++ b/examples/mpi/ra25.go @@ -12,26 +12,26 @@ import ( "os" "github.com/emer/axon/axon" - "github.com/emer/emergent/econfig" - 
"github.com/emer/emergent/egui" - "github.com/emer/emergent/elog" - "github.com/emer/emergent/emer" - "github.com/emer/emergent/env" - "github.com/emer/emergent/erand" - "github.com/emer/emergent/estats" - "github.com/emer/emergent/etime" - "github.com/emer/emergent/evec" - "github.com/emer/emergent/looper" - "github.com/emer/emergent/netview" - "github.com/emer/emergent/patgen" - "github.com/emer/emergent/prjn" - "github.com/emer/empi/empi" - "github.com/emer/empi/mpi" - "github.com/emer/etable/etable" - "github.com/emer/etable/etensor" - "github.com/goki/gi/gi" - "github.com/goki/gi/gimain" - "github.com/goki/mat32" + "github.com/emer/emergent/v2/econfig" + "github.com/emer/emergent/v2/egui" + "github.com/emer/emergent/v2/elog" + "github.com/emer/emergent/v2/emer" + "github.com/emer/emergent/v2/env" + "github.com/emer/emergent/v2/erand" + "github.com/emer/emergent/v2/estats" + "github.com/emer/emergent/v2/etime" + "github.com/emer/emergent/v2/evec" + "github.com/emer/emergent/v2/looper" + "github.com/emer/emergent/v2/netview" + "github.com/emer/emergent/v2/patgen" + "github.com/emer/emergent/v2/prjn" + "github.com/emer/empi/v2/empi" + "github.com/emer/empi/v2/mpi" + "goki.dev/etable/v2/etable" + "goki.dev/etable/v2/etensor" + "goki.dev/gi/v2/gi" + "goki.dev/gi/v2/gimain" + "goki.dev/mat32/v2" ) func main() { @@ -51,115 +51,115 @@ func main() { type ParamConfig struct { // network parameters - Network map[string]any `desc:"network parameters"` + Network map[string]any - // [def: {'X':10,'Y':10}] size of hidden layer -- can use emer.LaySize for 4D layers - Hidden1Size evec.Vec2i `def:"{'X':10,'Y':10}" nest:"+" desc:"size of hidden layer -- can use emer.LaySize for 4D layers"` + // size of hidden layer -- can use emer.LaySize for 4D layers + Hidden1Size evec.Vec2i `def:"{'X':10,'Y':10}" nest:"+"` - // [def: {'X':10,'Y':10}] size of hidden layer -- can use emer.LaySize for 4D layers - Hidden2Size evec.Vec2i `def:"{'X':10,'Y':10}" nest:"+" desc:"size of hidden layer -- can use emer.LaySize for 4D layers"` + // size of hidden layer -- can use emer.LaySize for 4D layers + Hidden2Size evec.Vec2i `def:"{'X':10,'Y':10}" nest:"+"` // Extra Param Sheet name(s) to use (space separated if multiple) -- must be valid name as listed in compiled-in params or loaded params - Sheet string `desc:"Extra Param Sheet name(s) to use (space separated if multiple) -- must be valid name as listed in compiled-in params or loaded params"` + Sheet string // extra tag to add to file names and logs saved from this run - Tag string `desc:"extra tag to add to file names and logs saved from this run"` + Tag string // user note -- describe the run params etc -- like a git commit message for the run - Note string `desc:"user note -- describe the run params etc -- like a git commit message for the run"` + Note string // Name of the JSON file to input saved parameters from. - File string `nest:"+" desc:"Name of the JSON file to input saved parameters from."` + File string `nest:"+"` // Save a snapshot of all current param and config settings in a directory named params_ (or _good if Good is true), then quit -- useful for comparing to later changes and seeing multiple views of current params - SaveAll bool `nest:"+" desc:"Save a snapshot of all current param and config settings in a directory named params_ (or _good if Good is true), then quit -- useful for comparing to later changes and seeing multiple views of current params"` + SaveAll bool `nest:"+"` // for SaveAll, save to params_good for a known good params state. 
This can be done prior to making a new release after all tests are passing -- add results to git to provide a full diff record of all params over time. - Good bool `nest:"+" desc:"for SaveAll, save to params_good for a known good params state. This can be done prior to making a new release after all tests are passing -- add results to git to provide a full diff record of all params over time."` + Good bool `nest:"+"` } // RunConfig has config parameters related to running the sim type RunConfig struct { // use MPI message passing interface for data parallel computation between nodes running identical copies of the same sim, sharing DWt changes - MPI bool `desc:"use MPI message passing interface for data parallel computation between nodes running identical copies of the same sim, sharing DWt changes"` + MPI bool - // [def: false] use the GPU for computation -- generally faster even for small models if NData ~16 - GPU bool `def:"false" desc:"use the GPU for computation -- generally faster even for small models if NData ~16"` + // use the GPU for computation -- generally faster even for small models if NData ~16 + GPU bool `def:"false"` - // [def: 16] [min: 1] number of data-parallel items to process in parallel per trial -- works (and is significantly faster) for both CPU and GPU. Results in an effective mini-batch of learning. - NData int `def:"16" min:"1" desc:"number of data-parallel items to process in parallel per trial -- works (and is significantly faster) for both CPU and GPU. Results in an effective mini-batch of learning."` + // number of data-parallel items to process in parallel per trial -- works (and is significantly faster) for both CPU and GPU. Results in an effective mini-batch of learning. + NData int `def:"16" min:"1"` - // [def: 0] number of parallel threads for CPU computation -- 0 = use default - NThreads int `def:"0" desc:"number of parallel threads for CPU computation -- 0 = use default"` + // number of parallel threads for CPU computation -- 0 = use default + NThreads int `def:"0"` - // [def: 0] starting run number -- determines the random seed -- runs counts from there -- can do all runs in parallel by launching separate jobs with each run, runs = 1 - Run int `def:"0" desc:"starting run number -- determines the random seed -- runs counts from there -- can do all runs in parallel by launching separate jobs with each run, runs = 1"` + // starting run number -- determines the random seed -- runs counts from there -- can do all runs in parallel by launching separate jobs with each run, runs = 1 + Run int `def:"0"` - // [def: 5] [min: 1] total number of runs to do when running Train - NRuns int `def:"5" min:"1" desc:"total number of runs to do when running Train"` + // total number of runs to do when running Train + NRuns int `def:"5" min:"1"` - // [def: 100] total number of epochs per run - NEpochs int `def:"100" desc:"total number of epochs per run"` + // total number of epochs per run + NEpochs int `def:"100"` - // [def: 2] stop run after this number of perfect, zero-error epochs - NZero int `def:"2" desc:"stop run after this number of perfect, zero-error epochs"` + // stop run after this number of perfect, zero-error epochs + NZero int `def:"2"` - // [def: 32] total number of trials per epoch. Should be an even multiple of NData. - NTrials int `def:"32" desc:"total number of trials per epoch. Should be an even multiple of NData."` + // total number of trials per epoch. Should be an even multiple of NData. 
+ NTrials int `def:"32"` - // [def: 5] how often to run through all the test patterns, in terms of training epochs -- can use 0 or -1 for no testing - TestInterval int `def:"5" desc:"how often to run through all the test patterns, in terms of training epochs -- can use 0 or -1 for no testing"` + // how often to run through all the test patterns, in terms of training epochs -- can use 0 or -1 for no testing + TestInterval int `def:"5"` - // [def: 5] how frequently (in epochs) to compute PCA on hidden representations to measure variance? - PCAInterval int `def:"5" desc:"how frequently (in epochs) to compute PCA on hidden representations to measure variance?"` + // how frequently (in epochs) to compute PCA on hidden representations to measure variance? + PCAInterval int `def:"5"` } // LogConfig has config parameters related to logging data type LogConfig struct { // if true, save final weights after each run - SaveWts bool `desc:"if true, save final weights after each run"` + SaveWts bool - // [def: true] if true, save train epoch log to file, as .epc.tsv typically - Epoch bool `def:"true" nest:"+" desc:"if true, save train epoch log to file, as .epc.tsv typically"` + // if true, save train epoch log to file, as .epc.tsv typically + Epoch bool `def:"true" nest:"+"` - // [def: true] if true, save run log to file, as .run.tsv typically - Run bool `def:"true" nest:"+" desc:"if true, save run log to file, as .run.tsv typically"` + // if true, save run log to file, as .run.tsv typically + Run bool `def:"true" nest:"+"` - // [def: false] if true, save train trial log to file, as .trl.tsv typically. May be large. - Trial bool `def:"false" nest:"+" desc:"if true, save train trial log to file, as .trl.tsv typically. May be large."` + // if true, save train trial log to file, as .trl.tsv typically. May be large. + Trial bool `def:"false" nest:"+"` - // [def: false] if true, save testing epoch log to file, as .tst_epc.tsv typically. In general it is better to copy testing items over to the training epoch log and record there. - TestEpoch bool `def:"false" nest:"+" desc:"if true, save testing epoch log to file, as .tst_epc.tsv typically. In general it is better to copy testing items over to the training epoch log and record there."` + // if true, save testing epoch log to file, as .tst_epc.tsv typically. In general it is better to copy testing items over to the training epoch log and record there. + TestEpoch bool `def:"false" nest:"+"` - // [def: false] if true, save testing trial log to file, as .tst_trl.tsv typically. May be large. - TestTrial bool `def:"false" nest:"+" desc:"if true, save testing trial log to file, as .tst_trl.tsv typically. May be large."` + // if true, save testing trial log to file, as .tst_trl.tsv typically. May be large. + TestTrial bool `def:"false" nest:"+"` // if true, save network activation etc data from testing trials, for later viewing in netview - NetData bool `desc:"if true, save network activation etc data from testing trials, for later viewing in netview"` + NetData bool } // Config is a standard Sim config -- use as a starting point. 
type Config struct { // specify include files here, and after configuration, it contains list of include files added - Includes []string `desc:"specify include files here, and after configuration, it contains list of include files added"` + Includes []string - // [def: true] open the GUI -- does not automatically run -- if false, then runs automatically and quits - GUI bool `def:"true" desc:"open the GUI -- does not automatically run -- if false, then runs automatically and quits"` + // open the GUI -- does not automatically run -- if false, then runs automatically and quits + GUI bool `def:"true"` // log debugging information - Debug bool `desc:"log debugging information"` + Debug bool - // [view: add-fields] parameter related configuration options - Params ParamConfig `view:"add-fields" desc:"parameter related configuration options"` + // parameter related configuration options + Params ParamConfig `view:"add-fields"` - // [view: add-fields] sim running related configuration options - Run RunConfig `view:"add-fields" desc:"sim running related configuration options"` + // sim running related configuration options + Run RunConfig `view:"add-fields"` - // [view: add-fields] data logging related configuration options - Log LogConfig `view:"add-fields" desc:"data logging related configuration options"` + // data logging related configuration options + Log LogConfig `view:"add-fields"` } func (cfg *Config) IncludesPtr() *[]string { return &cfg.Includes } @@ -172,46 +172,46 @@ func (cfg *Config) IncludesPtr() *[]string { return &cfg.Includes } type Sim struct { // simulation configuration parameters -- set by .toml config file and / or args - Config Config `desc:"simulation configuration parameters -- set by .toml config file and / or args"` + Config Config - // [view: no-inline] the network -- click to view / edit parameters for layers, prjns, etc - Net *axon.Network `view:"no-inline" desc:"the network -- click to view / edit parameters for layers, prjns, etc"` + // the network -- click to view / edit parameters for layers, prjns, etc + Net *axon.Network `view:"no-inline"` - // [view: inline] network parameter management - Params emer.NetParams `view:"inline" desc:"network parameter management"` + // network parameter management + Params emer.NetParams `view:"inline"` - // [view: no-inline] contains looper control loops for running sim - Loops *looper.Manager `view:"no-inline" desc:"contains looper control loops for running sim"` + // contains looper control loops for running sim + Loops *looper.Manager `view:"no-inline"` // contains computed statistic values - Stats estats.Stats `desc:"contains computed statistic values"` + Stats estats.Stats // Contains all the logs and information about the logs.' 
- Logs elog.Logs `desc:"Contains all the logs and information about the logs.'"` + Logs elog.Logs - // [view: no-inline] the training patterns to use - Pats *etable.Table `view:"no-inline" desc:"the training patterns to use"` + // the training patterns to use + Pats *etable.Table `view:"no-inline"` - // [view: no-inline] Environments - Envs env.Envs `view:"no-inline" desc:"Environments"` + // Environments + Envs env.Envs `view:"no-inline"` // axon timing parameters and state - Context axon.Context `desc:"axon timing parameters and state"` + Context axon.Context - // [view: inline] netview update parameters - ViewUpdt netview.ViewUpdt `view:"inline" desc:"netview update parameters"` + // netview update parameters + ViewUpdt netview.ViewUpdt `view:"inline"` - // [view: -] manages all the gui elements - GUI egui.GUI `view:"-" desc:"manages all the gui elements"` + // manages all the gui elements + GUI egui.GUI `view:"-"` - // [view: -] a list of random seeds to use for each run - RndSeeds erand.Seeds `view:"-" desc:"a list of random seeds to use for each run"` + // a list of random seeds to use for each run + RndSeeds erand.Seeds `view:"-"` - // [view: -] mpi communicator - Comm *mpi.Comm `view:"-" desc:"mpi communicator"` + // mpi communicator + Comm *mpi.Comm `view:"-"` - // [view: -] buffer of all dwt weight changes -- for mpi sharing - AllDWts []float32 `view:"-" desc:"buffer of all dwt weight changes -- for mpi sharing"` + // buffer of all dwt weight changes -- for mpi sharing + AllDWts []float32 `view:"-"` } // New creates new blank elements and initializes defaults diff --git a/examples/neuron/neuron.go b/examples/neuron/neuron.go index 85952da8a..e6c289618 100644 --- a/examples/neuron/neuron.go +++ b/examples/neuron/neuron.go @@ -15,28 +15,28 @@ import ( "os" "github.com/emer/axon/axon" - "github.com/emer/emergent/ecmd" - "github.com/emer/emergent/econfig" - "github.com/emer/emergent/egui" - "github.com/emer/emergent/elog" - "github.com/emer/emergent/emer" - "github.com/emer/emergent/estats" - "github.com/emer/emergent/etime" - "github.com/emer/emergent/netparams" - "github.com/emer/emergent/netview" - "github.com/emer/emergent/params" - "github.com/emer/emergent/prjn" - "github.com/emer/empi/mpi" - "github.com/emer/etable/eplot" - "github.com/emer/etable/etable" - "github.com/emer/etable/etensor" - _ "github.com/emer/etable/etview" // include to get gui views - "github.com/emer/etable/minmax" - "github.com/goki/gi/gi" - "github.com/goki/gi/gimain" - "github.com/goki/gi/giv" + "github.com/emer/emergent/v2/ecmd" + "github.com/emer/emergent/v2/econfig" + "github.com/emer/emergent/v2/egui" + "github.com/emer/emergent/v2/elog" + "github.com/emer/emergent/v2/emer" + "github.com/emer/emergent/v2/estats" + "github.com/emer/emergent/v2/etime" + "github.com/emer/emergent/v2/netparams" + "github.com/emer/emergent/v2/netview" + "github.com/emer/emergent/v2/params" + "github.com/emer/emergent/v2/prjn" + "github.com/emer/empi/v2/mpi" "github.com/goki/ki/ki" - "github.com/goki/mat32" + "goki.dev/etable/v2/eplot" + "goki.dev/etable/v2/etable" + "goki.dev/etable/v2/etensor" + _ "goki.dev/etable/v2/etview" // include to get gui views + "goki.dev/etable/v2/minmax" + "goki.dev/gi/v2/gi" + "goki.dev/gi/v2/gimain" + "goki.dev/gi/v2/giv" + "goki.dev/mat32/v2" ) func main() { @@ -79,7 +79,7 @@ var ParamSets = netparams.Sets{ type NeuronEx struct { // input ISI countdown for spiking mode -- counts up - InISI float32 `desc:"input ISI countdown for spiking mode -- counts up"` + InISI float32 } func (nrn 
*NeuronEx) Init() { @@ -94,49 +94,49 @@ func (nrn *NeuronEx) Init() { type Sim struct { // simulation configuration parameters -- set by .toml config file and / or args - Config Config `desc:"simulation configuration parameters -- set by .toml config file and / or args"` + Config Config - // [view: no-inline] the network -- click to view / edit parameters for layers, prjns, etc - Net *axon.Network `view:"no-inline" desc:"the network -- click to view / edit parameters for layers, prjns, etc"` + // the network -- click to view / edit parameters for layers, prjns, etc + Net *axon.Network `view:"no-inline"` - // [view: no-inline] extra neuron state for additional channels: VGCC, AK - NeuronEx NeuronEx `view:"no-inline" desc:"extra neuron state for additional channels: VGCC, AK"` + // extra neuron state for additional channels: VGCC, AK + NeuronEx NeuronEx `view:"no-inline"` // axon timing parameters and state - Context axon.Context `desc:"axon timing parameters and state"` + Context axon.Context // contains computed statistic values - Stats estats.Stats `desc:"contains computed statistic values"` + Stats estats.Stats - // [view: no-inline] logging - Logs elog.Logs `view:"no-inline" desc:"logging"` + // logging + Logs elog.Logs `view:"no-inline"` - // [view: inline] all parameter management - Params emer.NetParams `view:"inline" desc:"all parameter management"` + // all parameter management + Params emer.NetParams `view:"inline"` // current cycle of updating - Cycle int `inactive:"+" desc:"current cycle of updating"` + Cycle int `inactive:"+"` - // [view: -] main GUI window - Win *gi.Window `view:"-" desc:"main GUI window"` + // main GUI window + Win *gi.Window `view:"-"` - // [view: -] the network viewer - NetView *netview.NetView `view:"-" desc:"the network viewer"` + // the network viewer + NetView *netview.NetView `view:"-"` - // [view: -] the master toolbar - ToolBar *gi.ToolBar `view:"-" desc:"the master toolbar"` + // the master toolbar + ToolBar *gi.ToolBar `view:"-"` - // [view: -] the test-trial plot - TstCycPlot *eplot.Plot2D `view:"-" desc:"the test-trial plot"` + // the test-trial plot + TstCycPlot *eplot.Plot2D `view:"-"` - // [view: -] map of values for detailed debugging / testing - ValMap map[string]float32 `view:"-" desc:"map of values for detailed debugging / testing"` + // map of values for detailed debugging / testing + ValMap map[string]float32 `view:"-"` - // [view: -] true if sim is running - IsRunning bool `view:"-" desc:"true if sim is running"` + // true if sim is running + IsRunning bool `view:"-"` - // [view: -] flag to stop running - StopNow bool `view:"-" desc:"flag to stop running"` + // flag to stop running + StopNow bool `view:"-"` } // New creates new blank elements and initializes defaults diff --git a/examples/neuron/neuron_test.go b/examples/neuron/neuron_test.go index 2944be9e3..608db7f02 100644 --- a/examples/neuron/neuron_test.go +++ b/examples/neuron/neuron_test.go @@ -7,7 +7,7 @@ import ( "strings" "testing" - "github.com/goki/mat32" + "goki.dev/mat32/v2" "golang.org/x/exp/maps" ) diff --git a/examples/pcore/gono_env.go b/examples/pcore/gono_env.go index 832721a4b..0626e470c 100644 --- a/examples/pcore/gono_env.go +++ b/examples/pcore/gono_env.go @@ -7,91 +7,91 @@ package main import ( "math/rand" - "github.com/emer/emergent/env" - "github.com/emer/emergent/erand" - "github.com/emer/emergent/etime" - "github.com/emer/emergent/popcode" - "github.com/emer/etable/etensor" - "github.com/goki/mat32" + "github.com/emer/emergent/v2/env" + 
"github.com/emer/emergent/v2/erand" + "github.com/emer/emergent/v2/etime" + "github.com/emer/emergent/v2/popcode" + "goki.dev/etable/v2/etensor" + "goki.dev/mat32/v2" ) // GoNoEnv implements simple Go vs. NoGo input patterns to test BG learning. type GoNoEnv struct { // name of environment -- Train or Test - Nm string `desc:"name of environment -- Train or Test"` + Nm string // training or testing env? - Mode etime.Modes `desc:"training or testing env?"` + Mode etime.Modes // trial counter -- set by caller for testing - Trial env.Ctr `desc:"trial counter -- set by caller for testing"` + Trial env.Ctr // activation of ACC positive valence -- drives go - ACCPos float32 `desc:"activation of ACC positive valence -- drives go"` + ACCPos float32 // activation of ACC neg valence -- drives nogo - ACCNeg float32 `desc:"activation of ACC neg valence -- drives nogo"` + ACCNeg float32 // threshold on diff between ACCPos - ACCNeg for counting as a Go trial - PosNegThr float32 `desc:"threshold on diff between ACCPos - ACCNeg for counting as a Go trial"` + PosNegThr float32 // ACCPos and Neg are set manually -- do not generate random vals for training or auto-increment ACCPos / Neg values during test - ManualVals bool `desc:"ACCPos and Neg are set manually -- do not generate random vals for training or auto-increment ACCPos / Neg values during test"` + ManualVals bool // increment in testing activation for test all - TestInc float32 `desc:"increment in testing activation for test all"` + TestInc float32 // number of repetitions per testing level - TestReps int `desc:"number of repetitions per testing level"` + TestReps int - // [view: -] number of pools for representing multiple different options to be evaluated in parallel, vs. 1 pool with a simple go nogo overall choice -- currently tested / configured for the 1 pool case - NPools int `view:"-" desc:"number of pools for representing multiple different options to be evaluated in parallel, vs. 1 pool with a simple go nogo overall choice -- currently tested / configured for the 1 pool case"` + // number of pools for representing multiple different options to be evaluated in parallel, vs. 
1 pool with a simple go nogo overall choice -- currently tested / configured for the 1 pool case + NPools int `view:"-"` // for case with multiple pools evaluated in parallel (not currently used), this is the across-pools multiplier in activation of ACC positive valence -- e.g., .9 daecrements subsequent units by 10% - ACCPosInc float32 `desc:"for case with multiple pools evaluated in parallel (not currently used), this is the across-pools multiplier in activation of ACC positive valence -- e.g., .9 daecrements subsequent units by 10%"` + ACCPosInc float32 // for case with multiple pools evaluated in parallel (not currently used), this is the across-pools multiplier in activation of ACC neg valence, e.g., 1.1 increments subsequent units by 10% - ACCNegInc float32 `desc:"for case with multiple pools evaluated in parallel (not currently used), this is the across-pools multiplier in activation of ACC neg valence, e.g., 1.1 increments subsequent units by 10%"` + ACCNegInc float32 - // [view: -] number of units within each pool, Y - NUnitsY int `view:"-" desc:"number of units within each pool, Y"` + // number of units within each pool, Y + NUnitsY int `view:"-"` - // [view: -] number of units within each pool, X - NUnitsX int `view:"-" desc:"number of units within each pool, X"` + // number of units within each pool, X + NUnitsX int `view:"-"` - // [view: -] total number of units within each pool - NUnits int `view:"-" desc:"total number of units within each pool"` + // total number of units within each pool + NUnits int `view:"-"` // number of different values for PFC to learn in input layer -- gives PFC network something to do - InN int `desc:"number of different values for PFC to learn in input layer -- gives PFC network something to do"` + InN int // pop code the values in ACCPos and Neg - PopCode popcode.OneD `desc:"pop code the values in ACCPos and Neg"` + PopCode popcode.OneD - // [view: -] random number generator for the env -- all random calls must use this - Rand erand.SysRand `view:"-" desc:"random number generator for the env -- all random calls must use this"` + // random number generator for the env -- all random calls must use this + Rand erand.SysRand `view:"-"` // random seed - RndSeed int64 `inactive:"+" desc:"random seed"` + RndSeed int64 `inactive:"+"` // named states: ACCPos, ACCNeg - States map[string]*etensor.Float32 `desc:"named states: ACCPos, ACCNeg"` + States map[string]*etensor.Float32 // true if Pos - Neg > Thr - Should bool `inactive:"+" desc:"true if Pos - Neg > Thr"` + Should bool `inactive:"+"` // true if model gated on this trial - Gated bool `inactive:"+" desc:"true if model gated on this trial"` + Gated bool `inactive:"+"` // true if gated == should - Match bool `inactive:"+" desc:"true if gated == should"` + Match bool `inactive:"+"` // reward based on match between Should vs. Gated - Rew float32 `inactive:"+" desc:"reward based on match between Should vs. 
Gated"` + Rew float32 `inactive:"+"` // input counter -- gives PFC network something to do - InCtr int `inactive:"+" desc:"input counter -- gives PFC network something to do"` + InCtr int `inactive:"+"` } func (ev *GoNoEnv) Name() string { diff --git a/examples/pcore/params.go b/examples/pcore/params.go index c68028e9f..59137aba7 100644 --- a/examples/pcore/params.go +++ b/examples/pcore/params.go @@ -5,8 +5,8 @@ package main import ( - "github.com/emer/emergent/netparams" - "github.com/emer/emergent/params" + "github.com/emer/emergent/v2/netparams" + "github.com/emer/emergent/v2/params" ) // ParamSets is the default set of parameters -- Base is always applied, diff --git a/examples/pcore/pcore.go b/examples/pcore/pcore.go index 832c8df72..abd916ac1 100644 --- a/examples/pcore/pcore.go +++ b/examples/pcore/pcore.go @@ -13,28 +13,28 @@ import ( "strconv" "github.com/emer/axon/axon" - "github.com/emer/emergent/ecmd" - "github.com/emer/emergent/econfig" - "github.com/emer/emergent/egui" - "github.com/emer/emergent/elog" - "github.com/emer/emergent/emer" - "github.com/emer/emergent/env" - "github.com/emer/emergent/erand" - "github.com/emer/emergent/estats" - "github.com/emer/emergent/etime" - "github.com/emer/emergent/looper" - "github.com/emer/emergent/netview" - "github.com/emer/emergent/params" - "github.com/emer/emergent/prjn" - "github.com/emer/empi/mpi" - "github.com/emer/etable/agg" - "github.com/emer/etable/eplot" - "github.com/emer/etable/etable" - "github.com/emer/etable/split" - "github.com/goki/gi/gi" - "github.com/goki/gi/gimain" - "github.com/goki/ki/bools" - "github.com/goki/mat32" + "github.com/emer/emergent/v2/ecmd" + "github.com/emer/emergent/v2/econfig" + "github.com/emer/emergent/v2/egui" + "github.com/emer/emergent/v2/elog" + "github.com/emer/emergent/v2/emer" + "github.com/emer/emergent/v2/env" + "github.com/emer/emergent/v2/erand" + "github.com/emer/emergent/v2/estats" + "github.com/emer/emergent/v2/etime" + "github.com/emer/emergent/v2/looper" + "github.com/emer/emergent/v2/netview" + "github.com/emer/emergent/v2/params" + "github.com/emer/emergent/v2/prjn" + "github.com/emer/empi/v2/mpi" + "goki.dev/etable/v2/agg" + "goki.dev/etable/v2/eplot" + "goki.dev/etable/v2/etable" + "goki.dev/etable/v2/split" + "goki.dev/gi/v2/gi" + "goki.dev/gi/v2/gimain" + "goki.dev/glop/bools" + "goki.dev/mat32/v2" ) func main() { @@ -58,37 +58,37 @@ func main() { type Sim struct { // simulation configuration parameters -- set by .toml config file and / or args - Config Config `desc:"simulation configuration parameters -- set by .toml config file and / or args"` + Config Config - // [view: no-inline] the network -- click to view / edit parameters for layers, prjns, etc - Net *axon.Network `view:"no-inline" desc:"the network -- click to view / edit parameters for layers, prjns, etc"` + // the network -- click to view / edit parameters for layers, prjns, etc + Net *axon.Network `view:"no-inline"` - // [view: inline] all parameter management - Params emer.NetParams `view:"inline" desc:"all parameter management"` + // all parameter management + Params emer.NetParams `view:"inline"` - // [view: no-inline] contains looper control loops for running sim - Loops *looper.Manager `view:"no-inline" desc:"contains looper control loops for running sim"` + // contains looper control loops for running sim + Loops *looper.Manager `view:"no-inline"` // contains computed statistic values - Stats estats.Stats `desc:"contains computed statistic values"` + Stats estats.Stats // Contains all the logs and 
information about the logs.' - Logs elog.Logs `desc:"Contains all the logs and information about the logs.'"` + Logs elog.Logs - // [view: no-inline] Environments - Envs env.Envs `view:"no-inline" desc:"Environments"` + // Environments + Envs env.Envs `view:"no-inline"` // axon timing parameters and state - Context axon.Context `desc:"axon timing parameters and state"` + Context axon.Context - // [view: inline] netview update parameters - ViewUpdt netview.ViewUpdt `view:"inline" desc:"netview update parameters"` + // netview update parameters + ViewUpdt netview.ViewUpdt `view:"inline"` - // [view: -] manages all the gui elements - GUI egui.GUI `view:"-" desc:"manages all the gui elements"` + // manages all the gui elements + GUI egui.GUI `view:"-"` - // [view: -] a list of random seeds to use for each run - RndSeeds erand.Seeds `view:"-" desc:"a list of random seeds to use for each run"` + // a list of random seeds to use for each run + RndSeeds erand.Seeds `view:"-"` } // New creates new blank elements and initializes defaults diff --git a/examples/pcore/pcore_test.go b/examples/pcore/pcore_test.go index 607930802..401da2831 100644 --- a/examples/pcore/pcore_test.go +++ b/examples/pcore/pcore_test.go @@ -4,7 +4,7 @@ import ( "os" "testing" - "github.com/emer/etable/tsragg" + "goki.dev/etable/v2/tsragg" ) func TestPCore(t *testing.T) { diff --git a/examples/pvlv/effort_plot.go b/examples/pvlv/effort_plot.go index 0379e60b0..c2dbf3ff5 100644 --- a/examples/pvlv/effort_plot.go +++ b/examples/pvlv/effort_plot.go @@ -10,17 +10,17 @@ import ( "strconv" "github.com/emer/axon/axon" - "github.com/emer/emergent/erand" - "github.com/emer/etable/eplot" - "github.com/emer/etable/etable" - "github.com/emer/etable/etensor" - _ "github.com/emer/etable/etview" // include to get gui views - "github.com/emer/etable/minmax" - "github.com/goki/gi/gi" - "github.com/goki/gi/giv" - "github.com/goki/ki/bools" + "github.com/emer/emergent/v2/erand" "github.com/goki/ki/ki" - "github.com/goki/mat32" + "goki.dev/etable/v2/eplot" + "goki.dev/etable/v2/etable" + "goki.dev/etable/v2/etensor" + _ "goki.dev/etable/v2/etview" // include to get gui views + "goki.dev/etable/v2/minmax" + "goki.dev/gi/v2/gi" + "goki.dev/gi/v2/giv" + "goki.dev/glop/bools" + "goki.dev/mat32/v2" ) func DriveEffortGUI() { @@ -37,40 +37,40 @@ const LogPrec = 4 type DrEffPlot struct { // context just for plotting - Context axon.Context `desc:"context just for plotting"` + Context axon.Context // PVLV params - PVLV axon.PVLV `desc:"PVLV params"` + PVLV axon.PVLV // total number of time steps to simulate - TimeSteps int `desc:"total number of time steps to simulate"` + TimeSteps int // range for number of time steps between US receipt - USTime minmax.Int `desc:"range for number of time steps between US receipt"` + USTime minmax.Int // range for random effort per step - Effort minmax.F32 `desc:"range for random effort per step"` + Effort minmax.F32 - // [view: no-inline] table for plot - Table *etable.Table `view:"no-inline" desc:"table for plot"` + // table for plot + Table *etable.Table `view:"no-inline"` - // [view: -] the plot - Plot *eplot.Plot2D `view:"-" desc:"the plot"` + // the plot + Plot *eplot.Plot2D `view:"-"` - // [view: no-inline] table for plot - TimeTable *etable.Table `view:"no-inline" desc:"table for plot"` + // table for plot + TimeTable *etable.Table `view:"no-inline"` - // [view: -] the plot - TimePlot *eplot.Plot2D `view:"-" desc:"the plot"` + // the plot + TimePlot *eplot.Plot2D `view:"-"` - // [view: -] main GUI window - Win 
*gi.Window `view:"-" desc:"main GUI window"` + // main GUI window + Win *gi.Window `view:"-"` - // [view: -] the master toolbar - ToolBar *gi.ToolBar `view:"-" desc:"the master toolbar"` + // the master toolbar + ToolBar *gi.ToolBar `view:"-"` - // [view: -] random number generator - Rand erand.SysRand `view:"-" desc:"random number generator"` + // random number generator + Rand erand.SysRand `view:"-"` } // Config configures all the elements using the standard functions diff --git a/examples/pvlv/params.go b/examples/pvlv/params.go index b7ea3bf79..bcaf7740b 100644 --- a/examples/pvlv/params.go +++ b/examples/pvlv/params.go @@ -5,8 +5,8 @@ package main import ( - "github.com/emer/emergent/netparams" - "github.com/emer/emergent/params" + "github.com/emer/emergent/v2/netparams" + "github.com/emer/emergent/v2/params" ) // ParamSets is the default set of parameters -- Base is always applied, diff --git a/examples/pvlv/pvlv.go b/examples/pvlv/pvlv.go index e0510b9e0..2e7b3dda6 100644 --- a/examples/pvlv/pvlv.go +++ b/examples/pvlv/pvlv.go @@ -15,30 +15,30 @@ import ( "github.com/emer/axon/axon" "github.com/emer/axon/examples/pvlv/cond" - "github.com/emer/emergent/econfig" - "github.com/emer/emergent/egui" - "github.com/emer/emergent/elog" - "github.com/emer/emergent/emer" - "github.com/emer/emergent/env" - "github.com/emer/emergent/erand" - "github.com/emer/emergent/estats" - "github.com/emer/emergent/etime" - "github.com/emer/emergent/looper" - "github.com/emer/emergent/netview" - "github.com/emer/emergent/params" - "github.com/emer/emergent/prjn" - "github.com/emer/empi/mpi" - "github.com/emer/etable/agg" - "github.com/emer/etable/eplot" - "github.com/emer/etable/etable" - "github.com/emer/etable/etensor" - "github.com/emer/etable/minmax" - "github.com/emer/etable/split" - "github.com/goki/gi/gi" - "github.com/goki/gi/gimain" + "github.com/emer/emergent/v2/econfig" + "github.com/emer/emergent/v2/egui" + "github.com/emer/emergent/v2/elog" + "github.com/emer/emergent/v2/emer" + "github.com/emer/emergent/v2/env" + "github.com/emer/emergent/v2/erand" + "github.com/emer/emergent/v2/estats" + "github.com/emer/emergent/v2/etime" + "github.com/emer/emergent/v2/looper" + "github.com/emer/emergent/v2/netview" + "github.com/emer/emergent/v2/params" + "github.com/emer/emergent/v2/prjn" + "github.com/emer/empi/v2/mpi" "github.com/goki/ki/ki" "github.com/goki/ki/kit" - "github.com/goki/mat32" + "goki.dev/etable/v2/agg" + "goki.dev/etable/v2/eplot" + "goki.dev/etable/v2/etable" + "goki.dev/etable/v2/etensor" + "goki.dev/etable/v2/minmax" + "goki.dev/etable/v2/split" + "goki.dev/gi/v2/gi" + "goki.dev/gi/v2/gimain" + "goki.dev/mat32/v2" ) func main() { @@ -62,37 +62,37 @@ func main() { type Sim struct { // simulation configuration parameters -- set by .toml config file and / or args - Config Config `desc:"simulation configuration parameters -- set by .toml config file and / or args"` + Config Config - // [view: no-inline] the network -- click to view / edit parameters for layers, prjns, etc - Net *axon.Network `view:"no-inline" desc:"the network -- click to view / edit parameters for layers, prjns, etc"` + // the network -- click to view / edit parameters for layers, prjns, etc + Net *axon.Network `view:"no-inline"` - // [view: inline] all parameter management - Params emer.NetParams `view:"inline" desc:"all parameter management"` + // all parameter management + Params emer.NetParams `view:"inline"` - // [view: no-inline] contains looper control loops for running sim - Loops *looper.Manager `view:"no-inline" 
desc:"contains looper control loops for running sim"` + // contains looper control loops for running sim + Loops *looper.Manager `view:"no-inline"` // contains computed statistic values - Stats estats.Stats `desc:"contains computed statistic values"` + Stats estats.Stats // Contains all the logs and information about the logs.' - Logs elog.Logs `desc:"Contains all the logs and information about the logs.'"` + Logs elog.Logs - // [view: no-inline] Environments - Envs env.Envs `view:"no-inline" desc:"Environments"` + // Environments + Envs env.Envs `view:"no-inline"` // axon timing parameters and state - Context axon.Context `desc:"axon timing parameters and state"` + Context axon.Context - // [view: inline] netview update parameters - ViewUpdt netview.ViewUpdt `view:"inline" desc:"netview update parameters"` + // netview update parameters + ViewUpdt netview.ViewUpdt `view:"inline"` - // [view: -] manages all the gui elements - GUI egui.GUI `view:"-" desc:"manages all the gui elements"` + // manages all the gui elements + GUI egui.GUI `view:"-"` - // [view: -] a list of random seeds to use for each run - RndSeeds erand.Seeds `view:"-" desc:"a list of random seeds to use for each run"` + // a list of random seeds to use for each run + RndSeeds erand.Seeds `view:"-"` } // New creates new blank elements and initializes defaults diff --git a/examples/pvlv/pvlv_test.go b/examples/pvlv/pvlv_test.go index ef68e2858..5be21c1c8 100644 --- a/examples/pvlv/pvlv_test.go +++ b/examples/pvlv/pvlv_test.go @@ -5,7 +5,7 @@ import ( "os" "testing" - "github.com/emer/emergent/etime" + "github.com/emer/emergent/v2/etime" ) // basic pos acq, ext diff --git a/examples/ra25/params.go b/examples/ra25/params.go index 6d80d1043..ce366033a 100644 --- a/examples/ra25/params.go +++ b/examples/ra25/params.go @@ -5,8 +5,8 @@ package main import ( - "github.com/emer/emergent/netparams" - "github.com/emer/emergent/params" + "github.com/emer/emergent/v2/netparams" + "github.com/emer/emergent/v2/params" ) // ParamSets sets the minimal non-default params diff --git a/examples/ra25/ra25.go b/examples/ra25/ra25.go index 94f4eee8e..d65d505d6 100644 --- a/examples/ra25/ra25.go +++ b/examples/ra25/ra25.go @@ -13,25 +13,25 @@ import ( "os" "github.com/emer/axon/axon" - "github.com/emer/emergent/econfig" - "github.com/emer/emergent/egui" - "github.com/emer/emergent/elog" - "github.com/emer/emergent/emer" - "github.com/emer/emergent/env" - "github.com/emer/emergent/erand" - "github.com/emer/emergent/estats" - "github.com/emer/emergent/etime" - "github.com/emer/emergent/evec" - "github.com/emer/emergent/looper" - "github.com/emer/emergent/netview" - "github.com/emer/emergent/patgen" - "github.com/emer/emergent/prjn" - "github.com/emer/empi/mpi" - "github.com/emer/etable/etable" - "github.com/emer/etable/etensor" - "github.com/goki/gi/gi" - "github.com/goki/gi/gimain" - "github.com/goki/mat32" + "github.com/emer/emergent/v2/econfig" + "github.com/emer/emergent/v2/egui" + "github.com/emer/emergent/v2/elog" + "github.com/emer/emergent/v2/emer" + "github.com/emer/emergent/v2/env" + "github.com/emer/emergent/v2/erand" + "github.com/emer/emergent/v2/estats" + "github.com/emer/emergent/v2/etime" + "github.com/emer/emergent/v2/evec" + "github.com/emer/emergent/v2/looper" + "github.com/emer/emergent/v2/netview" + "github.com/emer/emergent/v2/patgen" + "github.com/emer/emergent/v2/prjn" + "github.com/emer/empi/v2/mpi" + "goki.dev/etable/v2/etable" + "goki.dev/etable/v2/etensor" + "goki.dev/gi/v2/gi" + "goki.dev/gi/v2/gimain" + 
"goki.dev/mat32/v2" ) func main() { @@ -39,7 +39,7 @@ func main() { sim.New() sim.ConfigAll() if sim.Config.GUI { - gimain.Main(sim.RunGUI) + gimain.Run(sim.RunGUI) } else { sim.RunNoGUI() } @@ -51,115 +51,115 @@ func main() { type ParamConfig struct { // network parameters - Network map[string]any `desc:"network parameters"` + Network map[string]any - // [def: {'X':10,'Y':10}] size of hidden layer -- can use emer.LaySize for 4D layers - Hidden1Size evec.Vec2i `def:"{'X':10,'Y':10}" nest:"+" desc:"size of hidden layer -- can use emer.LaySize for 4D layers"` + // size of hidden layer -- can use emer.LaySize for 4D layers + Hidden1Size evec.Vec2i `def:"{'X':10,'Y':10}" nest:"+"` - // [def: {'X':10,'Y':10}] size of hidden layer -- can use emer.LaySize for 4D layers - Hidden2Size evec.Vec2i `def:"{'X':10,'Y':10}" nest:"+" desc:"size of hidden layer -- can use emer.LaySize for 4D layers"` + // size of hidden layer -- can use emer.LaySize for 4D layers + Hidden2Size evec.Vec2i `def:"{'X':10,'Y':10}" nest:"+"` // Extra Param Sheet name(s) to use (space separated if multiple) -- must be valid name as listed in compiled-in params or loaded params - Sheet string `desc:"Extra Param Sheet name(s) to use (space separated if multiple) -- must be valid name as listed in compiled-in params or loaded params"` + Sheet string // extra tag to add to file names and logs saved from this run - Tag string `desc:"extra tag to add to file names and logs saved from this run"` + Tag string // user note -- describe the run params etc -- like a git commit message for the run - Note string `desc:"user note -- describe the run params etc -- like a git commit message for the run"` + Note string // Name of the JSON file to input saved parameters from. - File string `nest:"+" desc:"Name of the JSON file to input saved parameters from."` + File string `nest:"+"` // Save a snapshot of all current param and config settings in a directory named params_ (or _good if Good is true), then quit -- useful for comparing to later changes and seeing multiple views of current params - SaveAll bool `nest:"+" desc:"Save a snapshot of all current param and config settings in a directory named params_ (or _good if Good is true), then quit -- useful for comparing to later changes and seeing multiple views of current params"` + SaveAll bool `nest:"+"` // for SaveAll, save to params_good for a known good params state. This can be done prior to making a new release after all tests are passing -- add results to git to provide a full diff record of all params over time. - Good bool `nest:"+" desc:"for SaveAll, save to params_good for a known good params state. This can be done prior to making a new release after all tests are passing -- add results to git to provide a full diff record of all params over time."` + Good bool `nest:"+"` } // RunConfig has config parameters related to running the sim type RunConfig struct { - // [def: true] use the GPU for computation -- generally faster even for small models if NData ~16 - GPU bool `def:"true" desc:"use the GPU for computation -- generally faster even for small models if NData ~16"` + // use the GPU for computation -- generally faster even for small models if NData ~16 + GPU bool `def:"true"` - // [def: 16] [min: 1] number of data-parallel items to process in parallel per trial -- works (and is significantly faster) for both CPU and GPU. Results in an effective mini-batch of learning. 
- NData int `def:"16" min:"1" desc:"number of data-parallel items to process in parallel per trial -- works (and is significantly faster) for both CPU and GPU. Results in an effective mini-batch of learning."` + // number of data-parallel items to process in parallel per trial -- works (and is significantly faster) for both CPU and GPU. Results in an effective mini-batch of learning. + NData int `def:"16" min:"1"` - // [def: 0] number of parallel threads for CPU computation -- 0 = use default - NThreads int `def:"0" desc:"number of parallel threads for CPU computation -- 0 = use default"` + // number of parallel threads for CPU computation -- 0 = use default + NThreads int `def:"0"` - // [def: 0] starting run number -- determines the random seed -- runs counts from there -- can do all runs in parallel by launching separate jobs with each run, runs = 1 - Run int `def:"0" desc:"starting run number -- determines the random seed -- runs counts from there -- can do all runs in parallel by launching separate jobs with each run, runs = 1"` + // starting run number -- determines the random seed -- runs counts from there -- can do all runs in parallel by launching separate jobs with each run, runs = 1 + Run int `def:"0"` - // [def: 5] [min: 1] total number of runs to do when running Train - NRuns int `def:"5" min:"1" desc:"total number of runs to do when running Train"` + // total number of runs to do when running Train + NRuns int `def:"5" min:"1"` - // [def: 100] total number of epochs per run - NEpochs int `def:"100" desc:"total number of epochs per run"` + // total number of epochs per run + NEpochs int `def:"100"` - // [def: 2] stop run after this number of perfect, zero-error epochs - NZero int `def:"2" desc:"stop run after this number of perfect, zero-error epochs"` + // stop run after this number of perfect, zero-error epochs + NZero int `def:"2"` - // [def: 32] total number of trials per epoch. Should be an even multiple of NData. - NTrials int `def:"32" desc:"total number of trials per epoch. Should be an even multiple of NData."` + // total number of trials per epoch. Should be an even multiple of NData. + NTrials int `def:"32"` - // [def: 5] how often to run through all the test patterns, in terms of training epochs -- can use 0 or -1 for no testing - TestInterval int `def:"5" desc:"how often to run through all the test patterns, in terms of training epochs -- can use 0 or -1 for no testing"` + // how often to run through all the test patterns, in terms of training epochs -- can use 0 or -1 for no testing + TestInterval int `def:"5"` - // [def: 5] how frequently (in epochs) to compute PCA on hidden representations to measure variance? - PCAInterval int `def:"5" desc:"how frequently (in epochs) to compute PCA on hidden representations to measure variance?"` + // how frequently (in epochs) to compute PCA on hidden representations to measure variance? 
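The NTrials doc comment above ("should be an even multiple of NData") states a real invariant of the data-parallel loop: each trial pass consumes NData items at once. A minimal sketch of a check for it, assuming the RunConfig struct defined above (ValidateRun is a hypothetical helper, not part of this patch):

    import "fmt"

    // ValidateRun checks the data-parallel invariant documented on the
    // RunConfig fields: NData >= 1, and NTrials an even multiple of NData.
    func (cfg *RunConfig) ValidateRun() error {
        if cfg.NData < 1 {
            return fmt.Errorf("NData must be at least 1, got %d", cfg.NData)
        }
        if cfg.NTrials%cfg.NData != 0 {
            return fmt.Errorf("NTrials (%d) must be an even multiple of NData (%d)",
                cfg.NTrials, cfg.NData)
        }
        return nil
    }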
+ PCAInterval int `def:"5"` // if non-empty, is the name of weights file to load at start of first run -- for testing - StartWts string `desc:"if non-empty, is the name of weights file to load at start of first run -- for testing"` + StartWts string } // LogConfig has config parameters related to logging data type LogConfig struct { // if true, save final weights after each run - SaveWts bool `desc:"if true, save final weights after each run"` + SaveWts bool - // [def: true] if true, save train epoch log to file, as .epc.tsv typically - Epoch bool `def:"true" nest:"+" desc:"if true, save train epoch log to file, as .epc.tsv typically"` + // if true, save train epoch log to file, as .epc.tsv typically + Epoch bool `def:"true" nest:"+"` - // [def: true] if true, save run log to file, as .run.tsv typically - Run bool `def:"true" nest:"+" desc:"if true, save run log to file, as .run.tsv typically"` + // if true, save run log to file, as .run.tsv typically + Run bool `def:"true" nest:"+"` - // [def: false] if true, save train trial log to file, as .trl.tsv typically. May be large. - Trial bool `def:"false" nest:"+" desc:"if true, save train trial log to file, as .trl.tsv typically. May be large."` + // if true, save train trial log to file, as .trl.tsv typically. May be large. + Trial bool `def:"false" nest:"+"` - // [def: false] if true, save testing epoch log to file, as .tst_epc.tsv typically. In general it is better to copy testing items over to the training epoch log and record there. - TestEpoch bool `def:"false" nest:"+" desc:"if true, save testing epoch log to file, as .tst_epc.tsv typically. In general it is better to copy testing items over to the training epoch log and record there."` + // if true, save testing epoch log to file, as .tst_epc.tsv typically. In general it is better to copy testing items over to the training epoch log and record there. + TestEpoch bool `def:"false" nest:"+"` - // [def: false] if true, save testing trial log to file, as .tst_trl.tsv typically. May be large. - TestTrial bool `def:"false" nest:"+" desc:"if true, save testing trial log to file, as .tst_trl.tsv typically. May be large."` + // if true, save testing trial log to file, as .tst_trl.tsv typically. May be large. + TestTrial bool `def:"false" nest:"+"` // if true, save network activation etc data from testing trials, for later viewing in netview - NetData bool `desc:"if true, save network activation etc data from testing trials, for later viewing in netview"` + NetData bool } // Config is a standard Sim config -- use as a starting point. 
type Config struct { // specify include files here, and after configuration, it contains list of include files added - Includes []string `desc:"specify include files here, and after configuration, it contains list of include files added"` + Includes []string - // [def: true] open the GUI -- does not automatically run -- if false, then runs automatically and quits - GUI bool `def:"true" desc:"open the GUI -- does not automatically run -- if false, then runs automatically and quits"` + // open the GUI -- does not automatically run -- if false, then runs automatically and quits + GUI bool `def:"true"` // log debugging information - Debug bool `desc:"log debugging information"` + Debug bool - // [view: add-fields] parameter related configuration options - Params ParamConfig `view:"add-fields" desc:"parameter related configuration options"` + // parameter related configuration options + Params ParamConfig `view:"add-fields"` - // [view: add-fields] sim running related configuration options - Run RunConfig `view:"add-fields" desc:"sim running related configuration options"` + // sim running related configuration options + Run RunConfig `view:"add-fields"` - // [view: add-fields] data logging related configuration options - Log LogConfig `view:"add-fields" desc:"data logging related configuration options"` + // data logging related configuration options + Log LogConfig `view:"add-fields"` } func (cfg *Config) IncludesPtr() *[]string { return &cfg.Includes } @@ -172,40 +172,40 @@ func (cfg *Config) IncludesPtr() *[]string { return &cfg.Includes } type Sim struct { // simulation configuration parameters -- set by .toml config file and / or args - Config Config `desc:"simulation configuration parameters -- set by .toml config file and / or args"` + Config Config - // [view: no-inline] the network -- click to view / edit parameters for layers, prjns, etc - Net *axon.Network `view:"no-inline" desc:"the network -- click to view / edit parameters for layers, prjns, etc"` + // the network -- click to view / edit parameters for layers, prjns, etc + Net *axon.Network `view:"no-inline"` - // [view: inline] network parameter management - Params emer.NetParams `view:"inline" desc:"network parameter management"` + // network parameter management + Params emer.NetParams `view:"inline"` - // [view: no-inline] contains looper control loops for running sim - Loops *looper.Manager `view:"no-inline" desc:"contains looper control loops for running sim"` + // contains looper control loops for running sim + Loops *looper.Manager `view:"no-inline"` // contains computed statistic values - Stats estats.Stats `desc:"contains computed statistic values"` + Stats estats.Stats // Contains all the logs and information about the logs.' 
- Logs elog.Logs `desc:"Contains all the logs and information about the logs.'"` + Logs elog.Logs - // [view: no-inline] the training patterns to use - Pats *etable.Table `view:"no-inline" desc:"the training patterns to use"` + // the training patterns to use + Pats *etable.Table `view:"no-inline"` - // [view: no-inline] Environments - Envs env.Envs `view:"no-inline" desc:"Environments"` + // Environments + Envs env.Envs `view:"no-inline"` // axon timing parameters and state - Context axon.Context `desc:"axon timing parameters and state"` + Context axon.Context - // [view: inline] netview update parameters - ViewUpdt netview.ViewUpdt `view:"inline" desc:"netview update parameters"` + // netview update parameters + ViewUpdt netview.ViewUpdt `view:"inline"` - // [view: -] manages all the gui elements - GUI egui.GUI `view:"-" desc:"manages all the gui elements"` + // manages all the gui elements + GUI egui.GUI `view:"-"` - // [view: -] a list of random seeds to use for each run - RndSeeds erand.Seeds `view:"-" desc:"a list of random seeds to use for each run"` + // a list of random seeds to use for each run + RndSeeds erand.Seeds `view:"-"` } // New creates new blank elements and initializes defaults @@ -650,9 +650,9 @@ func (ss *Sim) Log(mode etime.Modes, time etime.Times) { // Gui // ConfigGui configures the GoGi gui interface for this simulation, -func (ss *Sim) ConfigGui() *gi.Window { +func (ss *Sim) ConfigGui() { title := "Axon Random Associator" - ss.GUI.MakeWindow(ss, "ra25", title, `This demonstrates a basic Axon model. See emergent on GitHub.
`) + ss.GUI.MakeBody(ss, "ra25", title, `This demonstrates a basic Axon model. See emergent on GitHub.
`) ss.GUI.CycleUpdateInterval = 10 nv := ss.GUI.AddNetView("NetView") @@ -678,7 +678,7 @@ func (ss *Sim) ConfigGui() *gi.Window { ss.GUI.AddLooperCtrl(ss.Loops, []etime.Modes{etime.Train, etime.Test}) //////////////////////////////////////////////// - ss.GUI.ToolBar.AddSeparator("log") + gi.NewSeparator(ss.GUI.Toolbar) ss.GUI.AddToolbarItem(egui.ToolbarItem{Label: "Reset RunLog", Icon: "reset", Tooltip: "Reset the accumulated log of all Runs, which are tagged with the ParamSet used", @@ -689,7 +689,7 @@ func (ss *Sim) ConfigGui() *gi.Window { }, }) //////////////////////////////////////////////// - ss.GUI.ToolBar.AddSeparator("misc") + gi.NewSeparator(ss.GUI.Toolbar) ss.GUI.AddToolbarItem(egui.ToolbarItem{Label: "New Seed", Icon: "new", Tooltip: "Generate a new initial random seed to get different results. By default, Init re-establishes the same initial seed every time.", @@ -714,13 +714,12 @@ func (ss *Sim) ConfigGui() *gi.Window { ss.Net.GPU.Destroy() }) } - return ss.GUI.Win } func (ss *Sim) RunGUI() { ss.Init() - win := ss.ConfigGui() - win.StartEventLoop() + ss.ConfigGui() + ss.GUI.Body.NewWindow().Run() } func (ss *Sim) RunNoGUI() { diff --git a/examples/ra25/weights_test.go b/examples/ra25/weights_test.go index e99292b14..774486dd3 100644 --- a/examples/ra25/weights_test.go +++ b/examples/ra25/weights_test.go @@ -7,8 +7,8 @@ import ( "os" "testing" - "github.com/emer/emergent/etime" - "github.com/goki/gi/gi" + "github.com/emer/emergent/v2/etime" + "goki.dev/gi/v2/gi" ) func TestWeightsSave(t *testing.T) { diff --git a/examples/ra25x/config.go b/examples/ra25x/config.go index 6b777b113..a9f774a4f 100644 --- a/examples/ra25x/config.go +++ b/examples/ra25x/config.go @@ -4,121 +4,121 @@ package main -import "github.com/emer/emergent/evec" +import "github.com/emer/emergent/v2/evec" // ParamConfig has config parameters related to sim params type ParamConfig struct { // network parameters - Network map[string]any `desc:"network parameters"` + Network map[string]any - // [def: {'X':10,'Y':10}] size of hidden layer -- can use emer.LaySize for 4D layers - Hidden1Size evec.Vec2i `def:"{'X':10,'Y':10}" nest:"+" desc:"size of hidden layer -- can use emer.LaySize for 4D layers"` + // size of hidden layer -- can use emer.LaySize for 4D layers + Hidden1Size evec.Vec2i `def:"{'X':10,'Y':10}" nest:"+"` - // [def: {'X':10,'Y':10}] size of hidden layer -- can use emer.LaySize for 4D layers - Hidden2Size evec.Vec2i `def:"{'X':10,'Y':10}" nest:"+" desc:"size of hidden layer -- can use emer.LaySize for 4D layers"` + // size of hidden layer -- can use emer.LaySize for 4D layers + Hidden2Size evec.Vec2i `def:"{'X':10,'Y':10}" nest:"+"` // Extra Param Sheet name(s) to use (space separated if multiple) -- must be valid name as listed in compiled-in params or loaded params - Sheet string `desc:"Extra Param Sheet name(s) to use (space separated if multiple) -- must be valid name as listed in compiled-in params or loaded params"` + Sheet string // extra tag to add to file names and logs saved from this run - Tag string `desc:"extra tag to add to file names and logs saved from this run"` + Tag string // user note -- describe the run params etc -- like a git commit message for the run - Note string `desc:"user note -- describe the run params etc -- like a git commit message for the run"` + Note string // Name of the JSON file to input saved parameters from. 
- File string `nest:"+" desc:"Name of the JSON file to input saved parameters from."` + File string `nest:"+"` // Save a snapshot of all current param and config settings in a directory named params_ (or _good if Good is true), then quit -- useful for comparing to later changes and seeing multiple views of current params - SaveAll bool `nest:"+" desc:"Save a snapshot of all current param and config settings in a directory named params_ (or _good if Good is true), then quit -- useful for comparing to later changes and seeing multiple views of current params"` + SaveAll bool `nest:"+"` // for SaveAll, save to params_good for a known good params state. This can be done prior to making a new release after all tests are passing -- add results to git to provide a full diff record of all params over time. - Good bool `nest:"+" desc:"for SaveAll, save to params_good for a known good params state. This can be done prior to making a new release after all tests are passing -- add results to git to provide a full diff record of all params over time."` + Good bool `nest:"+"` } // RunConfig has config parameters related to running the sim type RunConfig struct { - // [def: true] use the GPU for computation -- generally faster even for small models if NData ~16 - GPU bool `def:"true" desc:"use the GPU for computation -- generally faster even for small models if NData ~16"` + // use the GPU for computation -- generally faster even for small models if NData ~16 + GPU bool `def:"true"` - // [def: 16] [min: 1] number of data-parallel items to process in parallel per trial -- works (and is significantly faster) for both CPU and GPU. Results in an effective mini-batch of learning. - NData int `def:"16" min:"1" desc:"number of data-parallel items to process in parallel per trial -- works (and is significantly faster) for both CPU and GPU. Results in an effective mini-batch of learning."` + // number of data-parallel items to process in parallel per trial -- works (and is significantly faster) for both CPU and GPU. Results in an effective mini-batch of learning. 
+ NData int `def:"16" min:"1"` - // [def: 0] number of parallel threads for CPU computation -- 0 = use default - NThreads int `def:"0" desc:"number of parallel threads for CPU computation -- 0 = use default"` + // number of parallel threads for CPU computation -- 0 = use default + NThreads int `def:"0"` - // [def: 0] starting run number -- determines the random seed -- runs counts from there -- can do all runs in parallel by launching separate jobs with each run, runs = 1 - Run int `def:"0" desc:"starting run number -- determines the random seed -- runs counts from there -- can do all runs in parallel by launching separate jobs with each run, runs = 1"` + // starting run number -- determines the random seed -- runs counts from there -- can do all runs in parallel by launching separate jobs with each run, runs = 1 + Run int `def:"0"` - // [def: 5] [min: 1] total number of runs to do when running Train - NRuns int `def:"5" min:"1" desc:"total number of runs to do when running Train"` + // total number of runs to do when running Train + NRuns int `def:"5" min:"1"` - // [def: 1000] total number of epochs per run - NEpochs int `def:"1000" desc:"total number of epochs per run"` + // total number of epochs per run + NEpochs int `def:"1000"` - // [def: 2] stop run after this number of perfect, zero-error epochs - NZero int `def:"2" desc:"stop run after this number of perfect, zero-error epochs"` + // stop run after this number of perfect, zero-error epochs + NZero int `def:"2"` - // [def: 32] total number of trials per epoch. Should be an even multiple of NData. - NTrials int `def:"32" desc:"total number of trials per epoch. Should be an even multiple of NData."` + // total number of trials per epoch. Should be an even multiple of NData. + NTrials int `def:"32"` - // [def: 5] how often to run through all the test patterns, in terms of training epochs -- can use 0 or -1 for no testing - TestInterval int `def:"5" desc:"how often to run through all the test patterns, in terms of training epochs -- can use 0 or -1 for no testing"` + // how often to run through all the test patterns, in terms of training epochs -- can use 0 or -1 for no testing + TestInterval int `def:"5"` - // [def: 5] how frequently (in epochs) to compute PCA on hidden representations to measure variance? - PCAInterval int `def:"5" desc:"how frequently (in epochs) to compute PCA on hidden representations to measure variance?"` + // how frequently (in epochs) to compute PCA on hidden representations to measure variance? 
+ PCAInterval int `def:"5"` // if non-empty, is the name of weights file to load at start of first run -- for testing - StartWts string `desc:"if non-empty, is the name of weights file to load at start of first run -- for testing"` + StartWts string } // LogConfig has config parameters related to logging data type LogConfig struct { // if true, save final weights after each run - SaveWts bool `desc:"if true, save final weights after each run"` + SaveWts bool - // [def: true] if true, save train epoch log to file, as .epc.tsv typically - Epoch bool `def:"true" nest:"+" desc:"if true, save train epoch log to file, as .epc.tsv typically"` + // if true, save train epoch log to file, as .epc.tsv typically + Epoch bool `def:"true" nest:"+"` - // [def: true] if true, save run log to file, as .run.tsv typically - Run bool `def:"true" nest:"+" desc:"if true, save run log to file, as .run.tsv typically"` + // if true, save run log to file, as .run.tsv typically + Run bool `def:"true" nest:"+"` - // [def: false] if true, save train trial log to file, as .trl.tsv typically. May be large. - Trial bool `def:"false" nest:"+" desc:"if true, save train trial log to file, as .trl.tsv typically. May be large."` + // if true, save train trial log to file, as .trl.tsv typically. May be large. + Trial bool `def:"false" nest:"+"` - // [def: false] if true, save testing epoch log to file, as .tst_epc.tsv typically. In general it is better to copy testing items over to the training epoch log and record there. - TestEpoch bool `def:"false" nest:"+" desc:"if true, save testing epoch log to file, as .tst_epc.tsv typically. In general it is better to copy testing items over to the training epoch log and record there."` + // if true, save testing epoch log to file, as .tst_epc.tsv typically. In general it is better to copy testing items over to the training epoch log and record there. + TestEpoch bool `def:"false" nest:"+"` - // [def: false] if true, save testing trial log to file, as .tst_trl.tsv typically. May be large. - TestTrial bool `def:"false" nest:"+" desc:"if true, save testing trial log to file, as .tst_trl.tsv typically. May be large."` + // if true, save testing trial log to file, as .tst_trl.tsv typically. May be large. + TestTrial bool `def:"false" nest:"+"` // if true, save network activation etc data from testing trials, for later viewing in netview - NetData bool `desc:"if true, save network activation etc data from testing trials, for later viewing in netview"` + NetData bool } // Config is a standard Sim config -- use as a starting point. 
type Config struct { // specify include files here, and after configuration, it contains list of include files added - Includes []string `desc:"specify include files here, and after configuration, it contains list of include files added"` + Includes []string - // [def: true] open the GUI -- does not automatically run -- if false, then runs automatically and quits - GUI bool `def:"true" desc:"open the GUI -- does not automatically run -- if false, then runs automatically and quits"` + // open the GUI -- does not automatically run -- if false, then runs automatically and quits + GUI bool `def:"true"` // log debugging information - Debug bool `desc:"log debugging information"` + Debug bool - // [view: add-fields] parameter related configuration options - Params ParamConfig `view:"add-fields" desc:"parameter related configuration options"` + // parameter related configuration options + Params ParamConfig `view:"add-fields"` - // [view: add-fields] sim running related configuration options - Run RunConfig `view:"add-fields" desc:"sim running related configuration options"` + // sim running related configuration options + Run RunConfig `view:"add-fields"` - // [view: add-fields] data logging related configuration options - Log LogConfig `view:"add-fields" desc:"data logging related configuration options"` + // data logging related configuration options + Log LogConfig `view:"add-fields"` } func (cfg *Config) IncludesPtr() *[]string { return &cfg.Includes } diff --git a/examples/ra25x/params.go b/examples/ra25x/params.go index f45cf278b..704c994c1 100644 --- a/examples/ra25x/params.go +++ b/examples/ra25x/params.go @@ -5,8 +5,8 @@ package main import ( - "github.com/emer/emergent/netparams" - "github.com/emer/emergent/params" + "github.com/emer/emergent/v2/netparams" + "github.com/emer/emergent/v2/params" ) // ParamSets sets the minimal non-default params diff --git a/examples/ra25x/ra25x.go b/examples/ra25x/ra25x.go index 15eceddde..83cf59b15 100644 --- a/examples/ra25x/ra25x.go +++ b/examples/ra25x/ra25x.go @@ -13,27 +13,27 @@ import ( "os" "github.com/emer/axon/axon" - "github.com/emer/emergent/econfig" - "github.com/emer/emergent/egui" - "github.com/emer/emergent/elog" - "github.com/emer/emergent/emer" - "github.com/emer/emergent/env" - "github.com/emer/emergent/erand" - "github.com/emer/emergent/estats" - "github.com/emer/emergent/etime" - "github.com/emer/emergent/looper" - "github.com/emer/emergent/netview" - "github.com/emer/emergent/patgen" - "github.com/emer/emergent/prjn" - "github.com/emer/empi/mpi" - "github.com/emer/etable/agg" - "github.com/emer/etable/etable" - "github.com/emer/etable/etensor" - "github.com/emer/etable/minmax" - "github.com/emer/etable/tsragg" - "github.com/goki/gi/gi" - "github.com/goki/gi/gimain" - "github.com/goki/mat32" + "github.com/emer/emergent/v2/econfig" + "github.com/emer/emergent/v2/egui" + "github.com/emer/emergent/v2/elog" + "github.com/emer/emergent/v2/emer" + "github.com/emer/emergent/v2/env" + "github.com/emer/emergent/v2/erand" + "github.com/emer/emergent/v2/estats" + "github.com/emer/emergent/v2/etime" + "github.com/emer/emergent/v2/looper" + "github.com/emer/emergent/v2/netview" + "github.com/emer/emergent/v2/patgen" + "github.com/emer/emergent/v2/prjn" + "github.com/emer/empi/v2/mpi" + "goki.dev/etable/v2/agg" + "goki.dev/etable/v2/etable" + "goki.dev/etable/v2/etensor" + "goki.dev/etable/v2/minmax" + "goki.dev/etable/v2/tsragg" + "goki.dev/gi/v2/gi" + "goki.dev/gi/v2/gimain" + "goki.dev/mat32/v2" ) func main() { @@ -57,40 +57,40 @@ func main() 
{ type Sim struct { // simulation configuration parameters -- set by .toml config file and / or args - Config Config `desc:"simulation configuration parameters -- set by .toml config file and / or args"` + Config Config - // [view: no-inline] the network -- click to view / edit parameters for layers, prjns, etc - Net *axon.Network `view:"no-inline" desc:"the network -- click to view / edit parameters for layers, prjns, etc"` + // the network -- click to view / edit parameters for layers, prjns, etc + Net *axon.Network `view:"no-inline"` - // [view: inline] network parameter management - Params emer.NetParams `view:"inline" desc:"network parameter management"` + // network parameter management + Params emer.NetParams `view:"inline"` - // [view: no-inline] contains looper control loops for running sim - Loops *looper.Manager `view:"no-inline" desc:"contains looper control loops for running sim"` + // contains looper control loops for running sim + Loops *looper.Manager `view:"no-inline"` // contains computed statistic values - Stats estats.Stats `desc:"contains computed statistic values"` + Stats estats.Stats // Contains all the logs and information about the logs.' - Logs elog.Logs `desc:"Contains all the logs and information about the logs.'"` + Logs elog.Logs - // [view: no-inline] the training patterns to use - Pats *etable.Table `view:"no-inline" desc:"the training patterns to use"` + // the training patterns to use + Pats *etable.Table `view:"no-inline"` - // [view: no-inline] Environments - Envs env.Envs `view:"no-inline" desc:"Environments"` + // Environments + Envs env.Envs `view:"no-inline"` // axon timing parameters and state - Context axon.Context `desc:"axon timing parameters and state"` + Context axon.Context - // [view: inline] netview update parameters - ViewUpdt netview.ViewUpdt `view:"inline" desc:"netview update parameters"` + // netview update parameters + ViewUpdt netview.ViewUpdt `view:"inline"` - // [view: -] manages all the gui elements - GUI egui.GUI `view:"-" desc:"manages all the gui elements"` + // manages all the gui elements + GUI egui.GUI `view:"-"` - // [view: -] a list of random seeds to use for each run - RndSeeds erand.Seeds `view:"-" desc:"a list of random seeds to use for each run"` + // a list of random seeds to use for each run + RndSeeds erand.Seeds `view:"-"` } // New creates new blank elements and initializes defaults diff --git a/examples/rl/cond_env.go b/examples/rl/cond_env.go index 9ce91e6e6..5a22eb2b6 100644 --- a/examples/rl/cond_env.go +++ b/examples/rl/cond_env.go @@ -8,37 +8,37 @@ import ( "fmt" "math/rand" - "github.com/emer/emergent/env" - "github.com/emer/emergent/erand" - "github.com/emer/etable/etensor" + "github.com/emer/emergent/v2/env" + "github.com/emer/emergent/v2/erand" + "goki.dev/etable/v2/etensor" ) // OnOff represents stimulus On / Off timing type OnOff struct { // is this stimulus active -- use it? 
- Act bool `desc:"is this stimulus active -- use it?"` + Act bool // when stimulus turns on - On int `desc:"when stimulus turns on"` + On int // when stimulu turns off - Off int `desc:"when stimulu turns off"` + Off int // probability of being active on any given trial - P float32 `desc:"probability of being active on any given trial"` + P float32 // variability in onset timing (max number of trials before/after On that it could start) - OnVar int `desc:"variability in onset timing (max number of trials before/after On that it could start)"` + OnVar int // variability in offset timing (max number of trials before/after Off that it could end) - OffVar int `desc:"variability in offset timing (max number of trials before/after Off that it could end)"` + OffVar int - // [view: -] current active status based on P probability - CurAct bool `view:"-" desc:"current active status based on P probability"` + // current active status based on P probability + CurAct bool `view:"-"` - // [view: -] current on / off values using Var variability - CurOn, CurOff int `view:"-" desc:"current on / off values using Var variability"` + // current on / off values using Var variability + CurOn, CurOff int `view:"-"` } func (oo *OnOff) Set(act bool, on, off int) { @@ -68,52 +68,52 @@ func (oo *OnOff) IsOn(tm int) bool { type CondEnv struct { // name of this environment - Nm string `desc:"name of this environment"` + Nm string // description of this environment - Dsc string `desc:"description of this environment"` + Dsc string // total time for trial - TotTime int `desc:"total time for trial"` + TotTime int - // [view: inline] Conditioned stimulus A (e.g., Tone) - CSA OnOff `view:"inline" desc:"Conditioned stimulus A (e.g., Tone)"` + // Conditioned stimulus A (e.g., Tone) + CSA OnOff `view:"inline"` - // [view: inline] Conditioned stimulus B (e.g., Light) - CSB OnOff `view:"inline" desc:"Conditioned stimulus B (e.g., Light)"` + // Conditioned stimulus B (e.g., Light) + CSB OnOff `view:"inline"` - // [view: inline] Conditioned stimulus C - CSC OnOff `view:"inline" desc:"Conditioned stimulus C"` + // Conditioned stimulus C + CSC OnOff `view:"inline"` - // [view: inline] Unconditioned stimulus -- reward - US OnOff `view:"inline" desc:"Unconditioned stimulus -- reward"` + // Unconditioned stimulus -- reward + US OnOff `view:"inline"` // value for reward - RewVal float32 `desc:"value for reward"` + RewVal float32 // value for non-reward - NoRewVal float32 `desc:"value for non-reward"` + NoRewVal float32 // one-hot input representation of current option - Input etensor.Float64 `desc:"one-hot input representation of current option"` + Input etensor.Float64 // single reward value - Reward etensor.Float64 `desc:"single reward value"` + Reward etensor.Float64 // true if a US reward value was set - HasRew bool `desc:"true if a US reward value was set"` + HasRew bool - // [view: inline] current run of model as provided during Init - Run env.Ctr `view:"inline" desc:"current run of model as provided during Init"` + // current run of model as provided during Init + Run env.Ctr `view:"inline"` - // [view: inline] number of times through Seq.Max number of sequences - Epoch env.Ctr `view:"inline" desc:"number of times through Seq.Max number of sequences"` + // number of times through Seq.Max number of sequences + Epoch env.Ctr `view:"inline"` - // [view: inline] one trial is a pass through all TotTime Events - Trial env.Ctr `view:"inline" desc:"one trial is a pass through all TotTime Events"` + // one trial is a pass through all 
TotTime Events + Trial env.Ctr `view:"inline"` - // [view: inline] event is one time step within Trial -- e.g., CS turning on, etc - Event env.Ctr `view:"inline" desc:"event is one time step within Trial -- e.g., CS turning on, etc"` + // event is one time step within Trial -- e.g., CS turning on, etc + Event env.Ctr `view:"inline"` } func (ev *CondEnv) Name() string { return ev.Nm } diff --git a/examples/rl/params.go b/examples/rl/params.go index f5c1a58a6..930511125 100644 --- a/examples/rl/params.go +++ b/examples/rl/params.go @@ -5,8 +5,8 @@ package main import ( - "github.com/emer/emergent/netparams" - "github.com/emer/emergent/params" + "github.com/emer/emergent/v2/netparams" + "github.com/emer/emergent/v2/params" ) // ParamSets is the default set of parameters -- Base is always applied, and others can be optionally diff --git a/examples/rl/rl.go b/examples/rl/rl.go index 1e271a8ff..9c429f52c 100644 --- a/examples/rl/rl.go +++ b/examples/rl/rl.go @@ -11,22 +11,22 @@ import ( "os" "github.com/emer/axon/axon" - "github.com/emer/emergent/econfig" - "github.com/emer/emergent/egui" - "github.com/emer/emergent/elog" - "github.com/emer/emergent/emer" - "github.com/emer/emergent/env" - "github.com/emer/emergent/erand" - "github.com/emer/emergent/estats" - "github.com/emer/emergent/etime" - "github.com/emer/emergent/looper" - "github.com/emer/emergent/netview" - "github.com/emer/emergent/params" - "github.com/emer/emergent/prjn" - "github.com/emer/emergent/relpos" - "github.com/emer/empi/mpi" - "github.com/goki/gi/gi" - "github.com/goki/gi/gimain" + "github.com/emer/emergent/v2/econfig" + "github.com/emer/emergent/v2/egui" + "github.com/emer/emergent/v2/elog" + "github.com/emer/emergent/v2/emer" + "github.com/emer/emergent/v2/env" + "github.com/emer/emergent/v2/erand" + "github.com/emer/emergent/v2/estats" + "github.com/emer/emergent/v2/etime" + "github.com/emer/emergent/v2/looper" + "github.com/emer/emergent/v2/netview" + "github.com/emer/emergent/v2/params" + "github.com/emer/emergent/v2/prjn" + "github.com/emer/emergent/v2/relpos" + "github.com/emer/empi/v2/mpi" + "goki.dev/gi/v2/gi" + "goki.dev/gi/v2/gimain" ) func main() { @@ -50,37 +50,37 @@ func main() { type Sim struct { // simulation configuration parameters -- set by .toml config file and / or args - Config Config `desc:"simulation configuration parameters -- set by .toml config file and / or args"` + Config Config - // [view: no-inline] the network -- click to view / edit parameters for layers, prjns, etc - Net *axon.Network `view:"no-inline" desc:"the network -- click to view / edit parameters for layers, prjns, etc"` + // the network -- click to view / edit parameters for layers, prjns, etc + Net *axon.Network `view:"no-inline"` - // [view: inline] all parameter management - Params emer.NetParams `view:"inline" desc:"all parameter management"` + // all parameter management + Params emer.NetParams `view:"inline"` - // [view: no-inline] contains looper control loops for running sim - Loops *looper.Manager `view:"no-inline" desc:"contains looper control loops for running sim"` + // contains looper control loops for running sim + Loops *looper.Manager `view:"no-inline"` // contains computed statistic values - Stats estats.Stats `desc:"contains computed statistic values"` + Stats estats.Stats // Contains all the logs and information about the logs.' 
- Logs elog.Logs `desc:"Contains all the logs and information about the logs.'"` + Logs elog.Logs - // [view: no-inline] Environments - Envs env.Envs `view:"no-inline" desc:"Environments"` + // Environments + Envs env.Envs `view:"no-inline"` // axon timing parameters and state - Context axon.Context `desc:"axon timing parameters and state"` + Context axon.Context - // [view: inline] netview update parameters - ViewUpdt netview.ViewUpdt `view:"inline" desc:"netview update parameters"` + // netview update parameters + ViewUpdt netview.ViewUpdt `view:"inline"` - // [view: -] manages all the gui elements - GUI egui.GUI `view:"-" desc:"manages all the gui elements"` + // manages all the gui elements + GUI egui.GUI `view:"-"` - // [view: -] a list of random seeds to use for each run - RndSeeds erand.Seeds `view:"-" desc:"a list of random seeds to use for each run"` + // a list of random seeds to use for each run + RndSeeds erand.Seeds `view:"-"` } // New creates new blank elements and initializes defaults diff --git a/fffb/inhib.go b/fffb/inhib.go index e4c65316d..11fdb8c7b 100644 --- a/fffb/inhib.go +++ b/fffb/inhib.go @@ -4,31 +4,31 @@ package fffb -import "github.com/emer/etable/minmax" +import "goki.dev/etable/v2/minmax" // Inhib contains state values for computed FFFB inhibition type Inhib struct { // computed feedforward inhibition - FFi float32 `desc:"computed feedforward inhibition"` + FFi float32 // computed feedback inhibition (total) - FBi float32 `desc:"computed feedback inhibition (total)"` + FBi float32 // overall value of the FFFB computed inhibition -- this is what is added into the unit Gi inhibition level (along with GiBg and any synaptic unit-driven inhibition) - Gi float32 `desc:"overall value of the FFFB computed inhibition -- this is what is added into the unit Gi inhibition level (along with GiBg and any synaptic unit-driven inhibition)"` + Gi float32 // original value of the inhibition (before pool or other effects) - GiOrig float32 `desc:"original value of the inhibition (before pool or other effects)"` + GiOrig float32 // for pools, this is the layer-level inhibition that is MAX'd with the pool-level inhibition to produce the net inhibition - LayGi float32 `desc:"for pools, this is the layer-level inhibition that is MAX'd with the pool-level inhibition to produce the net inhibition"` + LayGi float32 // average and max Ge excitatory conductance values, which drive FF inhibition - Ge minmax.AvgMax32 `desc:"average and max Ge excitatory conductance values, which drive FF inhibition"` + Ge minmax.AvgMax32 // average and max Act activation values, which drive FB inhibition - Act minmax.AvgMax32 `desc:"average and max Act activation values, which drive FB inhibition"` + Act minmax.AvgMax32 } func (fi *Inhib) Init() { diff --git a/fsfffb/inhib.go b/fsfffb/inhib.go index e7e689744..81eeb394e 100644 --- a/fsfffb/inhib.go +++ b/fsfffb/inhib.go @@ -8,7 +8,7 @@ import ( "log" "github.com/goki/gosl/slbool" - "github.com/goki/mat32" + "goki.dev/mat32/v2" ) //gosl: start fsfffb @@ -17,64 +17,64 @@ import ( type Inhib struct { // all feedforward incoming spikes into neurons in this pool -- raw aggregation - FFsRaw float32 `desc:"all feedforward incoming spikes into neurons in this pool -- raw aggregation"` + FFsRaw float32 // all feedback outgoing spikes generated from neurons in this pool -- raw aggregation - FBsRaw float32 `desc:"all feedback outgoing spikes generated from neurons in this pool -- raw aggregation"` + FBsRaw float32 // all extra GeExt conductances added to neurons - 
GeExtRaw float32 `desc:"all extra GeExt conductances added to neurons"` + GeExtRaw float32 // all feedforward incoming spikes into neurons in this pool, normalized by pool size - FFs float32 `desc:"all feedforward incoming spikes into neurons in this pool, normalized by pool size"` + FFs float32 // all feedback outgoing spikes generated from neurons in this pool, normalized by pool size - FBs float32 `desc:"all feedback outgoing spikes generated from neurons in this pool, normalized by pool size"` + FBs float32 // all extra GeExt conductances added to neurons, normalized by pool size - GeExts float32 `desc:"all extra GeExt conductances added to neurons, normalized by pool size"` + GeExts float32 // if true, this layer is hard-clamped and should use GeExts exclusively for PV - Clamped slbool.Bool `desc:"if true, this layer is hard-clamped and should use GeExts exclusively for PV"` + Clamped slbool.Bool // fast spiking PV+ fast integration of FFs feedforward spikes - FSi float32 `desc:"fast spiking PV+ fast integration of FFs feedforward spikes"` + FSi float32 // slow spiking SST+ integration of FBs feedback spikes - SSi float32 `desc:"slow spiking SST+ integration of FBs feedback spikes"` + SSi float32 // slow spiking facilitation factor, representing facilitating effects of recent activity - SSf float32 `desc:"slow spiking facilitation factor, representing facilitating effects of recent activity"` + SSf float32 // overall fast-spiking inhibitory conductance - FSGi float32 `desc:"overall fast-spiking inhibitory conductance"` + FSGi float32 // overall slow-spiking inhibitory conductance - SSGi float32 `desc:"overall slow-spiking inhibitory conductance"` + SSGi float32 // overall inhibitory conductance = FSGi + SSGi - Gi float32 `desc:"overall inhibitory conductance = FSGi + SSGi"` + Gi float32 // original value of the inhibition (before pool or other effects) - GiOrig float32 `desc:"original value of the inhibition (before pool or other effects)"` + GiOrig float32 // for pools, this is the layer-level inhibition that is MAX'd with the pool-level inhibition to produce the net inhibition - LayGi float32 `desc:"for pools, this is the layer-level inhibition that is MAX'd with the pool-level inhibition to produce the net inhibition"` + LayGi float32 // longer time scale running average FF drive -- used for FFAvgPrv - FFAvg float32 `desc:"longer time scale running average FF drive -- used for FFAvgPrv"` + FFAvg float32 // previous theta cycle FFAvg value -- for FFPrv factor -- updated in Decay function that is called at start of new ThetaCycle - FFAvgPrv float32 `desc:"previous theta cycle FFAvg value -- for FFPrv factor -- updated in Decay function that is called at start of new ThetaCycle"` + FFAvgPrv float32 // int32 atomic add compatible integration of FFsRaw - FFsRawInt int32 `desc:"int32 atomic add compatible integration of FFsRaw"` + FFsRawInt int32 // int32 atomic add compatible integration of FBsRaw - FBsRawInt int32 `desc:"int32 atomic add compatible integration of FBsRaw"` + FBsRawInt int32 // int32 atomic add compatible integration of GeExtRaw - GeExtRawInt int32 `desc:"int32 atomic add compatible integration of GeExtRaw"` + GeExtRawInt int32 } func (fi *Inhib) Init() { diff --git a/go.mod b/go.mod index b085674d5..593a9a757 100644 --- a/go.mod +++ b/go.mod @@ -1,77 +1,5 @@ module github.com/emer/axon -go 1.20 +go 1.21.0 -require ( - github.com/alecthomas/assert/v2 v2.3.0 - github.com/anthonynsimon/bild v0.13.0 - github.com/c2h5oh/datasize v0.0.0-20220606134207-859f65c6625b - 
github.com/emer/emergent v1.4.31 - github.com/emer/empi v1.0.22 - github.com/emer/etable v1.1.24 - github.com/emer/eve v0.9.5 - github.com/emer/leabra v1.2.7 - github.com/emer/vision v1.1.18 - github.com/goki/gi v1.3.25 - github.com/goki/gosl v1.0.17 - github.com/goki/ki v1.1.17 - github.com/goki/kigen v1.0.2 - github.com/goki/mat32 v1.0.18 - github.com/goki/vgpu v1.0.34 - github.com/goki/vulkan v1.0.7 - github.com/stretchr/testify v1.8.4 - gitlab.com/gomidi/midi/v2 v2.0.30 - golang.org/x/exp v0.0.0-20231006140011-7918f672742d -) -require ( - git.sr.ht/~sbinet/gg v0.3.1 // indirect - github.com/BurntSushi/freetype-go v0.0.0-20160129220410-b763ddbfe298 // indirect - github.com/BurntSushi/graphics-go v0.0.0-20160129215708-b43f31a4a966 // indirect - github.com/BurntSushi/toml v1.3.2 // indirect - github.com/BurntSushi/xgb v0.0.0-20210121224620-deaf085860bc // indirect - github.com/BurntSushi/xgbutil v0.0.0-20190907113008-ad855c713046 // indirect - github.com/Masterminds/vcs v1.13.3 // indirect - github.com/ajstarks/svgo v0.0.0-20211024235047-1546f124cd8b // indirect - github.com/akutz/sortfold v0.2.1 // indirect - github.com/alecthomas/chroma/v2 v2.9.1 // indirect - github.com/alecthomas/repr v0.2.0 // indirect - github.com/antonmedv/expr v1.15.3 // indirect - github.com/apache/arrow/go/arrow v0.0.0-20211112161151-bc219186db40 // indirect - github.com/aymerick/douceur v0.2.0 // indirect - github.com/davecgh/go-spew v1.1.1 // indirect - github.com/dlclark/regexp2 v1.10.0 // indirect - github.com/fatih/camelcase v1.0.0 // indirect - github.com/fsnotify/fsnotify v1.7.0 // indirect - github.com/gabriel-vasile/mimetype v1.4.3 // indirect - github.com/go-fonts/liberation v0.3.0 // indirect - github.com/go-gl/glfw/v3.3/glfw v0.0.0-20221017161538-93cebf72946b // indirect - github.com/go-gl/mathgl v1.1.0 // indirect - github.com/go-latex/latex v0.0.0-20230307184459-12ec69307ad9 // indirect - github.com/go-pdf/fpdf v0.7.0 // indirect - github.com/goki/freetype v1.0.1 // indirect - github.com/goki/go-difflib v1.2.1 // indirect - github.com/goki/pi v1.0.28 // indirect - github.com/goki/prof v1.0.1 // indirect - github.com/goki/vci v1.0.2 // indirect - github.com/golang/freetype v0.0.0-20170609003504-e2365dfdc4a0 // indirect - github.com/gorilla/css v1.0.0 // indirect - github.com/h2non/filetype v1.1.3 // indirect - github.com/hexops/gotextdiff v1.0.3 // indirect - github.com/iancoleman/strcase v0.3.0 // indirect - github.com/jinzhu/copier v0.4.0 // indirect - github.com/mitchellh/go-homedir v1.1.0 // indirect - github.com/pmezard/go-difflib v1.0.0 // indirect - github.com/srwiley/rasterx v0.0.0-20220730225603-2ab79fcdd4ef // indirect - github.com/srwiley/scanx v0.0.0-20190309010443-e94503791388 // indirect - golang.org/x/image v0.13.0 // indirect - golang.org/x/mod v0.13.0 // indirect - golang.org/x/net v0.17.0 // indirect - golang.org/x/sys v0.13.0 // indirect - golang.org/x/text v0.13.0 // indirect - golang.org/x/tools v0.14.0 // indirect - golang.org/x/xerrors v0.0.0-20220907171357-04be3eba64a2 // indirect - gonum.org/v1/gonum v0.12.0 // indirect - gonum.org/v1/plot v0.12.0 // indirect - gopkg.in/yaml.v3 v3.0.1 // indirect -) diff --git a/go.sum b/go.sum index 56ae83869..eac2b1f47 100644 --- a/go.sum +++ b/go.sum @@ -108,8 +108,8 @@ github.com/go-pdf/fpdf v0.7.0 h1:Kgf56ewNyhYcv6LIbhDWGRF91+e4aGMjpQlabnZnz9Q= github.com/go-pdf/fpdf v0.7.0/go.mod h1:gfqhcNwXrsd3XYKte9a7vM3smvU/jB4ZRDrmWSxpfdc= github.com/goki/freetype v1.0.1 h1:10DgpEu+QEh/hpvAxgx//RT8ayWwHJI+nZj3QNcn8uk= github.com/goki/freetype 
v1.0.1/go.mod h1:ni9Dgz8vA6o+13u1Ke0q3kJcCJ9GuXb1dtlfKho98vs= -github.com/goki/gi v1.3.25 h1:ujr3BIGRx0EWo9b2MmPuNj5AunHrTUXoWnDZRv9jy6k= -github.com/goki/gi v1.3.25/go.mod h1:bAhqeKTmBYCzO03c1GAHjgOeQPdQzBN21HA0UsKddtU= +goki.dev/gi/v2 v1.3.25 h1:ujr3BIGRx0EWo9b2MmPuNj5AunHrTUXoWnDZRv9jy6k= +goki.dev/gi/v2 v1.3.25/go.mod h1:bAhqeKTmBYCzO03c1GAHjgOeQPdQzBN21HA0UsKddtU= github.com/goki/go-difflib v1.2.1 h1:zqSi9rTf0vYFia92PaZeKrTfofGVqku2WYOtfsUYqxU= github.com/goki/go-difflib v1.2.1/go.mod h1:uZuY072AYTnMjRxCn6IkpZQKRVcTj4SIpHHXOUGOxrg= github.com/goki/gosl v1.0.17 h1:lr/CSjflbrrUWTZF9/S0BvN48lZXuuAsGAC+0LvNuBI= diff --git a/interinhib/interinhib.go b/interinhib/interinhib.go index ac11e41ad..7ed12543c 100644 --- a/interinhib/interinhib.go +++ b/interinhib/interinhib.go @@ -19,8 +19,8 @@ package interinhib import ( "github.com/emer/axon/axon" - "github.com/emer/emergent/emer" - "github.com/goki/mat32" + "github.com/emer/emergent/v2/emer" + "goki.dev/mat32/v2" ) // InterInhib specifies inhibition between layers, where @@ -29,13 +29,13 @@ import ( type InterInhib struct { // layers to receive inhibition from - Lays emer.LayNames `desc:"layers to receive inhibition from"` + Lays emer.LayNames // multiplier on Gi from other layers - Gi float32 `desc:"multiplier on Gi from other layers"` + Gi float32 // add inhibition -- otherwise Max - Add bool `desc:"add inhibition -- otherwise Max"` + Add bool } func (il *InterInhib) Defaults() { diff --git a/kinase/params.go b/kinase/params.go index 6b986b33e..086f81eea 100644 --- a/kinase/params.go +++ b/kinase/params.go @@ -6,7 +6,7 @@ package kinase import ( "github.com/goki/gosl/slbool" - "github.com/goki/mat32" + "goki.dev/mat32/v2" ) //gosl: start kinase @@ -16,35 +16,35 @@ import ( // timescales for LTP potentiation vs. LTD depression factors. type CaDtParams struct { - // [def: 2,5] [min: 1] CaM (calmodulin) time constant in cycles (msec) -- for synaptic-level integration this integrates on top of Ca signal from send->CaSyn * recv->CaSyn, each of which are typically integrated with a 30 msec Tau. - MTau float32 `def:"2,5" min:"1" desc:"CaM (calmodulin) time constant in cycles (msec) -- for synaptic-level integration this integrates on top of Ca signal from send->CaSyn * recv->CaSyn, each of which are typically integrated with a 30 msec Tau."` + // CaM (calmodulin) time constant in cycles (msec) -- for synaptic-level integration this integrates on top of Ca signal from send->CaSyn * recv->CaSyn, each of which are typically integrated with a 30 msec Tau. + MTau float32 `def:"2,5" min:"1"` - // [def: 39] [min: 1] LTP spike-driven Ca factor (CaP) time constant in cycles (msec), simulating CaMKII in the Kinase framework, with 40 on top of MTau roughly tracking the biophysical rise time. Computationally, CaP represents the plus phase learning signal that reflects the most recent past information. - PTau float32 `def:"39" min:"1" desc:"LTP spike-driven Ca factor (CaP) time constant in cycles (msec), simulating CaMKII in the Kinase framework, with 40 on top of MTau roughly tracking the biophysical rise time. Computationally, CaP represents the plus phase learning signal that reflects the most recent past information."` + // LTP spike-driven Ca factor (CaP) time constant in cycles (msec), simulating CaMKII in the Kinase framework, with 40 on top of MTau roughly tracking the biophysical rise time. Computationally, CaP represents the plus phase learning signal that reflects the most recent past information. 
+ PTau float32 `def:"39" min:"1"` - // [def: 41] [min: 1] LTD spike-driven Ca factor (CaD) time constant in cycles (msec), simulating DAPK1 in Kinase framework. Computationally, CaD represents the minus phase learning signal that reflects the expectation representation prior to experiencing the outcome (in addition to the outcome). For integration equations, this cannot be identical to PTau. - DTau float32 `def:"41" min:"1" desc:"LTD spike-driven Ca factor (CaD) time constant in cycles (msec), simulating DAPK1 in Kinase framework. Computationally, CaD represents the minus phase learning signal that reflects the expectation representation prior to experiencing the outcome (in addition to the outcome). For integration equations, this cannot be identical to PTau."` + // LTD spike-driven Ca factor (CaD) time constant in cycles (msec), simulating DAPK1 in Kinase framework. Computationally, CaD represents the minus phase learning signal that reflects the expectation representation prior to experiencing the outcome (in addition to the outcome). For integration equations, this cannot be identical to PTau. + DTau float32 `def:"41" min:"1"` // if true, adjust dt time constants when using exponential integration equations to compensate for difference between discrete and continuous integration - ExpAdj slbool.Bool `desc:"if true, adjust dt time constants when using exponential integration equations to compensate for difference between discrete and continuous integration"` + ExpAdj slbool.Bool - // [view: -] rate = 1 / tau - MDt float32 `view:"-" json:"-" xml:"-" inactive:"+" desc:"rate = 1 / tau"` + // rate = 1 / tau + MDt float32 `view:"-" json:"-" xml:"-" inactive:"+"` - // [view: -] rate = 1 / tau - PDt float32 `view:"-" json:"-" xml:"-" inactive:"+" desc:"rate = 1 / tau"` + // rate = 1 / tau + PDt float32 `view:"-" json:"-" xml:"-" inactive:"+"` - // [view: -] rate = 1 / tau - DDt float32 `view:"-" json:"-" xml:"-" inactive:"+" desc:"rate = 1 / tau"` + // rate = 1 / tau + DDt float32 `view:"-" json:"-" xml:"-" inactive:"+"` - // [view: -] 4 * rate = 1 / tau - M4Dt float32 `view:"-" json:"-" xml:"-" inactive:"+" desc:"4 * rate = 1 / tau"` + // 4 * rate = 1 / tau + M4Dt float32 `view:"-" json:"-" xml:"-" inactive:"+"` - // [view: -] 4 * rate = 1 / tau - P4Dt float32 `view:"-" json:"-" xml:"-" inactive:"+" desc:"4 * rate = 1 / tau"` + // 4 * rate = 1 / tau + P4Dt float32 `view:"-" json:"-" xml:"-" inactive:"+"` - // [view: -] 4 * rate = 1 / tau - D4Dt float32 `view:"-" json:"-" xml:"-" inactive:"+" desc:"4 * rate = 1 / tau"` + // 4 * rate = 1 / tau + D4Dt float32 `view:"-" json:"-" xml:"-" inactive:"+"` pad, pad1 int32 } @@ -109,16 +109,16 @@ func (kp *CaDtParams) CaAtT(ti int32, caM, caP, caD *float32) { // timescales for LTP potentiation vs. LTD depression factors. type CaParams struct { - // [def: 12] spiking gain factor for SynSpk learning rule variants. This alters the overall range of values, keeping them in roughly the unit scale, and affects effective learning rate. - SpikeG float32 `def:"12" desc:"spiking gain factor for SynSpk learning rule variants. This alters the overall range of values, keeping them in roughly the unit scale, and affects effective learning rate."` + // spiking gain factor for SynSpk learning rule variants. This alters the overall range of values, keeping them in roughly the unit scale, and affects effective learning rate. 
+	SpikeG float32 `def:"12"`
 
-	// [def: 100] maximum ISI for integrating in Opt mode -- above that just set to 0
-	MaxISI int32 `def:"100" desc:"maximum ISI for integrating in Opt mode -- above that just set to 0"`
+	// maximum ISI for integrating in Opt mode -- above that just set to 0
+	MaxISI int32 `def:"100"`
 
 	pad, pad1 int32
 
-	// [view: inline] time constants for integrating at M, P, and D cascading levels
-	Dt CaDtParams `view:"inline" desc:"time constants for integrating at M, P, and D cascading levels"`
+	// time constants for integrating at M, P, and D cascading levels
+	Dt CaDtParams `view:"inline"`
 }
 
 func (kp *CaParams) Defaults() {
diff --git a/kinasex/contsyn.go b/kinasex/contsyn.go
index 2681ab789..beb1a7864 100644
--- a/kinasex/contsyn.go
+++ b/kinasex/contsyn.go
@@ -4,16 +4,16 @@
 package kinasex
 
-import "github.com/goki/mat32"
+import "goki.dev/mat32/v2"
 
 // ContSyn holds extra synaptic state for continuous learning
 type ContSyn struct {
 
 	// transitional, temporary DWt value, which is updated in a window after synaptic activity when Ca levels are still elevated, and added to the DWt value after a longer break of spiking where there is enough time for CaMKII driven AMPA receptor trafficking to take place
-	TDWt float32 `desc:"transitional, temporary DWt value, which is updated in a window after synaptic activity when Ca levels are still elevated, and added to the DWt value after a longer break of spiking where there is enough time for CaMKII driven AMPA receptor trafficking to take place"`
+	TDWt float32
 
 	// maximum CaD value since last DWt change -- DWt occurs when current CaD has decreased by a given proportion from this recent peak
-	CaDMax float32 `desc:"maximum CaD value since last DWt change -- DWt occurs when current CaD has decreased by a given proportion from this recent peak"`
+	CaDMax float32
 }
 
 // VarByName returns synapse variable by name
diff --git a/nxx1/nxx1.go b/nxx1/nxx1.go
index bea1f93ed..e81844c9a 100644
--- a/nxx1/nxx1.go
+++ b/nxx1/nxx1.go
@@ -20,7 +20,7 @@ overall values.
 package nxx1
 
 import (
-	"github.com/goki/mat32"
+	"goki.dev/mat32/v2"
 )
 
 // Params are the Noisy X/(X+1) rate-coded activation function parameters.
@@ -36,47 +36,47 @@ import (
 // overall values.
 type Params struct {
 
-	// [def: 0.5] threshold value Theta (Q) for firing output activation (.5 is more accurate value based on AdEx biological parameters and normalization
-	Thr float32 `def:"0.5" desc:"threshold value Theta (Q) for firing output activation (.5 is more accurate value based on AdEx biological parameters and normalization"`
+	// threshold value Theta (Q) for firing output activation (.5 is a more accurate value based on AdEx biological parameters and normalization)
+	Thr float32 `def:"0.5"`
 
-	// [def: 80,100,40,20] [min: 0] gain (gamma) of the rate-coded activation functions -- 100 is default, 80 works better for larger models, and 20 is closer to the actual spiking behavior of the AdEx model -- use lower values for more graded signals, generally in lower input/sensory layers of the network
-	Gain float32 `def:"80,100,40,20" min:"0" desc:"gain (gamma) of the rate-coded activation functions -- 100 is default, 80 works better for larger models, and 20 is closer to the actual spiking behavior of the AdEx model -- use lower values for more graded signals, generally in lower input/sensory layers of the network"`
+	// gain (gamma) of the rate-coded activation functions -- 100 is default, 80 works better for larger models, and 20 is closer to the actual spiking behavior of the AdEx model -- use lower values for more graded signals, generally in lower input/sensory layers of the network
+	Gain float32 `def:"80,100,40,20" min:"0"`
 
-	// [def: 0.005,0.01] [min: 0] variance of the Gaussian noise kernel for convolving with XX1 in NOISY_XX1 and NOISY_LINEAR -- determines the level of curvature of the activation function near the threshold -- increase for more graded responding there -- note that this is not actual stochastic noise, just constant convolved gaussian smoothness to the activation function
-	NVar float32 `def:"0.005,0.01" min:"0" desc:"variance of the Gaussian noise kernel for convolving with XX1 in NOISY_XX1 and NOISY_LINEAR -- determines the level of curvature of the activation function near the threshold -- increase for more graded responding there -- note that this is not actual stochastic noise, just constant convolved gaussian smoothness to the activation function"`
+	// variance of the Gaussian noise kernel for convolving with XX1 in NOISY_XX1 and NOISY_LINEAR -- determines the level of curvature of the activation function near the threshold -- increase for more graded responding there -- note that this is not actual stochastic noise, just constant convolved gaussian smoothness to the activation function
+	NVar float32 `def:"0.005,0.01" min:"0"`
 
-	// [def: 0.01] threshold on activation below which the direct vm - act.thr is used -- this should be low -- once it gets active should use net - g_e_thr ge-linear dynamics (gelin)
-	VmActThr float32 `def:"0.01" desc:"threshold on activation below which the direct vm - act.thr is used -- this should be low -- once it gets active should use net - g_e_thr ge-linear dynamics (gelin)"`
+	// threshold on activation below which the direct vm - act.thr is used -- this should be low -- once it gets active should use net - g_e_thr ge-linear dynamics (gelin)
+	VmActThr float32 `def:"0.01"`
 
-	// [def: 0.33] [view: -] multiplier on sigmoid used for computing values for net < thr
-	SigMult float32 `def:"0.33" view:"-" json:"-" xml:"-" desc:"multiplier on sigmoid used for computing values for net < thr"`
+	// multiplier on sigmoid used for computing values for net < thr
+	SigMult float32 `def:"0.33" view:"-" json:"-" xml:"-"`
 
-	// [def: 0.8] [view: -] power for computing sig_mult_eff as function of gain * nvar
-	SigMultPow float32 `def:"0.8" view:"-" json:"-" xml:"-" desc:"power for computing sig_mult_eff as function of gain * nvar"`
+	// power for computing sig_mult_eff as function of gain * nvar
+	SigMultPow float32 `def:"0.8" view:"-" json:"-" xml:"-"`
 
-	// [def: 3] [view: -] gain multipler on (net - thr) for sigmoid used for computing values for net < thr
-	SigGain float32 `def:"3" view:"-" json:"-" xml:"-" desc:"gain multipler on (net - thr) for sigmoid used for computing values for net < thr"`
+	// gain multiplier on (net - thr) for sigmoid used for computing values for net < thr
+	SigGain float32 `def:"3" view:"-" json:"-" xml:"-"`
 
-	// [def: 0.01] [view: -] interpolation range above zero to use interpolation
-	InterpRange float32 `def:"0.01" view:"-" json:"-" xml:"-" desc:"interpolation range above zero to use interpolation"`
+	// interpolation range above zero to use interpolation
+	InterpRange float32 `def:"0.01" view:"-" json:"-" xml:"-"`
 
-	// [def: 10] [view: -] range in units of nvar over which to apply gain correction to compensate for convolution
-	GainCorRange float32 `def:"10" view:"-" json:"-" xml:"-" desc:"range in units of nvar over which to apply gain correction to compensate for convolution"`
+	// range in units of nvar over which to apply gain correction to compensate for convolution
+	GainCorRange float32 `def:"10" view:"-" json:"-" xml:"-"`
 
-	// [def: 0.1] [view: -] gain correction multiplier -- how much to correct gains
-	GainCor float32 `def:"0.1" view:"-" json:"-" xml:"-" desc:"gain correction multiplier -- how much to correct gains"`
+	// gain correction multiplier -- how much to correct gains
+	GainCor float32 `def:"0.1" view:"-" json:"-" xml:"-"`
 
-	// [view: -] sig_gain / nvar
-	SigGainNVar float32 `view:"-" json:"-" xml:"-" desc:"sig_gain / nvar"`
+	// sig_gain / nvar
+	SigGainNVar float32 `view:"-" json:"-" xml:"-"`
 
-	// [view: -] overall multiplier on sigmoidal component for values below threshold = sig_mult * pow(gain * nvar, sig_mult_pow)
-	SigMultEff float32 `view:"-" json:"-" xml:"-" desc:"overall multiplier on sigmoidal component for values below threshold = sig_mult * pow(gain * nvar, sig_mult_pow)"`
+	// overall multiplier on sigmoidal component for values below threshold = sig_mult * pow(gain * nvar, sig_mult_pow)
+	SigMultEff float32 `view:"-" json:"-" xml:"-"`
 
-	// [view: -] 0.5 * sig_mult_eff -- used for interpolation portion
-	SigValAt0 float32 `view:"-" json:"-" xml:"-" desc:"0.5 * sig_mult_eff -- used for interpolation portion"`
+	// 0.5 * sig_mult_eff -- used for interpolation portion
+	SigValAt0 float32 `view:"-" json:"-" xml:"-"`
 
-	// [view: -] function value at interp_range - sig_val_at_0 -- for interpolation
-	InterpVal float32 `view:"-" json:"-" xml:"-" desc:"function value at interp_range - sig_val_at_0 -- for interpolation"`
+	// function value at interp_range - sig_val_at_0 -- for interpolation
+	InterpVal float32 `view:"-" json:"-" xml:"-"`
 }
 
 func (xp *Params) Update() {
diff --git a/nxx1/nxx1_test.go b/nxx1/nxx1_test.go
index 914d3f7a1..b736c5afc 100644
--- a/nxx1/nxx1_test.go
+++ b/nxx1/nxx1_test.go
@@ -9,7 +9,7 @@ package nxx1
 import (
 	"testing"
 
-	"github.com/goki/mat32"
+	"goki.dev/mat32/v2"
 )
 
 // TOLERANCE is the numerical difference tolerance for comparing vs. target values
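A note for readers of the kinase/params.go changes above: the CaDtParams comments specify rate = 1/tau, with the M, P, and D variables integrating as a cascade. The sketch below is a minimal illustration of that cascade, assuming plain exponential-Euler updates (the ExpAdj flag indicates the real code can adjust these rates to better match continuous integration); caCascade is a hypothetical name for exposition, not the kinase package API.

package main

import "fmt"

// caCascade sketches what the CaDtParams rate fields encode: each Dt
// argument is 1/Tau, and the three variables integrate in a cascade --
// CaM tracks the raw Ca signal, CaP (CaMKII-like, LTP) tracks CaM, and
// CaD (DAPK1-like, LTD) tracks CaP.
func caCascade(ca float32, caM, caP, caD *float32, mDt, pDt, dDt float32) {
	*caM += mDt * (ca - *caM)
	*caP += pDt * (*caM - *caP)
	*caD += dDt * (*caP - *caD)
}

func main() {
	// def values from the diff: MTau = 2, PTau = 39, DTau = 41,
	// driven by a constant Ca signal of 1 for 100 cycles (msec).
	var caM, caP, caD float32
	for t := 0; t < 100; t++ {
		caCascade(1, &caM, &caP, &caD, 1.0/2, 1.0/39, 1.0/41)
	}
	fmt.Printf("caM=%.3f caP=%.3f caD=%.3f\n", caM, caP, caD)
}

Running this shows the fast CaM variable converging quickly toward the clamped Ca level while CaP lags and CaD lags further, which is the plus-phase vs. minus-phase asymmetry the comments describe.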
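Likewise for the nxx1/nxx1.go changes: Params describes the Noisy X/(X+1) function, where Thr gates and Gain sharpens the basic x/(x+1) curve, and NVar sets the Gaussian convolution that smooths it near threshold. The sketch below shows only the un-noised core under those assumptions; xx1 here is a hypothetical free function, not the package's method, and the Sig*/Interp* fields above parameterize the sub-threshold approximation that this sketch omits.

package main

import "fmt"

// xx1 is an illustrative stand-in for the rate-coded activation that
// Params configures: zero below threshold, then x/(x+1) where
// x = Gain * (net - Thr). The real package convolves this curve with a
// Gaussian of variance NVar to produce graded responding near Thr.
func xx1(gain, thr, net float32) float32 {
	x := gain * (net - thr)
	if x <= 0 {
		return 0
	}
	return x / (x + 1)
}

func main() {
	// def values from the diff: Thr = 0.5, Gain = 100.
	for _, net := range []float32{0.45, 0.5, 0.51, 0.6, 1.0} {
		fmt.Printf("net=%.2f -> act=%.3f\n", net, xx1(100, 0.5, net))
	}
}

The sharp rise just above Thr (act jumps from 0 to 0.5 between net = 0.5 and net = 0.51 at Gain = 100) is exactly what the NVar convolution is there to soften.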