From f57a940ed68d9e788ed6c48184b104152c43c18f Mon Sep 17 00:00:00 2001
From: "Randall C. O'Reilly"
Date: Sat, 30 Nov 2024 16:31:57 -0800
Subject: [PATCH] rename of neuron variables: CaSpk* -> Ca*; CaLrn, NrnCa ->
LearnCa; Beta1,2 on GPU and fix order of Neuron indexes; consolidate all sim
metadata in config fields; deep_fsa stats in place, but something still off
relative to original.
---
Deep.md | 2 +-
GPU.md | 2 +-
PCoreBG.md | 4 +-
README.md | 59 +-
axon/act-layer.go | 85 +-
axon/act-layer.goal | 85 +-
axon/act-net.go | 66 +-
axon/act-net.goal | 66 +-
axon/act-path.go | 2 +-
axon/act-path.goal | 2 +-
axon/act.go | 22 +-
axon/act.goal | 22 +-
axon/basic_test.go | 65 +-
axon/basic_test.goal | 65 +-
axon/context.go | 12 +-
axon/deep-layer.go | 12 +-
axon/enumgen.go | 22 +-
axon/gosl.go | 86 ++
axon/layerparams.go | 4 +-
axon/layervars.go | 2 +-
axon/learn-path.go | 36 +-
axon/learn-path.goal | 36 +-
axon/learn.go | 87 +-
axon/learn.goal | 87 +-
axon/logging.go | 198 +---
axon/looper.go | 31 +-
axon/neuron.go | 457 +++++---
axon/pathtypes.go | 2 +-
axon/pool.go | 20 +-
axon/pool.goal | 20 +-
axon/rubicon-layer.go | 4 +-
axon/rubicon-layer.goal | 4 +-
axon/rubicon-net.go | 4 +-
axon/rubicon-path.go | 2 +-
axon/rubicon.go | 8 +-
axon/rubicon.goal | 8 +-
axon/shaders/ApplyExtsNeuron.wgsl | 163 ++-
axon/shaders/Beta1Neuron.wgsl | 1386 ++++++++++++++++++++++++
axon/shaders/Beta2Neuron.wgsl | 1386 ++++++++++++++++++++++++
axon/shaders/BetweenGi.wgsl | 163 ++-
axon/shaders/CycleInc.wgsl | 163 ++-
axon/shaders/CycleNeuron.wgsl | 199 ++--
axon/shaders/CyclePost.wgsl | 171 ++-
axon/shaders/DWtFromDiSyn.wgsl | 163 ++-
axon/shaders/DWtSubMeanNeuron.wgsl | 163 ++-
axon/shaders/DWtSyn.wgsl | 201 ++--
axon/shaders/GPUTestWrite.wgsl | 163 ++-
axon/shaders/GatherSpikes.wgsl | 167 ++-
axon/shaders/InitGBuffsPath.wgsl | 163 ++-
axon/shaders/LayerGi.wgsl | 163 ++-
axon/shaders/MinusPhaseNeuron.wgsl | 164 ++-
axon/shaders/MinusPhasePool.wgsl | 163 ++-
axon/shaders/MinusPhasePost.wgsl | 177 ++-
axon/shaders/NewStateLayer.wgsl | 163 ++-
axon/shaders/NewStateNeuron.wgsl | 179 ++-
axon/shaders/PlusPhaseNeuron.wgsl | 177 ++-
axon/shaders/PlusPhasePool.wgsl | 163 ++-
axon/shaders/PlusPhasePost.wgsl | 181 ++--
axon/shaders/PlusPhaseStartNeuron.wgsl | 163 ++-
axon/shaders/PoolGi.wgsl | 163 ++-
axon/shaders/SendSpike.wgsl | 173 ++-
axon/shaders/SlowAdaptLayer.wgsl | 163 ++-
axon/shaders/SlowAdaptNeuron.wgsl | 163 ++-
axon/shaders/WtFromDWtLayer.wgsl | 163 ++-
axon/shaders/WtFromDWtSyn.wgsl | 163 ++-
axon/threads_test.go | 2 +-
axon/typegen.go | 16 +-
chans/skca.go | 2 +-
examples/bench_lvis/bench_lvis.go | 1 -
examples/choose/choose.go | 8 +-
examples/deep_fsa/config.go | 14 +-
examples/deep_fsa/deep_fsa.go | 57 +-
examples/deep_fsa/params.go | 2 +-
examples/dls/dls.go | 6 +-
examples/pcore_ds/pcore_ds.go | 2 +-
examples/pvlv/pvlv.go | 2 +-
examples/ra25/ra25.go | 27 +-
kinase/linear/linear.go | 8 +-
78 files changed, 5976 insertions(+), 3192 deletions(-)
create mode 100644 axon/shaders/Beta1Neuron.wgsl
create mode 100644 axon/shaders/Beta2Neuron.wgsl
diff --git a/Deep.md b/Deep.md
index fcb17cce4..93657198b 100644
--- a/Deep.md
+++ b/Deep.md
@@ -30,7 +30,7 @@ The predictive pulvinar TRC is created and associated with the *driver* layer, a
This package has 3 primary specialized Layer types:
-* `SuperLayer`: implements the superficial layer 2-3 neurons, which function just like standard axon.Layer neurons, and always represent the _current state_ of things. They learn continuously from predictive learning error signals, are widely interconnected with other cortical areas, and form the basis for the learned representations in other layers. As a computational simplification, they can also directly compute the Burst activation signal that reflects the deep layer 5IB bursting activation, via thresholding of the superficial layer activations (Bursting is thought to have a higher threshold). Activity is represented by the `CaSpkP` value -- `Act` is used only for display purposes!
+* `SuperLayer`: implements the superficial layer 2-3 neurons, which function just like standard axon.Layer neurons, and always represent the _current state_ of things. They learn continuously from predictive learning error signals, are widely interconnected with other cortical areas, and form the basis for the learned representations in other layers. As a computational simplification, they can also directly compute the Burst activation signal that reflects the deep layer 5IB bursting activation, via thresholding of the superficial layer activations (Bursting is thought to have a higher threshold). Activity is represented by the `CaP` value -- `Act` is used only for display purposes!
* `CTLayer`: implements the layer 6 regular spiking CT corticothalamic neurons that project into the thalamus. They receive the Burst activation via a `CTCtxtPath` pathway type, and integrate that in the CtxtGe value, which is added to other excitatory conductance inputs to drive the overall activation of these neurons. Due to the bursting nature of the Burst inputs, this causes these CT layer neurons to reflect what the superficial layers encoded on the *previous* timestep -- thus they represent a temporally delayed context state.
diff --git a/GPU.md b/GPU.md
index 54f32b688..36dc83f6b 100644
--- a/GPU.md
+++ b/GPU.md
@@ -206,7 +206,7 @@ There is a hard max storage buffer limit of 4 GiB (uint32), and `MaxStorageBuffe
+ `Layer.Act.` -> `Layer.Acts.`
+ `Layer.Acts.GABAB.` -> `Layer.Acts.GabaB.`
+ `Layer.Acts.Spike.` -> `Layer.Acts.Spikes.`
- + `Layer.Learn.CaLrn.` -> `Layer.Learn.CaLearn.`
+ + `Layer.Learn.LearnCa.` -> `Layer.Learn.CaLearn.`
diff --git a/PCoreBG.md b/PCoreBG.md
index 3d749df49..3c74f67e1 100644
--- a/PCoreBG.md
+++ b/PCoreBG.md
@@ -88,7 +88,7 @@ The key challenge in BG learning is that the `da` term typically comes significa
* `Tr += sn.Act * rn.Act`
-(we actually use `sn.CaSpkD` and `rn.GeIntMax` which are spiking Ca variables, and GeIntMax captures the max activity over the trial because MSN firing is transient).
+(we actually use `sn.CaD` and `rn.GeIntMax` which are spiking Ca variables, and GeIntMax captures the max activity over the trial because MSN firing is transient).
And then we leverage the _reward salience_ firing properties of cholinergic interneurons (CINs, AKA TANs = tonically active neurons) to provide a later "learn now" signal by firing in proportion to the non-discounted, positive rectified US or CS value (i.e., whenever any kind of reward or punishment signal arrives, or is indicated by a CS). Thus, at the point of high ACh firing, which coincides with DA release, we get:
@@ -100,7 +100,7 @@ and the trace is effectively reset by a decay factor:
One further wrinkle is that the BG will become permanently stuck if there is no gating at all -- trial and error learning requires "trials" of activity to learn! Thus, we introduce a slow "NoGate" learning case on trials where no neurons gated within the layer:
-* `Tr += -NoGateLRate * ACh * rn.SpkMax * sn.CaSpkD`
+* `Tr += -NoGateLRate * ACh * rn.SpkMax * sn.CaD`
# Other models
diff --git a/README.md b/README.md
index 30c81a559..d13612a37 100644
--- a/README.md
+++ b/README.md
@@ -285,7 +285,7 @@ The final term in the credit assignment factor is the derivative of the receivin
$$ y' = y (1-y) $$
-which is maximal at y = .5 and zero at either 0 or 1. In Axon, this is computed using a time-integrated spike-driven Ca-like term (`CaSpkD`), with the max value across the layer used instead of the fixed 1 constant. In addition, it is useful to use an additional factor that reflects the normalized difference in receiving spiking across the minus and plus phase, which can be thought of as an empirical measure of the sensitivity of the receiving neuron to changes over time:
+which is maximal at y = .5 and zero at either 0 or 1. In Axon, this is computed using a time-integrated spike-driven Ca-like term (`CaD`), with the max value across the layer used instead of the fixed 1 constant. In addition, it is useful to use an additional factor that reflects the normalized difference in receiving spiking across the minus and plus phase, which can be thought of as an empirical measure of the sensitivity of the receiving neuron to changes over time:
$$ y' = y (1-y) \frac{y^+ - y^-}{\text{MAX}(y^+, y^-)} $$
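+
+As a concrete sketch of this learning-rate factor, the function below combines the sigmoid-derivative term with the normalized phase difference from the two equations above. The names are placeholders, not the actual axon API -- in the code this is computed by `RLRateSigDeriv` and `RLRateDiff`:
+
+```Go
+package sketch
+
+import "math"
+
+// RLRateSketch returns the receiving-unit learning-rate factor: the sigmoid
+// derivative of the spike-driven CaD (with the layer max in place of the
+// constant 1), times the normalized plus/minus phase difference.
+func RLRateSketch(caP, caD, layerMaxCaD float32) float32 {
+	deriv := caD * (layerMaxCaD - caD) // y * (Max - y)
+	maxPD := max(caP, caD)
+	if maxPD == 0 {
+		return 1
+	}
+	return deriv * float32(math.Abs(float64(caP-caD))) / maxPD
+}
+```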
@@ -348,24 +348,23 @@ The [`axon.Neuron`](axon/neuron.go) struct contains all the neuron (unit) level
#### Calcium for learning
* `CaSyn` = spike-driven calcium trace for synapse-level Ca-driven learning: EWMA of `SpikeG * Spike` with smoothing factor `1/SynTau` (typically 1/30). Synapses smooth `send.CaSyn * recv.CaSyn` with M, P, D smoothing factors for the synaptic trace driving credit assignment in learning. Smoothing factors reflects binding time of Glu to NMDA and Ca buffering postsynaptically, and determines time window where pre * post spiking must overlap to drive learning.
-* `CaSpkM` = spike-driven calcium trace used as a neuron-level proxy for synpatic credit assignment factor based on EWMA of `SpikeG * Spike` with smoothing factor `1/MTau` (typically 1/5). Simulates a calmodulin (CaM) like signal at the most abstract level.
-* `CaSpkP` = EWMA of `CaSpkM` with smoothing factor `1/PTau` (typically 1/40), representing neuron-level purely spiking version of plus, LTP direction of weight change and capturing the function of CaMKII in the Kinase learning rule. Used for specialized learning and computational functions, statistics, instead of `Act`.
-* `CaSpkD` = EWMA of `CaSpkP` with smoothing factor `1/DTau` (typically 1/40), representing neuron-level purely spiking version of minus, LTD direction of weight change and capturing the function of DAPK1 in the Kinase learning rule. Used for specialized learning and computational functions, statistics, instead of `Act`.
-* `CaSpkPM` = minus-phase snapshot of the CaSpkP value -- similar to ActM but using a more directly spike-integrated value.
-* `CaLrn` = recv neuron calcium signal used to drive temporal error difference component of standard learning rule, combining NMDA (`NmdaCa`) and spiking-driven VGCC (`VgccCaInt`) calcium sources (vs. `CaSpk*` which only reflects spiking component). This is integrated into `CaM`, `CaP`, `CaD`, and temporal derivative is `CaP - CaD` (CaMKII - DAPK1). This approximates the backprop error derivative on net input, but the VGCC component adds a proportion of recv activation delta as well -- a balance of both works best. The synaptic-level trace multiplier provides the credit assignment factor, reflecting coincident activity and potentially smoothed over longer multi-trial timescales.
-* `CaM` = EWMA of `CaLrn` with smoothing factor `1/MTau` (typically 1/5), simulating a calmodulin (`CaM`) like signal, which then drives `CaP`, `CaD` for delta signal driving error-driven learning.
+* `CaM` = spike-driven calcium trace used as a neuron-level proxy for synaptic credit assignment factor based on EWMA of `SpikeG * Spike` with smoothing factor `1/MTau` (typically 1/5). Simulates a calmodulin (CaM) like signal at the most abstract level.
+* `CaP` = EWMA of `CaM` with smoothing factor `1/PTau` (typically 1/40), representing neuron-level purely spiking version of plus, LTP direction of weight change and capturing the function of CaMKII in the Kinase learning rule. Used for specialized learning and computational functions, statistics, instead of `Act`.
+* `CaD` = EWMA of `CaP` with smoothing factor `1/DTau` (typically 1/40), representing neuron-level purely spiking version of minus, LTD direction of weight change and capturing the function of DAPK1 in the Kinase learning rule. Used for specialized learning and computational functions, statistics, instead of `Act`.
+* `LearnCa` = recv neuron calcium signal used to drive temporal error difference component of standard learning rule, combining NMDA (`NmdaCa`) and spiking-driven VGCC (`VgccCaInt`) calcium sources (vs. the spike-driven `CaM`, `CaP`, `CaD` variables above, which only reflect the spiking component). This is integrated into its own `CaM`, `CaP`, `CaD` cascade (stored as `LearnCaM`, `LearnCaP`, `LearnCaD`), and the temporal derivative is `CaP - CaD` (CaMKII - DAPK1). This approximates the backprop error derivative on net input, but the VGCC component adds a proportion of recv activation delta as well -- a balance of both works best. The synaptic-level trace multiplier provides the credit assignment factor, reflecting coincident activity and potentially smoothed over longer multi-trial timescales.
+* `CaM` = EWMA of `LearnCa` with smoothing factor `1/MTau` (typically 1/5), simulating a calmodulin (`CaM`) like signal, which then drives `CaP`, `CaD` for delta signal driving error-driven learning.
* `CaP` = EWMA of `CaM` with smoothing factor `1/PTau` (typically 1/40), representing the plus, LTP direction of weight change and capturing the function of CaMKII in the Kinase learning rule.
* `CaD` = EWMA of `CaP` with smoothing factor `1/DTau` (typically 1/40), representing the minus, LTD direction of weight change and capturing the function of DAPK1 in the Kinase learning rule.
* `CaDiff` = difference `CaP - CaD `-- this is the error signal that drives error-driven learning.
-* `RLRate` = recv-unit based learning rate multiplier, reflecting the sigmoid derivative computed from the `CaSpkD` of recv unit, and the normalized difference `(CaSpkP - CaSpkD) / Max(CaSpkP - CaSpkD)`.
+* `RLRate` = recv-unit based learning rate multiplier, reflecting the sigmoid derivative computed from the `CaD` of recv unit, and the normalized difference `(CaP - CaD) / Max(CaP - CaD)`.
#### Stats, aggregate values
-* `SpkMaxCa` = Ca smoothed like `CaSpkP` but only starting at `MaxCycStart` cycle, to prevent inclusion of carryover spiking from prior theta cycle trial -- the `PTau` time constant otherwise results in significant carryover. This is the input to `SpkMax`.
-* `SpkMax` = maximum `CaSpkP` across one theta cycle time window (max of `SpkMaxCa`) -- used for specialized algorithms that have more phasic behavior within a single trial, e.g., basal ganglia matrix layer gating. Also useful for visualization of peak activity of neurons.
-* `SpkPrv` = final `CaSpkD` activation state at end of previous theta cycle. Used for specialized learning mechanisms that operate on delayed sending activations.
-* `SpkSt1` = the activation state at specific time point within current state processing window (e.g., 50 msec for beta cycle within standard theta cycle), as saved by `SpkSt1()` function. Used for example in hippocampus for CA3, CA1 learning.
-* `SpkSt2` = the activation state at specific time point within current state processing window (e.g., 100 msec for beta cycle within standard theta cycle), as saved by `SpkSt2()` function. Used for example in hippocampus for CA3, CA1 learning.
+* `SpkMaxCa` = Ca smoothed like `CaP` but only starting at `MaxCycStart` cycle, to prevent inclusion of carryover spiking from prior theta cycle trial -- the `PTau` time constant otherwise results in significant carryover. This is the input to `SpkMax`.
+* `SpkMax` = maximum `CaP` across one theta cycle time window (max of `SpkMaxCa`) -- used for specialized algorithms that have more phasic behavior within a single trial, e.g., basal ganglia matrix layer gating. Also useful for visualization of peak activity of neurons.
+* `SpkPrv` = final `CaD` activation state at end of previous theta cycle. Used for specialized learning mechanisms that operate on delayed sending activations.
+* `Beta1` = the activation state at specific time point within current state processing window (e.g., 50 msec for beta cycle within standard theta cycle), as saved by `Beta1()` function. Used for example in hippocampus for CA3, CA1 learning.
+* `Beta2` = the activation state at specific time point within current state processing window (e.g., 100 msec for beta cycle within standard theta cycle), as saved by `Beta2()` function. Used for example in hippocampus for CA3, CA1 learning.
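+
+As a usage sketch (not the actual looper configuration), the `Beta1` and `Beta2` snapshots above are captured by calling the new `Network.Beta1` and `Network.Beta2` methods at the corresponding cycles within a theta-cycle trial. The 200-cycle trial length and the import path here are assumptions:
+
+```Go
+package sketch
+
+import "github.com/emer/axon/v2/axon" // import path assumed
+
+// BetaSnapshots runs one theta-cycle trial of nCycles cycles, capturing
+// the Beta1 and Beta2 activation snapshots at 50 and 100 msec.
+// Real simulations schedule these calls through the looper instead.
+func BetaSnapshots(net *axon.Network, nCycles int) {
+	for cyc := 1; cyc <= nCycles; cyc++ {
+		net.Cycle(1, false) // one cycle (msec) of activation updating
+		switch cyc {
+		case 50:
+			net.Beta1() // copies CaP into Beta1 for all neurons
+		case 100:
+			net.Beta2() // copies CaP into Beta2 for all neurons
+		}
+	}
+}
+```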
#### Long-term average activation, set point for synaptic scaling
@@ -449,7 +448,7 @@ The [`axon.Neuron`](axon/neuron.go) struct contains all the neuron (unit) level
#### Special layer type variables
-* `Burst` = 5IB bursting activation value, computed by thresholding regular CaSpkP value in Super superficial layers.
+* `Burst` = 5IB bursting activation value, computed by thresholding regular CaP value in Super superficial layers.
* `BurstPrv` = previous Burst bursting activation from prior time step -- used for context-based learning.
* `CtxtGe` = context (temporally delayed) excitatory conductance, driven by deep bursting at end of the plus phase, for CT layers.
* `CtxtGeRaw` = raw update of context (temporally delayed) excitatory conductance, driven by deep bursting at end of the plus phase, for CT layers.
@@ -493,7 +492,7 @@ The `axon.Network` `CycleImpl` method in [`axon/network.go`](axon/network.go) ca
* `GiFmSpikes` on all `Layer`s: computes inhibitory conductances based on total incoming FF and FB spikes into the layer, using the [FS-FFFB](fsfffb) summary functions.
-* `CycleNeuron` on all `Neuron`s: integrates the Ge and Gi conductances from above, updates all the other channel conductances as described in [chans](chans), and then computes `Inet` as the net current from all these conductances, which then drives updates to `Vm` and `VmDend`. If `Vm` exceeds threshold then `Spike` = 1. It also updates the neuron-level calcium variables that drive learning (`CaLrn`, `CaM`, `CaP`, `CaD` and `CaSpk` versions of these).
+* `CycleNeuron` on all `Neuron`s: integrates the Ge and Gi conductances from above, updates all the other channel conductances as described in [chans](chans), and then computes `Inet` as the net current from all these conductances, which then drives updates to `Vm` and `VmDend`. If `Vm` exceeds threshold then `Spike` = 1. It also updates the neuron-level calcium variables that drive learning (`LearnCa` and its `LearnCaM`, `LearnCaP`, `LearnCaD` cascade, along with the spike-driven `CaM`, `CaP`, `CaD`).
* `SendSpike` on all `Neuron`s: for each neuron with `Spike` = 1, adds scaled synaptic weight value to `GBuf` ring buffer for efficiently delaying receipt of the spike per parametrized `Com.Delay` cycles. This is what the `PathGatherSpikes` then integrates. This is very expensive computationally because it goes through every synapse.
@@ -597,11 +596,11 @@ If the neuron has just spiked within the `Tr` refractory time window (3 msec def
If the neuron did not spike, then `ISI++` is incremented.
-#### CaFmSpike: CaLrn (NMDA + VGCC) and Simple Spike-driven Ca Signals
+#### CaFmSpike: LearnCa (NMDA + VGCC) and Simple Spike-driven Ca Signals
-The core Ca calcium value that drives the *trace - kinase* learning rule is stored in the `CaLrn` neuron variable, as a sum of NMDA and VGCC calcium influx:
+The core Ca calcium value that drives the *trace - kinase* learning rule is stored in the `LearnCa` neuron variable, as a sum of NMDA and VGCC calcium influx:
-* `CaLrn = (NmdaCa + VgccCaInt) / Norm`
+* `LearnCa = (NmdaCa + VgccCaInt) / Norm`
Where `Norm` (80) renormalizes the concentration-based factors to a range that works well for learning.
@@ -614,17 +613,17 @@ In larger networks, directly using the calcium flux from the `VGCC` channel (`Vg
VgccCaInt += VgccCa - VgccCaInt / VgccTau // VgccTau = 10 msec
```
-This immediate `CaLrn` value is then subject to multiple levels of additional integration processes, reflecting the CaM calmodulin -> CaMKII -> DAPK1 cascades, into the `CaM`, `CaP` and `CaD` variables. The same time constants are used for as smoothing factors for these processes across various different variables, and are defined in the [kinase](kinase) package, as follows:
+This immediate `LearnCa` value is then subject to multiple levels of additional integration processes, reflecting the CaM calmodulin -> CaMKII -> DAPK1 cascades, into the `CaM`, `CaP` and `CaD` variables. The same time constants are used as smoothing factors for these processes across various different variables, and are defined in the [kinase](kinase) package, as follows:
-* `MTau` (2 or 5 msec) for `CaM` or `CaSpkM` = calmodulin time constant in cycles (msec) -- for synaptic-level integration this integrates on top of Ca signal from `send->CaSyn * recv->CaSyn`, each of which are typically smoothed with a 30 msec Tau.
+* `MTau` (2 or 5 msec) for `LearnCaM` or `CaM` = calmodulin time constant in cycles (msec) -- for synaptic-level integration this integrates on top of Ca signal from `send->CaSyn * recv->CaSyn`, each of which is typically smoothed with a 30 msec Tau.
-* `PTau` (40 msec) for `CaP` or `CaSpkP` = LTP spike-driven Ca factor time constant in cycles (msec), simulating CaMKII in the Kinase framework, with 40 on top of `MTau` roughly tracking the biophysical rise time. Computationally, `CaP` represents the plus phase learning signal that reflects the most recent past information.
+* `PTau` (40 msec) for `LearnCaP` or `CaP` = LTP spike-driven Ca factor time constant in cycles (msec), simulating CaMKII in the Kinase framework, with 40 on top of `MTau` roughly tracking the biophysical rise time. Computationally, `CaP` represents the plus phase learning signal that reflects the most recent past information.
-* `DTau` (40 msec) for `CaD` or `CaSpkD` = LTD spike-driven Ca factor time constant in cycles (msec), simulating DAPK1 in Kinase framework. Computationally, `CaD` represents the minus phase learning signal that reflects the expectation representation prior to experiencing the outcome (in addition to the outcome).
+* `DTau` (40 msec) for `LearnCaD` or `CaD` = LTD spike-driven Ca factor time constant in cycles (msec), simulating DAPK1 in the Kinase framework. Computationally, `CaD` represents the minus phase learning signal that reflects the expectation representation prior to experiencing the outcome (in addition to the outcome).
The cascading update looks like this:
```Go
- CaM += (CaLrn - CaM) / MTau
+ CaM += (LearnCa - CaM) / MTau
CaP += (CaM - CaP) / PTau
CaD += (CaP - CaD) / DTau
```
@@ -642,9 +641,9 @@ Finally, various peripheral aspects of learning (learning rate modulation, thres
The cascaded integration of these variables is:
```Go
- CaSpkM += (SpikeG * Spike - CaSpkM) / MTau
- CaSpkP += (CaSpkM - CaSpkP) / PTau
- CaSpkD += (CaSpkP - CaSpkD) / DTau
+ CaM += (SpikeG * Spike - CaM) / MTau
+ CaP += (CaM - CaP) / PTau
+ CaD += (CaP - CaD) / DTau
```
### SendSpike
@@ -655,7 +654,7 @@ This is expensive computationally because it requires traversing all of the syna
### SynCaSend, SynCaRecv
-If synapse-level calcium (Ca) is being used for the trace *Credit* assignment factor in learning, then two pathway-level functions are called across all pathways, which are optimized to first filter by any sending neurons that have just spiked (`SynCaSend`) and then any receiving neurons that spiked (`SynCaRecv`) -- Ca only needs to be updated in these two cases. This major opmitimization is only possible when using the simplified purely spike-driven form of Ca as in the `CaSpk` vars above. Another optimization is to exclude any neurons for which `CaSpkP` and `CaSpkD` are below a low update threshold `UpdateThr` = 0.01.
+If synapse-level calcium (Ca) is being used for the trace *Credit* assignment factor in learning, then two pathway-level functions are called across all pathways, which are optimized to first filter by any sending neurons that have just spiked (`SynCaSend`) and then any receiving neurons that spiked (`SynCaRecv`) -- Ca only needs to be updated in these two cases. This major optimization is only possible when using the simplified purely spike-driven form of Ca, as in the spike-driven `CaM`, `CaP`, `CaD` variables above. Another optimization is to exclude any neurons for which `CaP` and `CaD` are below a low update threshold `UpdateThr` = 0.01.
After filtering, the basic cascaded integration shown above is performed on synapse-level variables where the immediate driving Ca value is the product of `CaSyn` on the recv and send neurons times a `SpikeG` gain factor:
* `CaM += (SpikeG * send.CaSyn * recv.CaSyn - CaM) / MTau`
@@ -676,8 +675,8 @@ The *Credit* assignment component is the *trace*, based on the longest time-scal
* `Tr += (CaD - Tr) / Tau // Tau = 1 or 2+ trials`
Along with a `RLRate` factor that represents the derivative of the receiving activation, which is updated for each neuron at the end of the *plus* phase prior to doing `DWt`:
-* `RLRate = CaSpkD * (Max - CaSpkD) * (ABS(CaSpkP - CaSpkD) / MAX(CaSpkP - CaSpkD))`
- + `Max` = maximum CaSpkD value across the layer
+* `RLRate = CaD * (Max - CaD) * (ABS(CaP - CaD) / MAX(CaP - CaD))`
+ + `Max` = maximum CaD value across the layer
Thus, the complete learning function is:
* `DWt = (recv.CaP - recv.CaD) * Tr * recv.RLRate`
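+
+As a minimal illustration of this complete learning function, the following sketch computes the weight change for a single synapse. The variable names (`recvCaP`, `recvCaD`, `tr`, `rlRate`, `lrate`) are placeholders rather than the actual axon field names; the real implementation is in the `DWtSyn` pathway functions.
+
+```Go
+package sketch
+
+// DWtSketch computes the trace-kinase weight change for one synapse:
+// the receiver's temporal-difference error (CaP - CaD) gated by the
+// synaptic credit trace Tr and the RLRate learning-rate factor.
+func DWtSketch(recvCaP, recvCaD, tr, rlRate, lrate float32) float32 {
+	err := recvCaP - recvCaD // plus (CaMKII) vs. minus (DAPK1) learning signal
+	return lrate * rlRate * tr * err
+}
+```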
@@ -719,7 +718,7 @@ Every `SlowInterval` (100) Trials, the `SlowAdapt` methods are called on all Lay
### Target vs. Average Activity
First, when the network is initialized, a `TrgAvg` value is assigned to each neuron by uniformly sampling within a range of target values (0.5 - 2.0) and permuting the values among the set of neurons. This target is then updated as a function of the receiving unit error-gradient, subject to a zero-sum constraint across the relevant Pool of neurons:
-* `DTrgAvg += ErrLRate * (CaSpkP - CaSpkD) // ErrLRate = .02`
+* `DTrgAvg += ErrLRate * (CaP - CaD) // ErrLRate = .02`
* `TrgAvg += DTrgAvg - AVG(DTrgAvg) // zero-sum`
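+
+The sketch below illustrates this zero-sum target-average update over one pool of neurons, following the two equations above with the stated `ErrLRate` of 0.02. The slice-based form and names are placeholders, not the actual axon implementation (see `PlusPhaseActAvg` and the `SlowAdapt` methods).
+
+```Go
+package sketch
+
+// UpdateTrgAvgSketch applies the error-gradient increment to each neuron's
+// target average activity, then subtracts the pool mean to keep the pool
+// zero-sum overall.
+func UpdateTrgAvgSketch(caP, caD, trgAvg []float32, errLRate float32) {
+	n := len(trgAvg)
+	if n == 0 {
+		return
+	}
+	dTrg := make([]float32, n)
+	var sum float32
+	for i := range dTrg {
+		dTrg[i] = errLRate * (caP[i] - caD[i]) // error-gradient based increment
+		sum += dTrg[i]
+	}
+	avg := sum / float32(n)
+	for i := range trgAvg {
+		trgAvg[i] += dTrg[i] - avg // zero-sum constraint across the pool
+	}
+}
+```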
diff --git a/axon/act-layer.go b/axon/act-layer.go
index 4fd2c62ab..51086e70e 100644
--- a/axon/act-layer.go
+++ b/axon/act-layer.go
@@ -261,7 +261,7 @@ func (ly *LayerParams) PulvinarDriver(ctx *Context, lni, di uint32, drvGe, nonDr
dli := uint32(ly.Pulv.DriveLayIndex)
dly := GetLayers(dli)
dpi := dly.PoolIndex(0)
- drvMax := PoolAvgMax(AMCaSpkP, AMCycle, Max, dpi, di)
+ drvMax := PoolAvgMax(AMCaP, AMCycle, Max, dpi, di)
*nonDrivePct = ly.Pulv.NonDrivePct(drvMax) // how much non-driver to keep
burst := Neurons.Value(int(dly.Indexes.NeurSt+lni), int(di), int(Burst))
*drvGe = ly.Pulv.DriveGe(burst)
@@ -534,7 +534,7 @@ func (ly *LayerParams) SpikeFromG(ctx *Context, lpi, ni, di uint32) {
Neurons.Set(Neurons.Value(int(ni), int(di), int(GeInt)), int(ni), int(di), int(GeIntNorm))
}
if ctx.Cycle >= ly.Acts.Dt.MaxCycStart {
- Neurons.SetAdd(ly.Learn.CaSpk.Dt.PDt*(Neurons.Value(int(ni), int(di), int(CaSpkM))-Neurons.Value(int(ni), int(di), int(SpkMaxCa))), int(ni), int(di), int(SpkMaxCa))
+ Neurons.SetAdd(ly.Learn.CaSpk.Dt.PDt*(Neurons.Value(int(ni), int(di), int(CaM))-Neurons.Value(int(ni), int(di), int(SpkMaxCa))), int(ni), int(di), int(SpkMaxCa))
spkmax := Neurons.Value(int(ni), int(di), int(SpkMaxCa))
if spkmax > Neurons.Value(int(ni), int(di), int(SpkMax)) {
Neurons.Set(spkmax, int(ni), int(di), int(SpkMax))
@@ -566,7 +566,7 @@ func (ly *LayerParams) SendSpike(ctx *Context, ni, di uint32) {
// PostSpikeSpecial does updates at neuron level after spiking has been computed.
// This is where special layer types add extra code.
func (ly *LayerParams) PostSpikeSpecial(ctx *Context, lpi, pi, ni, di uint32) {
- Neurons.Set(Neurons.Value(int(ni), int(di), int(CaSpkP)), int(ni), int(di), int(Burst))
+ Neurons.Set(Neurons.Value(int(ni), int(di), int(CaP)), int(ni), int(di), int(Burst))
li := ly.Index
pil := pi - ly.PoolSt // 0-n pool index
pnn := uint32(PoolNNeurons(pi))
@@ -575,10 +575,10 @@ func (ly *LayerParams) PostSpikeSpecial(ctx *Context, lpi, pi, ni, di uint32) {
switch ly.Type {
case SuperLayer:
if ctx.PlusPhase.IsTrue() {
- actMax := PoolAvgMax(AMCaSpkP, AMCycle, Max, lpi, di)
- actAvg := PoolAvgMax(AMCaSpkP, AMCycle, Avg, lpi, di)
+ actMax := PoolAvgMax(AMCaP, AMCycle, Max, lpi, di)
+ actAvg := PoolAvgMax(AMCaP, AMCycle, Avg, lpi, di)
thr := ly.Bursts.ThrFromAvgMax(actAvg, actMax)
- if Neurons.Value(int(ni), int(di), int(CaSpkP)) < thr {
+ if Neurons.Value(int(ni), int(di), int(CaP)) < thr {
Neurons.Set(0.0, int(ni), int(di), int(Burst))
}
}
@@ -690,7 +690,7 @@ func (ly *LayerParams) PostSpikeSpecial(ctx *Context, lpi, pi, ni, di uint32) {
}
// PostSpike does updates at neuron level after spiking has been computed.
-// It calls PostSpikeSpecial. It also updates the CaSpkPCyc stats.
+// It calls PostSpikeSpecial. It also updates the CaPCyc stats.
func (ly *LayerParams) PostSpike(ctx *Context, lpi, pi, ni, di uint32) {
ly.PostSpikeSpecial(ctx, lpi, pi, ni, di)
intdt := ly.Acts.Dt.IntDt
@@ -748,7 +748,7 @@ func (ly *LayerParams) CyclePost(ctx *Context, di uint32) {
// CyclePostLayer is called for all layer types
func (ly *LayerParams) CyclePostLayer(ctx *Context, lpi, di uint32) {
- casp := PoolAvgMax(AMCaSpkP, AMCycle, Max, lpi, di)
+ casp := PoolAvgMax(AMCaP, AMCycle, Max, lpi, di)
if ctx.Cycle >= ly.Acts.Dt.MaxCycStart && casp > 0.5 { // todo: param
if LayerStates.Value(int(ly.Index), int(di), int(LayerRT)) <= 0 {
LayerStates.Set(float32(ctx.Cycle), int(ly.Index), int(di), int(LayerRT))
@@ -760,14 +760,14 @@ func (ly *LayerParams) CyclePostLayer(ctx *Context, lpi, di uint32) {
// for purposes of computing ACh salience value.
// Typically the input is a superior colliculus (SC) layer that rapidly
// accommodates after the onset of a stimulus.
-// using lpl.AvgMax.CaSpkP.Cycle.Max for layer activity measure.
+// using lpl.AvgMax.CaP.Cycle.Max for layer activity measure.
func (ly *LayerParams) LDTSrcLayAct(layIndex int32, di uint32) float32 {
if layIndex < 0 {
return 0
}
oly := GetLayers(uint32(layIndex))
opi := oly.PoolIndex(0)
- return PoolAvgMax(AMCaSpkP, AMCycle, Avg, opi, di)
+ return PoolAvgMax(AMCaP, AMCycle, Avg, opi, di)
}
func (ly *LayerParams) CyclePostLDTLayer(ctx *Context, di uint32, srcLay1Act, srcLay2Act, srcLay3Act, srcLay4Act float32) {
@@ -828,7 +828,7 @@ func (ly *LayerParams) CyclePostTDDaLayer(ctx *Context, di uint32) {
}
func (ly *LayerParams) CyclePostCeMLayer(ctx *Context, lpi, di uint32) {
- casd := PoolAvgMax(AMCaSpkD, AMCycle, Max, lpi, di)
+ casd := PoolAvgMax(AMCaD, AMCycle, Max, lpi, di)
if ly.Learn.NeuroMod.Valence == Positive {
GlobalScalars.Set(casd, int(GvCeMpos), int(di))
} else {
@@ -842,7 +842,7 @@ func (ly *LayerParams) CyclePostVTALayer(ctx *Context, di uint32) {
// note: needs to iterate over sub-pools in layer!
func (ly *LayerParams) CyclePostVSPatchLayer(ctx *Context, pi, di uint32, spi int32) {
- casd := PoolAvgMax(AMCaSpkD, AMCycle, Avg, pi, di)
+ casd := PoolAvgMax(AMCaD, AMCycle, Avg, pi, di)
if ly.Learn.NeuroMod.DAMod == D1Mod {
GlobalVectors.Set(casd, int(GvVSPatchD1), int(uint32(pi-1)), int(di))
} else {
@@ -926,7 +926,7 @@ func (ly *LayerParams) NewStatePool(ctx *Context, pi, di uint32) {
// Should already have presented the external input to the network at this point.
func (ly *LayerParams) NewStateNeuron(ctx *Context, ni, di uint32) {
Neurons.Set(Neurons.Value(int(ni), int(di), int(Burst)), int(ni), int(di), int(BurstPrv))
- Neurons.Set(Neurons.Value(int(ni), int(di), int(CaSpkD)), int(ni), int(di), int(SpkPrv))
+ Neurons.Set(Neurons.Value(int(ni), int(di), int(CaD)), int(ni), int(di), int(SpkPrv))
Neurons.Set(0.0, int(ni), int(di), int(SpkMax))
Neurons.Set(0.0, int(ni), int(di), int(SpkMaxCa))
ly.Acts.DecayState(ctx, ni, di, ly.Acts.Decay.Act, ly.Acts.Decay.Glong, ly.Acts.Decay.AHP)
@@ -934,6 +934,18 @@ func (ly *LayerParams) NewStateNeuron(ctx *Context, ni, di uint32) {
ly.Acts.KNaNewState(ctx, ni, di)
}
+// Beta1Neuron does neuron level Beta1 updating.
+func (ly *LayerParams) Beta1Neuron(ctx *Context, ni, di uint32) {
+ Neurons.Set(Neurons.Value(int(ni), int(di), int(CaP)), int(ni), int(di), int(Beta1))
+}
+
+// Beta2Neuron does neuron level Beta2 updating.
+func (ly *LayerParams) Beta2Neuron(ctx *Context, ni, di uint32) {
+ Neurons.Set(Neurons.Value(int(ni), int(di), int(CaP)), int(ni), int(di), int(Beta2))
+}
+
+//////// Minus Phase
+
func (ly *LayerParams) MinusPhasePool(ctx *Context, pi uint32) {
for di := uint32(0); di < ctx.NData; di++ {
PoolCycleToMinus(pi, di)
@@ -970,7 +982,6 @@ func (ly *LayerParams) AvgGeM(ctx *Context, di uint32, geIntMinusMax, giIntMinus
// MinusPhaseNeuron does neuron level minus-phase updating
func (ly *LayerParams) MinusPhaseNeuron(ctx *Context, ni, di uint32) {
Neurons.Set(Neurons.Value(int(ni), int(di), int(ActInt)), int(ni), int(di), int(ActM))
- Neurons.Set(Neurons.Value(int(ni), int(di), int(CaSpkP)), int(ni), int(di), int(CaSpkPM))
}
// MinusPhasePost does special algorithm processing at end of minus
@@ -1007,18 +1018,18 @@ func (ly *LayerParams) PlusPhaseNeuron(ctx *Context, ni, di uint32) {
pi := ly.PoolIndex(NeuronIxs.Value(int(ni), int(NrnSubPool)))
lpi := ly.PoolIndex(0)
Neurons.Set(Neurons.Value(int(ni), int(di), int(ActInt)), int(ni), int(di), int(ActP))
- nrnCaSpkP := Neurons.Value(int(ni), int(di), int(CaSpkP))
- nrnCaSpkD := Neurons.Value(int(ni), int(di), int(CaSpkD))
+ nrnCaP := Neurons.Value(int(ni), int(di), int(CaP))
+ nrnCaD := Neurons.Value(int(ni), int(di), int(CaD))
da := GlobalScalars.Value(int(GvDA), int(di))
ach := GlobalScalars.Value(int(GvACh), int(di))
- mlr := ly.Learn.RLRate.RLRateSigDeriv(nrnCaSpkD, PoolAvgMax(AMCaSpkD, AMCycle, Max, lpi, di))
+ mlr := ly.Learn.RLRate.RLRateSigDeriv(nrnCaD, PoolAvgMax(AMCaD, AMCycle, Max, lpi, di))
modlr := ly.Learn.NeuroMod.LRMod(da, ach)
dlr := float32(1)
hasRew := (GlobalScalars.Value(int(GvHasRew), int(di))) > 0
switch ly.Type {
case BLALayer:
- dlr = ly.Learn.RLRate.RLRateDiff(nrnCaSpkP, Neurons.Value(int(ni), int(di), int(SpkPrv))) // delta on previous trial
+ dlr = ly.Learn.RLRate.RLRateDiff(nrnCaP, Neurons.Value(int(ni), int(di), int(SpkPrv))) // delta on previous trial
if !ly.Learn.NeuroMod.IsBLAExt() && PoolsInt.Value(int(pi), int(0), int(PoolNeurSt)) == 0 { // first pool
dlr = 0 // first pool is novelty / curiosity -- no learn
}
@@ -1033,14 +1044,14 @@ func (ly *LayerParams) PlusPhaseNeuron(ctx *Context, ni, di uint32) {
modlr = 1 // don't use mod
}
default:
- dlr = ly.Learn.RLRate.RLRateDiff(nrnCaSpkP, nrnCaSpkD)
+ dlr = ly.Learn.RLRate.RLRateDiff(nrnCaP, nrnCaD)
}
Neurons.Set(mlr*dlr*modlr, int(ni), int(di), int(RLRate))
var tau float32
sahpN := Neurons.Value(int(ni), int(di), int(SahpN))
nrnSaphCa := Neurons.Value(int(ni), int(di), int(SahpCa))
ly.Acts.Sahp.NinfTauFromCa(nrnSaphCa, &sahpN, &tau)
- nrnSaphCa = ly.Acts.Sahp.CaInt(nrnSaphCa, nrnCaSpkD)
+ nrnSaphCa = ly.Acts.Sahp.CaInt(nrnSaphCa, nrnCaD)
Neurons.Set(sahpN, int(ni), int(di), int(SahpN))
Neurons.Set(nrnSaphCa, int(ni), int(di), int(SahpCa))
Neurons.Set(ly.Acts.Sahp.GsAHP(sahpN), int(ni), int(di), int(Gsahp))
@@ -1055,7 +1066,7 @@ func (ly *LayerParams) PlusPhasePost(ctx *Context) {
for spi := uint32(1); spi < np; spi++ {
for di := uint32(0); di < ctx.NData; di++ {
pi := ly.PoolIndex(spi)
- val := PoolAvgMax(AMCaSpkD, AMCycle, Avg, pi, di)
+ val := PoolAvgMax(AMCaD, AMCycle, Avg, pi, di)
GlobalVectors.Set(val, int(GvOFCposPTMaint), int(uint32(pi-1)), int(di))
}
}
@@ -1091,7 +1102,7 @@ func (ly *LayerParams) PlusPhaseActAvg(ctx *Context) {
dTrgSum := float32(0)
avgSum := float32(0)
for di := uint32(0); di < ctx.NData; di++ {
- dTrgSum += ly.LearnTrgAvgErrLRate() * (Neurons.Value(int(ni), int(di), int(CaSpkP)) - Neurons.Value(int(ni), int(di), int(CaSpkD)))
+ dTrgSum += ly.LearnTrgAvgErrLRate() * (Neurons.Value(int(ni), int(di), int(CaP)) - Neurons.Value(int(ni), int(di), int(CaD)))
avgSum += ly.Acts.Dt.LongAvgDt * (Neurons.Value(int(ni), int(di), int(ActM)) - NeuronAvgs.Value(int(ni), int(ActAvg)))
}
NeuronAvgs.SetAdd(dTrgSum, int(ni), int(DTrgAvg))
@@ -1101,6 +1112,8 @@ func (ly *LayerParams) PlusPhaseActAvg(ctx *Context) {
//gosl:end
+//////// Apply Ext
+
// InitExt initializes external input state.
// Should be called prior to ApplyExt on all layers receiving Ext input.
func (ly *Layer) InitExt() {
@@ -1322,31 +1335,3 @@ func (ly *Layer) ClearTargExt(ctx *Context) {
}
}
}
-
-// SpkSt1 saves current activation state in SpkSt1 variables (using CaP)
-func (ly *Layer) SpkSt1(ctx *Context) {
- nn := ly.NNeurons
- for lni := uint32(0); lni < nn; lni++ {
- ni := ly.NeurStIndex + lni
- if NeuronIsOff(ni) {
- continue
- }
- for di := uint32(0); di < ctx.NData; di++ {
- Neurons.Set(Neurons.Value(int(ni), int(di), int(CaSpkP)), int(SpkSt1), int(di), int(ni))
- }
- }
-}
-
-// SpkSt2 saves current activation state in SpkSt2 variables (using CaP)
-func (ly *Layer) SpkSt2(ctx *Context) {
- nn := ly.NNeurons
- for lni := uint32(0); lni < nn; lni++ {
- ni := ly.NeurStIndex + lni
- if NeuronIsOff(ni) {
- continue
- }
- for di := uint32(0); di < ctx.NData; di++ {
- Neurons.Set(Neurons.Value(int(ni), int(di), int(CaSpkP)), int(SpkSt2), int(di), int(ni))
- }
- }
-}
diff --git a/axon/act-layer.goal b/axon/act-layer.goal
index e1dd8a6e9..68becc4d3 100644
--- a/axon/act-layer.goal
+++ b/axon/act-layer.goal
@@ -259,7 +259,7 @@ func (ly *LayerParams) PulvinarDriver(ctx *Context, lni, di uint32, drvGe, nonDr
dli := uint32(ly.Pulv.DriveLayIndex)
dly := GetLayers(dli)
dpi := dly.PoolIndex(0)
- drvMax := PoolAvgMax(AMCaSpkP, AMCycle, Max, dpi, di)
+ drvMax := PoolAvgMax(AMCaP, AMCycle, Max, dpi, di)
*nonDrivePct = ly.Pulv.NonDrivePct(drvMax) // how much non-driver to keep
burst := Neurons[dly.Indexes.NeurSt+lni, di, Burst]
*drvGe = ly.Pulv.DriveGe(burst)
@@ -532,7 +532,7 @@ func (ly *LayerParams) SpikeFromG(ctx *Context, lpi, ni, di uint32) {
Neurons[ni, di, GeIntNorm] = Neurons[ni, di, GeInt]
}
if ctx.Cycle >= ly.Acts.Dt.MaxCycStart {
- Neurons[ni, di, SpkMaxCa] += ly.Learn.CaSpk.Dt.PDt * (Neurons[ni, di, CaSpkM] - Neurons[ni, di, SpkMaxCa])
+ Neurons[ni, di, SpkMaxCa] += ly.Learn.CaSpk.Dt.PDt * (Neurons[ni, di, CaM] - Neurons[ni, di, SpkMaxCa])
spkmax := Neurons[ni, di, SpkMaxCa]
if spkmax > Neurons[ni, di, SpkMax] {
Neurons[ni, di, SpkMax] = spkmax
@@ -564,7 +564,7 @@ func (ly *LayerParams) SendSpike(ctx *Context, ni, di uint32) {
// PostSpikeSpecial does updates at neuron level after spiking has been computed.
// This is where special layer types add extra code.
func (ly *LayerParams) PostSpikeSpecial(ctx *Context, lpi, pi, ni, di uint32) {
- Neurons[ni, di, Burst] = Neurons[ni, di, CaSpkP]
+ Neurons[ni, di, Burst] = Neurons[ni, di, CaP]
li := ly.Index
pil := pi - ly.PoolSt // 0-n pool index
pnn := uint32(PoolNNeurons(pi))
@@ -573,10 +573,10 @@ func (ly *LayerParams) PostSpikeSpecial(ctx *Context, lpi, pi, ni, di uint32) {
switch ly.Type {
case SuperLayer:
if ctx.PlusPhase.IsTrue() {
- actMax := PoolAvgMax(AMCaSpkP, AMCycle, Max, lpi, di)
- actAvg := PoolAvgMax(AMCaSpkP, AMCycle, Avg, lpi, di)
+ actMax := PoolAvgMax(AMCaP, AMCycle, Max, lpi, di)
+ actAvg := PoolAvgMax(AMCaP, AMCycle, Avg, lpi, di)
thr := ly.Bursts.ThrFromAvgMax(actAvg, actMax)
- if Neurons[ni, di, CaSpkP] < thr {
+ if Neurons[ni, di, CaP] < thr {
Neurons[ni, di, Burst] = 0.0
}
}
@@ -688,7 +688,7 @@ func (ly *LayerParams) PostSpikeSpecial(ctx *Context, lpi, pi, ni, di uint32) {
}
// PostSpike does updates at neuron level after spiking has been computed.
-// It calls PostSpikeSpecial. It also updates the CaSpkPCyc stats.
+// It calls PostSpikeSpecial. It also updates the CaPCyc stats.
func (ly *LayerParams) PostSpike(ctx *Context, lpi, pi, ni, di uint32) {
ly.PostSpikeSpecial(ctx, lpi, pi, ni, di)
intdt := ly.Acts.Dt.IntDt
@@ -746,7 +746,7 @@ func (ly *LayerParams) CyclePost(ctx *Context, di uint32) {
// CyclePostLayer is called for all layer types
func (ly *LayerParams) CyclePostLayer(ctx *Context, lpi, di uint32) {
- casp := PoolAvgMax(AMCaSpkP, AMCycle, Max, lpi, di)
+ casp := PoolAvgMax(AMCaP, AMCycle, Max, lpi, di)
if ctx.Cycle >= ly.Acts.Dt.MaxCycStart && casp > 0.5 { // todo: param
if LayerStates[ly.Index, di, LayerRT] <= 0 {
LayerStates[ly.Index, di, LayerRT] = float32(ctx.Cycle)
@@ -758,14 +758,14 @@ func (ly *LayerParams) CyclePostLayer(ctx *Context, lpi, di uint32) {
// for purposes of computing ACh salience value.
// Typically the input is a superior colliculus (SC) layer that rapidly
// accommodates after the onset of a stimulus.
-// using lpl.AvgMax.CaSpkP.Cycle.Max for layer activity measure.
+// using lpl.AvgMax.CaP.Cycle.Max for layer activity measure.
func (ly *LayerParams) LDTSrcLayAct(layIndex int32, di uint32) float32 {
if layIndex < 0 {
return 0
}
oly := GetLayers(uint32(layIndex))
opi := oly.PoolIndex(0)
- return PoolAvgMax(AMCaSpkP, AMCycle, Avg, opi, di)
+ return PoolAvgMax(AMCaP, AMCycle, Avg, opi, di)
}
@@ -827,7 +827,7 @@ func (ly *LayerParams) CyclePostTDDaLayer(ctx *Context, di uint32) {
}
func (ly *LayerParams) CyclePostCeMLayer(ctx *Context, lpi, di uint32) {
- casd := PoolAvgMax(AMCaSpkD, AMCycle, Max, lpi, di)
+ casd := PoolAvgMax(AMCaD, AMCycle, Max, lpi, di)
if ly.Learn.NeuroMod.Valence == Positive {
GlobalScalars[GvCeMpos, di] = casd
} else {
@@ -841,7 +841,7 @@ func (ly *LayerParams) CyclePostVTALayer(ctx *Context, di uint32) {
// note: needs to iterate over sub-pools in layer!
func (ly *LayerParams) CyclePostVSPatchLayer(ctx *Context, pi, di uint32, spi int32) {
- casd := PoolAvgMax(AMCaSpkD, AMCycle, Avg, pi, di)
+ casd := PoolAvgMax(AMCaD, AMCycle, Avg, pi, di)
if ly.Learn.NeuroMod.DAMod == D1Mod {
GlobalVectors[GvVSPatchD1, uint32(pi-1), di] = casd
} else {
@@ -925,7 +925,7 @@ func (ly *LayerParams) NewStatePool(ctx *Context, pi, di uint32) {
// Should already have presented the external input to the network at this point.
func (ly *LayerParams) NewStateNeuron(ctx *Context, ni, di uint32) {
Neurons[ni, di, BurstPrv] = Neurons[ni, di, Burst]
- Neurons[ni, di, SpkPrv] = Neurons[ni, di, CaSpkD]
+ Neurons[ni, di, SpkPrv] = Neurons[ni, di, CaD]
Neurons[ni, di, SpkMax] = 0.0
Neurons[ni, di, SpkMaxCa] = 0.0
ly.Acts.DecayState(ctx, ni, di, ly.Acts.Decay.Act, ly.Acts.Decay.Glong, ly.Acts.Decay.AHP)
@@ -933,6 +933,18 @@ func (ly *LayerParams) NewStateNeuron(ctx *Context, ni, di uint32) {
ly.Acts.KNaNewState(ctx, ni, di)
}
+// Beta1Neuron does neuron level Beta1 updating.
+func (ly *LayerParams) Beta1Neuron(ctx *Context, ni, di uint32) {
+ Neurons[ni, di, Beta1] = Neurons[ni, di, CaP]
+}
+
+// Beta2Neuron does neuron level Beta2 updating.
+func (ly *LayerParams) Beta2Neuron(ctx *Context, ni, di uint32) {
+ Neurons[ni, di, Beta2] = Neurons[ni, di, CaP]
+}
+
+//////// Minus Phase
+
func (ly *LayerParams) MinusPhasePool(ctx *Context, pi uint32) {
for di := uint32(0); di < ctx.NData; di++ {
PoolCycleToMinus(pi, di)
@@ -969,7 +981,6 @@ func (ly *LayerParams) AvgGeM(ctx *Context, di uint32, geIntMinusMax, giIntMinus
// MinusPhaseNeuron does neuron level minus-phase updating
func (ly *LayerParams) MinusPhaseNeuron(ctx *Context, ni, di uint32) {
Neurons[ni, di, ActM] = Neurons[ni, di, ActInt]
- Neurons[ni, di, CaSpkPM] = Neurons[ni, di, CaSpkP]
}
// MinusPhasePost does special algorithm processing at end of minus
@@ -1006,18 +1017,18 @@ func (ly *LayerParams) PlusPhaseNeuron(ctx *Context, ni, di uint32) {
pi := ly.PoolIndex(NeuronIxs[ni, NrnSubPool])
lpi := ly.PoolIndex(0)
Neurons[ni, di, ActP] = Neurons[ni, di, ActInt]
- nrnCaSpkP := Neurons[ni, di, CaSpkP]
- nrnCaSpkD := Neurons[ni, di, CaSpkD]
+ nrnCaP := Neurons[ni, di, CaP]
+ nrnCaD := Neurons[ni, di, CaD]
da := GlobalScalars[GvDA, di]
ach := GlobalScalars[GvACh, di]
- mlr := ly.Learn.RLRate.RLRateSigDeriv(nrnCaSpkD, PoolAvgMax(AMCaSpkD, AMCycle, Max, lpi, di))
+ mlr := ly.Learn.RLRate.RLRateSigDeriv(nrnCaD, PoolAvgMax(AMCaD, AMCycle, Max, lpi, di))
modlr := ly.Learn.NeuroMod.LRMod(da, ach)
dlr := float32(1)
hasRew := (GlobalScalars[GvHasRew, di]) > 0
switch ly.Type {
case BLALayer:
- dlr = ly.Learn.RLRate.RLRateDiff(nrnCaSpkP, Neurons[ni, di, SpkPrv]) // delta on previous trial
+ dlr = ly.Learn.RLRate.RLRateDiff(nrnCaP, Neurons[ni, di, SpkPrv]) // delta on previous trial
if !ly.Learn.NeuroMod.IsBLAExt() && PoolsInt[pi, 0, PoolNeurSt] == 0 { // first pool
dlr = 0 // first pool is novelty / curiosity -- no learn
}
@@ -1032,14 +1043,14 @@ func (ly *LayerParams) PlusPhaseNeuron(ctx *Context, ni, di uint32) {
modlr = 1 // don't use mod
}
default:
- dlr = ly.Learn.RLRate.RLRateDiff(nrnCaSpkP, nrnCaSpkD)
+ dlr = ly.Learn.RLRate.RLRateDiff(nrnCaP, nrnCaD)
}
Neurons[ni, di, RLRate] = mlr * dlr * modlr
var tau float32
sahpN := Neurons[ni, di, SahpN]
nrnSaphCa := Neurons[ni, di, SahpCa]
ly.Acts.Sahp.NinfTauFromCa(nrnSaphCa, &sahpN, &tau)
- nrnSaphCa = ly.Acts.Sahp.CaInt(nrnSaphCa, nrnCaSpkD)
+ nrnSaphCa = ly.Acts.Sahp.CaInt(nrnSaphCa, nrnCaD)
Neurons[ni, di, SahpN] = sahpN
Neurons[ni, di, SahpCa] = nrnSaphCa
Neurons[ni, di, Gsahp] = ly.Acts.Sahp.GsAHP(sahpN)
@@ -1054,7 +1065,7 @@ func (ly *LayerParams) PlusPhasePost(ctx *Context) {
for spi := uint32(1); spi < np; spi++ {
for di := uint32(0); di < ctx.NData; di++ {
pi := ly.PoolIndex(spi)
- val := PoolAvgMax(AMCaSpkD, AMCycle, Avg, pi, di)
+ val := PoolAvgMax(AMCaD, AMCycle, Avg, pi, di)
GlobalVectors[GvOFCposPTMaint, uint32(pi-1), di] = val
}
}
@@ -1090,7 +1101,7 @@ func (ly *LayerParams) PlusPhaseActAvg(ctx *Context) {
dTrgSum := float32(0)
avgSum := float32(0)
for di := uint32(0); di < ctx.NData; di++ {
- dTrgSum += ly.LearnTrgAvgErrLRate() * (Neurons[ni, di, CaSpkP] - Neurons[ni, di, CaSpkD])
+ dTrgSum += ly.LearnTrgAvgErrLRate() * (Neurons[ni, di, CaP] - Neurons[ni, di, CaD])
avgSum += ly.Acts.Dt.LongAvgDt * (Neurons[ni, di, ActM] - NeuronAvgs[ni, ActAvg])
}
NeuronAvgs[ni, DTrgAvg] += dTrgSum
@@ -1100,6 +1111,8 @@ func (ly *LayerParams) PlusPhaseActAvg(ctx *Context) {
//gosl:end
+//////// Apply Ext
+
// InitExt initializes external input state.
// Should be called prior to ApplyExt on all layers receiving Ext input.
func (ly *Layer) InitExt() {
@@ -1322,31 +1335,3 @@ func (ly *Layer) ClearTargExt(ctx *Context) {
}
}
-// SpkSt1 saves current activation state in SpkSt1 variables (using CaP)
-func (ly *Layer) SpkSt1(ctx *Context) {
- nn := ly.NNeurons
- for lni := uint32(0); lni < nn; lni++ {
- ni := ly.NeurStIndex + lni
- if NeuronIsOff(ni) {
- continue
- }
- for di := uint32(0); di < ctx.NData; di++ {
- Neurons[SpkSt1, di, ni] = Neurons[ni, di, CaSpkP]
- }
- }
-}
-
-// SpkSt2 saves current activation state in SpkSt2 variables (using CaP)
-func (ly *Layer) SpkSt2(ctx *Context) {
- nn := ly.NNeurons
- for lni := uint32(0); lni < nn; lni++ {
- ni := ly.NeurStIndex + lni
- if NeuronIsOff(ni) {
- continue
- }
- for di := uint32(0); di < ctx.NData; di++ {
- Neurons[SpkSt2, di, ni] = Neurons[ni, di, CaSpkP]
- }
- }
-}
-
diff --git a/axon/act-net.go b/axon/act-net.go
index 875acdd1c..1c8d991cd 100644
--- a/axon/act-net.go
+++ b/axon/act-net.go
@@ -8,8 +8,6 @@ package axon
import "cogentcore.org/core/enums"
-// todo: don't even need layer-level ultimately.
-
// Cycle runs n cycles of activation updating.
// If getNeurons is true, then neuron state is synced back
// from the GPU (for cycle-level display etc). Otherwise only
@@ -21,7 +19,7 @@ func (nt *Network) Cycle(ncyc int, getNeurons bool) {
ld := int(nix.NLayers * ctx.NData)
pd := int(nix.NPools * ctx.NData)
- ToGPUCtxGlobal()
+ ToGPUCtxGlobal() // this is not a significant speed factor
for range ncyc {
RunGatherSpikes(nd)
RunLayerGi(ld)
@@ -105,6 +103,22 @@ func (nt *Network) ApplyExts() {
RunApplyExtsNeuron(nd)
}
+// Beta1 does updating at Beta1 timescale.
+func (nt *Network) Beta1() {
+ nix := nt.NetIxs()
+ ctx := nt.Context()
+ nd := int(nix.NNeurons * ctx.NData)
+ RunBeta1Neuron(nd)
+}
+
+// Beta2 does updating at Beta2 timescale.
+func (nt *Network) Beta2() {
+ nix := nt.NetIxs()
+ ctx := nt.Context()
+ nd := int(nix.NNeurons * ctx.NData)
+ RunBeta2Neuron(nd)
+}
+
// MinusPhase does updating after end of minus phase.
func (nt *Network) MinusPhase() {
nix := nt.NetIxs()
@@ -121,6 +135,8 @@ func (nt *Network) PlusPhaseStart() {
nix := nt.NetIxs()
ctx := nt.Context()
nd := int(nix.NNeurons * ctx.NData)
+
+ ctx.PlusPhaseStart()
RunPlusPhaseStartNeuron(nd)
}
@@ -162,28 +178,6 @@ func (nt *Network) ClearTargExt() {
}
}
-// SpkSt1 saves current acts into SpkSt1 (using CaSpkP)
-func (nt *Network) SpkSt1() {
- ctx := nt.Context()
- for _, ly := range nt.Layers {
- if ly.Off {
- continue
- }
- ly.SpkSt1(ctx)
- }
-}
-
-// SpkSt2 saves current acts into SpkSt2 (using CaSpkP)
-func (nt *Network) SpkSt2() {
- ctx := nt.Context()
- for _, ly := range nt.Layers {
- if ly.Off {
- continue
- }
- ly.SpkSt2(ctx)
- }
-}
-
// GPUTestWrite writes values to neuron, for testing
func (nt *Network) GPUTestWrite() {
nix := nt.NetIxs()
@@ -304,6 +298,28 @@ func InitGBuffsPath(pti uint32) { //gosl:kernel
Paths[pti].InitGBuffs(ctx)
}
+// Beta1Neuron is the kernel over Neurons * Data to
+// do neuron-level updating at Beta1.
+func Beta1Neuron(i uint32) { //gosl:kernel
+ ctx := GetCtx(0)
+ di := ctx.DataIndex(i)
+ ni := ctx.ItemIndex(i)
+ li := NeuronIxs.Value(int(ni), int(NrnLayIndex))
+ Layers[li].Beta1Neuron(ctx, ni, di)
+}
+
+// Beta2Neuron is the kernel over Neurons * Data to
+// do neuron-level updating at Beta2.
+func Beta2Neuron(i uint32) { //gosl:kernel
+ ctx := GetCtx(0)
+ di := ctx.DataIndex(i)
+ ni := ctx.ItemIndex(i)
+ li := NeuronIxs.Value(int(ni), int(NrnLayIndex))
+ Layers[li].Beta2Neuron(ctx, ni, di)
+}
+
+//////// Minus Phase
+
// MinusPhasePool is the kernel over Pools to
// do pool-level updating after end of minus phase.
func MinusPhasePool(pi uint32) { //gosl:kernel
diff --git a/axon/act-net.goal b/axon/act-net.goal
index e5167a458..dd1890e1f 100644
--- a/axon/act-net.goal
+++ b/axon/act-net.goal
@@ -6,8 +6,6 @@ package axon
import "cogentcore.org/core/enums"
-// todo: don't even need layer-level ultimately.
-
// Cycle runs n cycles of activation updating.
// If getNeurons is true, then neuron state is synced back
// from the GPU (for cycle-level display etc). Otherwise only
@@ -19,7 +17,7 @@ func (nt *Network) Cycle(ncyc int, getNeurons bool) {
ld := int(nix.NLayers * ctx.NData)
pd := int(nix.NPools * ctx.NData)
- ToGPUCtxGlobal()
+ ToGPUCtxGlobal() // this is not a significant speed factor
for range ncyc {
RunGatherSpikes(nd)
RunLayerGi(ld)
@@ -99,6 +97,22 @@ func (nt *Network) ApplyExts() {
RunApplyExtsNeuron(nd)
}
+// Beta1 does updating at Beta1 timescale.
+func (nt *Network) Beta1() {
+ nix := nt.NetIxs()
+ ctx := nt.Context()
+ nd := int(nix.NNeurons * ctx.NData)
+ RunBeta1Neuron(nd)
+}
+
+// Beta2 does updating at Beta2 timescale.
+func (nt *Network) Beta2() {
+ nix := nt.NetIxs()
+ ctx := nt.Context()
+ nd := int(nix.NNeurons * ctx.NData)
+ RunBeta2Neuron(nd)
+}
+
// MinusPhase does updating after end of minus phase.
func (nt *Network) MinusPhase() {
nix := nt.NetIxs()
@@ -115,6 +129,8 @@ func (nt *Network) PlusPhaseStart() {
nix := nt.NetIxs()
ctx := nt.Context()
nd := int(nix.NNeurons * ctx.NData)
+
+ ctx.PlusPhaseStart()
RunPlusPhaseStartNeuron(nd)
}
@@ -156,28 +172,6 @@ func (nt *Network) ClearTargExt() {
}
}
-// SpkSt1 saves current acts into SpkSt1 (using CaSpkP)
-func (nt *Network) SpkSt1() {
- ctx := nt.Context()
- for _, ly := range nt.Layers {
- if ly.Off {
- continue
- }
- ly.SpkSt1(ctx)
- }
-}
-
-// SpkSt2 saves current acts into SpkSt2 (using CaSpkP)
-func (nt *Network) SpkSt2() {
- ctx := nt.Context()
- for _, ly := range nt.Layers {
- if ly.Off {
- continue
- }
- ly.SpkSt2(ctx)
- }
-}
-
// GPUTestWrite writes values to neuron, for testing
func (nt *Network) GPUTestWrite() {
nix := nt.NetIxs()
@@ -298,6 +292,28 @@ func InitGBuffsPath(pti uint32) { //gosl:kernel
Paths[pti].InitGBuffs(ctx)
}
+// Beta1Neuron is the kernel over Neurons * Data to
+// do neuron-level updating at Beta1.
+func Beta1Neuron(i uint32) { //gosl:kernel
+ ctx := GetCtx(0)
+ di := ctx.DataIndex(i)
+ ni := ctx.ItemIndex(i)
+ li := NeuronIxs[ni, NrnLayIndex]
+ Layers[li].Beta1Neuron(ctx, ni, di)
+}
+
+// Beta2Neuron is the kernel over Neurons * Data to
+// do neuron-level updating at Beta2.
+func Beta2Neuron(i uint32) { //gosl:kernel
+ ctx := GetCtx(0)
+ di := ctx.DataIndex(i)
+ ni := ctx.ItemIndex(i)
+ li := NeuronIxs[ni, NrnLayIndex]
+ Layers[li].Beta2Neuron(ctx, ni, di)
+}
+
+//////// Minus Phase
+
// MinusPhasePool is the kernel over Pools to
// do pool-level updating after end of minus phase.
func MinusPhasePool(pi uint32) { //gosl:kernel
diff --git a/axon/act-path.go b/axon/act-path.go
index 335d80c90..7a3b68a6b 100644
--- a/axon/act-path.go
+++ b/axon/act-path.go
@@ -281,7 +281,7 @@ func (pt *PathParams) SendSpike(ctx *Context, ni, di, lni uint32) {
if uint32(ctx.Cycle) != uint32(ctx.ThetaCycles)-1-pt.Com.DelLen {
return
}
- sendVal *= Neurons.Value(int(ni), int(di), int(Burst)) // Burst is regular CaSpkP for all non-SuperLayer neurons
+ sendVal *= Neurons.Value(int(ni), int(di), int(Burst)) // Burst is regular CaP for all non-SuperLayer neurons
} else {
if Neurons.Value(int(ni), int(di), int(Spike)) == 0 {
return
diff --git a/axon/act-path.goal b/axon/act-path.goal
index 27ed95cfc..bd390fa4c 100644
--- a/axon/act-path.goal
+++ b/axon/act-path.goal
@@ -279,7 +279,7 @@ func (pt *PathParams) SendSpike(ctx *Context, ni, di, lni uint32) {
if uint32(ctx.Cycle) != uint32(ctx.ThetaCycles)-1-pt.Com.DelLen {
return
}
- sendVal *= Neurons[ni, di, Burst] // Burst is regular CaSpkP for all non-SuperLayer neurons
+ sendVal *= Neurons[ni, di, Burst] // Burst is regular CaP for all non-SuperLayer neurons
} else {
if Neurons[ni, di, Spike] == 0 {
return
diff --git a/axon/act.go b/axon/act.go
index cb92bc11c..f72bd5372 100644
--- a/axon/act.go
+++ b/axon/act.go
@@ -740,7 +740,7 @@ type ActParams struct {
// M-type medium time-scale afterhyperpolarization mAHP current -- this is the primary form of adaptation on the time scale of multiple sequences of spikes
Mahp chans.MahpParams `display:"inline"`
- // slow time-scale afterhyperpolarization sAHP current -- integrates CaSpkD at theta cycle intervals and produces a hard cutoff on sustained activity for any neuron
+ // slow time-scale afterhyperpolarization sAHP current -- integrates CaD at theta cycle intervals and produces a hard cutoff on sustained activity for any neuron
Sahp chans.SahpParams `display:"inline"`
// sodium-gated potassium channel adaptation parameters -- activates a leak-like current as a function of neural activity (firing = Na influx) at two different time-scales (Slick = medium, Slack = slow)
@@ -853,15 +853,15 @@ func (ac *ActParams) DecayLearnCa(ctx *Context, ni, di uint32, decay float32) {
Neurons.SetSub(decay*Neurons.Value(int(ni), int(di), int(VgccCa)), int(ni), int(di), int(VgccCa))
Neurons.SetSub(decay*Neurons.Value(int(ni), int(di), int(VgccCaInt)), int(ni), int(di), int(VgccCaInt))
- Neurons.SetSub(decay*Neurons.Value(int(ni), int(di), int(CaLrn)), int(ni), int(di), int(CaLrn))
+ Neurons.SetSub(decay*Neurons.Value(int(ni), int(di), int(LearnCa)), int(ni), int(di), int(LearnCa))
- Neurons.SetSub(decay*Neurons.Value(int(ni), int(di), int(CaSpkM)), int(ni), int(di), int(CaSpkM))
- Neurons.SetSub(decay*Neurons.Value(int(ni), int(di), int(CaSpkP)), int(ni), int(di), int(CaSpkP))
- Neurons.SetSub(decay*Neurons.Value(int(ni), int(di), int(CaSpkD)), int(ni), int(di), int(CaSpkD))
+ Neurons.SetSub(decay*Neurons.Value(int(ni), int(di), int(CaM)), int(ni), int(di), int(CaM))
+ Neurons.SetSub(decay*Neurons.Value(int(ni), int(di), int(CaP)), int(ni), int(di), int(CaP))
+ Neurons.SetSub(decay*Neurons.Value(int(ni), int(di), int(CaD)), int(ni), int(di), int(CaD))
- Neurons.SetSub(decay*Neurons.Value(int(ni), int(di), int(NrnCaM)), int(ni), int(di), int(NrnCaM))
- Neurons.SetSub(decay*Neurons.Value(int(ni), int(di), int(NrnCaP)), int(ni), int(di), int(NrnCaP))
- Neurons.SetSub(decay*Neurons.Value(int(ni), int(di), int(NrnCaD)), int(ni), int(di), int(NrnCaD))
+ Neurons.SetSub(decay*Neurons.Value(int(ni), int(di), int(LearnCaM)), int(ni), int(di), int(LearnCaM))
+ Neurons.SetSub(decay*Neurons.Value(int(ni), int(di), int(LearnCaP)), int(ni), int(di), int(LearnCaP))
+ Neurons.SetSub(decay*Neurons.Value(int(ni), int(di), int(LearnCaD)), int(ni), int(di), int(LearnCaD))
// recovers
Neurons.SetAdd(decay*(1.0-Neurons.Value(int(ni), int(di), int(SKCaIn))), int(ni), int(di), int(SKCaIn))
@@ -1053,8 +1053,8 @@ func (ac *ActParams) InitActs(ctx *Context, ni, di uint32) {
// (DecayState is used instead)
func (ac *ActParams) InitLongActs(ctx *Context, ni, di uint32) {
Neurons.Set(0, int(ni), int(di), int(SpkPrv))
- Neurons.Set(0, int(ni), int(di), int(SpkSt1))
- Neurons.Set(0, int(ni), int(di), int(SpkSt2))
+ Neurons.Set(0, int(ni), int(di), int(Beta1))
+ Neurons.Set(0, int(ni), int(di), int(Beta2))
Neurons.Set(0, int(ni), int(di), int(ActM))
Neurons.Set(0, int(ni), int(di), int(ActP))
}
@@ -1168,7 +1168,7 @@ func (ac *ActParams) GSkCaFromCa(ctx *Context, ni, di uint32) {
skcar := Neurons.Value(int(ni), int(di), int(SKCaR))
skcain := Neurons.Value(int(ni), int(di), int(SKCaIn))
Neurons.Set(ac.SKCa.MFromCa(skcar, Neurons.Value(int(ni), int(di), int(SKCaM))), int(ni), int(di), int(SKCaM))
- ac.SKCa.CaInRFromSpike(Neurons.Value(int(ni), int(di), int(Spike)), Neurons.Value(int(ni), int(di), int(CaSpkD)), &skcain, &skcar)
+ ac.SKCa.CaInRFromSpike(Neurons.Value(int(ni), int(di), int(Spike)), Neurons.Value(int(ni), int(di), int(CaD)), &skcain, &skcar)
Neurons.Set(skcar, int(ni), int(di), int(SKCaR))
Neurons.Set(skcain, int(ni), int(di), int(SKCaIn))
Neurons.Set(ac.SKCa.Gbar*Neurons.Value(int(ni), int(di), int(SKCaM)), int(ni), int(di), int(Gsk))
diff --git a/axon/act.goal b/axon/act.goal
index 193e5afb3..9c6b10c86 100644
--- a/axon/act.goal
+++ b/axon/act.goal
@@ -738,7 +738,7 @@ type ActParams struct {
// M-type medium time-scale afterhyperpolarization mAHP current -- this is the primary form of adaptation on the time scale of multiple sequences of spikes
Mahp chans.MahpParams `display:"inline"`
- // slow time-scale afterhyperpolarization sAHP current -- integrates CaSpkD at theta cycle intervals and produces a hard cutoff on sustained activity for any neuron
+ // slow time-scale afterhyperpolarization sAHP current -- integrates CaD at theta cycle intervals and produces a hard cutoff on sustained activity for any neuron
Sahp chans.SahpParams `display:"inline"`
// sodium-gated potassium channel adaptation parameters -- activates a leak-like current as a function of neural activity (firing = Na influx) at two different time-scales (Slick = medium, Slack = slow)
@@ -851,15 +851,15 @@ func (ac *ActParams) DecayLearnCa(ctx *Context, ni, di uint32, decay float32) {
Neurons[ni, di, VgccCa] -= decay * Neurons[ni, di, VgccCa]
Neurons[ni, di, VgccCaInt] -= decay * Neurons[ni, di, VgccCaInt]
- Neurons[ni, di, CaLrn] -= decay * Neurons[ni, di, CaLrn]
+ Neurons[ni, di, LearnCa] -= decay * Neurons[ni, di, LearnCa]
- Neurons[ni, di, CaSpkM] -= decay * Neurons[ni, di, CaSpkM]
- Neurons[ni, di, CaSpkP] -= decay * Neurons[ni, di, CaSpkP]
- Neurons[ni, di, CaSpkD] -= decay * Neurons[ni, di, CaSpkD]
+ Neurons[ni, di, CaM] -= decay * Neurons[ni, di, CaM]
+ Neurons[ni, di, CaP] -= decay * Neurons[ni, di, CaP]
+ Neurons[ni, di, CaD] -= decay * Neurons[ni, di, CaD]
- Neurons[ni, di, NrnCaM] -= decay * Neurons[ni, di, NrnCaM]
- Neurons[ni, di, NrnCaP] -= decay * Neurons[ni, di, NrnCaP]
- Neurons[ni, di, NrnCaD] -= decay * Neurons[ni, di, NrnCaD]
+ Neurons[ni, di, LearnCaM] -= decay * Neurons[ni, di, LearnCaM]
+ Neurons[ni, di, LearnCaP] -= decay * Neurons[ni, di, LearnCaP]
+ Neurons[ni, di, LearnCaD] -= decay * Neurons[ni, di, LearnCaD]
// recovers
Neurons[ni, di, SKCaIn] += decay * (1.0 - Neurons[ni, di, SKCaIn])
@@ -1051,8 +1051,8 @@ func (ac *ActParams) InitActs(ctx *Context, ni, di uint32) {
// (DecayState is used instead)
func (ac *ActParams) InitLongActs(ctx *Context, ni, di uint32) {
Neurons[ni, di, SpkPrv] = 0
- Neurons[ni, di, SpkSt1] = 0
- Neurons[ni, di, SpkSt2] = 0
+ Neurons[ni, di, Beta1] = 0
+ Neurons[ni, di, Beta2] = 0
Neurons[ni, di, ActM] = 0
Neurons[ni, di, ActP] = 0
}
@@ -1166,7 +1166,7 @@ func (ac *ActParams) GSkCaFromCa(ctx *Context, ni, di uint32) {
skcar := Neurons[ni, di, SKCaR]
skcain := Neurons[ni, di, SKCaIn]
Neurons[ni, di, SKCaM] = ac.SKCa.MFromCa(skcar, Neurons[ni, di, SKCaM])
- ac.SKCa.CaInRFromSpike(Neurons[ni, di, Spike], Neurons[ni, di, CaSpkD], &skcain, &skcar)
+ ac.SKCa.CaInRFromSpike(Neurons[ni, di, Spike], Neurons[ni, di, CaD], &skcain, &skcar)
Neurons[ni, di, SKCaR] = skcar
Neurons[ni, di, SKCaIn] = skcain
Neurons[ni, di, Gsk] = ac.SKCa.Gbar * Neurons[ni, di, SKCaM]
diff --git a/axon/basic_test.go b/axon/basic_test.go
index 003edf2fa..8890fe20d 100644
--- a/axon/basic_test.go
+++ b/axon/basic_test.go
@@ -288,7 +288,6 @@ func TestInitWeights(t *testing.T) {
nData := 3
testNet := newTestNet(nData)
inPats := newInPats()
- ctx := testNet.Context()
valMapA := make(map[string]float32)
valMapB := make(map[string]float32)
@@ -346,7 +345,6 @@ func TestInitWeights(t *testing.T) {
}
if qtr == 2 {
testNet.MinusPhase()
- ctx.NewPhase(false)
testNet.PlusPhaseStart()
}
}
@@ -401,7 +399,11 @@ func TestGPUState(t *testing.T) {
}
func TestNetAct(t *testing.T) {
- NetActTest(t, Tol7, false)
+ NetActTest(t, Tol7, false, false) // gpu, chunked
+}
+
+func TestNetActChunked(t *testing.T) {
+ NetActTest(t, Tol7, false, true)
}
func TestNetActShort(t *testing.T) {
@@ -420,14 +422,13 @@ func TestGPUAct(t *testing.T) {
// for key values relative to known standards.
// Note: use NetDebugAct for printf debugging of all values --
// "this is only a test"
-func NetActTest(t *testing.T, tol float32, gpu bool) {
+func NetActTest(t *testing.T, tol float32, gpu, chunked bool) {
if gpu {
GPUInit()
UseGPU = true
}
testNet := newTestNet(1)
- ctx := testNet.Context()
testNet.InitExt()
inPats := newInPats()
@@ -483,13 +484,16 @@ func NetActTest(t *testing.T, tol float32, gpu bool) {
testNet.ApplyExts() // key now for GPU
for qtr := range 4 {
- for cyc := range cycPerQtr {
- _ = cyc
- testNet.Cycle(1, true)
+ if chunked {
+ testNet.Cycle(cycPerQtr, true)
+ } else {
+ for cyc := range cycPerQtr {
+ _ = cyc
+ testNet.Cycle(1, true)
+ }
}
if qtr == 2 {
testNet.MinusPhase()
- ctx.NewPhase(false)
testNet.PlusPhaseStart()
}
@@ -553,7 +557,6 @@ func NetActTestShort(t *testing.T, tol float32, gpu bool) {
}
testNet := newTestNet(1)
- ctx := testNet.Context()
testNet.InitExt()
inPats := newInPats()
@@ -603,7 +606,6 @@ func NetActTestShort(t *testing.T, tol float32, gpu bool) {
}
if qtr == 2 {
testNet.MinusPhase()
- ctx.NewPhase(false)
testNet.PlusPhaseStart()
}
@@ -774,7 +776,6 @@ func RunDebugAct(t *testing.T, testNet *Network, printValues bool, gpu bool, ini
}
if qtr == 2 {
testNet.MinusPhase()
- ctx.NewPhase(false)
testNet.PlusPhaseStart()
}
}
@@ -911,12 +912,12 @@ func NetTestLearn(t *testing.T, tol float32, gpu bool) {
hidLay.UnitValues(&hidAct, "Act", 0)
hidLay.UnitValues(&hidGes, "Ge", 0)
hidLay.UnitValues(&hidGis, "Gi", 0)
- hidLay.UnitValues(&hidCaM, "NrnCaM", 0)
- hidLay.UnitValues(&hidCaP, "NrnCaP", 0)
- hidLay.UnitValues(&hidCaD, "NrnCaD", 0)
+ hidLay.UnitValues(&hidCaM, "LearnCaM", 0)
+ hidLay.UnitValues(&hidCaP, "LearnCaP", 0)
+ hidLay.UnitValues(&hidCaD, "LearnCaD", 0)
- outLay.UnitValues(&outCaP, "NrnCaP", 0)
- outLay.UnitValues(&outCaD, "NrnCaD", 0)
+ outLay.UnitValues(&outCaP, "LearnCaP", 0)
+ outLay.UnitValues(&outCaD, "LearnCaD", 0)
if printCycs {
fmt.Printf("pat: %v qtr: %v cyc: %v\nhid act: %v ges: %v gis: %v\nhid avgss: %v avgs: %v avgm: %v\nout avgs: %v avgm: %v\n", pi, qtr, ctx.Cycle, hidAct, hidGes, hidGis, hidCaM, hidCaP, hidCaD, outCaP, outCaD)
@@ -924,15 +925,14 @@ func NetTestLearn(t *testing.T, tol float32, gpu bool) {
}
if qtr == 2 {
testNet.MinusPhase()
- ctx.NewPhase(false)
testNet.PlusPhaseStart()
}
- hidLay.UnitValues(&hidCaP, "NrnCaP", 0)
- hidLay.UnitValues(&hidCaD, "NrnCaD", 0)
+ hidLay.UnitValues(&hidCaP, "LearnCaP", 0)
+ hidLay.UnitValues(&hidCaD, "LearnCaD", 0)
- outLay.UnitValues(&outCaP, "NrnCaP", 0)
- outLay.UnitValues(&outCaD, "NrnCaD", 0)
+ outLay.UnitValues(&outCaP, "LearnCaP", 0)
+ outLay.UnitValues(&outCaD, "LearnCaD", 0)
if qtr == 3 {
didx := pi
@@ -1065,12 +1065,12 @@ func NetTestRLRate(t *testing.T, tol float32, gpu bool) {
hidLay.UnitValues(&hidAct, "Act", 0)
hidLay.UnitValues(&hidGes, "Ge", 0)
hidLay.UnitValues(&hidGis, "Gi", 0)
- hidLay.UnitValues(&hidCaM, "NrnCaM", 0)
- hidLay.UnitValues(&hidCaP, "NrnCaP", 0)
- hidLay.UnitValues(&hidCaD, "NrnCaD", 0)
+ hidLay.UnitValues(&hidCaM, "LearnCaM", 0)
+ hidLay.UnitValues(&hidCaP, "LearnCaP", 0)
+ hidLay.UnitValues(&hidCaD, "LearnCaD", 0)
- outLay.UnitValues(&outCaP, "NrnCaP", 0)
- outLay.UnitValues(&outCaD, "NrnCaD", 0)
+ outLay.UnitValues(&outCaP, "LearnCaP", 0)
+ outLay.UnitValues(&outCaD, "LearnCaD", 0)
if printCycs {
fmt.Printf("pat: %v qtr: %v cyc: %v\nhid act: %v ges: %v gis: %v\nhid avgss: %v avgs: %v avgm: %v\nout avgs: %v avgm: %v\n", pi, qtr, ctx.Cycle, hidAct, hidGes, hidGis, hidCaM, hidCaP, hidCaD, outCaP, outCaD)
@@ -1078,15 +1078,14 @@ func NetTestRLRate(t *testing.T, tol float32, gpu bool) {
}
if qtr == 2 {
testNet.MinusPhase()
- ctx.NewPhase(false)
testNet.PlusPhaseStart()
}
- hidLay.UnitValues(&hidCaP, "NrnCaP", 0)
- hidLay.UnitValues(&hidCaD, "NrnCaD", 0)
+ hidLay.UnitValues(&hidCaP, "LearnCaP", 0)
+ hidLay.UnitValues(&hidCaD, "LearnCaD", 0)
- outLay.UnitValues(&outCaP, "NrnCaP", 0)
- outLay.UnitValues(&outCaD, "NrnCaD", 0)
+ outLay.UnitValues(&outCaP, "LearnCaP", 0)
+ outLay.UnitValues(&outCaD, "LearnCaD", 0)
if qtr == 3 {
didx := pi
@@ -1223,7 +1222,6 @@ func RunDebugLearn(t *testing.T, testNet *Network, printValues bool, gpu bool, i
}
if qtr == 2 {
testNet.MinusPhase()
- ctx.NewPhase(false)
testNet.PlusPhaseStart()
}
}
@@ -1469,7 +1467,6 @@ func TestInhibAct(t *testing.T) {
}
if qtr == 2 {
inhibNet.MinusPhase()
- ctx.NewPhase(false)
inhibNet.PlusPhaseStart()
}
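These test changes exercise two equivalent ways of running a quarter's worth of cycles, and drop the explicit ctx.NewPhase(false) call now that the phase reset lives in the network and context methods. A minimal sketch of the resulting per-trial loop, written as a hypothetical helper in the same package as the tests (the four-quarter structure and the Cycle, MinusPhase, and PlusPhaseStart calls follow the test code above):

    // runTrialQuarters sketches the loop used by NetActTest after this change:
    // cycles can be run one at a time or as a single chunked call per quarter,
    // and the minus-to-plus transition needs only MinusPhase + PlusPhaseStart.
    // Hypothetical helper, not part of the test file.
    func runTrialQuarters(net *Network, cycPerQtr int, chunked bool) {
        for qtr := 0; qtr < 4; qtr++ {
            if chunked {
                net.Cycle(cycPerQtr, true) // one call runs the whole quarter
            } else {
                for cyc := 0; cyc < cycPerQtr; cyc++ {
                    net.Cycle(1, true)
                }
            }
            if qtr == 2 {
                net.MinusPhase()     // record minus-phase state
                net.PlusPhaseStart() // resets PhaseCycle, sets PlusPhase
            }
        }
    }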
diff --git a/axon/basic_test.goal b/axon/basic_test.goal
index 64495f835..7ceaa51ed 100644
--- a/axon/basic_test.goal
+++ b/axon/basic_test.goal
@@ -288,7 +288,6 @@ func TestInitWeights(t *testing.T) {
nData := 3
testNet := newTestNet(nData)
inPats := newInPats()
- ctx := testNet.Context()
valMapA := make(map[string]float32)
valMapB := make(map[string]float32)
@@ -346,7 +345,6 @@ func TestInitWeights(t *testing.T) {
}
if qtr == 2 {
testNet.MinusPhase()
- ctx.NewPhase(false)
testNet.PlusPhaseStart()
}
}
@@ -401,7 +399,11 @@ func TestGPUState(t *testing.T) {
}
func TestNetAct(t *testing.T) {
- NetActTest(t, Tol7, false)
+ NetActTest(t, Tol7, false, false) // gpu, chunked
+}
+
+func TestNetActChunked(t *testing.T) {
+ NetActTest(t, Tol7, false, true)
}
func TestNetActShort(t *testing.T) {
@@ -420,14 +422,13 @@ func TestGPUAct(t *testing.T) {
// for key values relative to known standards.
// Note: use NetDebugAct for printf debugging of all values --
// "this is only a test"
-func NetActTest(t *testing.T, tol float32, gpu bool) {
+func NetActTest(t *testing.T, tol float32, gpu, chunked bool) {
if gpu {
GPUInit()
UseGPU = true
}
testNet := newTestNet(1)
- ctx := testNet.Context()
testNet.InitExt()
inPats := newInPats()
@@ -483,13 +484,16 @@ func NetActTest(t *testing.T, tol float32, gpu bool) {
testNet.ApplyExts() // key now for GPU
for qtr := range 4 {
- for cyc := range cycPerQtr {
- _ = cyc
- testNet.Cycle(1, true)
+ if chunked {
+ testNet.Cycle(cycPerQtr, true)
+ } else {
+ for cyc := range cycPerQtr {
+ _ = cyc
+ testNet.Cycle(1, true)
+ }
}
if qtr == 2 {
testNet.MinusPhase()
- ctx.NewPhase(false)
testNet.PlusPhaseStart()
}
@@ -553,7 +557,6 @@ func NetActTestShort(t *testing.T, tol float32, gpu bool) {
}
testNet := newTestNet(1)
- ctx := testNet.Context()
testNet.InitExt()
inPats := newInPats()
@@ -603,7 +606,6 @@ func NetActTestShort(t *testing.T, tol float32, gpu bool) {
}
if qtr == 2 {
testNet.MinusPhase()
- ctx.NewPhase(false)
testNet.PlusPhaseStart()
}
@@ -774,7 +776,6 @@ func RunDebugAct(t *testing.T, testNet *Network, printValues bool, gpu bool, ini
}
if qtr == 2 {
testNet.MinusPhase()
- ctx.NewPhase(false)
testNet.PlusPhaseStart()
}
}
@@ -911,12 +912,12 @@ func NetTestLearn(t *testing.T, tol float32, gpu bool) {
hidLay.UnitValues(&hidAct, "Act", 0)
hidLay.UnitValues(&hidGes, "Ge", 0)
hidLay.UnitValues(&hidGis, "Gi", 0)
- hidLay.UnitValues(&hidCaM, "NrnCaM", 0)
- hidLay.UnitValues(&hidCaP, "NrnCaP", 0)
- hidLay.UnitValues(&hidCaD, "NrnCaD", 0)
+ hidLay.UnitValues(&hidCaM, "LearnCaM", 0)
+ hidLay.UnitValues(&hidCaP, "LearnCaP", 0)
+ hidLay.UnitValues(&hidCaD, "LearnCaD", 0)
- outLay.UnitValues(&outCaP, "NrnCaP", 0)
- outLay.UnitValues(&outCaD, "NrnCaD", 0)
+ outLay.UnitValues(&outCaP, "LearnCaP", 0)
+ outLay.UnitValues(&outCaD, "LearnCaD", 0)
if printCycs {
fmt.Printf("pat: %v qtr: %v cyc: %v\nhid act: %v ges: %v gis: %v\nhid avgss: %v avgs: %v avgm: %v\nout avgs: %v avgm: %v\n", pi, qtr, ctx.Cycle, hidAct, hidGes, hidGis, hidCaM, hidCaP, hidCaD, outCaP, outCaD)
@@ -924,15 +925,14 @@ func NetTestLearn(t *testing.T, tol float32, gpu bool) {
}
if qtr == 2 {
testNet.MinusPhase()
- ctx.NewPhase(false)
testNet.PlusPhaseStart()
}
- hidLay.UnitValues(&hidCaP, "NrnCaP", 0)
- hidLay.UnitValues(&hidCaD, "NrnCaD", 0)
+ hidLay.UnitValues(&hidCaP, "LearnCaP", 0)
+ hidLay.UnitValues(&hidCaD, "LearnCaD", 0)
- outLay.UnitValues(&outCaP, "NrnCaP", 0)
- outLay.UnitValues(&outCaD, "NrnCaD", 0)
+ outLay.UnitValues(&outCaP, "LearnCaP", 0)
+ outLay.UnitValues(&outCaD, "LearnCaD", 0)
if qtr == 3 {
didx := pi
@@ -1065,12 +1065,12 @@ func NetTestRLRate(t *testing.T, tol float32, gpu bool) {
hidLay.UnitValues(&hidAct, "Act", 0)
hidLay.UnitValues(&hidGes, "Ge", 0)
hidLay.UnitValues(&hidGis, "Gi", 0)
- hidLay.UnitValues(&hidCaM, "NrnCaM", 0)
- hidLay.UnitValues(&hidCaP, "NrnCaP", 0)
- hidLay.UnitValues(&hidCaD, "NrnCaD", 0)
+ hidLay.UnitValues(&hidCaM, "LearnCaM", 0)
+ hidLay.UnitValues(&hidCaP, "LearnCaP", 0)
+ hidLay.UnitValues(&hidCaD, "LearnCaD", 0)
- outLay.UnitValues(&outCaP, "NrnCaP", 0)
- outLay.UnitValues(&outCaD, "NrnCaD", 0)
+ outLay.UnitValues(&outCaP, "LearnCaP", 0)
+ outLay.UnitValues(&outCaD, "LearnCaD", 0)
if printCycs {
fmt.Printf("pat: %v qtr: %v cyc: %v\nhid act: %v ges: %v gis: %v\nhid avgss: %v avgs: %v avgm: %v\nout avgs: %v avgm: %v\n", pi, qtr, ctx.Cycle, hidAct, hidGes, hidGis, hidCaM, hidCaP, hidCaD, outCaP, outCaD)
@@ -1078,15 +1078,14 @@ func NetTestRLRate(t *testing.T, tol float32, gpu bool) {
}
if qtr == 2 {
testNet.MinusPhase()
- ctx.NewPhase(false)
testNet.PlusPhaseStart()
}
- hidLay.UnitValues(&hidCaP, "NrnCaP", 0)
- hidLay.UnitValues(&hidCaD, "NrnCaD", 0)
+ hidLay.UnitValues(&hidCaP, "LearnCaP", 0)
+ hidLay.UnitValues(&hidCaD, "LearnCaD", 0)
- outLay.UnitValues(&outCaP, "NrnCaP", 0)
- outLay.UnitValues(&outCaD, "NrnCaD", 0)
+ outLay.UnitValues(&outCaP, "LearnCaP", 0)
+ outLay.UnitValues(&outCaD, "LearnCaD", 0)
if qtr == 3 {
didx := pi
@@ -1223,7 +1222,6 @@ func RunDebugLearn(t *testing.T, testNet *Network, printValues bool, gpu bool, i
}
if qtr == 2 {
testNet.MinusPhase()
- ctx.NewPhase(false)
testNet.PlusPhaseStart()
}
}
@@ -1468,7 +1466,6 @@ func TestInhibAct(t *testing.T) {
}
if qtr == 2 {
inhibNet.MinusPhase()
- ctx.NewPhase(false)
inhibNet.PlusPhaseStart()
}
diff --git a/axon/context.go b/axon/context.go
index 0e78ac12b..890722f6c 100644
--- a/axon/context.go
+++ b/axon/context.go
@@ -108,12 +108,6 @@ func (ctx *Context) DataIndex(idx uint32) uint32 {
return idx % ctx.NData
}
-// NewPhase resets PhaseCycle = 0 and sets the plus phase as specified
-func (ctx *Context) NewPhase(plusPhase bool) {
- ctx.PhaseCycle = 0
- ctx.PlusPhase.SetBool(plusPhase)
-}
-
// CycleInc increments at the cycle level
func (ctx *Context) CycleInc() {
ctx.PhaseCycle++
@@ -134,6 +128,12 @@ func (ctx *Context) SlowInc() bool {
return true
}
+// PlusPhaseStart resets PhaseCycle = 0 and sets the plus phase to true.
+func (ctx *Context) PlusPhaseStart() {
+ ctx.PhaseCycle = 0
+ ctx.PlusPhase.SetBool(true)
+}
+
//gosl:end
// NewState resets counters at start of new state (trial) of processing.
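With NewPhase removed, PlusPhaseStart is the only phase transition left on Context, and CycleInc keeps advancing PhaseCycle within each phase. A minimal sketch of how these counters evolve over one trial, assuming a typical 150-cycle minus phase followed by a 50-cycle plus phase (the cycle counts and the helper name are illustrative, not taken from this patch):

    // advanceTrial sketches how the Context counters move over one trial:
    // PhaseCycle counts up within a phase via CycleInc, and PlusPhaseStart
    // resets it to 0 while setting PlusPhase. Hypothetical helper.
    func advanceTrial(ctx *Context) {
        for cyc := 0; cyc < 200; cyc++ {
            if cyc == 150 {
                ctx.PlusPhaseStart() // PhaseCycle -> 0, PlusPhase -> true
            }
            ctx.CycleInc() // PhaseCycle++ (and any other per-cycle counters)
        }
    }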
diff --git a/axon/deep-layer.go b/axon/deep-layer.go
index 203e507a7..48856bb19 100644
--- a/axon/deep-layer.go
+++ b/axon/deep-layer.go
@@ -12,18 +12,18 @@ import (
//gosl:start deep_layers
// BurstParams determine how the 5IB Burst activation is computed from
-// CaSpkP integrated spiking values in Super layers -- thresholded.
+// CaP integrated spiking values in Super layers -- thresholded.
type BurstParams struct {
// Relative component of threshold on superficial activation value,
- // below which it does not drive Burst (and above which, Burst = CaSpkP).
+ // below which it does not drive Burst (and above which, Burst = CaP).
// This is the distance between the average and maximum activation values
// within layer (e.g., 0 = average, 1 = max). Overall effective threshold
// is MAX of relative and absolute thresholds.
ThrRel float32 `max:"1" default:"0.1"`
// Absolute component of threshold on superficial activation value,
- // below which it does not drive Burst (and above which, Burst = CaSpkP).
+ // below which it does not drive Burst (and above which, Burst = CaP).
// Overall effective threshold is MAX of relative and absolute thresholds.
ThrAbs float32 `min:"0" max:"1" default:"0.1"`
@@ -78,13 +78,13 @@ func (cp *CTParams) Defaults() {
// PulvParams provides parameters for how the plus-phase (outcome)
// state of Pulvinar thalamic relay cell neurons is computed from
-// the corresponding driver neuron Burst activation (or CaSpkP if not Super)
+// the corresponding driver neuron Burst activation (or CaP if not Super)
type PulvParams struct {
- // multiplier on driver input strength, multiplies CaSpkP from driver layer to produce Ge excitatory input to Pulv unit.
+ // multiplier on driver input strength, multiplies CaP from driver layer to produce Ge excitatory input to Pulv unit.
DriveScale float32 `default:"0.1" min:"0.0"`
- // Level of Max driver layer CaSpkP at which the drivers fully drive the burst phase activation. If there is weaker driver input, then (Max/FullDriveAct) proportion of the non-driver inputs remain and this critically prevents the network from learning to turn activation off, which is difficult and severely degrades learning.
+ // Level of Max driver layer CaP at which the drivers fully drive the burst phase activation. If there is weaker driver input, then (Max/FullDriveAct) proportion of the non-driver inputs remain and this critically prevents the network from learning to turn activation off, which is difficult and severely degrades learning.
FullDriveAct float32 `default:"0.6" min:"0.01"`
// index of layer that generates the driving activity into this one -- set via SetBuildConfig(DriveLayName) setting
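The BurstParams comments above describe the effective Burst threshold as the MAX of a relative component (placed between the layer-average and layer-maximum CaP) and an absolute component. A minimal sketch of that thresholding, as a hypothetical standalone helper (the actual layer method and the source of the layer average and max are not shown in this patch):

    // burstFromCaP sketches the BurstParams thresholding: above threshold,
    // Burst = CaP; below it, Burst = 0. The threshold is the MAX of the
    // relative threshold (0 = layer average, 1 = layer max) and ThrAbs.
    // Hypothetical helper for illustration only.
    func burstFromCaP(bp *BurstParams, caP, layAvg, layMax float32) float32 {
        thr := layAvg + bp.ThrRel*(layMax-layAvg)
        if bp.ThrAbs > thr {
            thr = bp.ThrAbs
        }
        if caP < thr {
            return 0
        }
        return caP
    }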
diff --git a/axon/enumgen.go b/axon/enumgen.go
index 99a5d53d8..c1f221fa3 100644
--- a/axon/enumgen.go
+++ b/axon/enumgen.go
@@ -250,7 +250,7 @@ const LayerVarsN LayerVars = 11
var _LayerVarsValueMap = map[string]LayerVars{`LayerActMAvg`: 0, `LayerActPAvg`: 1, `LayerAvgMaxGeM`: 2, `LayerAvgMaxGiM`: 3, `LayerGiMult`: 4, `LayerPhaseDiff`: 5, `LayerPhaseDiffAvg`: 6, `LayerPhaseDiffVar`: 7, `LayerRT`: 8, `LayerRewPredPos`: 9, `LayerRewPredNeg`: 10}
-var _LayerVarsDescMap = map[LayerVars]string{0: `LayerActMAvg is the running-average minus-phase activity integrated at Dt.LongAvgTau, used for adapting inhibition relative to target level.`, 1: `LayerActPAvg is the running-average plus-phase activity integrated at Dt.LongAvgTau.`, 2: `LayerAvgMaxGeM is the running-average max of minus-phase Ge value across the layer integrated at Dt.LongAvgTau.`, 3: `LayerAvgMaxGiM is the running-average max of minus-phase Gi value across the layer integrated at Dt.LongAvgTau.`, 4: `LayerGiMult is a multiplier on layer-level inhibition, which can be adapted to maintain target activity level.`, 5: `LayerPhaseDiff is the phase-wise difference in the activity state between the minus [ActM] and plus [ActP] phases, measured using 1 minus the correlation (centered cosine aka normalized dot product). 0 = no difference, 2 = maximum difference. Computed by PhaseDiffFromActs in the PlusPhase.`, 6: `LayerPhaseDiffAvg is the running average of [LayerPhaseDiff] over time, integrated at Dt.LongAvgTau.`, 7: `LayerPhaseDiffVar is the running variance of [LayerPhaseDiff], integrated at Dt.LongAvgTau.`, 8: `LayerRT is the reaction time for this layer in cycles, which is -1 until the Max CaSpkP level (after MaxCycStart) exceeds the Act.Attn.RTThr threshold.`, 9: `LayerRewPredPos is the positive-valued Reward Prediction value, for RL specific layers: [RWPredLayer], [TDPredLayer]. For [TDIntegLayer], this is the plus phase current integrated reward prediction.`, 10: `LayerRewPredNeg is the negative-valued Reward Prediction value, for RL specific layers: [RWPredLayer], [TDPredLayer] For [TDIntegLayer], this is the minus phase previous integrated reward prediction.`}
+var _LayerVarsDescMap = map[LayerVars]string{0: `LayerActMAvg is the running-average minus-phase activity integrated at Dt.LongAvgTau, used for adapting inhibition relative to target level.`, 1: `LayerActPAvg is the running-average plus-phase activity integrated at Dt.LongAvgTau.`, 2: `LayerAvgMaxGeM is the running-average max of minus-phase Ge value across the layer integrated at Dt.LongAvgTau.`, 3: `LayerAvgMaxGiM is the running-average max of minus-phase Gi value across the layer integrated at Dt.LongAvgTau.`, 4: `LayerGiMult is a multiplier on layer-level inhibition, which can be adapted to maintain target activity level.`, 5: `LayerPhaseDiff is the phase-wise difference in the activity state between the minus [ActM] and plus [ActP] phases, measured using 1 minus the correlation (centered cosine aka normalized dot product). 0 = no difference, 2 = maximum difference. Computed by PhaseDiffFromActs in the PlusPhase.`, 6: `LayerPhaseDiffAvg is the running average of [LayerPhaseDiff] over time, integrated at Dt.LongAvgTau.`, 7: `LayerPhaseDiffVar is the running variance of [LayerPhaseDiff], integrated at Dt.LongAvgTau.`, 8: `LayerRT is the reaction time for this layer in cycles, which is -1 until the Max CaP level (after MaxCycStart) exceeds the Act.Attn.RTThr threshold.`, 9: `LayerRewPredPos is the positive-valued Reward Prediction value, for RL specific layers: [RWPredLayer], [TDPredLayer]. For [TDIntegLayer], this is the plus phase current integrated reward prediction.`, 10: `LayerRewPredNeg is the negative-valued Reward Prediction value, for RL specific layers: [RWPredLayer], [TDPredLayer] For [TDIntegLayer], this is the minus phase previous integrated reward prediction.`}
var _LayerVarsMap = map[LayerVars]string{0: `LayerActMAvg`, 1: `LayerActPAvg`, 2: `LayerAvgMaxGeM`, 3: `LayerAvgMaxGiM`, 4: `LayerGiMult`, 5: `LayerPhaseDiff`, 6: `LayerPhaseDiffAvg`, 7: `LayerPhaseDiffVar`, 8: `LayerRT`, 9: `LayerRewPredPos`, 10: `LayerRewPredNeg`}
@@ -438,7 +438,7 @@ const NeuronFlagsN NeuronFlags = 9
var _NeuronFlagsValueMap = map[string]NeuronFlags{`NeuronOff`: 1, `NeuronHasExt`: 2, `NeuronHasTarg`: 4, `NeuronHasCmpr`: 8}
-var _NeuronFlagsDescMap = map[NeuronFlags]string{1: `NeuronOff flag indicates that this neuron has been turned off (i.e., lesioned)`, 2: `NeuronHasExt means the neuron has external input in its Ext field`, 4: `NeuronHasTarg means the neuron has external target input in its Target field`, 8: `NeuronHasCmpr means the neuron has external comparison input in its Target field -- used for computing comparison statistics but does not drive neural activity ever`}
+var _NeuronFlagsDescMap = map[NeuronFlags]string{1: `NeuronOff flag indicates that this neuron has been turned off (i.e., lesioned).`, 2: `NeuronHasExt means the neuron has external input in its Ext field.`, 4: `NeuronHasTarg means the neuron has external target input in its Target field.`, 8: `NeuronHasCmpr means the neuron has external comparison input in its Target field. Used for computing comparison statistics, but never drives neural activity.`}
var _NeuronFlagsMap = map[NeuronFlags]string{1: `NeuronOff`, 2: `NeuronHasExt`, 4: `NeuronHasTarg`, 8: `NeuronHasCmpr`}
@@ -474,20 +474,20 @@ func (i *NeuronFlags) UnmarshalText(text []byte) error {
return enums.UnmarshalText(i, text, "NeuronFlags")
}
-var _NeuronVarsValues = []NeuronVars{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89}
+var _NeuronVarsValues = []NeuronVars{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88}
// NeuronVarsN is the highest valid value for type NeuronVars, plus one.
//
//gosl:start
-const NeuronVarsN NeuronVars = 90
+const NeuronVarsN NeuronVars = 89
//gosl:end
-var _NeuronVarsValueMap = map[string]NeuronVars{`Spike`: 0, `Spiked`: 1, `Act`: 2, `ActInt`: 3, `Ge`: 4, `Gi`: 5, `Gk`: 6, `Inet`: 7, `Vm`: 8, `VmDend`: 9, `ISI`: 10, `ISIAvg`: 11, `Ext`: 12, `Target`: 13, `CaSpkM`: 14, `CaSpkP`: 15, `CaSpkD`: 16, `CaSpkPM`: 17, `CaLrn`: 18, `NrnCaM`: 19, `NrnCaP`: 20, `NrnCaD`: 21, `CaDiff`: 22, `RLRate`: 23, `GnmdaSyn`: 24, `Gnmda`: 25, `GnmdaLrn`: 26, `GnmdaMaint`: 27, `NmdaCa`: 28, `Gvgcc`: 29, `VgccM`: 30, `VgccH`: 31, `VgccCa`: 32, `VgccCaInt`: 33, `Burst`: 34, `BurstPrv`: 35, `CtxtGe`: 36, `CtxtGeRaw`: 37, `CtxtGeOrig`: 38, `GgabaB`: 39, `GABAB`: 40, `GABABx`: 41, `Gak`: 42, `SSGiDend`: 43, `GknaMed`: 44, `GknaSlow`: 45, `Gkir`: 46, `KirM`: 47, `Gsk`: 48, `SKCaIn`: 49, `SKCaR`: 50, `SKCaM`: 51, `Gmahp`: 52, `MahpN`: 53, `Gsahp`: 54, `SahpCa`: 55, `SahpN`: 56, `ActM`: 57, `ActP`: 58, `SpkSt1`: 59, `SpkSt2`: 60, `SpkMax`: 61, `SpkMaxCa`: 62, `SpkBin0`: 63, `SpkBin1`: 64, `SpkBin2`: 65, `SpkBin3`: 66, `SpkBin4`: 67, `SpkBin5`: 68, `SpkBin6`: 69, `SpkBin7`: 70, `SpkPrv`: 71, `GeNoise`: 72, `GeNoiseP`: 73, `GiNoise`: 74, `GiNoiseP`: 75, `GeExt`: 76, `GeRaw`: 77, `GeSyn`: 78, `GiRaw`: 79, `GiSyn`: 80, `GeInt`: 81, `GeIntNorm`: 82, `GiInt`: 83, `GModRaw`: 84, `GModSyn`: 85, `SMaintP`: 86, `GMaintRaw`: 87, `GMaintSyn`: 88, `NeurFlags`: 89}
+var _NeuronVarsValueMap = map[string]NeuronVars{`Spike`: 0, `Spiked`: 1, `Act`: 2, `ActInt`: 3, `Ge`: 4, `Gi`: 5, `Gk`: 6, `Inet`: 7, `Vm`: 8, `VmDend`: 9, `ISI`: 10, `ISIAvg`: 11, `Ext`: 12, `Target`: 13, `CaM`: 14, `CaP`: 15, `CaD`: 16, `LearnCa`: 17, `LearnCaM`: 18, `LearnCaP`: 19, `LearnCaD`: 20, `CaDiff`: 21, `RLRate`: 22, `GnmdaSyn`: 23, `Gnmda`: 24, `GnmdaLrn`: 25, `GnmdaMaint`: 26, `NmdaCa`: 27, `Gvgcc`: 28, `VgccM`: 29, `VgccH`: 30, `VgccCa`: 31, `VgccCaInt`: 32, `Burst`: 33, `BurstPrv`: 34, `CtxtGe`: 35, `CtxtGeRaw`: 36, `CtxtGeOrig`: 37, `GgabaB`: 38, `GABAB`: 39, `GABABx`: 40, `Gak`: 41, `SSGiDend`: 42, `GknaMed`: 43, `GknaSlow`: 44, `Gkir`: 45, `KirM`: 46, `Gsk`: 47, `SKCaIn`: 48, `SKCaR`: 49, `SKCaM`: 50, `Gmahp`: 51, `MahpN`: 52, `Gsahp`: 53, `SahpCa`: 54, `SahpN`: 55, `ActM`: 56, `ActP`: 57, `Beta1`: 58, `Beta2`: 59, `SpkMax`: 60, `SpkMaxCa`: 61, `SpkBin0`: 62, `SpkBin1`: 63, `SpkBin2`: 64, `SpkBin3`: 65, `SpkBin4`: 66, `SpkBin5`: 67, `SpkBin6`: 68, `SpkBin7`: 69, `SpkPrv`: 70, `GeNoise`: 71, `GeNoiseP`: 72, `GiNoise`: 73, `GiNoiseP`: 74, `GeExt`: 75, `GeRaw`: 76, `GeSyn`: 77, `GiRaw`: 78, `GiSyn`: 79, `GeInt`: 80, `GeIntNorm`: 81, `GiInt`: 82, `GModRaw`: 83, `GModSyn`: 84, `SMaintP`: 85, `GMaintRaw`: 86, `GMaintSyn`: 87, `NeurFlags`: 88}
-var _NeuronVarsDescMap = map[NeuronVars]string{0: `Spike is whether neuron has spiked or not on this cycle (0 or 1)`, 1: `Spiked is 1 if neuron has spiked within the last 10 cycles (msecs), corresponding to a nominal max spiking rate of 100 Hz, 0 otherwise -- useful for visualization and computing activity levels in terms of average spiked levels.`, 2: `Act is rate-coded activation value reflecting instantaneous estimated rate of spiking, based on 1 / ISIAvg. This drives feedback inhibition in the FFFB function (todo: this will change when better inhibition is implemented), and is integrated over time for ActInt which is then used for performance statistics and layer average activations, etc. Should not be used for learning or other computations.`, 3: `ActInt is integrated running-average activation value computed from Act with time constant Act.Dt.IntTau, to produce a longer-term integrated value reflecting the overall activation state across the ThetaCycle time scale, as the overall response of network to current input state -- this is copied to ActM and ActP at the ends of the minus and plus phases, respectively, and used in computing performance-level statistics (which are typically based on ActM). Should not be used for learning or other computations.`, 4: `Ge is total excitatory conductance, including all forms of excitation (e.g., NMDA) -- does *not* include Gbar.E`, 5: `Gi is total inhibitory synaptic conductance -- the net inhibitory input to the neuron -- does *not* include Gbar.I`, 6: `Gk is total potassium conductance, typically reflecting sodium-gated potassium currents involved in adaptation effects -- does *not* include Gbar.K`, 7: `Inet is net current produced by all channels -- drives update of Vm`, 8: `Vm is membrane potential -- integrates Inet current over time`, 9: `VmDend is dendritic membrane potential -- has a slower time constant, is not subject to the VmR reset after spiking`, 10: `ISI is current inter-spike-interval -- counts up since last spike. Starts at -1 when initialized.`, 11: `ISIAvg is average inter-spike-interval -- average time interval between spikes, integrated with ISITau rate constant (relatively fast) to capture something close to an instantaneous spiking rate. Starts at -1 when initialized, and goes to -2 after first spike, and is only valid after the second spike post-initialization.`, 12: `Ext is external input: drives activation of unit from outside influences (e.g., sensory input)`, 13: `Target is the target value: drives learning to produce this activation value`, 14: `CaSpkM is spike-driven calcium trace used as a neuron-level proxy for synpatic credit assignment factor based on continuous time-integrated spiking: exponential integration of SpikeG * Spike at MTau time constant (typically 5). Simulates a calmodulin (CaM) like signal at the most abstract level.`, 15: `CaSpkP is continuous cascaded integration of CaSpkM at PTau time constant (typically 40), representing neuron-level purely spiking version of plus, LTP direction of weight change and capturing the function of CaMKII in the Kinase learning rule. Used for specialized learning and computational functions, statistics, instead of Act.`, 16: `CaSpkD is continuous cascaded integration CaSpkP at DTau time constant (typically 40), representing neuron-level purely spiking version of minus, LTD direction of weight change and capturing the function of DAPK1 in the Kinase learning rule. 
Used for specialized learning and computational functions, statistics, instead of Act.`, 17: `CaSpkPM is minus-phase snapshot of the CaSpkP value -- similar to ActM but using a more directly spike-integrated value.`, 18: `CaLrn is recv neuron calcium signal used to drive temporal error difference component of standard learning rule, combining NMDA (NmdaCa) and spiking-driven VGCC (VgccCaInt) calcium sources (vs. CaSpk* which only reflects spiking component). This is integrated into CaM, CaP, CaD, and temporal derivative is CaP - CaD (CaMKII - DAPK1). This approximates the backprop error derivative on net input, but VGCC component adds a proportion of recv activation delta as well -- a balance of both works best. The synaptic-level trace multiplier provides the credit assignment factor, reflecting coincident activity and potentially integrated over longer multi-trial timescales.`, 19: `NrnCaM is integrated CaLrn at MTau timescale (typically 5), simulating a calmodulin (CaM) like signal, which then drives CaP, CaD for delta signal driving error-driven learning.`, 20: `NrnCaP is cascaded integration of CaM at PTau time constant (typically 40), representing the plus, LTP direction of weight change and capturing the function of CaMKII in the Kinase learning rule.`, 21: `NrnCaD is cascaded integratoin of CaP at DTau time constant (typically 40), representing the minus, LTD direction of weight change and capturing the function of DAPK1 in the Kinase learning rule.`, 22: `CaDiff is difference between CaP - CaD -- this is the error signal that drives error-driven learning.`, 23: `RLRate is recv-unit based learning rate multiplier, reflecting the sigmoid derivative computed from the CaSpkD of recv unit, and the normalized difference CaSpkP - CaSpkD / MAX(CaSpkP - CaSpkD).`, 24: `GnmdaSyn is integrated NMDA recv synaptic current -- adds GeRaw and decays with time constant`, 25: `Gnmda is net postsynaptic (recv) NMDA conductance, after Mg V-gating and Gbar -- added directly to Ge as it has the same reversal potential`, 26: `GnmdaLrn is learning version of integrated NMDA recv synaptic current -- adds GeRaw and decays with time constant -- drives NmdaCa that then drives CaM for learning`, 27: `GnmdaMaint is net postsynaptic maintenance NMDA conductance, computed from GMaintSyn and GMaintRaw, after Mg V-gating and Gbar -- added directly to Ge as it has the same reversal potential`, 28: `NmdaCa is NMDA calcium computed from GnmdaLrn, drives learning via CaM`, 29: `Gvgcc is conductance (via Ca) for VGCC voltage gated calcium channels`, 30: `VgccM is activation gate of VGCC channels`, 31: `VgccH inactivation gate of VGCC channels`, 32: `VgccCa is instantaneous VGCC calcium flux -- can be driven by spiking or directly from Gvgcc`, 33: `VgccCaInt time-integrated VGCC calcium flux -- this is actually what drives learning`, 34: `Burst is 5IB bursting activation value, computed by thresholding regular CaSpkP value in Super superficial layers`, 35: `BurstPrv is previous Burst bursting activation from prior time step -- used for context-based learning`, 36: `CtxtGe is context (temporally delayed) excitatory conductance, driven by deep bursting at end of the plus phase, for CT layers.`, 37: `CtxtGeRaw is raw update of context (temporally delayed) excitatory conductance, driven by deep bursting at end of the plus phase, for CT layers.`, 38: `CtxtGeOrig is original CtxtGe value prior to any decay factor -- updates at end of plus phase.`, 39: `GgabaB is net GABA-B conductance, after Vm gating and Gbar + Gbase -- 
applies to Gk, not Gi, for GIRK, with .1 reversal potential.`, 40: `GABAB is GABA-B / GIRK activation -- time-integrated value with rise and decay time constants`, 41: `GABABx is GABA-B / GIRK internal drive variable -- gets the raw activation and decays`, 42: `Gak is conductance of A-type K potassium channels`, 43: `SSGiDend is amount of SST+ somatostatin positive slow spiking inhibition applied to dendritic Vm (VmDend)`, 44: `GknaMed is conductance of sodium-gated potassium channel (KNa) medium dynamics (Slick), which produces accommodation / adaptation of firing`, 45: `GknaSlow is conductance of sodium-gated potassium channel (KNa) slow dynamics (Slack), which produces accommodation / adaptation of firing`, 46: `Gkir is the conductance of the potassium (K) inwardly rectifying channel, which is strongest at low membrane potentials. Can be modulated by DA.`, 47: `KirM is the Kir potassium (K) inwardly rectifying gating value`, 48: `Gsk is Calcium-gated potassium channel conductance as a function of Gbar * SKCaM.`, 49: `SKCaIn is intracellular calcium store level, available to be released with spiking as SKCaR, which can bind to SKCa receptors and drive K current. replenishment is a function of spiking activity being below a threshold`, 50: `SKCaR released amount of intracellular calcium, from SKCaIn, as a function of spiking events. this can bind to SKCa channels and drive K currents.`, 51: `SKCaM is Calcium-gated potassium channel gating factor, driven by SKCaR via a Hill equation as in chans.SKPCaParams.`, 52: `Gmahp is medium time scale AHP conductance`, 53: `MahpN is accumulating voltage-gated gating value for the medium time scale AHP`, 54: `Gsahp is slow time scale AHP conductance`, 55: `SahpCa is slowly accumulating calcium value that drives the slow AHP`, 56: `SahpN is the sAHP gating value`, 57: `ActM is ActInt activation state at end of third quarter, representing the posterior-cortical minus phase activation -- used for statistics and monitoring network performance. Should not be used for learning or other computations.`, 58: `ActP is ActInt activation state at end of fourth quarter, representing the posterior-cortical plus_phase activation -- used for statistics and monitoring network performance. Should not be used for learning or other computations.`, 59: `SpkSt1 is the activation state at specific time point within current state processing window (e.g., 50 msec for beta cycle within standard theta cycle), as saved by SpkSt1() function. Used for example in hippocampus for CA3, CA1 learning`, 60: `SpkSt2 is the activation state at specific time point within current state processing window (e.g., 100 msec for beta cycle within standard theta cycle), as saved by SpkSt2() function. Used for example in hippocampus for CA3, CA1 learning`, 61: `SpkMax is maximum CaSpkP across one theta cycle time window (max of SpkMaxCa) -- used for specialized algorithms that have more phasic behavior within a single trial, e.g., BG Matrix layer gating. Also useful for visualization of peak activity of neurons.`, 62: `SpkMaxCa is Ca integrated like CaSpkP but only starting at MaxCycStart cycle, to prevent inclusion of carryover spiking from prior theta cycle trial -- the PTau time constant otherwise results in significant carryover. 
This is the input to SpkMax`, 63: `SpkBin has aggregated spikes within 50 msec bins across the theta cycle, for computing synaptic calcium efficiently`, 64: ``, 65: ``, 66: ``, 67: ``, 68: ``, 69: ``, 70: ``, 71: `SpkPrv is final CaSpkD activation state at end of previous theta cycle. used for specialized learning mechanisms that operate on delayed sending activations.`, 72: `GeNoise is integrated noise excitatory conductance, added into Ge`, 73: `GeNoiseP is accumulating poisson probability factor for driving excitatory noise spiking -- multiply times uniform random deviate at each time step, until it gets below the target threshold based on poisson lambda as function of noise firing rate.`, 74: `GiNoise is integrated noise inhibotyr conductance, added into Gi`, 75: `GiNoiseP is accumulating poisson probability factor for driving inhibitory noise spiking -- multiply times uniform random deviate at each time step, until it gets below the target threshold based on poisson lambda as a function of noise firing rate.`, 76: `GeExt is extra excitatory conductance added to Ge -- from Ext input, GeCtxt etc`, 77: `GeRaw is raw excitatory conductance (net input) received from senders = current raw spiking drive`, 78: `GeSyn is time-integrated total excitatory synaptic conductance, with an instantaneous rise time from each spike (in GeRaw) and exponential decay with Dt.GeTau, aggregated over pathways -- does *not* include Gbar.E`, 79: `GiRaw is raw inhibitory conductance (net input) received from senders = current raw spiking drive`, 80: `GiSyn is time-integrated total inhibitory synaptic conductance, with an instantaneous rise time from each spike (in GiRaw) and exponential decay with Dt.GiTau, aggregated over pathways -- does *not* include Gbar.I. This is added with computed FFFB inhibition to get the full inhibition in Gi`, 81: `GeInt is integrated running-average activation value computed from Ge with time constant Act.Dt.IntTau, to produce a longer-term integrated value reflecting the overall Ge level across the ThetaCycle time scale (Ge itself fluctuates considerably) -- useful for stats to set strength of connections etc to get neurons into right range of overall excitatory drive`, 82: `GeIntNorm is normalized GeInt value (divided by the layer maximum) -- this is used for learning in layers that require learning on subthreshold activity`, 83: `GiInt is integrated running-average activation value computed from GiSyn with time constant Act.Dt.IntTau, to produce a longer-term integrated value reflecting the overall synaptic Gi level across the ThetaCycle time scale (Gi itself fluctuates considerably) -- useful for stats to set strength of connections etc to get neurons into right range of overall inhibitory drive`, 84: `GModRaw is raw modulatory conductance, received from GType = ModulatoryG pathways`, 85: `GModSyn is syn integrated modulatory conductance, received from GType = ModulatoryG pathways`, 86: `SMaintP is accumulating poisson probability factor for driving self-maintenance by simulating a population of mutually interconnected neurons. multiply times uniform random deviate at each time step, until it gets below the target threshold based on poisson lambda based on accumulating self maint factor`, 87: `GMaintRaw is raw maintenance conductance, received from GType = MaintG pathways`, 88: `GMaintSyn is syn integrated maintenance conductance, integrated using MaintNMDA params.`, 89: `NeurFlags are bit flags for binary state variables, which are converted to / from uint32. 
These need to be in Vars because they can be differential per data (for ext inputs) and are writable (indexes are read only).`}
+var _NeuronVarsDescMap = map[NeuronVars]string{0: `Spike is whether neuron has spiked or not on this cycle (0 or 1).`, 1: `Spiked is 1 if neuron has spiked within the last 10 cycles (msecs), corresponding to a nominal max spiking rate of 100 Hz, 0 otherwise. Useful for visualization and computing activity levels in terms of average spiked levels.`, 2: `Act is rate-coded activation value reflecting instantaneous estimated rate of spiking, based on 1 / ISIAvg. It is integrated over time for ActInt which is then used for performance statistics and layer average activations, etc. Should not be used for learning or other computations: just for stats / display.`, 3: `ActInt is integrated running-average activation value computed from Act with time constant Act.Dt.IntTau, to produce a longer-term integrated value reflecting the overall activation state across the ThetaCycle time scale, as the overall response of network to current input state. This is copied to ActM and ActP at the ends of the minus and plus phases, respectively, and used in computing some performance-level statistics (based on ActM). Should not be used for learning or other computations.`, 4: `Ge is total excitatory conductance, including all forms of excitation (e.g., NMDA). Does *not* include the Gbar.E factor.`, 5: `Gi is total inhibitory synaptic conductance, i.e., the net inhibitory input to the neuron. Does *not* include the Gbar.I factor.`, 6: `Gk is total potassium conductance, typically reflecting sodium-gated potassium currents involved in adaptation effects. Does *not* include the Gbar.K factor.`, 7: `Inet is net current produced by all channels, which drives update of Vm.`, 8: `Vm is the membrane potential at the cell body, which integrates Inet current over time, and drives spiking at the axon initial segment of the neuron.`, 9: `VmDend is the dendritic membrane potential, which has a slower time constant than Vm and is not subject to the VmR reset after spiking.`, 10: `ISI is the current inter-spike-interval, which counts up since last spike. Starts at -1 when initialized.`, 11: `ISIAvg is the average inter-spike-interval, i.e., the average time interval between spikes, integrated with ISITau rate constant (relatively fast) to capture something close to an instantaneous spiking rate. Starts at -1 when initialized, and goes to -2 after first spike, and is only valid after the second spike post-initialization.`, 12: `Ext is the external input: drives activation of unit from outside influences (e.g., sensory input).`, 13: `Target is the target value: drives learning to produce this activation value.`, 14: `CaM is the spike-driven calcium trace at the neuron level, which then drives longer time-integrated variables: [CaP] and [CaD]. These variables are used for statistics and display to capture spiking activity at different timescales. They fluctuate more than [Act] and [ActInt], but are closer to the biological variables driving learning. CaM is the exponential integration of SpikeG * Spike using the MTau time constant (typically 5), and simulates a calmodulin (CaM) like signal, at an abstract level.`, 15: `CaP is the continuous cascaded integration of [CaM] using the PTau time constant (typically 40), representing a neuron-level, purely spiking version of the plus, LTP direction of weight change in the Kinase learning rule, dependent on CaMKII. 
This is not used for learning (see [LearnCaP]), but instead for statistics as a representation of recent activity.`, 16: `CaD is the continuous cascaded integration [CaP] using the DTau time constant (typically 40), representing a neuron-level, purely spiking version of the minus, LTD direction of weight change in the Kinase learning rule, dependent on DAPK1. This is not used for learning (see [LearnCaD]), but instead for statistics as a representation of trial-level activity.`, 17: `LearnCa is the receiving neuron calcium signal, which is integrated up to [LearnCaP] and [LearnCaD], the difference of which is the temporal error component of the standard axon cortical learning rule. LearnCa combines NMDA via [NmdaCa] and spiking-driven VGCC [VgccCaInt] calcium sources (vs. CaM which only reflects a simple spiking component). The NMDA signal reflects both sending and receiving activity, while the VGCC signal is purely receiver spiking, and a balance of both works best. The synaptic-level trace factor computed from the SpkBin variables on both sender and receiver provides the credit assignment factor, reflecting coincident activity, which can be integrated over longer multi-trial timescales.`, 18: `LearnCaM is the integrated [LearnCa] at the MTau timescale (typically 5), simulating a calmodulin (CaM) like signal, which then drives [LearnCaP], and [LearnCaD] for the delta signal for error-driven learning.`, 19: `LearnCaP is the cascaded integration of [LearnCaM] using the PTau time constant (typically 40), representing the plus, LTP direction of weight change, capturing the function of CaMKII in the Kinase learning rule.`, 20: `LearnCaD is the cascaded integration of [LearnCaP] using the DTau time constant (typically 40), representing the minus, LTD direction of weight change, capturing the function of DAPK1 in the Kinase learning rule.`, 21: `CaDiff is difference between [LearnCaP] - [LearnCaD]. This is the error signal that drives error-driven learning.`, 22: `RLRate is recv-unit based learning rate multiplier, reflecting the sigmoid derivative computed from [CaD] of recv unit, and the normalized difference (CaP - CaD) / MAX(CaP - CaD).`, 23: `GnmdaSyn is the integrated NMDA synaptic current on the receiving neuron. It adds GeRaw and decays with a time constant.`, 24: `Gnmda is the net postsynaptic (receiving) NMDA conductance, after Mg V-gating and Gbar. This is added directly to Ge as it has the same reversal potential.`, 25: `GnmdaLrn is learning version of integrated NMDA recv synaptic current. It adds [GeRaw] and decays with a time constant. This drives [NmdaCa] that then drives [LearnCa] for learning.`, 26: `GnmdaMaint is net postsynaptic maintenance NMDA conductance, computed from [GMaintSyn] and [GMaintRaw], after Mg V-gating and Gbar. This is added directly to Ge as it has the same reversal potential.`, 27: `NmdaCa is NMDA calcium computed from GnmdaLrn, drives learning via CaM.`, 28: `Gvgcc is conductance (via Ca) for VGCC voltage gated calcium channels.`, 29: `VgccM is activation gate of VGCC channels.`, 30: `VgccH inactivation gate of VGCC channels.`, 31: `VgccCa is the instantaneous VGCC calcium flux: can be driven by spiking or directly from Gvgcc.`, 32: `VgccCaInt is the time-integrated VGCC calcium flux. This is actually what drives learning.`, 33: `Burst is the layer 5 IB intrinsic bursting neural activation value, computed by thresholding the [CaP] value in Super superficial layers.`, 34: `BurstPrv is previous Burst bursting activation from prior time step. 
Used for context-based learning.`, 35: `CtxtGe is context (temporally delayed) excitatory conductance, driven by deep bursting at end of the plus phase, for CT layers.`, 36: `CtxtGeRaw is raw update of context (temporally delayed) excitatory conductance, driven by deep bursting at end of the plus phase, for CT layers.`, 37: `CtxtGeOrig is original CtxtGe value prior to any decay factor. Updates at end of plus phase.`, 38: `GgabaB is net GABA-B conductance, after Vm gating and Gbar + Gbase. Applies to Gk, not Gi, for GIRK, with .1 reversal potential.`, 39: `GABAB is GABA-B / GIRK activation, which is a time-integrated value with rise and decay time constants.`, 40: `GABABx is GABA-B / GIRK internal drive variable. This gets the raw activation and decays.`, 41: `Gak is the conductance of A-type K potassium channels.`, 42: `SSGiDend is the amount of SST+ somatostatin positive slow spiking inhibition applied to dendritic Vm (VmDend).`, 43: `GknaMed is the conductance of sodium-gated potassium channel (KNa) medium dynamics (Slick), which produces accommodation / adaptation.`, 44: `GknaSlow is the conductance of sodium-gated potassium channel (KNa) slow dynamics (Slack), which produces accommodation / adaptation.`, 45: `Gkir is the conductance of the potassium (K) inwardly rectifying channel, which is strongest at low membrane potentials. Can be modulated by DA.`, 46: `KirM is the Kir potassium (K) inwardly rectifying gating value.`, 47: `Gsk is Calcium-gated potassium channel conductance as a function of Gbar * SKCaM.`, 48: `SKCaIn is intracellular calcium store level, available to be released with spiking as SKCaR, which can bind to SKCa receptors and drive K current. replenishment is a function of spiking activity being below a threshold.`, 49: `SKCaR is the released amount of intracellular calcium, from SKCaIn, as a function of spiking events. This can bind to SKCa channels and drive K currents.`, 50: `SKCaM is the Calcium-gated potassium channel gating factor, driven by SKCaR via a Hill equation as in chans.SKPCaParams.`, 51: `Gmahp is medium time scale AHP conductance.`, 52: `MahpN is accumulating voltage-gated gating value for the medium time scale AHP.`, 53: `Gsahp is slow time scale AHP conductance.`, 54: `SahpCa is slowly accumulating calcium value that drives the slow AHP.`, 55: `SahpN is the sAHP gating value.`, 56: `ActM is ActInt activation state at end of third quarter, representing the posterior-cortical minus phase activation. This is used for statistics and monitoring network performance. Should not be used for learning or other computations.`, 57: `ActP is ActInt activation state at end of fourth quarter, representing the posterior-cortical plus_phase activation. This is used for statistics and monitoring network performance. Should not be used for learning or other computations.`, 58: `Beta1 is the activation state at the first beta cycle within current state processing window (i.e., at 50 msec), as saved by Beta1() function. Used for example in hippocampus for CA3, CA1 learning.`, 59: `Beta2 is the activation state at the second beta cycle within current state processing window (i.e., at 100 msec), as saved by Beta2() function. Used for example in hippocampus for CA3, CA1 learning.`, 60: `SpkMax is the maximum [CaP] across one theta cycle time window (max of SpkMaxCa). It is used for specialized algorithms that have more phasic behavior within a single trial, e.g., BG Matrix layer gating. 
Also useful for visualization of peak activity of neurons.`, 61: `SpkMaxCa is the Ca integrated like [CaP] but only starting at the MaxCycStart cycle, to prevent inclusion of carryover spiking from prior theta cycle trial. The PTau time constant otherwise results in significant carryover. This is the input to SpkMax.`, 62: `SpkBin has aggregated spikes within 50 msec bins across the theta cycle, for computing synaptic calcium efficiently.`, 63: ``, 64: ``, 65: ``, 66: ``, 67: ``, 68: ``, 69: ``, 70: `SpkPrv is the final [CaD] activation state at end of previous theta cycle. This is used for specialized learning mechanisms that operate on delayed sending activations.`, 71: `GeNoise is integrated noise excitatory conductance, added into Ge.`, 72: `GeNoiseP is accumulating poisson probability factor for driving excitatory noise spiking. Multiply times uniform random deviate at each time step, until it gets below the target threshold based on poisson lambda as function of noise firing rate.`, 73: `GiNoise is integrated noise inhibitory conductance, added into Gi.`, 74: `GiNoiseP is accumulating poisson probability factor for driving inhibitory noise spiking. Multiply times uniform random deviate at each time step, until it gets below the target threshold based on poisson lambda as a function of noise firing rate.`, 75: `GeExt is extra excitatory conductance added to Ge, from Ext input, GeCtxt etc.`, 76: `GeRaw is the raw excitatory conductance (net input) received from senders = current raw spiking drive.`, 77: `GeSyn is the time-integrated total excitatory synaptic conductance, with an instantaneous rise time from each spike (in GeRaw) and exponential decay with Dt.GeTau, aggregated over pathways. Does *not* include Gbar.E.`, 78: `GiRaw is the raw inhibitory conductance (net input) received from senders = current raw spiking drive.`, 79: `GiSyn is time-integrated total inhibitory synaptic conductance, with an instantaneous rise time from each spike (in GiRaw) and exponential decay with Dt.GiTau, aggregated over pathways -- does *not* include Gbar.I. This is added with computed FFFB inhibition to get the full inhibition in Gi.`, 80: `GeInt is integrated running-average activation value computed from Ge with time constant Act.Dt.IntTau, to produce a longer-term integrated value reflecting the overall Ge level across the ThetaCycle time scale (Ge itself fluctuates considerably). This is useful for stats to set strength of connections etc to get neurons into right range of overall excitatory drive.`, 81: `GeIntNorm is normalized GeInt value (divided by the layer maximum). This is used for learning in layers that require learning on subthreshold activity.`, 82: `GiInt is integrated running-average activation value computed from GiSyn with time constant Act.Dt.IntTau, to produce a longer-term integrated value reflecting the overall synaptic Gi level across the ThetaCycle time scale (Gi itself fluctuates considerably). Useful for stats to set strength of connections etc to get neurons into right range of overall inhibitory drive.`, 83: `GModRaw is raw modulatory conductance, received from GType = ModulatoryG pathways.`, 84: `GModSyn is syn integrated modulatory conductance, received from GType = ModulatoryG pathways.`, 85: `SMaintP is accumulating poisson probability factor for driving self-maintenance by simulating a population of mutually interconnected neurons. 
Multiply times uniform random deviate at each time step, until it gets below the target threshold based on poisson lambda based on accumulating self maint factor.`, 86: `GMaintRaw is raw maintenance conductance, received from GType = MaintG pathways.`, 87: `GMaintSyn is syn integrated maintenance conductance, integrated using MaintNMDA params.`, 88: `NeurFlags are bit flags for binary state variables, which are converted to / from uint32. These need to be in Vars because they can be differential per data (for ext inputs) and are writable (indexes are read only).`}
-var _NeuronVarsMap = map[NeuronVars]string{0: `Spike`, 1: `Spiked`, 2: `Act`, 3: `ActInt`, 4: `Ge`, 5: `Gi`, 6: `Gk`, 7: `Inet`, 8: `Vm`, 9: `VmDend`, 10: `ISI`, 11: `ISIAvg`, 12: `Ext`, 13: `Target`, 14: `CaSpkM`, 15: `CaSpkP`, 16: `CaSpkD`, 17: `CaSpkPM`, 18: `CaLrn`, 19: `NrnCaM`, 20: `NrnCaP`, 21: `NrnCaD`, 22: `CaDiff`, 23: `RLRate`, 24: `GnmdaSyn`, 25: `Gnmda`, 26: `GnmdaLrn`, 27: `GnmdaMaint`, 28: `NmdaCa`, 29: `Gvgcc`, 30: `VgccM`, 31: `VgccH`, 32: `VgccCa`, 33: `VgccCaInt`, 34: `Burst`, 35: `BurstPrv`, 36: `CtxtGe`, 37: `CtxtGeRaw`, 38: `CtxtGeOrig`, 39: `GgabaB`, 40: `GABAB`, 41: `GABABx`, 42: `Gak`, 43: `SSGiDend`, 44: `GknaMed`, 45: `GknaSlow`, 46: `Gkir`, 47: `KirM`, 48: `Gsk`, 49: `SKCaIn`, 50: `SKCaR`, 51: `SKCaM`, 52: `Gmahp`, 53: `MahpN`, 54: `Gsahp`, 55: `SahpCa`, 56: `SahpN`, 57: `ActM`, 58: `ActP`, 59: `SpkSt1`, 60: `SpkSt2`, 61: `SpkMax`, 62: `SpkMaxCa`, 63: `SpkBin0`, 64: `SpkBin1`, 65: `SpkBin2`, 66: `SpkBin3`, 67: `SpkBin4`, 68: `SpkBin5`, 69: `SpkBin6`, 70: `SpkBin7`, 71: `SpkPrv`, 72: `GeNoise`, 73: `GeNoiseP`, 74: `GiNoise`, 75: `GiNoiseP`, 76: `GeExt`, 77: `GeRaw`, 78: `GeSyn`, 79: `GiRaw`, 80: `GiSyn`, 81: `GeInt`, 82: `GeIntNorm`, 83: `GiInt`, 84: `GModRaw`, 85: `GModSyn`, 86: `SMaintP`, 87: `GMaintRaw`, 88: `GMaintSyn`, 89: `NeurFlags`}
+var _NeuronVarsMap = map[NeuronVars]string{0: `Spike`, 1: `Spiked`, 2: `Act`, 3: `ActInt`, 4: `Ge`, 5: `Gi`, 6: `Gk`, 7: `Inet`, 8: `Vm`, 9: `VmDend`, 10: `ISI`, 11: `ISIAvg`, 12: `Ext`, 13: `Target`, 14: `CaM`, 15: `CaP`, 16: `CaD`, 17: `LearnCa`, 18: `LearnCaM`, 19: `LearnCaP`, 20: `LearnCaD`, 21: `CaDiff`, 22: `RLRate`, 23: `GnmdaSyn`, 24: `Gnmda`, 25: `GnmdaLrn`, 26: `GnmdaMaint`, 27: `NmdaCa`, 28: `Gvgcc`, 29: `VgccM`, 30: `VgccH`, 31: `VgccCa`, 32: `VgccCaInt`, 33: `Burst`, 34: `BurstPrv`, 35: `CtxtGe`, 36: `CtxtGeRaw`, 37: `CtxtGeOrig`, 38: `GgabaB`, 39: `GABAB`, 40: `GABABx`, 41: `Gak`, 42: `SSGiDend`, 43: `GknaMed`, 44: `GknaSlow`, 45: `Gkir`, 46: `KirM`, 47: `Gsk`, 48: `SKCaIn`, 49: `SKCaR`, 50: `SKCaM`, 51: `Gmahp`, 52: `MahpN`, 53: `Gsahp`, 54: `SahpCa`, 55: `SahpN`, 56: `ActM`, 57: `ActP`, 58: `Beta1`, 59: `Beta2`, 60: `SpkMax`, 61: `SpkMaxCa`, 62: `SpkBin0`, 63: `SpkBin1`, 64: `SpkBin2`, 65: `SpkBin3`, 66: `SpkBin4`, 67: `SpkBin5`, 68: `SpkBin6`, 69: `SpkBin7`, 70: `SpkPrv`, 71: `GeNoise`, 72: `GeNoiseP`, 73: `GiNoise`, 74: `GiNoiseP`, 75: `GeExt`, 76: `GeRaw`, 77: `GeSyn`, 78: `GiRaw`, 79: `GiSyn`, 80: `GeInt`, 81: `GeIntNorm`, 82: `GiInt`, 83: `GModRaw`, 84: `GModSyn`, 85: `SMaintP`, 86: `GMaintRaw`, 87: `GMaintSyn`, 88: `NeurFlags`}
// String returns the string representation of this NeuronVars value.
func (i NeuronVars) String() string { return enums.String(i, _NeuronVarsMap) }
@@ -626,7 +626,7 @@ const PathTypesN PathTypes = 12
var _PathTypesValueMap = map[string]PathTypes{`ForwardPath`: 0, `BackPath`: 1, `LateralPath`: 2, `InhibPath`: 3, `CTCtxtPath`: 4, `RWPath`: 5, `TDPredPath`: 6, `BLAPath`: 7, `HipPath`: 8, `VSPatchPath`: 9, `VSMatrixPath`: 10, `DSMatrixPath`: 11}
-var _PathTypesDescMap = map[PathTypes]string{0: `Forward is a feedforward, bottom-up pathway from sensory inputs to higher layers`, 1: `Back is a feedback, top-down pathway from higher layers back to lower layers`, 2: `Lateral is a lateral pathway within the same layer / area`, 3: `Inhib is an inhibitory pathway that drives inhibitory synaptic conductances instead of the default excitatory ones.`, 4: `CTCtxt are pathways from Superficial layers to CT layers that send Burst activations drive updating of CtxtGe excitatory conductance, at end of plus (51B Bursting) phase. Biologically, this pathway comes from the PT layer 5IB neurons, but it is simpler to use the Super neurons directly, and PT are optional for most network types. These pathways also use a special learning rule that takes into account the temporal delays in the activation states. Can also add self context from CT for deeper temporal context.`, 5: `RWPath does dopamine-modulated learning for reward prediction: Da * Send.CaSpkP (integrated current spiking activity). Uses RLPredPath parameters. Use in RWPredLayer typically to generate reward predictions. If the Da sign is positive, the first recv unit learns fully; for negative, second one learns fully. Lower lrate applies for opposite cases. Weights are positive-only.`, 6: `TDPredPath does dopamine-modulated learning for reward prediction: DWt = Da * Send.SpkPrv (activity on *previous* timestep) Uses RLPredPath parameters. Use in TDPredLayer typically to generate reward predictions. If the Da sign is positive, the first recv unit learns fully; for negative, second one learns fully. Lower lrate applies for opposite cases. Weights are positive-only.`, 7: `BLAPath implements the Rubicon BLA learning rule: dW = ACh * X_t-1 * (Y_t - Y_t-1) The recv delta is across trials, where the US should activate on trial boundary, to enable sufficient time for gating through to OFC, so BLA initially learns based on US present - US absent. It can also learn based on CS onset if there is a prior CS that predicts that.`, 8: ``, 9: `VSPatchPath implements the VSPatch learning rule: dW = ACh * DA * X * Y where DA is D1 vs. D2 modulated DA level, X = sending activity factor, Y = receiving activity factor, and ACh provides overall modulation.`, 10: `VSMatrixPath is for ventral striatum matrix (SPN / MSN) neurons supporting trace-based learning, where an initial trace of synaptic co-activity is formed, and then modulated by subsequent phasic dopamine & ACh when an outcome occurs. This bridges the temporal gap between gating activity and subsequent outcomes, and is based biologically on synaptic tags. Trace is reset at time of reward based on ACh level (from CINs in biology).`, 11: `DSMatrixPath is for dorsal striatum matrix (SPN / MSN) neurons supporting trace-based learning, where an initial trace of synaptic co-activity is formed, and then modulated by subsequent phasic dopamine & ACh when an outcome occurs. This bridges the temporal gap between gating activity and subsequent outcomes, and is based biologically on synaptic tags. Trace is reset at time of reward based on ACh level (from CINs in biology).`}
+var _PathTypesDescMap = map[PathTypes]string{0: `Forward is a feedforward, bottom-up pathway from sensory inputs to higher layers`, 1: `Back is a feedback, top-down pathway from higher layers back to lower layers`, 2: `Lateral is a lateral pathway within the same layer / area`, 3: `Inhib is an inhibitory pathway that drives inhibitory synaptic conductances instead of the default excitatory ones.`, 4: `CTCtxt are pathways from Superficial layers to CT layers that send Burst activations drive updating of CtxtGe excitatory conductance, at end of plus (51B Bursting) phase. Biologically, this pathway comes from the PT layer 5IB neurons, but it is simpler to use the Super neurons directly, and PT are optional for most network types. These pathways also use a special learning rule that takes into account the temporal delays in the activation states. Can also add self context from CT for deeper temporal context.`, 5: `RWPath does dopamine-modulated learning for reward prediction: Da * Send.CaP (integrated current spiking activity). Uses RLPredPath parameters. Use in RWPredLayer typically to generate reward predictions. If the Da sign is positive, the first recv unit learns fully; for negative, second one learns fully. Lower lrate applies for opposite cases. Weights are positive-only.`, 6: `TDPredPath does dopamine-modulated learning for reward prediction: DWt = Da * Send.SpkPrv (activity on *previous* timestep) Uses RLPredPath parameters. Use in TDPredLayer typically to generate reward predictions. If the Da sign is positive, the first recv unit learns fully; for negative, second one learns fully. Lower lrate applies for opposite cases. Weights are positive-only.`, 7: `BLAPath implements the Rubicon BLA learning rule: dW = ACh * X_t-1 * (Y_t - Y_t-1) The recv delta is across trials, where the US should activate on trial boundary, to enable sufficient time for gating through to OFC, so BLA initially learns based on US present - US absent. It can also learn based on CS onset if there is a prior CS that predicts that.`, 8: ``, 9: `VSPatchPath implements the VSPatch learning rule: dW = ACh * DA * X * Y where DA is D1 vs. D2 modulated DA level, X = sending activity factor, Y = receiving activity factor, and ACh provides overall modulation.`, 10: `VSMatrixPath is for ventral striatum matrix (SPN / MSN) neurons supporting trace-based learning, where an initial trace of synaptic co-activity is formed, and then modulated by subsequent phasic dopamine & ACh when an outcome occurs. This bridges the temporal gap between gating activity and subsequent outcomes, and is based biologically on synaptic tags. Trace is reset at time of reward based on ACh level (from CINs in biology).`, 11: `DSMatrixPath is for dorsal striatum matrix (SPN / MSN) neurons supporting trace-based learning, where an initial trace of synaptic co-activity is formed, and then modulated by subsequent phasic dopamine & ACh when an outcome occurs. This bridges the temporal gap between gating activity and subsequent outcomes, and is based biologically on synaptic tags. Trace is reset at time of reward based on ACh level (from CINs in biology).`}
var _PathTypesMap = map[PathTypes]string{0: `ForwardPath`, 1: `BackPath`, 2: `LateralPath`, 3: `InhibPath`, 4: `CTCtxtPath`, 5: `RWPath`, 6: `TDPredPath`, 7: `BLAPath`, 8: `HipPath`, 9: `VSPatchPath`, 10: `VSMatrixPath`, 11: `DSMatrixPath`}
@@ -855,11 +855,11 @@ const AvgMaxVarsN AvgMaxVars = 7
//gosl:end
-var _AvgMaxVarsValueMap = map[string]AvgMaxVars{`CaSpkP`: 0, `CaSpkD`: 1, `SpkMax`: 2, `Act`: 3, `GeInt`: 4, `GiInt`: 5, `AvgDif`: 6}
+var _AvgMaxVarsValueMap = map[string]AvgMaxVars{`CaP`: 0, `CaD`: 1, `SpkMax`: 2, `Act`: 3, `GeInt`: 4, `GiInt`: 5, `AvgDif`: 6}
-var _AvgMaxVarsDescMap = map[AvgMaxVars]string{0: `CaSpkP is the primary variable for tracking overall pool activity over a recent timescale, integrated at roughly 40 msec time constant.`, 1: `CaSpkD is a slower moving activation signal, capable of reflecting activity over the entire trial.`, 2: `SpkMax is the maximum CaSpkP over the trial of processing.`, 3: `Act is the computed rate-code equivalent of current spike rate.`, 4: `GeInt is the integrated running-average value of excitatory conductance.`, 5: `GiInt is the integrated running-average value of inhibitory conductance.`, 6: `AvgDif is the integrated AvgDif between ActPct - TrgAvg. Only the Plus phase is used.`}
+var _AvgMaxVarsDescMap = map[AvgMaxVars]string{0: `CaP is the primary variable for tracking overall pool activity over a recent timescale, integrated at roughly 40 msec time constant.`, 1: `CaD is a slower moving activation signal, capable of reflecting activity over the entire trial.`, 2: `SpkMax is the maximum CaP over the trial of processing.`, 3: `Act is the computed rate-code equivalent of current spike rate.`, 4: `GeInt is the integrated running-average value of excitatory conductance.`, 5: `GiInt is the integrated running-average value of inhibitory conductance.`, 6: `AvgDif is the integrated AvgDif between ActPct - TrgAvg. Only the Plus phase is used.`}
-var _AvgMaxVarsMap = map[AvgMaxVars]string{0: `CaSpkP`, 1: `CaSpkD`, 2: `SpkMax`, 3: `Act`, 4: `GeInt`, 5: `GiInt`, 6: `AvgDif`}
+var _AvgMaxVarsMap = map[AvgMaxVars]string{0: `CaP`, 1: `CaD`, 2: `SpkMax`, 3: `Act`, 4: `GeInt`, 5: `GiInt`, 6: `AvgDif`}
// String returns the string representation of this AvgMaxVars value.
func (i AvgMaxVars) String() string { return enums.String(i, _AvgMaxVarsMap) }
diff --git a/axon/gosl.go b/axon/gosl.go
index 05fa72e53..828cf3ea6 100644
--- a/axon/gosl.go
+++ b/axon/gosl.go
@@ -62,6 +62,8 @@ func GPUInit() {
sy := gpu.NewComputeSystem(gp, "Default")
GPUSystem = sy
gpu.NewComputePipelineShaderFS(shaders, "shaders/ApplyExtsNeuron.wgsl", sy)
+ gpu.NewComputePipelineShaderFS(shaders, "shaders/Beta1Neuron.wgsl", sy)
+ gpu.NewComputePipelineShaderFS(shaders, "shaders/Beta2Neuron.wgsl", sy)
gpu.NewComputePipelineShaderFS(shaders, "shaders/BetweenGi.wgsl", sy)
gpu.NewComputePipelineShaderFS(shaders, "shaders/CycleInc.wgsl", sy)
gpu.NewComputePipelineShaderFS(shaders, "shaders/CycleNeuron.wgsl", sy)
@@ -204,6 +206,90 @@ func RunOneApplyExtsNeuron(n int, syncVars ...GPUVars) {
RunApplyExtsNeuronCPU(n)
}
}
+// RunBeta1Neuron runs the Beta1Neuron kernel with given number of elements,
+// on either the CPU or GPU depending on the UseGPU variable.
+// Can call multiple Run* kernels in a row, which are then all launched
+// in the same command submission on the GPU, which is by far the most efficient.
+// MUST call RunDone (with optional vars to sync) after all Run calls.
+// Alternatively, a single-shot RunOneBeta1Neuron call does Run and Done for a
+// single run-and-sync case.
+func RunBeta1Neuron(n int) {
+ if UseGPU {
+ RunBeta1NeuronGPU(n)
+ } else {
+ RunBeta1NeuronCPU(n)
+ }
+}
+
+// RunBeta1NeuronGPU runs the Beta1Neuron kernel on the GPU. See [RunBeta1Neuron] for more info.
+func RunBeta1NeuronGPU(n int) {
+ sy := GPUSystem
+ pl := sy.ComputePipelines["Beta1Neuron"]
+ ce, _ := sy.BeginComputePass()
+ pl.Dispatch1D(ce, n, 64)
+}
+
+// RunBeta1NeuronCPU runs the Beta1Neuron kernel on the CPU.
+func RunBeta1NeuronCPU(n int) {
+ gpu.VectorizeFunc(0, n, Beta1Neuron)
+}
+
+// RunOneBeta1Neuron runs the Beta1Neuron kernel with given number of elements,
+// on either the CPU or GPU depending on the UseGPU variable.
+// This version then calls RunDone with the given variables to sync
+// after the Run, for a single-shot Run-and-Done call. If multiple kernels
+// can be run in sequence, it is much more efficient to do multiple Run*
+// calls followed by a RunDone call.
+func RunOneBeta1Neuron(n int, syncVars ...GPUVars) {
+ if UseGPU {
+ RunBeta1NeuronGPU(n)
+ RunDone(syncVars...)
+ } else {
+ RunBeta1NeuronCPU(n)
+ }
+}
+// RunBeta2Neuron runs the Beta2Neuron kernel with given number of elements,
+// on either the CPU or GPU depending on the UseGPU variable.
+// Can call multiple Run* kernels in a row, which are then all launched
+// in the same command submission on the GPU, which is by far the most efficient.
+// MUST call RunDone (with optional vars to sync) after all Run calls.
+// Alternatively, a single-shot RunOneBeta2Neuron call does Run and Done for a
+// single run-and-sync case.
+func RunBeta2Neuron(n int) {
+ if UseGPU {
+ RunBeta2NeuronGPU(n)
+ } else {
+ RunBeta2NeuronCPU(n)
+ }
+}
+
+// RunBeta2NeuronGPU runs the Beta2Neuron kernel on the GPU. See [RunBeta2Neuron] for more info.
+func RunBeta2NeuronGPU(n int) {
+ sy := GPUSystem
+ pl := sy.ComputePipelines["Beta2Neuron"]
+ ce, _ := sy.BeginComputePass()
+ pl.Dispatch1D(ce, n, 64)
+}
+
+// RunBeta2NeuronCPU runs the Beta2Neuron kernel on the CPU.
+func RunBeta2NeuronCPU(n int) {
+ gpu.VectorizeFunc(0, n, Beta2Neuron)
+}
+
+// RunOneBeta2Neuron runs the Beta2Neuron kernel with given number of elements,
+// on either the CPU or GPU depending on the UseGPU variable.
+// This version then calls RunDone with the given variables to sync
+// after the Run, for a single-shot Run-and-Done call. If multiple kernels
+// can be run in sequence, it is much more efficient to do multiple Run*
+// calls followed by a RunDone call.
+func RunOneBeta2Neuron(n int, syncVars ...GPUVars) {
+ if UseGPU {
+ RunBeta2NeuronGPU(n)
+ RunDone(syncVars...)
+ } else {
+ RunBeta2NeuronCPU(n)
+ }
+}
// RunBetweenGi runs the BetweenGi kernel with given number of elements,
// on either the CPU or GPU depending on the UseGPU variable.
// Can call multiple Run* kernels in a row, which are then all launched
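For reference, a minimal usage sketch of the new kernel wrappers, following the Run*/RunDone batching contract described in their doc comments. The enclosing function name is hypothetical, and in the actual simulation Beta1 and Beta2 fire at different cycles; they are shown back-to-back here only to illustrate queueing multiple kernels in one GPU submission:

```go
// Hypothetical call site illustrating the batching contract; not part of this patch.
func exampleBetaBatch(nNeurons int) {
	RunBeta1Neuron(nNeurons) // queued on the GPU, or run immediately on the CPU
	RunBeta2Neuron(nNeurons) // normally launched 50 cycles later; shown here only for batching
	RunDone()                // submits the queued kernels; pass GPUVars values to sync data back
}
```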
diff --git a/axon/layerparams.go b/axon/layerparams.go
index e35bae007..38e289012 100644
--- a/axon/layerparams.go
+++ b/axon/layerparams.go
@@ -98,7 +98,7 @@ type LayerParams struct {
Learn LearnNeurParams `display:"add-fields"`
// Bursts has [BurstParams] that determine how the 5IB Burst activation
- // is computed from CaSpkP integrated spiking values in Super layers.
+ // is computed from CaP integrated spiking values in Super layers.
Bursts BurstParams `display:"inline"`
// CT has params for the CT corticothalamic layer and PTPred layer that
@@ -108,7 +108,7 @@ type LayerParams struct {
// Pulv has parameters for how the plus-phase (outcome) state of Pulvinar
// thalamic relay cell neurons is computed from the corresponding driver
- // neuron Burst activation (or CaSpkP if not Super).
+ // neuron Burst activation (or CaP if not Super).
Pulv PulvParams `display:"inline"`
// Matrix has parameters for BG Striatum Matrix MSN layers, which are
diff --git a/axon/layervars.go b/axon/layervars.go
index 51cd66416..af1e48892 100644
--- a/axon/layervars.go
+++ b/axon/layervars.go
@@ -44,7 +44,7 @@ const (
LayerPhaseDiffVar
// LayerRT is the reaction time for this layer in cycles, which is -1 until the
- // Max CaSpkP level (after MaxCycStart) exceeds the Act.Attn.RTThr threshold.
+ // Max CaP level (after MaxCycStart) exceeds the Act.Attn.RTThr threshold.
LayerRT
// LayerRewPredPos is the positive-valued Reward Prediction value, for
diff --git a/axon/learn-path.go b/axon/learn-path.go
index ff5d48f9f..b2d3ad9c5 100644
--- a/axon/learn-path.go
+++ b/axon/learn-path.go
@@ -106,7 +106,7 @@ func (pt *PathParams) DWtSynCortex(ctx *Context, syni, si, ri, lpi, pi, di uint3
if isTarget {
err = syCaP - syCaD // for target layers, syn Ca drives error signal directly
} else {
- err = tr * (Neurons.Value(int(ri), int(di), int(NrnCaP)) - Neurons.Value(int(ri), int(di), int(NrnCaD))) // hiddens: recv NMDA Ca drives error signal w/ trace credit
+ err = tr * (Neurons.Value(int(ri), int(di), int(LearnCaP)) - Neurons.Value(int(ri), int(di), int(LearnCaD))) // hiddens: recv NMDA Ca drives error signal w/ trace credit
}
// note: trace ensures that nothing changes for inactive synapses..
// sb immediately -- enters into zero sum.
@@ -128,10 +128,10 @@ func (pt *PathParams) DWtSynCortex(ctx *Context, syni, si, ri, lpi, pi, di uint3
// Uses synaptically integrated spiking, computed at the Theta cycle interval.
// This is the trace version for hidden units, and uses syn CaP - CaD for targets.
func (pt *PathParams) DWtSynHebb(ctx *Context, syni, si, ri, lpi, pi, di uint32) {
- rNrnCaP := Neurons.Value(int(ri), int(di), int(NrnCaP))
- sNrnCap := Neurons.Value(int(si), int(di), int(NrnCaP))
+ rLearnCaP := Neurons.Value(int(ri), int(di), int(LearnCaP))
+ sNrnCap := Neurons.Value(int(si), int(di), int(LearnCaP))
lwt := Synapses.Value(int(syni), int(LWt)) // linear weight
- hebb := rNrnCaP * (pt.Learn.Hebb.Up*sNrnCap*(1-lwt) - pt.Learn.Hebb.Down*(1-sNrnCap)*lwt)
+ hebb := rLearnCaP * (pt.Learn.Hebb.Up*sNrnCap*(1-lwt) - pt.Learn.Hebb.Down*(1-sNrnCap)*lwt)
// not: Neurons[ri, di, RLRate]*
SynapseTraces.Set(pt.Learn.LRate.Eff*hebb, int(syni), int(DiDWt), int(di))
}
@@ -156,13 +156,13 @@ func (pt *PathParams) DWtSynHip(ctx *Context, syni, si, ri, lpi, pi, di uint32,
}
// error-driven learning part
- rNrnCaP := Neurons.Value(int(ri), int(di), int(NrnCaP))
- rNrnCaD := Neurons.Value(int(ri), int(di), int(NrnCaD))
+ rLearnCaP := Neurons.Value(int(ri), int(di), int(LearnCaP))
+ rLearnCaD := Neurons.Value(int(ri), int(di), int(LearnCaD))
var err float32
if isTarget {
err = syCaP - syCaD // for target layers, syn Ca drives error signal directly
} else {
- err = tr * (rNrnCaP - rNrnCaD) // hiddens: recv NMDA Ca drives error signal w/ trace credit
+ err = tr * (rLearnCaP - rLearnCaD) // hiddens: recv NMDA Ca drives error signal w/ trace credit
}
// note: trace ensures that nothing changes for inactive synapses..
// sb immediately -- enters into zero sum.
@@ -175,10 +175,10 @@ func (pt *PathParams) DWtSynHip(ctx *Context, syni, si, ri, lpi, pi, di uint32,
}
// hebbian-learning part
- sNrnCap := Neurons.Value(int(si), int(di), int(NrnCaP))
+ sNrnCap := Neurons.Value(int(si), int(di), int(LearnCaP))
savg := 0.5 + pt.Hip.SAvgCor*(pt.Hip.SNominal-0.5)
savg = 0.5 / math32.Max(pt.Hip.SAvgThr, savg) // keep this Sending Average Correction term within bounds (SAvgThr)
- hebb := rNrnCaP * (sNrnCap*(savg-lwt) - (1-sNrnCap)*lwt)
+ hebb := rLearnCaP * (sNrnCap*(savg-lwt) - (1-sNrnCap)*lwt)
// setting delta weight (note: impossible to be CTCtxtPath)
dwt := Neurons.Value(int(ri), int(di), int(RLRate)) * pt.Learn.LRate.Eff * (pt.Hip.Hebb*hebb + pt.Hip.Err*err)
@@ -193,14 +193,14 @@ func (pt *PathParams) DWtSynBLA(ctx *Context, syni, si, ri, lpi, pi, di uint32)
dwt := float32(0)
ach := GlobalScalars.Value(int(GvACh), int(di))
if GlobalScalars.Value(int(GvHasRew), int(di)) > 0 { // learn and reset
- ract := Neurons.Value(int(ri), int(di), int(CaSpkD))
+ ract := Neurons.Value(int(ri), int(di), int(CaD))
if ract < pt.Learn.Trace.LearnThr {
ract = 0
}
tr := SynapseTraces.Value(int(syni), int(Tr), int(di))
ustr := pt.BLA.USTrace
tr = ustr*Neurons.Value(int(si), int(di), int(Burst)) + (1.0-ustr)*tr
- delta := Neurons.Value(int(ri), int(di), int(CaSpkP)) - Neurons.Value(int(ri), int(di), int(SpkPrv))
+ delta := Neurons.Value(int(ri), int(di), int(CaP)) - Neurons.Value(int(ri), int(di), int(SpkPrv))
if delta < 0 { // neg delta learns slower in Acq, not Ext
delta *= pt.BLA.NegDeltaLRate
}
@@ -255,7 +255,7 @@ func (pt *PathParams) DWtSynRWPred(ctx *Context, syni, si, ri, lpi, pi, di uint3
}
}
- dwt := da * Neurons.Value(int(si), int(di), int(CaSpkP)) // no recv unit activation
+ dwt := da * Neurons.Value(int(si), int(di), int(CaP)) // no recv unit activation
SynapseTraces.Set(eff_lr*dwt, int(syni), int(DiDWt), int(di))
}
@@ -296,9 +296,9 @@ func (pt *PathParams) DWtSynVSMatrix(ctx *Context, syni, si, ri, lpi, pi, di uin
}
rlr := Neurons.Value(int(ri), int(di), int(RLRate))
- rplus := Neurons.Value(int(ri), int(di), int(CaSpkP))
- rminus := Neurons.Value(int(ri), int(di), int(CaSpkD))
- sact := Neurons.Value(int(si), int(di), int(CaSpkD))
+ rplus := Neurons.Value(int(ri), int(di), int(CaP))
+ rminus := Neurons.Value(int(ri), int(di), int(CaD))
+ sact := Neurons.Value(int(si), int(di), int(CaD))
dtr := ach * (pt.Matrix.Delta * sact * (rplus - rminus))
if rminus > pt.Learn.Trace.LearnThr { // key: prevents learning if < threshold
dtr += ach * (pt.Matrix.Credit * sact * rminus)
@@ -333,9 +333,9 @@ func (pt *PathParams) DWtSynDSMatrix(ctx *Context, syni, si, ri, lpi, pi, di uin
SynapseTraces.Set(0.0, int(syni), int(DTr), int(di))
} else {
pfmod := pt.Matrix.BasePF + Neurons.Value(int(ri), int(di), int(GModSyn))
- rplus := Neurons.Value(int(ri), int(di), int(CaSpkP))
- rminus := Neurons.Value(int(ri), int(di), int(CaSpkD))
- sact := Neurons.Value(int(si), int(di), int(CaSpkD))
+ rplus := Neurons.Value(int(ri), int(di), int(CaP))
+ rminus := Neurons.Value(int(ri), int(di), int(CaD))
+ sact := Neurons.Value(int(si), int(di), int(CaD))
dtr := rlr * (pt.Matrix.Delta * sact * (rplus - rminus))
if rminus > pt.Learn.Trace.LearnThr { // key: prevents learning if < threshold
dtr += rlr * (pt.Matrix.Credit * pfmod * sact * rminus)
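The renamed error term in DWtSynCortex can be restated as a standalone sketch (the function name and plain float arguments are illustrative; the real code reads the LearnCaP/LearnCaD values from the global Neurons tensor):

```go
// Illustrative restatement of the DWtSynCortex error computation after the rename.
func cortexErr(isTarget bool, syCaP, syCaD, tr, rLearnCaP, rLearnCaD float32) float32 {
	if isTarget {
		return syCaP - syCaD // target layers: synaptic Ca drives the error directly
	}
	// hidden layers: receiver LearnCa (NMDA + VGCC) supplies the error,
	// gated by the synaptic trace tr for credit assignment
	return tr * (rLearnCaP - rLearnCaD)
}
```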
diff --git a/axon/learn-path.goal b/axon/learn-path.goal
index 0dd6f612c..63f81f28e 100644
--- a/axon/learn-path.goal
+++ b/axon/learn-path.goal
@@ -104,7 +104,7 @@ func (pt *PathParams) DWtSynCortex(ctx *Context, syni, si, ri, lpi, pi, di uint3
if isTarget {
err = syCaP - syCaD // for target layers, syn Ca drives error signal directly
} else {
- err = tr * (Neurons[ri, di, NrnCaP] - Neurons[ri, di, NrnCaD]) // hiddens: recv NMDA Ca drives error signal w/ trace credit
+ err = tr * (Neurons[ri, di, LearnCaP] - Neurons[ri, di, LearnCaD]) // hiddens: recv NMDA Ca drives error signal w/ trace credit
}
// note: trace ensures that nothing changes for inactive synapses..
// sb immediately -- enters into zero sum.
@@ -126,10 +126,10 @@ func (pt *PathParams) DWtSynCortex(ctx *Context, syni, si, ri, lpi, pi, di uint3
// Uses synaptically integrated spiking, computed at the Theta cycle interval.
// This is the trace version for hidden units, and uses syn CaP - CaD for targets.
func (pt *PathParams) DWtSynHebb(ctx *Context, syni, si, ri, lpi, pi, di uint32) {
- rNrnCaP := Neurons[ri, di, NrnCaP]
- sNrnCap := Neurons[si, di, NrnCaP]
+ rLearnCaP := Neurons[ri, di, LearnCaP]
+ sNrnCap := Neurons[si, di, LearnCaP]
lwt := Synapses[syni, LWt] // linear weight
- hebb := rNrnCaP * (pt.Learn.Hebb.Up*sNrnCap*(1-lwt) - pt.Learn.Hebb.Down*(1-sNrnCap)*lwt)
+ hebb := rLearnCaP * (pt.Learn.Hebb.Up*sNrnCap*(1-lwt) - pt.Learn.Hebb.Down*(1-sNrnCap)*lwt)
// not: Neurons[ri, di, RLRate]*
SynapseTraces[syni, DiDWt, di] = pt.Learn.LRate.Eff * hebb
}
@@ -154,13 +154,13 @@ func (pt *PathParams) DWtSynHip(ctx *Context, syni, si, ri, lpi, pi, di uint32,
}
// error-driven learning part
- rNrnCaP := Neurons[ri, di, NrnCaP]
- rNrnCaD := Neurons[ri, di, NrnCaD]
+ rLearnCaP := Neurons[ri, di, LearnCaP]
+ rLearnCaD := Neurons[ri, di, LearnCaD]
var err float32
if isTarget {
err = syCaP - syCaD // for target layers, syn Ca drives error signal directly
} else {
- err = tr * (rNrnCaP - rNrnCaD) // hiddens: recv NMDA Ca drives error signal w/ trace credit
+ err = tr * (rLearnCaP - rLearnCaD) // hiddens: recv NMDA Ca drives error signal w/ trace credit
}
// note: trace ensures that nothing changes for inactive synapses..
// sb immediately -- enters into zero sum.
@@ -173,10 +173,10 @@ func (pt *PathParams) DWtSynHip(ctx *Context, syni, si, ri, lpi, pi, di uint32,
}
// hebbian-learning part
- sNrnCap := Neurons[si, di, NrnCaP]
+ sNrnCap := Neurons[si, di, LearnCaP]
savg := 0.5 + pt.Hip.SAvgCor*(pt.Hip.SNominal-0.5)
savg = 0.5 / math32.Max(pt.Hip.SAvgThr, savg) // keep this Sending Average Correction term within bounds (SAvgThr)
- hebb := rNrnCaP * (sNrnCap*(savg-lwt) - (1-sNrnCap)*lwt)
+ hebb := rLearnCaP * (sNrnCap*(savg-lwt) - (1-sNrnCap)*lwt)
// setting delta weight (note: impossible to be CTCtxtPath)
dwt := Neurons[ri, di, RLRate] * pt.Learn.LRate.Eff * (pt.Hip.Hebb*hebb + pt.Hip.Err*err)
@@ -191,14 +191,14 @@ func (pt *PathParams) DWtSynBLA(ctx *Context, syni, si, ri, lpi, pi, di uint32)
dwt := float32(0)
ach := GlobalScalars[GvACh, di]
if GlobalScalars[GvHasRew, di] > 0 { // learn and reset
- ract := Neurons[ri, di, CaSpkD]
+ ract := Neurons[ri, di, CaD]
if ract < pt.Learn.Trace.LearnThr {
ract = 0
}
tr := SynapseTraces[syni, Tr, di]
ustr := pt.BLA.USTrace
tr = ustr*Neurons[si, di, Burst] + (1.0-ustr)*tr
- delta := Neurons[ri, di, CaSpkP] - Neurons[ri, di, SpkPrv]
+ delta := Neurons[ri, di, CaP] - Neurons[ri, di, SpkPrv]
if delta < 0 { // neg delta learns slower in Acq, not Ext
delta *= pt.BLA.NegDeltaLRate
}
@@ -253,7 +253,7 @@ func (pt *PathParams) DWtSynRWPred(ctx *Context, syni, si, ri, lpi, pi, di uint3
}
}
- dwt := da * Neurons[si, di, CaSpkP] // no recv unit activation
+ dwt := da * Neurons[si, di, CaP] // no recv unit activation
SynapseTraces[syni, DiDWt, di] = eff_lr * dwt
}
@@ -294,9 +294,9 @@ func (pt *PathParams) DWtSynVSMatrix(ctx *Context, syni, si, ri, lpi, pi, di uin
}
rlr := Neurons[ri, di, RLRate]
- rplus := Neurons[ri, di, CaSpkP]
- rminus := Neurons[ri, di, CaSpkD]
- sact := Neurons[si, di, CaSpkD]
+ rplus := Neurons[ri, di, CaP]
+ rminus := Neurons[ri, di, CaD]
+ sact := Neurons[si, di, CaD]
dtr := ach * (pt.Matrix.Delta * sact * (rplus - rminus))
if rminus > pt.Learn.Trace.LearnThr { // key: prevents learning if < threshold
dtr += ach * (pt.Matrix.Credit * sact * rminus)
@@ -331,9 +331,9 @@ func (pt *PathParams) DWtSynDSMatrix(ctx *Context, syni, si, ri, lpi, pi, di uin
SynapseTraces[syni, DTr, di] = 0.0
} else {
pfmod := pt.Matrix.BasePF + Neurons[ri, di, GModSyn]
- rplus := Neurons[ri, di, CaSpkP]
- rminus := Neurons[ri, di, CaSpkD]
- sact := Neurons[si, di, CaSpkD]
+ rplus := Neurons[ri, di, CaP]
+ rminus := Neurons[ri, di, CaD]
+ sact := Neurons[si, di, CaD]
dtr := rlr * (pt.Matrix.Delta * sact * (rplus - rminus))
if rminus > pt.Learn.Trace.LearnThr { // key: prevents learning if < threshold
dtr += rlr * (pt.Matrix.Credit * pfmod * sact * rminus)
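The Matrix trace updates in DWtSynVSMatrix and DWtSynDSMatrix share the same delta-plus-credit form under the new names. A compact sketch (hypothetical helper; mod stands for ach in the VS case and rlr in the DS case, where the DS credit term is additionally scaled by pfmod):

```go
// Illustrative delta + credit trace increment for the striatal Matrix pathways.
func matrixDTr(mod, delta, credit, learnThr, sact, rplus, rminus float32) float32 {
	dtr := mod * (delta * sact * (rplus - rminus)) // phasic CaP - CaD delta component
	if rminus > learnThr {                         // credit assignment only above threshold
		dtr += mod * (credit * sact * rminus)
	}
	return dtr
}
```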
diff --git a/axon/learn.go b/axon/learn.go
index 567da9480..db2e5627a 100644
--- a/axon/learn.go
+++ b/axon/learn.go
@@ -20,29 +20,29 @@ import (
//gosl:start
//gosl:import "github.com/emer/axon/v2/kinase"
-// CaLrnParams parameterizes the neuron-level calcium signals driving learning:
-// CaLrn = NMDA + VGCC Ca sources, where VGCC can be simulated from spiking or
+// LearnCaParams parameterizes the neuron-level calcium signals driving learning:
+// LearnCa = NMDA + VGCC Ca sources, where VGCC can be simulated from spiking or
// use the more complex and dynamic VGCC channel directly.
-// CaLrn is then integrated in a cascading manner at multiple time scales:
+// LearnCa is then integrated in a cascading manner at multiple time scales:
// CaM (as in calmodulin), CaP (ltP, CaMKII, plus phase), CaD (ltD, DAPK1, minus phase).
-type CaLrnParams struct {
+type LearnCaParams struct {
- // denomenator used for normalizing CaLrn, so the max is roughly 1 - 1.5 or so, which works best in terms of previous standard learning rules, and overall learning performance
+ // denominator used for normalizing LearnCa, so the max is roughly 1 to 1.5, which works best in terms of previous standard learning rules and overall learning performance
Norm float32 `default:"80"`
// use spikes to generate VGCC instead of actual VGCC current -- see SpkVGCCa for calcium contribution from each spike
SpkVGCC slbool.Bool `default:"true"`
- // multiplier on spike for computing Ca contribution to CaLrn in SpkVGCC mode
+ // multiplier on spike for computing Ca contribution to LearnCa in SpkVGCC mode
SpkVgccCa float32 `default:"35"`
- // time constant of decay for VgccCa calcium -- it is highly transient around spikes, so decay and diffusion factors are more important than for long-lasting NMDA factor. VgccCa is integrated separately int VgccCaInt prior to adding into NMDA Ca in CaLrn
+ // time constant of decay for VgccCa calcium -- it is highly transient around spikes, so decay and diffusion factors are more important than for the long-lasting NMDA factor. VgccCa is integrated separately into VgccCaInt prior to being added into the NMDA Ca in LearnCa
VgccTau float32 `default:"10"`
- // time constants for integrating CaLrn across M, P and D cascading levels
+ // time constants for integrating LearnCa across M, P and D cascading levels
Dt kinase.CaDtParams `display:"inline"`
- // Threshold on CaSpkP CaSpkD value for updating synapse-level Ca values (SynCa) -- this is purely a performance optimization that excludes random infrequent spikes -- 0.05 works well on larger networks but not smaller, which require the .01 default.
+ // Threshold on CaP and CaD values for updating synapse-level Ca values (SynCa) -- this is purely a performance optimization that excludes random infrequent spikes -- 0.05 works well on larger networks but not on smaller ones, which require the 0.01 default.
UpdateThr float32 `default:"0.01,0.02,0.5"`
// rate = 1 / tau
@@ -54,7 +54,7 @@ type CaLrnParams struct {
pad int32
}
-func (np *CaLrnParams) Defaults() {
+func (np *LearnCaParams) Defaults() {
np.Norm = 80
np.SpkVGCC.SetBool(true)
np.SpkVgccCa = 35
@@ -65,7 +65,7 @@ func (np *CaLrnParams) Defaults() {
np.Update()
}
-func (np *CaLrnParams) Update() {
+func (np *LearnCaParams) Update() {
np.Dt.Update()
np.VgccDt = 1 / np.VgccTau
np.NormInv = 1 / np.Norm
@@ -73,7 +73,7 @@ func (np *CaLrnParams) Update() {
// VgccCa updates the simulated VGCC calcium from spiking, if that option is selected,
// and performs time-integration of VgccCa
-func (np *CaLrnParams) VgccCaFromSpike(ctx *Context, ni, di uint32) {
+func (np *LearnCaParams) VgccCaFromSpike(ctx *Context, ni, di uint32) {
if np.SpkVGCC.IsTrue() {
Neurons.Set(np.SpkVgccCa*Neurons.Value(int(ni), int(di), int(Spike)), int(ni), int(di), int(VgccCa))
}
@@ -81,16 +81,16 @@ func (np *CaLrnParams) VgccCaFromSpike(ctx *Context, ni, di uint32) {
// Dt only affects decay, not rise time
}
-// CaLrns updates the CaLrn value and its cascaded values, based on NMDA, VGCC Ca
+// LearnCas updates the LearnCa value and its cascaded values, based on NMDA and VGCC Ca;
// it first calls VgccCa to update the spike-driven version of that variable, and
// perform its time-integration.
-func (np *CaLrnParams) CaLrns(ctx *Context, ni, di uint32) {
+func (np *LearnCaParams) LearnCas(ctx *Context, ni, di uint32) {
np.VgccCaFromSpike(ctx, ni, di)
- Neurons.Set(np.NormInv*(Neurons.Value(int(ni), int(di), int(NmdaCa))+Neurons.Value(int(ni), int(di), int(VgccCaInt))), int(ni), int(di), int(CaLrn))
- Neurons.SetAdd(np.Dt.MDt*(Neurons.Value(int(ni), int(di), int(CaLrn))-Neurons.Value(int(ni), int(di), int(NrnCaM))), int(ni), int(di), int(NrnCaM))
- Neurons.SetAdd(np.Dt.PDt*(Neurons.Value(int(ni), int(di), int(NrnCaM))-Neurons.Value(int(ni), int(di), int(NrnCaP))), int(ni), int(di), int(NrnCaP))
- Neurons.SetAdd(np.Dt.DDt*(Neurons.Value(int(ni), int(di), int(NrnCaP))-Neurons.Value(int(ni), int(di), int(NrnCaD))), int(ni), int(di), int(NrnCaD))
- Neurons.Set(Neurons.Value(int(ni), int(di), int(NrnCaP))-Neurons.Value(int(ni), int(di), int(NrnCaD)), int(ni), int(di), int(CaDiff))
+ Neurons.Set(np.NormInv*(Neurons.Value(int(ni), int(di), int(NmdaCa))+Neurons.Value(int(ni), int(di), int(VgccCaInt))), int(ni), int(di), int(LearnCa))
+ Neurons.SetAdd(np.Dt.MDt*(Neurons.Value(int(ni), int(di), int(LearnCa))-Neurons.Value(int(ni), int(di), int(LearnCaM))), int(ni), int(di), int(LearnCaM))
+ Neurons.SetAdd(np.Dt.PDt*(Neurons.Value(int(ni), int(di), int(LearnCaM))-Neurons.Value(int(ni), int(di), int(LearnCaP))), int(ni), int(di), int(LearnCaP))
+ Neurons.SetAdd(np.Dt.DDt*(Neurons.Value(int(ni), int(di), int(LearnCaP))-Neurons.Value(int(ni), int(di), int(LearnCaD))), int(ni), int(di), int(LearnCaD))
+ Neurons.Set(Neurons.Value(int(ni), int(di), int(LearnCaP))-Neurons.Value(int(ni), int(di), int(LearnCaD)), int(ni), int(di), int(CaDiff))
}
//////////////////////////////////////////////////////////////////////////////////////
@@ -157,7 +157,7 @@ func (ta *TrgAvgActParams) ShouldDisplay(field string) bool {
// RLRateParams
// RLRateParams are recv neuron learning rate modulation parameters.
-// Has two factors: the derivative of the sigmoid based on CaSpkD
+// Has two factors: the derivative of the sigmoid based on CaD
// activity levels, and based on the phase-wise differences in activity (Diff).
type RLRateParams struct {
@@ -176,11 +176,11 @@ type RLRateParams struct {
// modulate learning rate as a function of plus - minus differences
Diff slbool.Bool
- // threshold on Max(CaSpkP, CaSpkD) below which Min lrate applies.
+ // threshold on Max(CaP, CaD) below which Min lrate applies.
// must be > 0 to prevent div by zero.
SpkThr float32 `default:"0.1"`
- // threshold on recv neuron error delta, i.e., |CaSpkP - CaSpkD| below which lrate is at Min value
+ // threshold on recv neuron error delta, i.e., |CaP - CaD| below which lrate is at Min value
DiffThr float32 `default:"0.02"`
// for Diff component, minimum learning rate value when below ActDiffThr
@@ -218,7 +218,7 @@ func (rl *RLRateParams) ShouldDisplay(field string) bool {
// factor as a function of spiking activity, with mid-range values having
// full learning and extreme values a reduced learning rate:
// deriv = 4*act*(1-act) or linear: if act > .5: 2*(1-act); else 2*act
-// The activity should be CaSpkP and the layer maximum is used
+// The activity should be CaP and the layer maximum is used
// to normalize that to a 0-1 range.
func (rl *RLRateParams) RLRateSigDeriv(act float32, laymax float32) float32 {
if rl.On.IsFalse() || laymax == 0 {
@@ -242,7 +242,7 @@ func (rl *RLRateParams) RLRateSigDeriv(act float32, laymax float32) float32 {
}
// RLRateDiff returns the learning rate as a function of difference between
-// CaSpkP and CaSpkD values
+// CaP and CaD values
func (rl *RLRateParams) RLRateDiff(scap, scad float32) float32 {
if rl.On.IsFalse() || rl.Diff.IsFalse() {
return 1.0
@@ -262,8 +262,8 @@ func (rl *RLRateParams) RLRateDiff(scap, scad float32) float32 {
// This is mainly the running average activations that drive learning
type LearnNeurParams struct {
- // parameterizes the neuron-level calcium signals driving learning: CaLrn = NMDA + VGCC Ca sources, where VGCC can be simulated from spiking or use the more complex and dynamic VGCC channel directly. CaLrn is then integrated in a cascading manner at multiple time scales: CaM (as in calmodulin), CaP (ltP, CaMKII, plus phase), CaD (ltD, DAPK1, minus phase).
- CaLearn CaLrnParams `display:"inline"`
+ // parameterizes the neuron-level calcium signals driving learning: LearnCa = NMDA + VGCC Ca sources, where VGCC can be simulated from spiking or use the more complex and dynamic VGCC channel directly. LearnCa is then integrated in a cascading manner at multiple time scales: CaM (as in calmodulin), CaP (ltP, CaMKII, plus phase), CaD (ltD, DAPK1, minus phase).
+ CaLearn LearnCaParams `display:"inline"`
// parameterizes the neuron-level spike-driven calcium signals, starting with CaSyn that is integrated at the neuron level, and drives synapse-level, pre * post Ca integration, which provides the Tr trace that multiplies error signals, and drives learning directly for Target layers. CaSpk* values are integrated separately at the Neuron level and used for UpdateThr and RLRate as a proxy for the activation (spiking) based learning signal.
CaSpk kinase.NeurCaParams `display:"inline"`
@@ -274,7 +274,7 @@ type LearnNeurParams struct {
// synaptic scaling parameters for regulating overall average activity compared to neuron's own target level
TrgAvgAct TrgAvgActParams `display:"inline"`
- // recv neuron learning rate modulation params -- an additional error-based modulation of learning for receiver side: RLRate = |CaSpkP - CaSpkD| / Max(CaSpkP, CaSpkD)
+ // recv neuron learning rate modulation params -- an additional error-based modulation of learning for receiver side: RLRate = |CaP - CaD| / Max(CaP, CaD)
RLRate RLRateParams `display:"inline"`
// neuromodulation effects on learning rate and activity, as a function of layer-level DA and ACh values, which are updated from global Context values, and computed from reinforcement learning algorithms
@@ -301,7 +301,7 @@ func (ln *LearnNeurParams) Defaults() {
ln.NeuroMod.Defaults()
}
-// InitCaLrnSpk initializes the neuron-level calcium learning and spking variables.
+// InitLearnCaSpk initializes the neuron-level calcium learning and spiking variables.
// Called by InitWeights (at start of learning).
func (ln *LearnNeurParams) InitNeurCa(ctx *Context, ni, di uint32) {
Neurons.Set(0, int(ni), int(di), int(GnmdaLrn))
@@ -310,16 +310,15 @@ func (ln *LearnNeurParams) InitNeurCa(ctx *Context, ni, di uint32) {
Neurons.Set(0, int(ni), int(di), int(VgccCa))
Neurons.Set(0, int(ni), int(di), int(VgccCaInt))
- Neurons.Set(0, int(ni), int(di), int(CaLrn))
+ Neurons.Set(0, int(ni), int(di), int(LearnCa))
- Neurons.Set(0, int(ni), int(di), int(CaSpkM))
- Neurons.Set(0, int(ni), int(di), int(CaSpkP))
- Neurons.Set(0, int(ni), int(di), int(CaSpkD))
- Neurons.Set(0, int(ni), int(di), int(CaSpkPM))
+ Neurons.Set(0, int(ni), int(di), int(CaM))
+ Neurons.Set(0, int(ni), int(di), int(CaP))
+ Neurons.Set(0, int(ni), int(di), int(CaD))
- Neurons.Set(0, int(ni), int(di), int(NrnCaM))
- Neurons.Set(0, int(ni), int(di), int(NrnCaP))
- Neurons.Set(0, int(ni), int(di), int(NrnCaD))
+ Neurons.Set(0, int(ni), int(di), int(LearnCaM))
+ Neurons.Set(0, int(ni), int(di), int(LearnCaP))
+ Neurons.Set(0, int(ni), int(di), int(LearnCaD))
Neurons.Set(0, int(ni), int(di), int(CaDiff))
}
@@ -335,19 +334,19 @@ func (ln *LearnNeurParams) LrnNMDAFromRaw(ctx *Context, ni, di uint32, geTot flo
Neurons.Set(float32(gnmda*ln.LrnNMDA.CaFromV(vmd)), int(ni), int(di), int(NmdaCa))
}
-// CaFromSpike updates all spike-driven calcium variables, including CaLrn and CaSpk.
+// CaFromSpike updates all spike-driven calcium variables, including LearnCa and CaSpk.
// Computed after new activation for current cycle is updated.
func (ln *LearnNeurParams) CaFromSpike(ctx *Context, ni, di uint32) {
var caSyn float32
- caSpkM := Neurons.Value(int(ni), int(di), int(CaSpkM))
- caSpkP := Neurons.Value(int(ni), int(di), int(CaSpkP))
- caSpkD := Neurons.Value(int(ni), int(di), int(CaSpkD))
+ caSpkM := Neurons.Value(int(ni), int(di), int(CaM))
+ caSpkP := Neurons.Value(int(ni), int(di), int(CaP))
+ caSpkD := Neurons.Value(int(ni), int(di), int(CaD))
ln.CaSpk.CaFromSpike(Neurons.Value(int(ni), int(di), int(Spike)), &caSyn, &caSpkM, &caSpkP, &caSpkD)
- Neurons.Set(caSpkM, int(ni), int(di), int(CaSpkM))
- Neurons.Set(caSpkP, int(ni), int(di), int(CaSpkP))
- Neurons.Set(caSpkD, int(ni), int(di), int(CaSpkD))
+ Neurons.Set(caSpkM, int(ni), int(di), int(CaM))
+ Neurons.Set(caSpkP, int(ni), int(di), int(CaP))
+ Neurons.Set(caSpkD, int(ni), int(di), int(CaD))
- ln.CaLearn.CaLrns(ctx, ni, di)
+ ln.CaLearn.LearnCas(ctx, ni, di)
}
///////////////////////////////////////////////////////////////////////
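The renamed LearnCas cascade amounts to three chained exponential integrators over the normalized NMDA + VGCC calcium. A self-contained sketch under hypothetical names (the real state lives in the Neurons tensor and the rate constants come from kinase.CaDtParams):

```go
// Illustrative version of the LearnCa cascade; type and method names are hypothetical.
type learnCaState struct {
	LearnCa, CaM, CaP, CaD, CaDiff float32
}

func (s *learnCaState) step(nmdaCa, vgccCaInt, normInv, mDt, pDt, dDt float32) {
	s.LearnCa = normInv * (nmdaCa + vgccCaInt) // combined NMDA + VGCC Ca, normalized by 1/Norm
	s.CaM += mDt * (s.LearnCa - s.CaM)         // calmodulin-like fast integration
	s.CaP += pDt * (s.CaM - s.CaP)             // CaMKII-like plus-phase (LTP) signal
	s.CaD += dDt * (s.CaP - s.CaD)             // DAPK1-like minus-phase (LTD) signal
	s.CaDiff = s.CaP - s.CaD                   // plus - minus difference used for learning
}
```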
diff --git a/axon/learn.goal b/axon/learn.goal
index 28724da3b..3c8378497 100644
--- a/axon/learn.goal
+++ b/axon/learn.goal
@@ -18,29 +18,29 @@ import (
//gosl:start
//gosl:import "github.com/emer/axon/v2/kinase"
-// CaLrnParams parameterizes the neuron-level calcium signals driving learning:
-// CaLrn = NMDA + VGCC Ca sources, where VGCC can be simulated from spiking or
+// LearnCaParams parameterizes the neuron-level calcium signals driving learning:
+// LearnCa = NMDA + VGCC Ca sources, where VGCC can be simulated from spiking or
// use the more complex and dynamic VGCC channel directly.
-// CaLrn is then integrated in a cascading manner at multiple time scales:
+// LearnCa is then integrated in a cascading manner at multiple time scales:
// CaM (as in calmodulin), CaP (ltP, CaMKII, plus phase), CaD (ltD, DAPK1, minus phase).
-type CaLrnParams struct {
+type LearnCaParams struct {
- // denomenator used for normalizing CaLrn, so the max is roughly 1 - 1.5 or so, which works best in terms of previous standard learning rules, and overall learning performance
+ // denominator used for normalizing LearnCa, so the max is roughly 1 to 1.5, which works best in terms of previous standard learning rules and overall learning performance
Norm float32 `default:"80"`
// use spikes to generate VGCC instead of actual VGCC current -- see SpkVGCCa for calcium contribution from each spike
SpkVGCC slbool.Bool `default:"true"`
- // multiplier on spike for computing Ca contribution to CaLrn in SpkVGCC mode
+ // multiplier on spike for computing Ca contribution to LearnCa in SpkVGCC mode
SpkVgccCa float32 `default:"35"`
- // time constant of decay for VgccCa calcium -- it is highly transient around spikes, so decay and diffusion factors are more important than for long-lasting NMDA factor. VgccCa is integrated separately int VgccCaInt prior to adding into NMDA Ca in CaLrn
+ // time constant of decay for VgccCa calcium -- it is highly transient around spikes, so decay and diffusion factors are more important than for the long-lasting NMDA factor. VgccCa is integrated separately into VgccCaInt prior to being added into the NMDA Ca in LearnCa
VgccTau float32 `default:"10"`
- // time constants for integrating CaLrn across M, P and D cascading levels
+ // time constants for integrating LearnCa across M, P and D cascading levels
Dt kinase.CaDtParams `display:"inline"`
- // Threshold on CaSpkP CaSpkD value for updating synapse-level Ca values (SynCa) -- this is purely a performance optimization that excludes random infrequent spikes -- 0.05 works well on larger networks but not smaller, which require the .01 default.
+ // Threshold on CaP and CaD values for updating synapse-level Ca values (SynCa) -- this is purely a performance optimization that excludes random infrequent spikes -- 0.05 works well on larger networks but not on smaller ones, which require the 0.01 default.
UpdateThr float32 `default:"0.01,0.02,0.5"`
// rate = 1 / tau
@@ -52,7 +52,7 @@ type CaLrnParams struct {
pad int32
}
-func (np *CaLrnParams) Defaults() {
+func (np *LearnCaParams) Defaults() {
np.Norm = 80
np.SpkVGCC.SetBool(true)
np.SpkVgccCa = 35
@@ -63,7 +63,7 @@ func (np *CaLrnParams) Defaults() {
np.Update()
}
-func (np *CaLrnParams) Update() {
+func (np *LearnCaParams) Update() {
np.Dt.Update()
np.VgccDt = 1 / np.VgccTau
np.NormInv = 1 / np.Norm
@@ -71,7 +71,7 @@ func (np *CaLrnParams) Update() {
// VgccCa updates the simulated VGCC calcium from spiking, if that option is selected,
// and performs time-integration of VgccCa
-func (np *CaLrnParams) VgccCaFromSpike(ctx *Context, ni, di uint32) {
+func (np *LearnCaParams) VgccCaFromSpike(ctx *Context, ni, di uint32) {
if np.SpkVGCC.IsTrue() {
Neurons[ni, di, VgccCa] = np.SpkVgccCa * Neurons[ni, di, Spike]
}
@@ -79,16 +79,16 @@ func (np *CaLrnParams) VgccCaFromSpike(ctx *Context, ni, di uint32) {
// Dt only affects decay, not rise time
}
-// CaLrns updates the CaLrn value and its cascaded values, based on NMDA, VGCC Ca
+// LearnCas updates the LearnCa value and its cascaded values, based on NMDA and VGCC Ca;
// it first calls VgccCa to update the spike-driven version of that variable, and
// perform its time-integration.
-func (np *CaLrnParams) CaLrns(ctx *Context, ni, di uint32) {
+func (np *LearnCaParams) LearnCas(ctx *Context, ni, di uint32) {
np.VgccCaFromSpike(ctx, ni, di)
- Neurons[ni, di, CaLrn] = np.NormInv * (Neurons[ni, di, NmdaCa] + Neurons[ni, di, VgccCaInt])
- Neurons[ni, di, NrnCaM] += np.Dt.MDt * (Neurons[ni, di, CaLrn] - Neurons[ni, di, NrnCaM])
- Neurons[ni, di, NrnCaP] += np.Dt.PDt * (Neurons[ni, di, NrnCaM] - Neurons[ni, di, NrnCaP])
- Neurons[ni, di, NrnCaD] += np.Dt.DDt * (Neurons[ni, di, NrnCaP] - Neurons[ni, di, NrnCaD])
- Neurons[ni, di, CaDiff] = Neurons[ni, di, NrnCaP] - Neurons[ni, di, NrnCaD]
+ Neurons[ni, di, LearnCa] = np.NormInv * (Neurons[ni, di, NmdaCa] + Neurons[ni, di, VgccCaInt])
+ Neurons[ni, di, LearnCaM] += np.Dt.MDt * (Neurons[ni, di, LearnCa] - Neurons[ni, di, LearnCaM])
+ Neurons[ni, di, LearnCaP] += np.Dt.PDt * (Neurons[ni, di, LearnCaM] - Neurons[ni, di, LearnCaP])
+ Neurons[ni, di, LearnCaD] += np.Dt.DDt * (Neurons[ni, di, LearnCaP] - Neurons[ni, di, LearnCaD])
+ Neurons[ni, di, CaDiff] = Neurons[ni, di, LearnCaP] - Neurons[ni, di, LearnCaD]
}
//////////////////////////////////////////////////////////////////////////////////////
@@ -155,7 +155,7 @@ func (ta *TrgAvgActParams) ShouldDisplay(field string) bool {
// RLRateParams
// RLRateParams are recv neuron learning rate modulation parameters.
-// Has two factors: the derivative of the sigmoid based on CaSpkD
+// Has two factors: the derivative of the sigmoid based on CaD
// activity levels, and based on the phase-wise differences in activity (Diff).
type RLRateParams struct {
@@ -174,11 +174,11 @@ type RLRateParams struct {
// modulate learning rate as a function of plus - minus differences
Diff slbool.Bool
- // threshold on Max(CaSpkP, CaSpkD) below which Min lrate applies.
+ // threshold on Max(CaP, CaD) below which Min lrate applies.
// must be > 0 to prevent div by zero.
SpkThr float32 `default:"0.1"`
- // threshold on recv neuron error delta, i.e., |CaSpkP - CaSpkD| below which lrate is at Min value
+ // threshold on recv neuron error delta, i.e., |CaP - CaD| below which lrate is at Min value
DiffThr float32 `default:"0.02"`
// for Diff component, minimum learning rate value when below ActDiffThr
@@ -216,7 +216,7 @@ func (rl *RLRateParams) ShouldDisplay(field string) bool {
// factor as a function of spiking activity, with mid-range values having
// full learning and extreme values a reduced learning rate:
// deriv = 4*act*(1-act) or linear: if act > .5: 2*(1-act); else 2*act
-// The activity should be CaSpkP and the layer maximum is used
+// The activity should be CaP and the layer maximum is used
// to normalize that to a 0-1 range.
func (rl *RLRateParams) RLRateSigDeriv(act float32, laymax float32) float32 {
if rl.On.IsFalse() || laymax == 0 {
@@ -240,7 +240,7 @@ func (rl *RLRateParams) RLRateSigDeriv(act float32, laymax float32) float32 {
}
// RLRateDiff returns the learning rate as a function of difference between
-// CaSpkP and CaSpkD values
+// CaP and CaD values
func (rl *RLRateParams) RLRateDiff(scap, scad float32) float32 {
if rl.On.IsFalse() || rl.Diff.IsFalse() {
return 1.0
@@ -260,8 +260,8 @@ func (rl *RLRateParams) RLRateDiff(scap, scad float32) float32 {
// This is mainly the running average activations that drive learning
type LearnNeurParams struct {
- // parameterizes the neuron-level calcium signals driving learning: CaLrn = NMDA + VGCC Ca sources, where VGCC can be simulated from spiking or use the more complex and dynamic VGCC channel directly. CaLrn is then integrated in a cascading manner at multiple time scales: CaM (as in calmodulin), CaP (ltP, CaMKII, plus phase), CaD (ltD, DAPK1, minus phase).
- CaLearn CaLrnParams `display:"inline"`
+ // parameterizes the neuron-level calcium signals driving learning: LearnCa = NMDA + VGCC Ca sources, where VGCC can be simulated from spiking or use the more complex and dynamic VGCC channel directly. LearnCa is then integrated in a cascading manner at multiple time scales: CaM (as in calmodulin), CaP (ltP, CaMKII, plus phase), CaD (ltD, DAPK1, minus phase).
+ CaLearn LearnCaParams `display:"inline"`
// parameterizes the neuron-level spike-driven calcium signals, starting with CaSyn that is integrated at the neuron level, and drives synapse-level, pre * post Ca integration, which provides the Tr trace that multiplies error signals, and drives learning directly for Target layers. CaSpk* values are integrated separately at the Neuron level and used for UpdateThr and RLRate as a proxy for the activation (spiking) based learning signal.
CaSpk kinase.NeurCaParams `display:"inline"`
@@ -272,7 +272,7 @@ type LearnNeurParams struct {
// synaptic scaling parameters for regulating overall average activity compared to neuron's own target level
TrgAvgAct TrgAvgActParams `display:"inline"`
- // recv neuron learning rate modulation params -- an additional error-based modulation of learning for receiver side: RLRate = |CaSpkP - CaSpkD| / Max(CaSpkP, CaSpkD)
+ // recv neuron learning rate modulation params -- an additional error-based modulation of learning for receiver side: RLRate = |CaP - CaD| / Max(CaP, CaD)
RLRate RLRateParams `display:"inline"`
// neuromodulation effects on learning rate and activity, as a function of layer-level DA and ACh values, which are updated from global Context values, and computed from reinforcement learning algorithms
@@ -299,7 +299,7 @@ func (ln *LearnNeurParams) Defaults() {
ln.NeuroMod.Defaults()
}
-// InitCaLrnSpk initializes the neuron-level calcium learning and spking variables.
+// InitLearnCaSpk initializes the neuron-level calcium learning and spiking variables.
// Called by InitWeights (at start of learning).
func (ln *LearnNeurParams) InitNeurCa(ctx *Context, ni, di uint32) {
Neurons[ni, di, GnmdaLrn] = 0
@@ -308,16 +308,15 @@ func (ln *LearnNeurParams) InitNeurCa(ctx *Context, ni, di uint32) {
Neurons[ni, di, VgccCa] = 0
Neurons[ni, di, VgccCaInt] = 0
- Neurons[ni, di, CaLrn] = 0
+ Neurons[ni, di, LearnCa] = 0
- Neurons[ni, di, CaSpkM] = 0
- Neurons[ni, di, CaSpkP] = 0
- Neurons[ni, di, CaSpkD] = 0
- Neurons[ni, di, CaSpkPM] = 0
+ Neurons[ni, di, CaM] = 0
+ Neurons[ni, di, CaP] = 0
+ Neurons[ni, di, CaD] = 0
- Neurons[ni, di, NrnCaM] = 0
- Neurons[ni, di, NrnCaP] = 0
- Neurons[ni, di, NrnCaD] = 0
+ Neurons[ni, di, LearnCaM] = 0
+ Neurons[ni, di, LearnCaP] = 0
+ Neurons[ni, di, LearnCaD] = 0
Neurons[ni, di, CaDiff] = 0
}
@@ -333,19 +332,19 @@ func (ln *LearnNeurParams) LrnNMDAFromRaw(ctx *Context, ni, di uint32, geTot flo
Neurons[ni, di, NmdaCa] = float32(gnmda * ln.LrnNMDA.CaFromV(vmd))
}
-// CaFromSpike updates all spike-driven calcium variables, including CaLrn and CaSpk.
+// CaFromSpike updates all spike-driven calcium variables, including LearnCa and CaSpk.
// Computed after new activation for current cycle is updated.
func (ln *LearnNeurParams) CaFromSpike(ctx *Context, ni, di uint32) {
var caSyn float32
- caSpkM := Neurons[ni, di, CaSpkM]
- caSpkP := Neurons[ni, di, CaSpkP]
- caSpkD := Neurons[ni, di, CaSpkD]
+ caSpkM := Neurons[ni, di, CaM]
+ caSpkP := Neurons[ni, di, CaP]
+ caSpkD := Neurons[ni, di, CaD]
ln.CaSpk.CaFromSpike(Neurons[ni, di, Spike], &caSyn, &caSpkM, &caSpkP, &caSpkD)
- Neurons[ni, di, CaSpkM] = caSpkM
- Neurons[ni, di, CaSpkP] = caSpkP
- Neurons[ni, di, CaSpkD] = caSpkD
+ Neurons[ni, di, CaM] = caSpkM
+ Neurons[ni, di, CaP] = caSpkP
+ Neurons[ni, di, CaD] = caSpkD
- ln.CaLearn.CaLrns(ctx, ni, di)
+ ln.CaLearn.LearnCas(ctx, ni, di)
}
///////////////////////////////////////////////////////////////////////
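The RLRate comments above describe two multiplicative factors on the receiver learning rate. A simplified, self-contained sketch (hypothetical function; the real implementation applies the SpkThr, DiffThr, and Min floors that are omitted here):

```go
// Illustrative receiver learning-rate modulation: sigmoid-derivative factor times
// the phase-difference factor |CaP - CaD| / Max(CaP, CaD). Threshold floors are omitted.
func rlRateExample(caP, caD, layMaxCaP float32) float32 {
	act := caP / layMaxCaP          // CaP normalized by the layer maximum
	sigDeriv := 4 * act * (1 - act) // mid-range activity gets the full learning rate
	mx := caP
	if caD > mx {
		mx = caD
	}
	diff := caP - caD
	if diff < 0 {
		diff = -diff
	}
	return sigDeriv * diff / mx
}
```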
diff --git a/axon/logging.go b/axon/logging.go
index f02588ba6..d64bd19e9 100644
--- a/axon/logging.go
+++ b/axon/logging.go
@@ -127,180 +127,6 @@ func LogAddGlobals(lg *elog.Logs, ctx *Context, mode etime.Modes, times ...etime
}
}
-// LogAddDiagnosticItems adds standard Axon diagnostic statistics to given logs,
-// across the given time levels, in higher to lower order, e.g., Epoch, Trial
-// These are useful for tuning and diagnosing the behavior of the network.
-func LogAddDiagnosticItems(lg *elog.Logs, layerNames []string, mode etime.Modes, times ...etime.Times) {
- ntimes := len(times)
- for _, lnm := range layerNames {
- clnm := lnm
- itm := lg.AddItem(&elog.Item{
- Name: clnm + "_ActMAvg",
- Type: reflect.Float64,
- FixMax: false,
- Range: minmax.F32{Max: 1},
- Write: elog.WriteMap{
- etime.Scope(mode, times[ntimes-1]): func(ctx *elog.Context) {
- ly := ctx.Layer(clnm).(*Layer)
- ctx.SetFloat32(ly.Pool(0, uint32(ctx.Di)).AvgMax.Act.Minus.Avg)
- }}})
- lg.AddStdAggs(itm, mode, times...)
-
- itm = lg.AddItem(&elog.Item{
- Name: clnm + "_ActMMax",
- Type: reflect.Float64,
- FixMax: false,
- Range: minmax.F32{Max: 1},
- Write: elog.WriteMap{
- etime.Scope(mode, times[ntimes-1]): func(ctx *elog.Context) {
- ly := ctx.Layer(clnm).(*Layer)
- ctx.SetFloat32(ly.Pool(0, uint32(ctx.Di)).AvgMax.Act.Minus.Max)
- }}})
- lg.AddStdAggs(itm, mode, times...)
-
- itm = lg.AddItem(&elog.Item{
- Name: clnm + "_MaxGeM",
- Type: reflect.Float64,
- Range: minmax.F32{Max: 1},
- Write: elog.WriteMap{
- etime.Scope(mode, times[ntimes-1]): func(ctx *elog.Context) {
- ly := ctx.Layer(clnm).(*Layer)
- ctx.SetFloat32(ly.Pool(0, uint32(ctx.Di)).AvgMax.GeInt.Minus.Max)
- }, etime.Scope(mode, times[ntimes-2]): func(ctx *elog.Context) {
- ly := ctx.Layer(clnm).(*Layer)
- ctx.SetFloat32(ly.LayerValues(uint32(ctx.Di)).ActAvg.AvgMaxGeM)
- }}})
- lg.AddStdAggs(itm, mode, times[:ntimes-1]...)
-
- itm = lg.AddItem(&elog.Item{
- Name: clnm + "_CorDiff",
- Type: reflect.Float64,
- Range: minmax.F32{Max: 1},
- Write: elog.WriteMap{
- etime.Scope(etime.Train, times[ntimes-1]): func(ctx *elog.Context) {
- ly := ctx.Layer(clnm).(*Layer)
- ctx.SetFloat32(1.0 - ly.LayerValues(uint32(ctx.Di)).PhaseDiff.Cor)
- }}})
- lg.AddStdAggs(itm, mode, times...)
-
- itm = lg.AddItem(&elog.Item{
- Name: clnm + "_GiMult",
- Type: reflect.Float64,
- Range: minmax.F32{Max: 1},
- Write: elog.WriteMap{
- etime.Scope(etime.Train, times[ntimes-1]): func(ctx *elog.Context) {
- ly := ctx.Layer(clnm).(*Layer)
- ctx.SetFloat32(ly.LayerValues(uint32(ctx.Di)).ActAvg.GiMult)
- }}})
- lg.AddStdAggs(itm, mode, times...)
- }
-}
-
-func LogInputLayer(lg *elog.Logs, net *Network, mode etime.Modes) {
- // input layer average activity -- important for tuning
- layerNames := net.LayersByType(InputLayer)
- for _, lnm := range layerNames {
- clnm := lnm
- lg.AddItem(&elog.Item{
- Name: clnm + "_ActAvg",
- Type: reflect.Float64,
- FixMax: true,
- Range: minmax.F32{Max: 1},
- Write: elog.WriteMap{
- etime.Scope(etime.Train, etime.Epoch): func(ctx *elog.Context) {
- ly := ctx.Layer(clnm).(*Layer)
- ctx.SetFloat32(ly.LayerValues(uint32(ctx.Di)).ActAvg.ActMAvg)
- }}})
- }
-}
-
-// LogAddPCAItems adds PCA statistics to log for Hidden and Target layers
-// across the given time levels, in higher to lower order, e.g., Run, Epoch, Trial
-// These are useful for diagnosing the behavior of the network.
-func LogAddPCAItems(lg *elog.Logs, net *Network, mode etime.Modes, times ...etime.Times) {
- ntimes := len(times)
- layers := net.LayersByType(SuperLayer, TargetLayer, CTLayer, PTPredLayer)
- for _, lnm := range layers {
- clnm := lnm
- cly := net.LayerByName(clnm)
- lg.AddItem(&elog.Item{
- Name: clnm + "_ActM",
- Type: reflect.Float64,
- CellShape: cly.GetSampleShape().Sizes,
- FixMax: true,
- Range: minmax.F32{Max: 1},
- Write: elog.WriteMap{
- etime.Scope(etime.Analyze, times[ntimes-1]): func(ctx *elog.Context) {
- ctx.SetLayerSampleTensor(clnm, "ActM")
- }, etime.Scope(etime.Test, times[ntimes-1]): func(ctx *elog.Context) {
- ctx.SetLayerSampleTensor(clnm, "ActM")
- }}})
- itm := lg.AddItem(&elog.Item{
- Name: clnm + "_PCA_NStrong",
- Type: reflect.Float64,
- Write: elog.WriteMap{
- etime.Scope(etime.Train, times[ntimes-2]): func(ctx *elog.Context) {
- ctx.SetStatFloat(ctx.Item.Name)
- }}})
- lg.AddStdAggs(itm, mode, times[:ntimes-1]...)
-
- itm = lg.AddItem(&elog.Item{
- Name: clnm + "_PCA_Top5",
- Type: reflect.Float64,
- Write: elog.WriteMap{
- etime.Scope(etime.Train, times[ntimes-2]): func(ctx *elog.Context) {
- ctx.SetStatFloat(ctx.Item.Name)
- }}})
- lg.AddStdAggs(itm, mode, times[:ntimes-1]...)
-
- itm = lg.AddItem(&elog.Item{
- Name: clnm + "_PCA_Next5",
- Type: reflect.Float64,
- Write: elog.WriteMap{
- etime.Scope(etime.Train, times[ntimes-2]): func(ctx *elog.Context) {
- ctx.SetStatFloat(ctx.Item.Name)
- }}})
- lg.AddStdAggs(itm, mode, times[:ntimes-1]...)
-
- itm = lg.AddItem(&elog.Item{
- Name: clnm + "_PCA_Rest",
- Type: reflect.Float64,
- Write: elog.WriteMap{
- etime.Scope(etime.Train, times[ntimes-2]): func(ctx *elog.Context) {
- ctx.SetStatFloat(ctx.Item.Name)
- }}})
- lg.AddStdAggs(itm, mode, times[:ntimes-1]...)
- }
-}
-
-// LogAddLayerGeActAvgItems adds Ge and Act average items for Hidden and Target layers
-// for given mode and time (e.g., Test, Cycle)
-// These are useful for monitoring layer activity during testing.
-func LogAddLayerGeActAvgItems(lg *elog.Logs, net *Network, mode etime.Modes, etm etime.Times) {
- layers := net.LayersByType(SuperLayer, TargetLayer)
- for _, lnm := range layers {
- clnm := lnm
- lg.AddItem(&elog.Item{
- Name: clnm + "_Ge.Avg",
- Type: reflect.Float64,
- Range: minmax.F32{Max: 1},
- Write: elog.WriteMap{
- etime.Scope(mode, etm): func(ctx *elog.Context) {
- ly := ctx.Layer(clnm).(*Layer)
- ctx.SetFloat32(ly.AvgMaxVarByPool(net.Context(), "Ge", 0, ctx.Di).Avg)
- }}})
- lg.AddItem(&elog.Item{
- Name: clnm + "_Act.Avg",
- Type: reflect.Float64,
- Range: minmax.F32{Max: 1},
- Write: elog.WriteMap{
- etime.Scope(mode, etm): func(ctx *elog.Context) {
- ly := ctx.Layer(clnm).(*Layer)
- ctx.SetFloat32(ly.AvgMaxVarByPool(net.Context(), "Act", 0, ctx.Di).Avg)
- }}})
- }
-}
-
// LogAddExtraDiagnosticItems adds extra Axon diagnostic statistics to given logs,
// across the given time levels, in higher to lower order, e.g., Epoch, Trial
// These are useful for tuning and diagnosing the behavior of the network.
@@ -310,26 +136,26 @@ func LogAddExtraDiagnosticItems(lg *elog.Logs, mode etime.Modes, net *Network, t
for _, lnm := range layers {
clnm := lnm
itm := lg.AddItem(&elog.Item{
- Name: clnm + "_CaSpkPMinusAvg",
+ Name: clnm + "_CaPMinusAvg",
Type: reflect.Float64,
FixMax: false,
Range: minmax.F32{Max: 1},
Write: elog.WriteMap{
etime.Scope(mode, times[ntimes-1]): func(ctx *elog.Context) {
ly := ctx.Layer(clnm).(*Layer)
- ctx.SetFloat32(ly.Pool(0, uint32(ctx.Di)).AvgMax.CaSpkP.Minus.Avg)
+ ctx.SetFloat32(ly.Pool(0, uint32(ctx.Di)).AvgMax.CaP.Minus.Avg)
}}})
lg.AddStdAggs(itm, mode, times...)
itm = lg.AddItem(&elog.Item{
- Name: clnm + "_CaSpkPMinusMax",
+ Name: clnm + "_CaPMinusMax",
Type: reflect.Float64,
FixMax: false,
Range: minmax.F32{Max: 1},
Write: elog.WriteMap{
etime.Scope(mode, times[1]): func(ctx *elog.Context) {
ly := ctx.Layer(clnm).(*Layer)
- ctx.SetFloat32(ly.Pool(0, uint32(ctx.Di)).AvgMax.CaSpkP.Minus.Max)
+ ctx.SetFloat32(ly.Pool(0, uint32(ctx.Di)).AvgMax.CaP.Minus.Max)
}}})
lg.AddStdAggs(itm, mode, times...)
@@ -354,11 +180,11 @@ func LogAddExtraDiagnosticItems(lg *elog.Logs, mode etime.Modes, net *Network, t
}
}
-// LogAddCaLrnDiagnosticItems adds standard Axon diagnostic statistics to given logs,
+// LogAddLearnCaDiagnosticItems adds standard Axon diagnostic statistics to given logs,
// across the given time levels, in higher to lower order, e.g., Epoch, Trial
// These were useful for the development of the Ca-based "trace" learning rule
// that directly uses NMDA and VGCC-like spiking Ca
-func LogAddCaLrnDiagnosticItems(lg *elog.Logs, mode etime.Modes, net *Network, times ...etime.Times) {
+func LogAddLearnCaDiagnosticItems(lg *elog.Logs, mode etime.Modes, net *Network, times ...etime.Times) {
ntimes := len(times)
layers := net.LayersByType(SuperLayer, TargetLayer)
for _, lnm := range layers {
@@ -429,25 +255,25 @@ func LogAddCaLrnDiagnosticItems(lg *elog.Logs, mode etime.Modes, net *Network, t
lg.AddStdAggs(itm, mode, times...)
itm = lg.AddItem(&elog.Item{
- Name: clnm + "_AvgCaLrn",
+ Name: clnm + "_AvgLearnCa",
Type: reflect.Float64,
Range: minmax.F32{Max: 1},
FixMin: true,
Write: elog.WriteMap{
etime.Scope(etime.Train, times[ntimes-1]): func(ctx *elog.Context) {
- tsr := ctx.GetLayerSampleTensor(clnm, "CaLrn")
+ tsr := ctx.GetLayerSampleTensor(clnm, "LearnCa")
ctx.SetFloat64(stats.Mean(tsr))
}}})
lg.AddStdAggs(itm, mode, times...)
itm = lg.AddItem(&elog.Item{
- Name: clnm + "_MaxCaLrn",
+ Name: clnm + "_MaxLearnCa",
Type: reflect.Float64,
Range: minmax.F32{Max: 1},
FixMin: true,
Write: elog.WriteMap{
etime.Scope(etime.Train, times[ntimes-1]): func(ctx *elog.Context) {
- tsr := ctx.GetLayerSampleTensor(clnm, "CaLrn")
+ tsr := ctx.GetLayerSampleTensor(clnm, "LearnCa")
ctx.SetFloat64(stats.Max(tsr))
}}})
lg.AddStdAggs(itm, mode, times...)
@@ -491,13 +317,13 @@ func LogAddCaLrnDiagnosticItems(lg *elog.Logs, mode etime.Modes, net *Network, t
lg.AddStdAggs(itm, mode, times...)
itm = lg.AddItem(&elog.Item{
- Name: clnm + "_AvgCaSpkD",
+ Name: clnm + "_AvgCaD",
Type: reflect.Float64,
Range: minmax.F32{Max: 1},
FixMin: true,
Write: elog.WriteMap{
etime.Scope(etime.Train, times[ntimes-1]): func(ctx *elog.Context) {
- tsr := ctx.GetLayerSampleTensor(clnm, "CaSpkD")
+ tsr := ctx.GetLayerSampleTensor(clnm, "CaD")
avg := stats.Mean(tsr)
ctx.SetFloat64(avg)
}}})
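
The diagnostic logging helpers keep the same signatures under the new Ca-based names. A minimal usage sketch, assuming a simulation that already has the usual elog.Logs (lg), axon.Network (net), and etime imports in scope; the Train/Epoch/Trial choices are just the typical pairing mentioned in the doc comments, not required values:

```go
// Hypothetical ConfigLogs wiring using the renamed helpers; lg is the
// sim's *elog.Logs and net its *axon.Network (both assumed).
axon.LogAddExtraDiagnosticItems(lg, etime.Train, net, etime.Epoch, etime.Trial)
axon.LogAddLearnCaDiagnosticItems(lg, etime.Train, net, etime.Epoch, etime.Trial)
```
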
diff --git a/axon/looper.go b/axon/looper.go
index 7e3998a58..29aca9b10 100644
--- a/axon/looper.go
+++ b/axon/looper.go
@@ -22,25 +22,12 @@ import (
// between these two calls if it is visible and viewing synapse variables.
// - netview update calls at appropriate levels (no-op if no GUI)
func LooperStandard(ls *looper.Stacks, net *Network, viewFunc func(mode enums.Enum) *NetViewUpdate, fastNCycles, plusStart, plusEnd int, cycle, trial, trainMode enums.Enum) {
- ls.AddOnStartToAll("SetContextMode", func(md, tm enums.Enum) {
- ctx := net.Context()
- ctx.Mode = int32(md.Int64())
- })
- ls.AddEventAllModes(cycle, "MinusPhase:Start", 0, func() {
- ctx := net.Context()
- ctx.PlusPhase.SetBool(false)
- ctx.NewPhase(false)
- })
- ls.AddEventAllModes(cycle, "Beta1", 50, func() { net.SpkSt1() })
- ls.AddEventAllModes(cycle, "Beta2", 100, func() { net.SpkSt2() })
+ ls.AddEventAllModes(cycle, "Beta1", 50, func() { net.Beta1() })
+ ls.AddEventAllModes(cycle, "Beta2", 100, func() { net.Beta2() })
ls.AddEventAllModes(cycle, "MinusPhase:End", plusStart, func() { net.MinusPhase() })
- ls.AddEventAllModes(cycle, "PlusPhase:Start", plusStart, func() {
- ctx := net.Context()
- ctx.PlusPhase.SetBool(true)
- ctx.NewPhase(true)
- net.PlusPhaseStart()
- })
+ ls.AddEventAllModes(cycle, "PlusPhase:Start", plusStart, func() { net.PlusPhaseStart() })
+
for mode, st := range ls.Stacks {
cycLoop := st.Loops[cycle]
cycLoop.OnStart.Add("Cycle", func() {
@@ -62,13 +49,9 @@ func LooperStandard(ls *looper.Stacks, net *Network, viewFunc func(mode enums.En
})
trlLoop := st.Loops[trial]
- trlLoop.OnStart.Add("NewState", func() {
- testing := mode.Int64() != trainMode.Int64()
- net.NewState(mode, testing)
- })
- trlLoop.OnEnd.Add("PlusPhase:End", func() {
- net.PlusPhase()
- })
+ testing := mode.Int64() != trainMode.Int64()
+ trlLoop.OnStart.Add("NewState", func() { net.NewState(mode, testing) })
+ trlLoop.OnEnd.Add("PlusPhase:End", func() { net.PlusPhase() })
if mode.Int64() == trainMode.Int64() {
trlLoop.OnEnd.Add("UpdateWeights", func() {
if view := viewFunc(mode); view != nil && view.IsViewingSynapse() {
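
With the Beta1/Beta2 events and the phase bookkeeping now handled inside LooperStandard and the corresponding Network methods, a simulation only needs one call to set up the standard cycle/trial structure. A hedged sketch, assuming ls, net, viewFunc, and the Cycle/Trial/Train enums are defined by the sim; the 200-cycle trial with the plus phase over cycles 150-199 and the fast-stepping value of 10 are conventional example-sim values, not fixed by this API:

```go
// Hypothetical ConfigLoops wiring: Beta1/Beta2 fire automatically at
// cycles 50 and 100, and minus/plus phase transitions are driven by
// net.MinusPhase / net.PlusPhaseStart / net.PlusPhase internally.
axon.LooperStandard(ls, net, viewFunc,
	10,       // fastNCycles (typical example-sim value)
	150, 199, // plusStart, plusEnd
	Cycle, Trial, Train) // the sim's own cycle, trial, and train-mode enums
```
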
diff --git a/axon/neuron.go b/axon/neuron.go
index f5aef8f0b..855f9fab3 100644
--- a/axon/neuron.go
+++ b/axon/neuron.go
@@ -19,17 +19,17 @@ type NeuronFlags int32 //enums:enum
// The neuron flags
const (
- // NeuronOff flag indicates that this neuron has been turned off (i.e., lesioned)
+ // NeuronOff flag indicates that this neuron has been turned off (i.e., lesioned).
NeuronOff NeuronFlags = 1
- // NeuronHasExt means the neuron has external input in its Ext field
+ // NeuronHasExt means the neuron has external input in its Ext field.
NeuronHasExt NeuronFlags = 2
- // NeuronHasTarg means the neuron has external target input in its Target field
+ // NeuronHasTarg means the neuron has external target input in its Target field.
NeuronHasTarg NeuronFlags = 4
- // NeuronHasCmpr means the neuron has external comparison input in its Target field -- used for computing
- // comparison statistics but does not drive neural activity ever
+ // NeuronHasCmpr means the neuron has external comparison input in its Target field.
+ // Used for computing comparison statistics but does not drive neural activity ever.
NeuronHasCmpr NeuronFlags = 8
)
@@ -39,227 +39,318 @@ const (
type NeuronVars int32 //enums:enum
const (
- /////////////////////////////////////////
- // Spiking, Activation
- // Spike is whether neuron has spiked or not on this cycle (0 or 1)
+ //////// Spiking, Activation
+
+ // Spike is whether neuron has spiked or not on this cycle (0 or 1).
Spike NeuronVars = iota
- // Spiked is 1 if neuron has spiked within the last 10 cycles (msecs), corresponding to a nominal max spiking rate of 100 Hz, 0 otherwise -- useful for visualization and computing activity levels in terms of average spiked levels.
+ // Spiked is 1 if neuron has spiked within the last 10 cycles (msecs),
+ // corresponding to a nominal max spiking rate of 100 Hz, 0 otherwise.
+ // Useful for visualization and computing activity levels in terms of
+ // average spiked levels.
Spiked
- // Act is rate-coded activation value reflecting instantaneous estimated rate of spiking, based on 1 / ISIAvg. This drives feedback inhibition in the FFFB function (todo: this will change when better inhibition is implemented), and is integrated over time for ActInt which is then used for performance statistics and layer average activations, etc. Should not be used for learning or other computations.
+ // Act is rate-coded activation value reflecting instantaneous estimated rate
+ // of spiking, based on 1 / ISIAvg. It is integrated over time for ActInt
+ // which is then used for performance statistics and layer average activations, etc.
+ // Should not be used for learning or other computations: just for stats / display.
Act
- // ActInt is integrated running-average activation value computed from Act with time constant Act.Dt.IntTau, to produce a longer-term integrated value reflecting the overall activation state across the ThetaCycle time scale, as the overall response of network to current input state -- this is copied to ActM and ActP at the ends of the minus and plus phases, respectively, and used in computing performance-level statistics (which are typically based on ActM). Should not be used for learning or other computations.
+ // ActInt is integrated running-average activation value computed from Act
+ // with time constant Act.Dt.IntTau, to produce a longer-term integrated value
+ // reflecting the overall activation state across the ThetaCycle time scale,
+ // as the overall response of network to current input state. This is copied
+ // to ActM and ActP at the ends of the minus and plus phases, respectively,
+ // and used in computing some performance-level statistics (based on ActM).
+ // Should not be used for learning or other computations.
ActInt
- /////////////////////////////////////////
- // Major conductances, Vm
+ //////// Major conductances, Vm
- // Ge is total excitatory conductance, including all forms of excitation (e.g., NMDA) -- does *not* include Gbar.E
+ // Ge is total excitatory conductance, including all forms of excitation
+ // (e.g., NMDA). Does *not* include the Gbar.E factor.
Ge
- // Gi is total inhibitory synaptic conductance -- the net inhibitory input to the neuron -- does *not* include Gbar.I
+ // Gi is total inhibitory synaptic conductance, i.e., the net inhibitory input
+ // to the neuron. Does *not* include the Gbar.I factor.
Gi
- // Gk is total potassium conductance, typically reflecting sodium-gated potassium currents involved in adaptation effects -- does *not* include Gbar.K
+ // Gk is total potassium conductance, typically reflecting sodium-gated potassium
+ // currents involved in adaptation effects. Does *not* include the Gbar.K factor.
Gk
- // Inet is net current produced by all channels -- drives update of Vm
+ // Inet is net current produced by all channels, which drives update of Vm.
Inet
- // Vm is membrane potential -- integrates Inet current over time
+ // Vm is the membrane potential at the cell body, which integrates Inet current
+ // over time, and drives spiking at the axon initial segment of the neuron.
Vm
- // VmDend is dendritic membrane potential -- has a slower time constant, is not subject to the VmR reset after spiking
+ // VmDend is the dendritic membrane potential, which has a slower time constant
+ // than Vm and is not subject to the VmR reset after spiking.
VmDend
- // ISI is current inter-spike-interval -- counts up since last spike. Starts at -1 when initialized.
+ // ISI is the current inter-spike-interval, which counts up since last spike.
+ // Starts at -1 when initialized.
ISI
- // ISIAvg is average inter-spike-interval -- average time interval between spikes, integrated with ISITau rate constant (relatively fast) to capture something close to an instantaneous spiking rate. Starts at -1 when initialized, and goes to -2 after first spike, and is only valid after the second spike post-initialization.
+ // ISIAvg is the average inter-spike-interval, i.e., the average time interval
+ // between spikes, integrated with ISITau rate constant (relatively fast) to
+ // capture something close to an instantaneous spiking rate. Starts at -1 when
+ // initialized, and goes to -2 after first spike, and is only valid after the
+ // second spike post-initialization.
ISIAvg
- // Ext is external input: drives activation of unit from outside influences (e.g., sensory input)
+ // Ext is the external input: drives activation of unit from outside influences
+ // (e.g., sensory input).
Ext
- // Target is the target value: drives learning to produce this activation value
+ // Target is the target value: drives learning to produce this activation value.
Target
- /////////////////////////////////////////
- // Calcium for learning
-
- // CaSpkM is spike-driven calcium trace used as a neuron-level proxy for synpatic credit assignment factor based on continuous time-integrated spiking: exponential integration of SpikeG * Spike at MTau time constant (typically 5). Simulates a calmodulin (CaM) like signal at the most abstract level.
- CaSpkM
-
- // CaSpkP is continuous cascaded integration of CaSpkM at PTau time constant (typically 40), representing neuron-level purely spiking version of plus, LTP direction of weight change and capturing the function of CaMKII in the Kinase learning rule. Used for specialized learning and computational functions, statistics, instead of Act.
- CaSpkP
-
- // CaSpkD is continuous cascaded integration CaSpkP at DTau time constant (typically 40), representing neuron-level purely spiking version of minus, LTD direction of weight change and capturing the function of DAPK1 in the Kinase learning rule. Used for specialized learning and computational functions, statistics, instead of Act.
- CaSpkD
-
- // CaSpkPM is minus-phase snapshot of the CaSpkP value -- similar to ActM but using a more directly spike-integrated value.
- CaSpkPM
-
- // CaLrn is recv neuron calcium signal used to drive temporal error difference component of standard learning rule, combining NMDA (NmdaCa) and spiking-driven VGCC (VgccCaInt) calcium sources (vs. CaSpk* which only reflects spiking component). This is integrated into CaM, CaP, CaD, and temporal derivative is CaP - CaD (CaMKII - DAPK1). This approximates the backprop error derivative on net input, but VGCC component adds a proportion of recv activation delta as well -- a balance of both works best. The synaptic-level trace multiplier provides the credit assignment factor, reflecting coincident activity and potentially integrated over longer multi-trial timescales.
- CaLrn
-
- // NrnCaM is integrated CaLrn at MTau timescale (typically 5), simulating a calmodulin (CaM) like signal, which then drives CaP, CaD for delta signal driving error-driven learning.
- NrnCaM
-
- // NrnCaP is cascaded integration of CaM at PTau time constant (typically 40), representing the plus, LTP direction of weight change and capturing the function of CaMKII in the Kinase learning rule.
- NrnCaP
-
- // NrnCaD is cascaded integratoin of CaP at DTau time constant (typically 40), representing the minus, LTD direction of weight change and capturing the function of DAPK1 in the Kinase learning rule.
- NrnCaD
-
- // CaDiff is difference between CaP - CaD -- this is the error signal that drives error-driven learning.
+ //////// Spike-driven calcium for stats
+
+ // CaM is the spike-driven calcium trace at the neuron level, which then drives
+ // longer time-integrated variables: [CaP] and [CaD]. These variables are used
+ // for statistics and display to capture spiking activity at different timescales.
+ // They fluctuate more than [Act] and [ActInt], but are closer to the biological
+ // variables driving learning. CaM is the exponential integration of SpikeG * Spike
+ // using the MTau time constant (typically 5), and simulates a calmodulin (CaM)
+ // like signal, at an abstract level.
+ CaM
+
+ // CaP is the continuous cascaded integration of [CaM] using the PTau time constant
+ // (typically 40), representing a neuron-level, purely spiking version of the plus,
+ // LTP direction of weight change in the Kinase learning rule, dependent on CaMKII.
+ // This is not used for learning (see [LearnCaP]), but instead for statistics
+ // as a representation of recent activity.
+ CaP
+
+	// CaD is the continuous cascaded integration of [CaP] using the DTau time constant
+ // (typically 40), representing a neuron-level, purely spiking version of the minus,
+ // LTD direction of weight change in the Kinase learning rule, dependent on DAPK1.
+ // This is not used for learning (see [LearnCaD]), but instead for statistics
+ // as a representation of trial-level activity.
+ CaD
+
+ //////// Calcium for learning
+
+ // LearnCa is the receiving neuron calcium signal, which is integrated up to
+ // [LearnCaP] and [LearnCaD], the difference of which is the temporal error
+ // component of the standard axon cortical learning rule.
+ // LearnCa combines NMDA via [NmdaCa] and spiking-driven VGCC [VgccCaInt] calcium
+ // sources (vs. CaM which only reflects a simple spiking component).
+ // The NMDA signal reflects both sending and receiving activity, while the
+ // VGCC signal is purely receiver spiking, and a balance of both works best.
+ // The synaptic-level trace factor computed from the SpkBin variables on both
+ // sender and receiver provides the credit assignment factor, reflecting coincident
+ // activity, which can be integrated over longer multi-trial timescales.
+ LearnCa
+
+ // LearnCaM is the integrated [LearnCa] at the MTau timescale (typically 5),
+ // simulating a calmodulin (CaM) like signal, which then drives [LearnCaP],
+ // and [LearnCaD] for the delta signal for error-driven learning.
+ LearnCaM
+
+ // LearnCaP is the cascaded integration of [LearnCaM] using the PTau time constant
+ // (typically 40), representing the plus, LTP direction of weight change,
+ // capturing the function of CaMKII in the Kinase learning rule.
+ LearnCaP
+
+ // LearnCaD is the cascaded integration of [LearnCaP] using the DTau time constant
+ // (typically 40), representing the minus, LTD direction of weight change,
+ // capturing the function of DAPK1 in the Kinase learning rule.
+ LearnCaD
+
+	// CaDiff is the difference between [LearnCaP] and [LearnCaD]. This is the error
+ // signal that drives error-driven learning.
CaDiff
- // RLRate is recv-unit based learning rate multiplier, reflecting the sigmoid derivative computed from the CaSpkD of recv unit, and the normalized difference CaSpkP - CaSpkD / MAX(CaSpkP - CaSpkD).
+ // RLRate is recv-unit based learning rate multiplier, reflecting the sigmoid
+ // derivative computed from [CaD] of recv unit, and the normalized difference
+ // (CaP - CaD) / MAX(CaP - CaD).
RLRate
- /////////////////////////////////////////
- // NMDA channels
+ //////// NMDA channels
- // GnmdaSyn is integrated NMDA recv synaptic current -- adds GeRaw and decays with time constant
+ // GnmdaSyn is the integrated NMDA synaptic current on the receiving neuron.
+ // It adds GeRaw and decays with a time constant.
GnmdaSyn
- // Gnmda is net postsynaptic (recv) NMDA conductance, after Mg V-gating and Gbar -- added directly to Ge as it has the same reversal potential
+ // Gnmda is the net postsynaptic (receiving) NMDA conductance,
+ // after Mg V-gating and Gbar. This is added directly to Ge as it has the same
+ // reversal potential.
Gnmda
- // GnmdaLrn is learning version of integrated NMDA recv synaptic current -- adds GeRaw and decays with time constant -- drives NmdaCa that then drives CaM for learning
+ // GnmdaLrn is learning version of integrated NMDA recv synaptic current.
+ // It adds [GeRaw] and decays with a time constant. This drives [NmdaCa] that
+ // then drives [LearnCa] for learning.
GnmdaLrn
- // GnmdaMaint is net postsynaptic maintenance NMDA conductance, computed from GMaintSyn and GMaintRaw, after Mg V-gating and Gbar -- added directly to Ge as it has the same reversal potential
+ // GnmdaMaint is net postsynaptic maintenance NMDA conductance, computed from
+ // [GMaintSyn] and [GMaintRaw], after Mg V-gating and Gbar. This is added directly
+ // to Ge as it has the same reversal potential.
GnmdaMaint
- // NmdaCa is NMDA calcium computed from GnmdaLrn, drives learning via CaM
+ // NmdaCa is NMDA calcium computed from GnmdaLrn, drives learning via CaM.
NmdaCa
- /////////////////////////////////////////
- // VGCC voltage gated calcium channels
+ //////// VGCC voltage gated calcium channels
- // Gvgcc is conductance (via Ca) for VGCC voltage gated calcium channels
+ // Gvgcc is conductance (via Ca) for VGCC voltage gated calcium channels.
Gvgcc
- // VgccM is activation gate of VGCC channels
+ // VgccM is activation gate of VGCC channels.
VgccM
- // VgccH inactivation gate of VGCC channels
+	// VgccH is the inactivation gate of VGCC channels.
VgccH
- // VgccCa is instantaneous VGCC calcium flux -- can be driven by spiking or directly from Gvgcc
+ // VgccCa is the instantaneous VGCC calcium flux: can be driven by spiking
+ // or directly from Gvgcc.
VgccCa
- // VgccCaInt time-integrated VGCC calcium flux -- this is actually what drives learning
+ // VgccCaInt is the time-integrated VGCC calcium flux. This is actually
+ // what drives learning.
VgccCaInt
- // Burst is 5IB bursting activation value, computed by thresholding regular CaSpkP value in Super superficial layers
+ // Burst is the layer 5 IB intrinsic bursting neural activation value,
+ // computed by thresholding the [CaP] value in Super superficial layers.
Burst
- // BurstPrv is previous Burst bursting activation from prior time step -- used for context-based learning
+ // BurstPrv is previous Burst bursting activation from prior time step.
+ // Used for context-based learning.
BurstPrv
- // CtxtGe is context (temporally delayed) excitatory conductance, driven by deep bursting at end of the plus phase, for CT layers.
+ // CtxtGe is context (temporally delayed) excitatory conductance,
+ // driven by deep bursting at end of the plus phase, for CT layers.
CtxtGe
- // CtxtGeRaw is raw update of context (temporally delayed) excitatory conductance, driven by deep bursting at end of the plus phase, for CT layers.
+ // CtxtGeRaw is raw update of context (temporally delayed) excitatory
+ // conductance, driven by deep bursting at end of the plus phase, for CT layers.
CtxtGeRaw
- // CtxtGeOrig is original CtxtGe value prior to any decay factor -- updates at end of plus phase.
+ // CtxtGeOrig is original CtxtGe value prior to any decay factor.
+ // Updates at end of plus phase.
CtxtGeOrig
/////////////////////////////////////////
// GABA channels
- // GgabaB is net GABA-B conductance, after Vm gating and Gbar + Gbase -- applies to Gk, not Gi, for GIRK, with .1 reversal potential.
+ // GgabaB is net GABA-B conductance, after Vm gating and Gbar + Gbase.
+ // Applies to Gk, not Gi, for GIRK, with .1 reversal potential.
GgabaB
- // GABAB is GABA-B / GIRK activation -- time-integrated value with rise and decay time constants
+ // GABAB is GABA-B / GIRK activation, which is a time-integrated value
+ // with rise and decay time constants.
GABAB
- // GABABx is GABA-B / GIRK internal drive variable -- gets the raw activation and decays
+ // GABABx is GABA-B / GIRK internal drive variable. This gets the raw
+ // activation and decays.
GABABx
- /////////////////////////////////////////
- // SST somatostatin inhibition factors
+ //////// SST somatostatin inhibition factors
- // Gak is conductance of A-type K potassium channels
+ // Gak is the conductance of A-type K potassium channels.
Gak
- // SSGiDend is amount of SST+ somatostatin positive slow spiking inhibition applied to dendritic Vm (VmDend)
+ // SSGiDend is the amount of SST+ somatostatin positive slow spiking
+ // inhibition applied to dendritic Vm (VmDend).
SSGiDend
- // GknaMed is conductance of sodium-gated potassium channel (KNa) medium dynamics (Slick), which produces accommodation / adaptation of firing
+ // GknaMed is the conductance of sodium-gated potassium channel (KNa)
+ // medium dynamics (Slick), which produces accommodation / adaptation.
GknaMed
- // GknaSlow is conductance of sodium-gated potassium channel (KNa) slow dynamics (Slack), which produces accommodation / adaptation of firing
+ // GknaSlow is the conductance of sodium-gated potassium channel (KNa)
+ // slow dynamics (Slack), which produces accommodation / adaptation.
GknaSlow
// Gkir is the conductance of the potassium (K) inwardly rectifying channel,
// which is strongest at low membrane potentials. Can be modulated by DA.
Gkir
- // KirM is the Kir potassium (K) inwardly rectifying gating value
+ // KirM is the Kir potassium (K) inwardly rectifying gating value.
KirM
- /////////////////////////////////////////
- // SKCa small conductance calcium-gated potassium channels
+ //////// SKCa small conductance calcium-gated potassium channels
- // Gsk is Calcium-gated potassium channel conductance as a function of Gbar * SKCaM.
+ // Gsk is Calcium-gated potassium channel conductance as a function
+ // of Gbar * SKCaM.
Gsk
- // SKCaIn is intracellular calcium store level, available to be released with spiking as SKCaR, which can bind to SKCa receptors and drive K current. replenishment is a function of spiking activity being below a threshold
+ // SKCaIn is intracellular calcium store level, available to be released
+ // with spiking as SKCaR, which can bind to SKCa receptors and drive K
+	// current. Replenishment is a function of spiking activity being below
+ // a threshold.
SKCaIn
- // SKCaR released amount of intracellular calcium, from SKCaIn, as a function of spiking events. this can bind to SKCa channels and drive K currents.
+ // SKCaR is the released amount of intracellular calcium, from SKCaIn,
+ // as a function of spiking events. This can bind to SKCa channels and
+ // drive K currents.
SKCaR
- // SKCaM is Calcium-gated potassium channel gating factor, driven by SKCaR via a Hill equation as in chans.SKPCaParams.
+ // SKCaM is the Calcium-gated potassium channel gating factor, driven by
+ // SKCaR via a Hill equation as in chans.SKPCaParams.
SKCaM
- /////////////////////////////////////////
- // AHP channels: Mahp, Sahp, Gkna
+ ///////// AHP channels: Mahp, Sahp, Gkna
- // Gmahp is medium time scale AHP conductance
+ // Gmahp is medium time scale AHP conductance.
Gmahp
- // MahpN is accumulating voltage-gated gating value for the medium time scale AHP
+ // MahpN is accumulating voltage-gated gating value for the medium time
+ // scale AHP.
MahpN
- // Gsahp is slow time scale AHP conductance
+ // Gsahp is slow time scale AHP conductance.
Gsahp
- // SahpCa is slowly accumulating calcium value that drives the slow AHP
+ // SahpCa is slowly accumulating calcium value that drives the slow AHP.
SahpCa
- // SahpN is the sAHP gating value
+ // SahpN is the sAHP gating value.
SahpN
- /////////////////////////////////////////
- // Stats, aggregate values
+ //////// Stats, aggregate values
- // ActM is ActInt activation state at end of third quarter, representing the posterior-cortical minus phase activation -- used for statistics and monitoring network performance. Should not be used for learning or other computations.
+ // ActM is ActInt activation state at end of third quarter, representing
+ // the posterior-cortical minus phase activation. This is used for statistics
+ // and monitoring network performance.
+ // Should not be used for learning or other computations.
ActM
- // ActP is ActInt activation state at end of fourth quarter, representing the posterior-cortical plus_phase activation -- used for statistics and monitoring network performance. Should not be used for learning or other computations.
+ // ActP is ActInt activation state at end of fourth quarter, representing
+	// the posterior-cortical plus phase activation. This is used for statistics
+ // and monitoring network performance.
+ // Should not be used for learning or other computations.
ActP
- // SpkSt1 is the activation state at specific time point within current state processing window (e.g., 50 msec for beta cycle within standard theta cycle), as saved by SpkSt1() function. Used for example in hippocampus for CA3, CA1 learning
- SpkSt1
+ // Beta1 is the activation state at the first beta cycle within current
+ // state processing window (i.e., at 50 msec), as saved by Beta1() function.
+ // Used for example in hippocampus for CA3, CA1 learning.
+ Beta1
- // SpkSt2 is the activation state at specific time point within current state processing window (e.g., 100 msec for beta cycle within standard theta cycle), as saved by SpkSt2() function. Used for example in hippocampus for CA3, CA1 learning
- SpkSt2
+ // Beta2 is the activation state at the second beta cycle within current
+ // state processing window (i.e., at 100 msec), as saved by Beta2() function.
+ // Used for example in hippocampus for CA3, CA1 learning.
+ Beta2
- // SpkMax is maximum CaSpkP across one theta cycle time window (max of SpkMaxCa) -- used for specialized algorithms that have more phasic behavior within a single trial, e.g., BG Matrix layer gating. Also useful for visualization of peak activity of neurons.
+ // SpkMax is the maximum [CaP] across one theta cycle time window
+ // (max of SpkMaxCa). It is used for specialized algorithms that have more
+ // phasic behavior within a single trial, e.g., BG Matrix layer gating.
+ // Also useful for visualization of peak activity of neurons.
SpkMax
- // SpkMaxCa is Ca integrated like CaSpkP but only starting at MaxCycStart cycle, to prevent inclusion of carryover spiking from prior theta cycle trial -- the PTau time constant otherwise results in significant carryover. This is the input to SpkMax
+ // SpkMaxCa is the Ca integrated like [CaP] but only starting at
+ // the MaxCycStart cycle, to prevent inclusion of carryover spiking from
+ // prior theta cycle trial. The PTau time constant otherwise results in
+ // significant carryover. This is the input to SpkMax.
SpkMaxCa
- // SpkBin has aggregated spikes within 50 msec bins across the theta cycle, for computing synaptic calcium efficiently
+ // SpkBin has aggregated spikes within 50 msec bins across the theta
+ // cycle, for computing synaptic calcium efficiently.
SpkBin0
SpkBin1
SpkBin2
@@ -269,69 +360,102 @@ const (
SpkBin6
SpkBin7
- // SpkPrv is final CaSpkD activation state at end of previous theta cycle. used for specialized learning mechanisms that operate on delayed sending activations.
+ // SpkPrv is the final [CaD] activation state at end of previous theta cycle.
+ // This is used for specialized learning mechanisms that operate on delayed
+ // sending activations.
SpkPrv
- /////////////////////////////////////////
- // Noise
+ //////// Noise
- // GeNoise is integrated noise excitatory conductance, added into Ge
+ // GeNoise is integrated noise excitatory conductance, added into Ge.
GeNoise
- // GeNoiseP is accumulating poisson probability factor for driving excitatory noise spiking -- multiply times uniform random deviate at each time step, until it gets below the target threshold based on poisson lambda as function of noise firing rate.
+ // GeNoiseP is accumulating poisson probability factor for driving excitatory
+ // noise spiking. Multiply times uniform random deviate at each time step,
+ // until it gets below the target threshold based on poisson lambda as function
+ // of noise firing rate.
GeNoiseP
- // GiNoise is integrated noise inhibotyr conductance, added into Gi
+	// GiNoise is integrated noise inhibitory conductance, added into Gi.
GiNoise
- // GiNoiseP is accumulating poisson probability factor for driving inhibitory noise spiking -- multiply times uniform random deviate at each time step, until it gets below the target threshold based on poisson lambda as a function of noise firing rate.
+ // GiNoiseP is accumulating poisson probability factor for driving inhibitory
+ // noise spiking. Multiply times uniform random deviate at each time step,
+ // until it gets below the target threshold based on poisson lambda as a function
+ // of noise firing rate.
GiNoiseP
- /////////////////////////////////////////
- // Ge, Gi integration
+ //////// Ge, Gi integration
- // GeExt is extra excitatory conductance added to Ge -- from Ext input, GeCtxt etc
+ // GeExt is extra excitatory conductance added to Ge, from Ext input, GeCtxt etc.
GeExt
- // GeRaw is raw excitatory conductance (net input) received from senders = current raw spiking drive
+ // GeRaw is the raw excitatory conductance (net input) received from
+ // senders = current raw spiking drive.
GeRaw
- // GeSyn is time-integrated total excitatory synaptic conductance, with an instantaneous rise time from each spike (in GeRaw) and exponential decay with Dt.GeTau, aggregated over pathways -- does *not* include Gbar.E
+ // GeSyn is the time-integrated total excitatory synaptic conductance,
+ // with an instantaneous rise time from each spike (in GeRaw) and
+ // exponential decay with Dt.GeTau, aggregated over pathways.
+ // Does *not* include Gbar.E.
GeSyn
- // GiRaw is raw inhibitory conductance (net input) received from senders = current raw spiking drive
+ // GiRaw is the raw inhibitory conductance (net input) received from senders
+ // = current raw spiking drive.
GiRaw
- // GiSyn is time-integrated total inhibitory synaptic conductance, with an instantaneous rise time from each spike (in GiRaw) and exponential decay with Dt.GiTau, aggregated over pathways -- does *not* include Gbar.I. This is added with computed FFFB inhibition to get the full inhibition in Gi
+ // GiSyn is time-integrated total inhibitory synaptic conductance, with an
+ // instantaneous rise time from each spike (in GiRaw) and exponential decay
+	// with Dt.GiTau, aggregated over pathways. Does *not* include Gbar.I.
+ // This is added with computed FFFB inhibition to get the full inhibition in Gi.
GiSyn
- // GeInt is integrated running-average activation value computed from Ge with time constant Act.Dt.IntTau, to produce a longer-term integrated value reflecting the overall Ge level across the ThetaCycle time scale (Ge itself fluctuates considerably) -- useful for stats to set strength of connections etc to get neurons into right range of overall excitatory drive
+ // GeInt is integrated running-average activation value computed from Ge
+ // with time constant Act.Dt.IntTau, to produce a longer-term integrated value
+ // reflecting the overall Ge level across the ThetaCycle time scale (Ge itself
+ // fluctuates considerably). This is useful for stats to set strength of
+ // connections etc to get neurons into right range of overall excitatory drive.
GeInt
- // GeIntNorm is normalized GeInt value (divided by the layer maximum) -- this is used for learning in layers that require learning on subthreshold activity
+ // GeIntNorm is normalized GeInt value (divided by the layer maximum).
+ // This is used for learning in layers that require learning on
+ // subthreshold activity.
GeIntNorm
- // GiInt is integrated running-average activation value computed from GiSyn with time constant Act.Dt.IntTau, to produce a longer-term integrated value reflecting the overall synaptic Gi level across the ThetaCycle time scale (Gi itself fluctuates considerably) -- useful for stats to set strength of connections etc to get neurons into right range of overall inhibitory drive
+ // GiInt is integrated running-average activation value computed from GiSyn
+ // with time constant Act.Dt.IntTau, to produce a longer-term integrated
+ // value reflecting the overall synaptic Gi level across the ThetaCycle
+ // time scale (Gi itself fluctuates considerably). Useful for stats to set
+ // strength of connections etc to get neurons into right range of overall
+ // inhibitory drive.
GiInt
- // GModRaw is raw modulatory conductance, received from GType = ModulatoryG pathways
+ // GModRaw is raw modulatory conductance, received from GType
+ // = ModulatoryG pathways.
GModRaw
- // GModSyn is syn integrated modulatory conductance, received from GType = ModulatoryG pathways
+ // GModSyn is syn integrated modulatory conductance, received from GType
+ // = ModulatoryG pathways.
GModSyn
- // SMaintP is accumulating poisson probability factor for driving self-maintenance by simulating a population of mutually interconnected neurons. multiply times uniform random deviate at each time step, until it gets below the target threshold based on poisson lambda based on accumulating self maint factor
+ // SMaintP is accumulating poisson probability factor for driving
+ // self-maintenance by simulating a population of mutually interconnected neurons.
+ // Multiply times uniform random deviate at each time step, until it gets below
+	// the target threshold, with the poisson lambda based on the accumulating
+	// self-maintenance factor.
SMaintP
- // GMaintRaw is raw maintenance conductance, received from GType = MaintG pathways
+ // GMaintRaw is raw maintenance conductance, received from GType
+ // = MaintG pathways.
GMaintRaw
- // GMaintSyn is syn integrated maintenance conductance, integrated using MaintNMDA params.
+ // GMaintSyn is syn integrated maintenance conductance, integrated
+ // using MaintNMDA params.
GMaintSyn
- // NeurFlags are bit flags for binary state variables, which are converted to / from uint32.
- // These need to be in Vars because they can be differential per data (for ext inputs)
- // and are writable (indexes are read only).
+ // NeurFlags are bit flags for binary state variables, which are converted
+ // to / from uint32. These need to be in Vars because they can be
+ // differential per data (for ext inputs) and are writable (indexes are read only).
NeurFlags
)
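
The LearnCa cascade documented above (LearnCa -> LearnCaM -> LearnCaP -> LearnCaD, with CaDiff = LearnCaP - LearnCaD as the error signal) reduces to a chain of exponential integrators. The following self-contained Go sketch illustrates that cascade using the nominal time constants from the comments (MTau=5, PTau=40, DTau=40); it is an illustration of the math, not the axon implementation:

```go
package main

import "fmt"

// kinaseCascade holds the cascaded calcium integrators described for the
// LearnCa variables: LearnCaM (MTau ~5) -> LearnCaP (PTau ~40) -> LearnCaD (DTau ~40).
type kinaseCascade struct {
	MTau, PTau, DTau float32
	CaM, CaP, CaD    float32
}

// step integrates one cycle of the incoming calcium signal and returns
// CaDiff = CaP - CaD, the temporal-difference error signal for learning.
func (k *kinaseCascade) step(learnCa float32) float32 {
	k.CaM += (learnCa - k.CaM) / k.MTau
	k.CaP += (k.CaM - k.CaP) / k.PTau
	k.CaD += (k.CaP - k.CaD) / k.DTau
	return k.CaP - k.CaD
}

func main() {
	k := &kinaseCascade{MTau: 5, PTau: 40, DTau: 40}
	// Drive with calcium for 150 cycles, then go silent: CaDiff first goes
	// positive (LTP direction, CaP leads CaD), then negative (LTD direction,
	// as CaP falls while the lagging CaD remains elevated).
	for cyc := 0; cyc < 300; cyc++ {
		ca := float32(0)
		if cyc < 150 {
			ca = 1
		}
		diff := k.step(ca)
		if cyc%50 == 0 {
			fmt.Printf("cycle %3d: CaM=%.3f CaP=%.3f CaD=%.3f CaDiff=%+.3f\n",
				cyc, k.CaM, k.CaP, k.CaD, diff)
		}
	}
}
```
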
@@ -393,9 +517,6 @@ const (
//gosl:end
-////////////////////////////////////////////////
-// Props
-
var VarCategories = []emer.VarCategory{
{"Act", "basic activation variables, including conductances, current, Vm, spiking"},
{"Learn", "calcium-based learning variables and other related learning factors"},
@@ -407,10 +528,10 @@ var VarCategories = []emer.VarCategory{
{"Wts", "weights and other synaptic-level variables"},
}
-// NeuronVarProps has all of the display properties for neuron variables, including desc tooltips
+// NeuronVarProps has display properties for neuron variables.
var NeuronVarProps = map[string]string{
- /////////////////////////////////////////
- // Spiking, Activation, Major conductances, Vm
+
+ //////// Spiking, Activation, Major conductances, Vm
"Spike": `cat:"Act"`,
"Spiked": `cat:"Act"`,
@@ -427,22 +548,19 @@ var NeuronVarProps = map[string]string{
"Ext": `cat:"Act"`,
"Target": `cat:"Act"`,
- /////////////////////////////////////////
- // Calcium for learning
-
- "CaSpkM": `cat:"Learn"`,
- "CaSpkP": `cat:"Learn"`,
- "CaSpkD": `cat:"Learn"`,
- "CaSpkPM": `cat:"Learn"`,
- "CaLrn": `cat:"Learn"`,
- "NrnCaM": `cat:"Learn"`,
- "NrnCaP": `cat:"Learn"`,
- "NrnCaD": `cat:"Learn"`,
- "CaDiff": `cat:"Learn"`,
- "RLRate": `cat:"Learn" auto-scale:"+"`,
+ //////// Calcium for learning
- /////////////////////////////////////////
- // NMDA channels
+ "CaM": `cat:"Learn"`,
+ "CaP": `cat:"Learn"`,
+ "CaD": `cat:"Learn"`,
+ "LearnCa": `cat:"Learn"`,
+ "LearnCaM": `cat:"Learn"`,
+ "LearnCaP": `cat:"Learn"`,
+ "LearnCaD": `cat:"Learn"`,
+ "CaDiff": `cat:"Learn"`,
+ "RLRate": `cat:"Learn" auto-scale:"+"`,
+
+ //////// NMDA channels
"GnmdaSyn": `cat:"Excite" auto-scale:"+"`,
"Gnmda": `cat:"Excite" auto-scale:"+"`,
@@ -450,8 +568,7 @@ var NeuronVarProps = map[string]string{
"GnmdaMaint": `cat:"Excite" auto-scale:"+"`,
"NmdaCa": `cat:"Excite" auto-scale:"+"`,
- /////////////////////////////////////////
- // VGCC voltage gated calcium channels
+ //////// VGCC voltage gated calcium channels
"Gvgcc": `cat:"Excite" auto-scale:"+"`,
"VgccM": `cat:"Excite"`,
@@ -459,8 +576,7 @@ var NeuronVarProps = map[string]string{
"VgccCa": `cat:"Excite" auto-scale:"+"`,
"VgccCaInt": `cat:"Excite" auto-scale:"+"`,
- /////////////////////////////////////////
- // Misc Excitatory Vars
+ //////// Misc Excitatory Vars
"Burst": `cat:"Excite"`,
"BurstPrv": `cat:"Excite"`,
@@ -468,15 +584,13 @@ var NeuronVarProps = map[string]string{
"CtxtGeRaw": `cat:"Excite"`,
"CtxtGeOrig": `cat:"Excite"`,
- /////////////////////////////////////////
- // GABA channels
+ //////// GABA channels
"GgabaB": `cat:"Inhib" auto-scale:"+"`,
"GABAB": `cat:"Inhib" auto-scale:"+"`,
"GABABx": `cat:"Inhib" auto-scale:"+"`,
- /////////////////////////////////////////
- // SST somatostatin inhibition factors
+ //////// SST somatostatin inhibition factors
"Gak": `cat:"Inhib" auto-scale:"+"`,
"SSGiDend": `cat:"Inhib" auto-scale:"+"`,
@@ -486,16 +600,14 @@ var NeuronVarProps = map[string]string{
"Gkir": `cat:"Inhib"`,
"KirM": `cat:"Inhib"`,
- /////////////////////////////////////////
- // SKCa small conductance calcium-gated potassium channels
+ //////// SKCa small conductance calcium-gated potassium channels
"Gsk": `cat:"Inhib"`,
"SKCaIn": `cat:"Inhib"`,
"SKCaR": `cat:"Inhib"`,
"SKCaM": `cat:"Inhib"`,
- /////////////////////////////////////////
- // AHP channels: Mahp, Sahp
+ //////// AHP channels: Mahp, Sahp
"Gmahp": `cat:"Inhib" auto-scale:"+"`,
"MahpN": `cat:"Inhib" auto-scale:"+"`,
@@ -503,13 +615,12 @@ var NeuronVarProps = map[string]string{
"SahpCa": `cat:"Inhib"`,
"SahpN": `cat:"Inhib"`,
- /////////////////////////////////////////
- // Stats, aggregate values
+ //////// Stats, aggregate values
"ActM": `cat:"Stats"`,
"ActP": `cat:"Stats"`,
- "SpkSt1": `cat:"Stats"`,
- "SpkSt2": `cat:"Stats"`,
+ "Beta1": `cat:"Stats"`,
+ "Beta2": `cat:"Stats"`,
"SpkMax": `cat:"Stats"`,
"SpkMaxCa": `cat:"Stats"`,
@@ -524,16 +635,14 @@ var NeuronVarProps = map[string]string{
"SpkPrv": `cat:"Stats"`,
- /////////////////////////////////////////
- // Noise
+ //////// Noise
"GeNoise": `cat:"Gmisc"`,
"GeNoiseP": `cat:"Gmisc"`,
"GiNoise": `cat:"Gmisc"`,
"GiNoiseP": `cat:"Gmisc"`,
- /////////////////////////////////////////
- // Ge, Gi integration
+ //////// Ge, Gi integration
"GeExt": `cat:"Gmisc"`,
"GeRaw": `cat:"Gmisc"`,
@@ -551,8 +660,7 @@ var NeuronVarProps = map[string]string{
"NeurFlags": `display:"-"`,
- /////////////////////////////////////////
- // Long-term average activation, set point for synaptic scaling
+ //////// Long-term average activation, set point for synaptic scaling
"ActAvg": `cat:"Avg"`,
"AvgPct": `cat:"Avg" range:"2"`,
@@ -562,8 +670,7 @@ var NeuronVarProps = map[string]string{
"GeBase": `cat:"Avg"`,
"GiBase": `cat:"Avg"`,
- /////////////////////////////////////////
- // Layer-level variables
+ //////// Layer-level variables
"DA": `cat:"Learn" doc:"dopamine neuromodulation (layer-level variable)"`,
"ACh": `cat:"Learn" doc:"cholinergic neuromodulation (layer-level variable)"`,
diff --git a/axon/pathtypes.go b/axon/pathtypes.go
index f9310567f..504253d24 100644
--- a/axon/pathtypes.go
+++ b/axon/pathtypes.go
@@ -37,7 +37,7 @@ const (
CTCtxtPath
// RWPath does dopamine-modulated learning for reward prediction:
- // Da * Send.CaSpkP (integrated current spiking activity).
+ // Da * Send.CaP (integrated current spiking activity).
// Uses RLPredPath parameters.
// Use in RWPredLayer typically to generate reward predictions.
// If the Da sign is positive, the first recv unit learns fully;
diff --git a/axon/pool.go b/axon/pool.go
index 829596282..3e84c29fa 100644
--- a/axon/pool.go
+++ b/axon/pool.go
@@ -94,15 +94,15 @@ const (
type AvgMaxVars int32 //enums:enum -trim-prefix AM
const (
- // CaSpkP is the primary variable for tracking overall pool activity
+ // CaP is the primary variable for tracking overall pool activity
// over a recent timescale, integrated at roughly 40 msec time constant.
- AMCaSpkP AvgMaxVars = iota
+ AMCaP AvgMaxVars = iota
- // CaSpkD is a slower moving activation signal, capable of reflecting
+ // CaD is a slower moving activation signal, capable of reflecting
// activity over the entire trial.
- AMCaSpkD
+ AMCaD
- // SpkMax is the maximum CaSpkP over the trial of processing.
+ // SpkMax is the maximum CaP over the trial of processing.
AMSpkMax
// Act is the computed rate-code equivalent of current spike rate.
@@ -129,7 +129,7 @@ const (
)
// avgMaxToNeuron is the mapping from AvgMaxVars to neuron vars.
-var avgMaxToNeuron = [AMAvgDif]NeuronVars{CaSpkP, CaSpkD, SpkMax, Act, GeInt, GiInt}
+var avgMaxToNeuron = [AMAvgDif]NeuronVars{CaP, CaD, SpkMax, Act, GeInt, GiInt}
// AvgMaxVarIndex returns the variable index for accessing
// [Pools] AvgMax float32 variables.
@@ -193,8 +193,8 @@ func PoolAvgMaxUpdateVar(vr AvgMaxVars, pi, di uint32, val float32) {
// PoolAvgMaxUpdate updates the AvgMax values based on current neuron values.
// pi = global pool index.
func PoolAvgMaxUpdate(pi, di, ni uint32) {
- PoolAvgMaxUpdateVar(AMCaSpkP, pi, di, math32.Abs(Neurons.Value(int(ni), int(di), int(avgMaxToNeuron[AMCaSpkP]))))
- PoolAvgMaxUpdateVar(AMCaSpkD, pi, di, math32.Abs(Neurons.Value(int(ni), int(di), int(avgMaxToNeuron[AMCaSpkD]))))
+ PoolAvgMaxUpdateVar(AMCaP, pi, di, math32.Abs(Neurons.Value(int(ni), int(di), int(avgMaxToNeuron[AMCaP]))))
+ PoolAvgMaxUpdateVar(AMCaD, pi, di, math32.Abs(Neurons.Value(int(ni), int(di), int(avgMaxToNeuron[AMCaD]))))
PoolAvgMaxUpdateVar(AMSpkMax, pi, di, math32.Abs(Neurons.Value(int(ni), int(di), int(avgMaxToNeuron[AMSpkMax]))))
PoolAvgMaxUpdateVar(AMAct, pi, di, math32.Abs(Neurons.Value(int(ni), int(di), int(avgMaxToNeuron[AMAct]))))
PoolAvgMaxUpdateVar(AMGeInt, pi, di, math32.Abs(Neurons.Value(int(ni), int(di), int(avgMaxToNeuron[AMGeInt]))))
@@ -330,8 +330,8 @@ func PoolIntVarName(vi uint32) string {
return vr.String() + "_" + am.String()
}
-// TestValues returns a map of CaSpkD.Avg, which provides an
+// TestValues returns a map of CaD.Avg, which provides an
// integrated summary of pool activity for testing
func PoolTestValues(pi, di uint32, layKey string, vals map[string]float32) {
- vals[layKey+" CaSpkD Avg"] = PoolAvgMax(AMCaSpkD, AMCycle, Avg, pi, di)
+ vals[layKey+" CaD Avg"] = PoolAvgMax(AMCaD, AMCycle, Avg, pi, di)
}
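
Pool-level statistics keep the same access pattern under the new names. A hedged fragment mirroring the PoolAvgMax(AMCaD, ...) calls that appear later in this patch (e.g., in SetGoalMaintFromLayer); the helper name and the use of pool 0 are illustrative only:

```go
// avgCaD returns the cycle-level pool-average CaD for the layer's
// whole-layer pool (index 0) and data-parallel index di, mirroring the
// calls visible elsewhere in this patch; illustrative helper only.
func avgCaD(ly *axon.Layer, di uint32) float32 {
	lpi := ly.Params.PoolIndex(0)
	return axon.PoolAvgMax(axon.AMCaD, axon.AMCycle, axon.Avg, lpi, di)
}
```
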
diff --git a/axon/pool.goal b/axon/pool.goal
index 08e53d81a..17cfc8d6a 100644
--- a/axon/pool.goal
+++ b/axon/pool.goal
@@ -92,15 +92,15 @@ const (
type AvgMaxVars int32 //enums:enum -trim-prefix AM
const (
- // CaSpkP is the primary variable for tracking overall pool activity
+ // CaP is the primary variable for tracking overall pool activity
// over a recent timescale, integrated at roughly 40 msec time constant.
- AMCaSpkP AvgMaxVars = iota
+ AMCaP AvgMaxVars = iota
- // CaSpkD is a slower moving activation signal, capable of reflecting
+ // CaD is a slower moving activation signal, capable of reflecting
// activity over the entire trial.
- AMCaSpkD
+ AMCaD
- // SpkMax is the maximum CaSpkP over the trial of processing.
+ // SpkMax is the maximum CaP over the trial of processing.
AMSpkMax
// Act is the computed rate-code equivalent of current spike rate.
@@ -127,7 +127,7 @@ const (
)
// avgMaxToNeuron is the mapping from AvgMaxVars to neuron vars.
-var avgMaxToNeuron = [AMAvgDif]NeuronVars{CaSpkP, CaSpkD, SpkMax, Act, GeInt, GiInt}
+var avgMaxToNeuron = [AMAvgDif]NeuronVars{CaP, CaD, SpkMax, Act, GeInt, GiInt}
// AvgMaxVarIndex returns the variable index for accessing
// [Pools] AvgMax float32 variables.
@@ -191,8 +191,8 @@ func PoolAvgMaxUpdateVar(vr AvgMaxVars, pi, di uint32, val float32) {
// PoolAvgMaxUpdate updates the AvgMax values based on current neuron values.
// pi = global pool index.
func PoolAvgMaxUpdate(pi, di, ni uint32) {
- PoolAvgMaxUpdateVar(AMCaSpkP, pi, di, math32.Abs(Neurons[ni, di, avgMaxToNeuron[AMCaSpkP]]))
- PoolAvgMaxUpdateVar(AMCaSpkD, pi, di, math32.Abs(Neurons[ni, di, avgMaxToNeuron[AMCaSpkD]]))
+ PoolAvgMaxUpdateVar(AMCaP, pi, di, math32.Abs(Neurons[ni, di, avgMaxToNeuron[AMCaP]]))
+ PoolAvgMaxUpdateVar(AMCaD, pi, di, math32.Abs(Neurons[ni, di, avgMaxToNeuron[AMCaD]]))
PoolAvgMaxUpdateVar(AMSpkMax, pi, di, math32.Abs(Neurons[ni, di, avgMaxToNeuron[AMSpkMax]]))
PoolAvgMaxUpdateVar(AMAct, pi, di, math32.Abs(Neurons[ni, di, avgMaxToNeuron[AMAct]]))
PoolAvgMaxUpdateVar(AMGeInt, pi, di, math32.Abs(Neurons[ni, di, avgMaxToNeuron[AMGeInt]]))
@@ -328,9 +328,9 @@ func PoolIntVarName(vi uint32) string {
return vr.String() + "_" + am.String()
}
-// TestValues returns a map of CaSpkD.Avg, which provides an
+// TestValues returns a map of CaD.Avg, which provides an
// integrated summary of pool activity for testing
func PoolTestValues(pi, di uint32, layKey string, vals map[string]float32) {
- vals[layKey+" CaSpkD Avg"] = PoolAvgMax(AMCaSpkD, AMCycle, Avg, pi, di)
+ vals[layKey+" CaD Avg"] = PoolAvgMax(AMCaD, AMCycle, Avg, pi, di)
}
diff --git a/axon/rubicon-layer.go b/axon/rubicon-layer.go
index fcee24e60..ce15e43ab 100644
--- a/axon/rubicon-layer.go
+++ b/axon/rubicon-layer.go
@@ -240,7 +240,7 @@ func (ly *Layer) LDTDefaults() {
lp.Inhib.Pool.On.SetBool(false)
lp.Acts.Decay.Act = 1
lp.Acts.Decay.Glong = 1
- lp.Acts.Decay.LearnCa = 1 // uses CaSpkD as a readout!
+ lp.Acts.Decay.LearnCa = 1 // uses CaD as a readout!
lp.Learn.TrgAvgAct.RescaleOn.SetBool(false)
// lp.Rubicon.Thr = 0.2
// lp.Rubicon.Gain = 2
@@ -254,7 +254,7 @@ func (ly *Layer) LDTDefaults() {
func (ly *LayerParams) VSPatchDefaults() {
ly.Acts.Decay.Act = 1
ly.Acts.Decay.Glong = 1
- ly.Acts.Decay.LearnCa = 1 // uses CaSpkD as a readout!
+ ly.Acts.Decay.LearnCa = 1 // uses CaD as a readout!
ly.Inhib.Pool.On.SetBool(true)
ly.Inhib.Layer.On.SetBool(true)
ly.Inhib.Layer.Gi = 0.5
diff --git a/axon/rubicon-layer.goal b/axon/rubicon-layer.goal
index 32c94b06b..a9ab1c021 100644
--- a/axon/rubicon-layer.goal
+++ b/axon/rubicon-layer.goal
@@ -238,7 +238,7 @@ func (ly *Layer) LDTDefaults() {
lp.Inhib.Pool.On.SetBool(false)
lp.Acts.Decay.Act = 1
lp.Acts.Decay.Glong = 1
- lp.Acts.Decay.LearnCa = 1 // uses CaSpkD as a readout!
+ lp.Acts.Decay.LearnCa = 1 // uses CaD as a readout!
lp.Learn.TrgAvgAct.RescaleOn.SetBool(false)
// lp.Rubicon.Thr = 0.2
// lp.Rubicon.Gain = 2
@@ -252,7 +252,7 @@ func (ly *Layer) LDTDefaults() {
func (ly *LayerParams) VSPatchDefaults() {
ly.Acts.Decay.Act = 1
ly.Acts.Decay.Glong = 1
- ly.Acts.Decay.LearnCa = 1 // uses CaSpkD as a readout!
+ ly.Acts.Decay.LearnCa = 1 // uses CaD as a readout!
ly.Inhib.Pool.On.SetBool(true)
ly.Inhib.Layer.On.SetBool(true)
ly.Inhib.Layer.Gi = 0.5
diff --git a/axon/rubicon-net.go b/axon/rubicon-net.go
index f0ae17caa..38c98bd04 100644
--- a/axon/rubicon-net.go
+++ b/axon/rubicon-net.go
@@ -425,7 +425,7 @@ func (nt *Network) AddSCLayer2D(prefix string, nNeurY, nNeurX int) *Layer {
ly.Inhib.Pool.On.SetBool(false)
ly.Acts.Decay.Act = 1 // key for rapid updating
ly.Acts.Decay.Glong = 0.0
- ly.Acts.Decay.LearnCa = 1.0 // uses CaSpkD as a readout -- clear
+ ly.Acts.Decay.LearnCa = 1.0 // uses CaD as a readout -- clear
ly.Acts.Decay.OnRew.SetBool(true)
ly.Acts.KNa.TrialSlow.SetBool(true)
ly.Acts.KNa.Slow.Max = 0.05 // 0.1 enough to fully inhibit over several trials
@@ -449,7 +449,7 @@ func (nt *Network) AddSCLayer4D(prefix string, nPoolsY, nPoolsX, nNeurY, nNeurX
ly.Inhib.Pool.Gi = 1.2
ly.Acts.Decay.Act = 1 // key for rapid updating
ly.Acts.Decay.Glong = 0.0
- ly.Acts.Decay.LearnCa = 1.0 // uses CaSpkD as a readout -- clear
+ ly.Acts.Decay.LearnCa = 1.0 // uses CaD as a readout -- clear
ly.Acts.Decay.OnRew.SetBool(true)
ly.Acts.KNa.TrialSlow.SetBool(true)
ly.Acts.KNa.Slow.Max = 1
diff --git a/axon/rubicon-path.go b/axon/rubicon-path.go
index 89ed70924..e7acd00e6 100644
--- a/axon/rubicon-path.go
+++ b/axon/rubicon-path.go
@@ -8,7 +8,7 @@ package axon
// BLAPathParams has parameters for basolateral amygdala learning.
// Learning is driven by the Tr trace as function of ACh * Send Act
-// recorded prior to US, and at US, recv unit delta: CaSpkP - SpkPrv
+// recorded prior to US, and at US, recv unit delta: CaP - SpkPrv
// times normalized GeIntNorm for recv unit credit assignment.
// The Learn.Trace.Tau time constant determines trace updating over trials
// when ACh is above threshold -- this determines strength of second-order
diff --git a/axon/rubicon.go b/axon/rubicon.go
index 46c8690d2..87736e243 100644
--- a/axon/rubicon.go
+++ b/axon/rubicon.go
@@ -904,7 +904,7 @@ func (rp *Rubicon) Step(di uint32, rnd randx.Rand) {
}
// SetGoalMaintFromLayer sets the GoalMaint global state variable
-// from the average activity (CaSpkD) of the given layer name.
+// from the average activity (CaD) of the given layer name.
// GoalMaint is normalized 0-1 based on the given max activity level,
// with anything out of range clamped to 0-1 range.
// Returns (and logs) an error if layer name not found.
@@ -916,7 +916,7 @@ func (rp *Rubicon) SetGoalMaintFromLayer(di uint32, net *Network, layName string
return err
}
lpi := ly.Params.PoolIndex(0)
- act := PoolAvgMax(AMCaSpkD, AMCycle, Avg, lpi, di)
+ act := PoolAvgMax(AMCaD, AMCycle, Avg, lpi, di)
gm := float32(0)
if act > maxAct {
gm = 1
@@ -927,7 +927,7 @@ func (rp *Rubicon) SetGoalMaintFromLayer(di uint32, net *Network, layName string
return nil
}
-// DecodeFromLayer decodes value and variance from the average activity (CaSpkD)
+// DecodeFromLayer decodes value and variance from the average activity (CaD)
// of the given layer name. Use for decoding PVposEst and Var, and PVnegEst and Var
func (rp *Rubicon) DecodeFromLayer(di uint32, net *Network, layName string) (val, vr float32, err error) {
ly := net.LayerByName(layName)
@@ -936,7 +936,7 @@ func (rp *Rubicon) DecodeFromLayer(di uint32, net *Network, layName string) (val
slog.Error(err.Error())
return
}
- ly.UnitValues(&rp.decodeActs, "CaSpkD", int(di))
+ ly.UnitValues(&rp.decodeActs, "CaD", int(di))
val = ly.Params.Acts.PopCode.Decode(rp.decodeActs)
vr = ly.Params.Acts.PopCode.Uncertainty(val, rp.decodeActs)
return
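
DecodeFromLayer keeps its signature and now reads the renamed CaD variable internally. A hedged usage fragment, assuming it is called from sim code with rp, net, and di in scope; the layer name is a placeholder (the doc comment suggests layers used for the PVposEst / PVnegEst estimates):

```go
// Hypothetical readout of a population-coded estimate from a layer's
// CaD activity; "PVposEst" stands in for whatever layer the sim decodes.
val, vr, err := rp.DecodeFromLayer(di, net, "PVposEst")
if err == nil {
	fmt.Printf("decoded value: %.3f (uncertainty: %.3f)\n", val, vr)
}
```
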
diff --git a/axon/rubicon.goal b/axon/rubicon.goal
index bfc305ecd..4e287663d 100644
--- a/axon/rubicon.goal
+++ b/axon/rubicon.goal
@@ -900,7 +900,7 @@ func (rp *Rubicon) Step(di uint32, rnd randx.Rand) {
}
// SetGoalMaintFromLayer sets the GoalMaint global state variable
-// from the average activity (CaSpkD) of the given layer name.
+// from the average activity (CaD) of the given layer name.
// GoalMaint is normalized 0-1 based on the given max activity level,
// with anything out of range clamped to 0-1 range.
// Returns (and logs) an error if layer name not found.
@@ -912,7 +912,7 @@ func (rp *Rubicon) SetGoalMaintFromLayer(di uint32, net *Network, layName string
return err
}
lpi := ly.Params.PoolIndex(0)
- act := PoolAvgMax(AMCaSpkD, AMCycle, Avg, lpi, di)
+ act := PoolAvgMax(AMCaD, AMCycle, Avg, lpi, di)
gm := float32(0)
if act > maxAct {
gm = 1
@@ -923,7 +923,7 @@ func (rp *Rubicon) SetGoalMaintFromLayer(di uint32, net *Network, layName string
return nil
}
-// DecodeFromLayer decodes value and variance from the average activity (CaSpkD)
+// DecodeFromLayer decodes value and variance from the average activity (CaD)
// of the given layer name. Use for decoding PVposEst and Var, and PVnegEst and Var
func (rp *Rubicon) DecodeFromLayer(di uint32, net *Network, layName string) (val, vr float32, err error) {
ly := net.LayerByName(layName)
@@ -932,7 +932,7 @@ func (rp *Rubicon) DecodeFromLayer(di uint32, net *Network, layName string) (val
slog.Error(err.Error())
return
}
- ly.UnitValues(&rp.decodeActs, "CaSpkD", int(di))
+ ly.UnitValues(&rp.decodeActs, "CaD", int(di))
val = ly.Params.Acts.PopCode.Decode(rp.decodeActs)
vr = ly.Params.Acts.PopCode.Uncertainty(val, rp.decodeActs)
return
diff --git a/axon/shaders/ApplyExtsNeuron.wgsl b/axon/shaders/ApplyExtsNeuron.wgsl
index e8dbb6460..6d3f09218 100644
--- a/axon/shaders/ApplyExtsNeuron.wgsl
+++ b/axon/shaders/ApplyExtsNeuron.wgsl
@@ -477,7 +477,7 @@ const ViewTimesN: ViewTimes = 7;
const DAModTypesN: DAModTypes = 4;
const ValenceTypesN: ValenceTypes = 3;
const NeuronFlagsN: NeuronFlags = 9;
-const NeuronVarsN: NeuronVars = 90;
+const NeuronVarsN: NeuronVars = 89;
const NeuronAvgVarsN: NeuronAvgVars = 7;
const NeuronIndexVarsN: NeuronIndexVars = 3;
const PathTypesN: PathTypes = 12;
@@ -782,7 +782,7 @@ const LayerRewPredNeg: LayerVars = 10;
///////////// import: "learn-path.go"
///////////// import: "learn.go"
-struct CaLrnParams {
+struct LearnCaParams {
Norm: f32,
SpkVGCC: i32,
SpkVgccCa: f32,
@@ -815,7 +815,7 @@ struct RLRateParams {
pad: i32,
}
struct LearnNeurParams {
- CaLearn: CaLrnParams,
+ CaLearn: LearnCaParams,
CaSpk: NeurCaParams,
LrnNMDA: NMDAParams,
TrgAvgAct: TrgAvgActParams,
@@ -971,82 +971,81 @@ const ISI: NeuronVars = 10;
const ISIAvg: NeuronVars = 11;
const Ext: NeuronVars = 12;
const Target: NeuronVars = 13;
-const CaSpkM: NeuronVars = 14;
-const CaSpkP: NeuronVars = 15;
-const CaSpkD: NeuronVars = 16;
-const CaSpkPM: NeuronVars = 17;
-const CaLrn: NeuronVars = 18;
-const NrnCaM: NeuronVars = 19;
-const NrnCaP: NeuronVars = 20;
-const NrnCaD: NeuronVars = 21;
-const CaDiff: NeuronVars = 22;
-const RLRate: NeuronVars = 23;
-const GnmdaSyn: NeuronVars = 24;
-const Gnmda: NeuronVars = 25;
-const GnmdaLrn: NeuronVars = 26;
-const GnmdaMaint: NeuronVars = 27;
-const NmdaCa: NeuronVars = 28;
-const Gvgcc: NeuronVars = 29;
-const VgccM: NeuronVars = 30;
-const VgccH: NeuronVars = 31;
-const VgccCa: NeuronVars = 32;
-const VgccCaInt: NeuronVars = 33;
-const Burst: NeuronVars = 34;
-const BurstPrv: NeuronVars = 35;
-const CtxtGe: NeuronVars = 36;
-const CtxtGeRaw: NeuronVars = 37;
-const CtxtGeOrig: NeuronVars = 38;
-const GgabaB: NeuronVars = 39;
-const GABAB: NeuronVars = 40;
-const GABABx: NeuronVars = 41;
-const Gak: NeuronVars = 42;
-const SSGiDend: NeuronVars = 43;
-const GknaMed: NeuronVars = 44;
-const GknaSlow: NeuronVars = 45;
-const Gkir: NeuronVars = 46;
-const KirM: NeuronVars = 47;
-const Gsk: NeuronVars = 48;
-const SKCaIn: NeuronVars = 49;
-const SKCaR: NeuronVars = 50;
-const SKCaM: NeuronVars = 51;
-const Gmahp: NeuronVars = 52;
-const MahpN: NeuronVars = 53;
-const Gsahp: NeuronVars = 54;
-const SahpCa: NeuronVars = 55;
-const SahpN: NeuronVars = 56;
-const ActM: NeuronVars = 57;
-const ActP: NeuronVars = 58;
-const SpkSt1: NeuronVars = 59;
-const SpkSt2: NeuronVars = 60;
-const SpkMax: NeuronVars = 61;
-const SpkMaxCa: NeuronVars = 62;
-const SpkBin0: NeuronVars = 63;
-const SpkBin1: NeuronVars = 64;
-const SpkBin2: NeuronVars = 65;
-const SpkBin3: NeuronVars = 66;
-const SpkBin4: NeuronVars = 67;
-const SpkBin5: NeuronVars = 68;
-const SpkBin6: NeuronVars = 69;
-const SpkBin7: NeuronVars = 70;
-const SpkPrv: NeuronVars = 71;
-const GeNoise: NeuronVars = 72;
-const GeNoiseP: NeuronVars = 73;
-const GiNoise: NeuronVars = 74;
-const GiNoiseP: NeuronVars = 75;
-const GeExt: NeuronVars = 76;
-const GeRaw: NeuronVars = 77;
-const GeSyn: NeuronVars = 78;
-const GiRaw: NeuronVars = 79;
-const GiSyn: NeuronVars = 80;
-const GeInt: NeuronVars = 81;
-const GeIntNorm: NeuronVars = 82;
-const GiInt: NeuronVars = 83;
-const GModRaw: NeuronVars = 84;
-const GModSyn: NeuronVars = 85;
-const SMaintP: NeuronVars = 86;
-const GMaintRaw: NeuronVars = 87;
-const GMaintSyn: NeuronVars = 88;
-const NeurFlags: NeuronVars = 89;
+const CaM: NeuronVars = 14;
+const CaP: NeuronVars = 15;
+const CaD: NeuronVars = 16;
+const LearnCa: NeuronVars = 17;
+const LearnCaM: NeuronVars = 18;
+const LearnCaP: NeuronVars = 19;
+const LearnCaD: NeuronVars = 20;
+const CaDiff: NeuronVars = 21;
+const RLRate: NeuronVars = 22;
+const GnmdaSyn: NeuronVars = 23;
+const Gnmda: NeuronVars = 24;
+const GnmdaLrn: NeuronVars = 25;
+const GnmdaMaint: NeuronVars = 26;
+const NmdaCa: NeuronVars = 27;
+const Gvgcc: NeuronVars = 28;
+const VgccM: NeuronVars = 29;
+const VgccH: NeuronVars = 30;
+const VgccCa: NeuronVars = 31;
+const VgccCaInt: NeuronVars = 32;
+const Burst: NeuronVars = 33;
+const BurstPrv: NeuronVars = 34;
+const CtxtGe: NeuronVars = 35;
+const CtxtGeRaw: NeuronVars = 36;
+const CtxtGeOrig: NeuronVars = 37;
+const GgabaB: NeuronVars = 38;
+const GABAB: NeuronVars = 39;
+const GABABx: NeuronVars = 40;
+const Gak: NeuronVars = 41;
+const SSGiDend: NeuronVars = 42;
+const GknaMed: NeuronVars = 43;
+const GknaSlow: NeuronVars = 44;
+const Gkir: NeuronVars = 45;
+const KirM: NeuronVars = 46;
+const Gsk: NeuronVars = 47;
+const SKCaIn: NeuronVars = 48;
+const SKCaR: NeuronVars = 49;
+const SKCaM: NeuronVars = 50;
+const Gmahp: NeuronVars = 51;
+const MahpN: NeuronVars = 52;
+const Gsahp: NeuronVars = 53;
+const SahpCa: NeuronVars = 54;
+const SahpN: NeuronVars = 55;
+const ActM: NeuronVars = 56;
+const ActP: NeuronVars = 57;
+const Beta1: NeuronVars = 58;
+const Beta2: NeuronVars = 59;
+const SpkMax: NeuronVars = 60;
+const SpkMaxCa: NeuronVars = 61;
+const SpkBin0: NeuronVars = 62;
+const SpkBin1: NeuronVars = 63;
+const SpkBin2: NeuronVars = 64;
+const SpkBin3: NeuronVars = 65;
+const SpkBin4: NeuronVars = 66;
+const SpkBin5: NeuronVars = 67;
+const SpkBin6: NeuronVars = 68;
+const SpkBin7: NeuronVars = 69;
+const SpkPrv: NeuronVars = 70;
+const GeNoise: NeuronVars = 71;
+const GeNoiseP: NeuronVars = 72;
+const GiNoise: NeuronVars = 73;
+const GiNoiseP: NeuronVars = 74;
+const GeExt: NeuronVars = 75;
+const GeRaw: NeuronVars = 76;
+const GeSyn: NeuronVars = 77;
+const GiRaw: NeuronVars = 78;
+const GiSyn: NeuronVars = 79;
+const GeInt: NeuronVars = 80;
+const GeIntNorm: NeuronVars = 81;
+const GiInt: NeuronVars = 82;
+const GModRaw: NeuronVars = 83;
+const GModSyn: NeuronVars = 84;
+const SMaintP: NeuronVars = 85;
+const GMaintRaw: NeuronVars = 86;
+const GMaintSyn: NeuronVars = 87;
+const NeurFlags: NeuronVars = 88;
alias NeuronAvgVars = i32; //enums:enum
const ActAvg: NeuronAvgVars = 0;
const AvgPct: NeuronAvgVars = 1;
@@ -1177,8 +1176,8 @@ const AMMinus: AvgMaxPhases = 1;
const AMPlus: AvgMaxPhases = 2;
const AMPrev: AvgMaxPhases = 3;
alias AvgMaxVars = i32; //enums:enum -trim-prefix AM
-const AMCaSpkP: AvgMaxVars = 0;
-const AMCaSpkD: AvgMaxVars = 1;
+const AMCaP: AvgMaxVars = 0;
+const AMCaD: AvgMaxVars = 1;
const AMSpkMax: AvgMaxVars = 2;
const AMAct: AvgMaxVars = 3;
const AMGeInt: AvgMaxVars = 4;
@@ -1187,7 +1186,7 @@ const AMAvgDif: AvgMaxVars = 6;
const poolFloatAvgMaxStart = InhibVarsN;
const PoolVarsN = poolFloatAvgMaxStart + InhibVars(i32(AvgMaxVarsN)*i32(AvgMaxN)*i32(AvgMaxPhasesN));
const PoolIntVarsTot = PoolIntAvgMaxStart + PoolIntVars(i32(AvgMaxVarsN)*i32(AvgMaxN));
-const avgMaxToNeuron = array(CaSpkP, CaSpkD, SpkMax, Act, GeInt, GiInt);
+const avgMaxToNeuron = array(CaP, CaD, SpkMax, Act, GeInt, GiInt);
///////////// import: "rand.go"
alias RandFunIndex = u32;
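
The renumbering above reflects the renames in this commit: CaSpk{M,P,D} become Ca{M,P,D}, CaLrn and NrnCa{M,P,D} become LearnCa and LearnCa{M,P,D}, CaSpkPM is removed, and SpkSt1/SpkSt2 are replaced by the Beta1/Beta2 snapshot variables. As a sketch only, a hypothetical Go helper for updating saved variable names (the one-to-one SpkSt -> Beta mapping is an assumption):

package example

// oldToNewNeuronVar maps pre-rename neuron variable names to the names used in
// this commit. CaSpkPM has no direct replacement and is omitted; mapping
// SpkSt1/SpkSt2 to Beta1/Beta2 is an assumption based on the new variables.
var oldToNewNeuronVar = map[string]string{
	"CaSpkM": "CaM",
	"CaSpkP": "CaP",
	"CaSpkD": "CaD",
	"CaLrn":  "LearnCa",
	"NrnCaM": "LearnCaM",
	"NrnCaP": "LearnCaP",
	"NrnCaD": "LearnCaD",
	"SpkSt1": "Beta1",
	"SpkSt2": "Beta2",
}

// renameNeuronVar returns the post-rename name for a neuron variable,
// passing through names that did not change.
func renameNeuronVar(name string) string {
	if nw, ok := oldToNewNeuronVar[name]; ok {
		return nw
	}
	return name
}
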
diff --git a/axon/shaders/Beta1Neuron.wgsl b/axon/shaders/Beta1Neuron.wgsl
new file mode 100644
index 000000000..d7e55cce7
--- /dev/null
+++ b/axon/shaders/Beta1Neuron.wgsl
@@ -0,0 +1,1386 @@
+// Code generated by "gosl"; DO NOT EDIT
+// kernel: Beta1Neuron
+
+// // Layers are all the layer parameters.
+@group(0) @binding(0)
+var Layers: array;
+@group(0) @binding(1)
+var Paths: array;
+// // NetworkIxs have indexes and sizes for entire network (one only).
+@group(1) @binding(0)
+var NetworkIxs: array;
+@group(1) @binding(1)
+var NeuronIxs: array;
+@group(1) @binding(2)
+var SynapseIxs: array;
+@group(1) @binding(3)
+var PathSendCon: array;
+@group(1) @binding(4)
+var RecvPathIxs: array;
+@group(1) @binding(5)
+var PathRecvCon: array;
+@group(1) @binding(6)
+var RecvSynIxs: array;
+// // Ctx is the current context state (one only).
+@group(2) @binding(0)
+var Ctx: array;
+@group(2) @binding(1)
+var Neurons: array;
+@group(2) @binding(2)
+var NeuronAvgs: array;
+@group(2) @binding(3)
+var LayerStates: array;
+@group(2) @binding(4)
+var GlobalScalars: array;
+@group(2) @binding(5)
+var GlobalVectors: array;
+@group(2) @binding(6)
+var Exts: array;
+// // Pools are the [PoolVars] float32 state values for layer and sub-pool inhibition, // Including the float32 AvgMax values by Phase and variable: use [AvgMaxVarIndex]. // [Layer * Pools][PoolVars+AvgMax][Data]
+@group(3) @binding(0)
+var Pools: array;
+@group(3) @binding(1)
+var PoolsInt: array;
+@group(3) @binding(2)
+var PathGBuf: array;
+@group(3) @binding(3)
+var PathGSyns: array;
+@group(3) @binding(4)
+var Synapses: array;
+@group(3) @binding(5)
+var SynapseTraces: array;
+
+alias GPUVars = i32;
+
+@compute @workgroup_size(64, 1, 1)
+fn main(@builtin(global_invocation_id) idx: vec3<u32>) {
+ Beta1Neuron(idx.x);
+}
+
+fn IndexU322D(s0: u32, s1: u32, i0: u32, i1: u32) -> u32 {
+ return u32(2) + s0 * i0 + s1 * i1;
+}
+
+fn IndexU321D(s0: u32, i0: u32) -> u32 {
+ return u32(1) + s0 * i0;
+}
+
+fn IndexF323D(s0: f32, s1: f32, s2: f32, i0: u32, i1: u32, i2: u32) -> u32 {
+ return u32(3) + bitcast<u32>(s0) * i0 + bitcast<u32>(s1) * i1 + bitcast<u32>(s2) * i2;
+}
+
+fn IndexF322D(s0: f32, s1: f32, i0: u32, i1: u32) -> u32 {
+ return u32(2) + bitcast<u32>(s0) * i0 + bitcast<u32>(s1) * i1;
+}
+
+fn IndexI323D(s0: i32, s1: i32, s2: i32, i0: u32, i1: u32, i2: u32) -> u32 {
+ return u32(3) + u32(s0) * i0 + u32(s1) * i1 + u32(s2) * i2;
+}
+
+
+///////////// import: "vars.go"
+
+///////////// import: "act-layer.go"
+fn LayerParams_Beta1Neuron(ly: ptr<function,LayerParams>, ctx: ptr<function,Context>, ni: u32,di: u32) {
+ Neurons[IndexF323D(Neurons[0], Neurons[1], Neurons[2], u32(ni),u32(di),u32(Beta1))] = Neurons[IndexF323D(Neurons[0], Neurons[1], Neurons[2], u32(ni),u32(di),u32(CaP))];
+}
+
+///////////// import: "act-net.go"
+fn Beta1Neuron(i: u32) { //gosl:kernel
+ var ctx = Ctx[0];
+ var di = Context_DataIndex(&ctx, i);
+ var ni = Context_ItemIndex(&ctx, i);
+ var li = NeuronIxs[IndexU322D(NeuronIxs[0], NeuronIxs[1], u32(ni),u32(NrnLayIndex))];
+ var layers=Layers[li]; LayerParams_Beta1Neuron(&layers, &ctx, ni, di);
+ Ctx[0] = ctx;
+}
+
+///////////// import: "act-path.go"
+alias PathGTypes = i32; //enums:enum
+const ExcitatoryG: PathGTypes = 0;
+const InhibitoryG: PathGTypes = 1;
+const ModulatoryG: PathGTypes = 2;
+const MaintG: PathGTypes = 3;
+const ContextG: PathGTypes = 4;
+struct SynComParams {
+ GType: PathGTypes,
+ Delay: u32,
+ MaxDelay: u32,
+ DelLen: u32,
+}
+struct PathScaleParams {
+ Rel: f32,
+ Abs: f32,
+ pad: f32,
+ pad1: f32,
+}
+
+///////////// import: "act.go"
+struct SpikeParams {
+ Thr: f32,
+ VmR: f32,
+ Tr: i32,
+ RTau: f32,
+ Exp: i32,
+ ExpSlope: f32,
+ ExpThr: f32,
+ MaxHz: f32,
+ ISITau: f32,
+ ISIDt: f32,
+ RDt: f32,
+ pad: i32,
+}
+struct DendParams {
+ GbarExp: f32,
+ GbarR: f32,
+ SSGi: f32,
+ HasMod: i32,
+ ModGain: f32,
+ ModACh: i32,
+ ModBase: f32,
+ pad: i32,
+}
+struct ActInitParams {
+ Vm: f32,
+ Act: f32,
+ GeBase: f32,
+ GiBase: f32,
+ GeVar: f32,
+ GiVar: f32,
+ pad: i32,
+ pad1: i32,
+}
+struct DecayParams {
+ Act: f32,
+ Glong: f32,
+ AHP: f32,
+ LearnCa: f32,
+ OnRew: i32,
+ pad: f32,
+ pad1: f32,
+ pad2: f32,
+}
+struct DtParams {
+ Integ: f32,
+ VmTau: f32,
+ VmDendTau: f32,
+ VmSteps: i32,
+ GeTau: f32,
+ GiTau: f32,
+ IntTau: f32,
+ LongAvgTau: f32,
+ MaxCycStart: i32,
+ VmDt: f32,
+ VmDendDt: f32,
+ DtStep: f32,
+ GeDt: f32,
+ GiDt: f32,
+ IntDt: f32,
+ LongAvgDt: f32,
+}
+struct SpikeNoiseParams {
+ On: i32,
+ GeHz: f32,
+ Ge: f32,
+ GiHz: f32,
+ Gi: f32,
+ MaintGe: i32,
+ GeExpInt: f32,
+ GiExpInt: f32,
+}
+struct ClampParams {
+ IsInput: i32,
+ IsTarget: i32,
+ Ge: f32,
+ Add: i32,
+ ErrThr: f32,
+ pad: f32,
+ pad1: f32,
+ pad2: f32,
+}
+struct SMaintParams {
+ On: i32,
+ NNeurons: f32,
+ Gbar: f32,
+ Inhib: f32,
+ ISI: F32,
+}
+struct PopCodeParams {
+ On: i32,
+ Ge: f32,
+ Min: f32,
+ Max: f32,
+ MinAct: f32,
+ MinSigma: f32,
+ MaxSigma: f32,
+ Clip: i32,
+}
+struct ActParams {
+ Spikes: SpikeParams,
+ Dend: DendParams,
+ Init: ActInitParams,
+ Decay: DecayParams,
+ Dt: DtParams,
+ Gbar: Chans,
+ Erev: Chans,
+ Clamp: ClampParams,
+ Noise: SpikeNoiseParams,
+ VmRange: F32,
+ Mahp: MahpParams,
+ Sahp: SahpParams,
+ KNa: KNaMedSlow,
+ Kir: KirParams,
+ NMDA: NMDAParams,
+ MaintNMDA: NMDAParams,
+ GabaB: GABABParams,
+ VGCC: VGCCParams,
+ AK: AKsParams,
+ SKCa: SKCaParams,
+ SMaint: SMaintParams,
+ PopCode: PopCodeParams,
+}
+
+///////////// import: "chans-ak.go"
+struct AKsParams {
+ Gbar: f32,
+ Hf: f32,
+ Mf: f32,
+ Voff: f32,
+ Vmax: f32,
+ pad: i32,
+ pad1: i32,
+ pad2: i32,
+}
+
+///////////// import: "chans-chans.go"
+struct Chans {
+ E: f32,
+ L: f32,
+ I: f32,
+ K: f32,
+}
+
+///////////// import: "chans-gabab.go"
+struct GABABParams {
+ Gbar: f32,
+ RiseTau: f32,
+ DecayTau: f32,
+ Gbase: f32,
+ GiSpike: f32,
+ MaxTime: f32,
+ TauFact: f32,
+ RiseDt: f32,
+ DecayDt: f32,
+ pad: f32,
+ pad1: f32,
+ pad2: f32,
+}
+
+///////////// import: "chans-kir.go"
+struct KirParams {
+ Gbar: f32,
+ MinfOff: f32,
+ MinfTau: f32,
+ RiseOff: f32,
+ RiseTau: f32,
+ DecayOff: f32,
+ DecayTau: f32,
+ Mrest: f32,
+}
+
+///////////// import: "chans-kna.go"
+struct KNaParams {
+ On: i32,
+ Rise: f32,
+ Max: f32,
+ Tau: f32,
+ Dt: f32,
+ pad: i32,
+ pad1: i32,
+ pad2: i32,
+}
+struct KNaMedSlow {
+ On: i32,
+ TrialSlow: i32,
+ pad: i32,
+ pad1: i32,
+ Med: KNaParams,
+ Slow: KNaParams,
+}
+
+///////////// import: "chans-mahp.go"
+struct MahpParams {
+ Gbar: f32,
+ Voff: f32,
+ Vslope: f32,
+ TauMax: f32,
+ Tadj: f32,
+ DtMax: f32,
+ pad: i32,
+ pad2: i32,
+}
+
+///////////// import: "chans-nmda.go"
+struct NMDAParams {
+ Gbar: f32,
+ Tau: f32,
+ ITau: f32,
+ MgC: f32,
+ Voff: f32,
+ Dt: f32,
+ IDt: f32,
+ MgFact: f32,
+}
+
+///////////// import: "chans-sahp.go"
+struct SahpParams {
+ Gbar: f32,
+ CaTau: f32,
+ Off: f32,
+ Slope: f32,
+ TauMax: f32,
+ CaDt: f32,
+ DtMax: f32,
+ pad: i32,
+}
+
+///////////// import: "chans-skca.go"
+struct SKCaParams {
+ Gbar: f32,
+ C50: f32,
+ ActTau: f32,
+ DeTau: f32,
+ KCaR: f32,
+ CaRDecayTau: f32,
+ CaInThr: f32,
+ CaInTau: f32,
+ ActDt: f32,
+ DeDt: f32,
+ CaRDecayDt: f32,
+ CaInDt: f32,
+}
+
+///////////// import: "chans-vgcc.go"
+struct VGCCParams {
+ Gbar: f32,
+ Ca: f32,
+ pad: i32,
+ pad1: i32,
+}
+
+///////////// import: "context.go"
+struct Context {
+ NData: u32,
+ Mode: i32,
+ Testing: i32,
+ Phase: i32,
+ PlusPhase: i32,
+ PhaseCycle: i32,
+ Cycle: i32,
+ ThetaCycles: i32,
+ CyclesTotal: i32,
+ Time: f32,
+ TrialsTotal: i32,
+ TimePerCycle: f32,
+ SlowInterval: i32,
+ SlowCounter: i32,
+ pad: i32,
+ pad1: i32,
+ RandCounter: RandCounter,
+}
+fn Context_ItemIndex(ctx: ptr<function,Context>, idx: u32) -> u32 {
+ return idx / (*ctx).NData;
+}
+fn Context_DataIndex(ctx: ptr<function,Context>, idx: u32) -> u32 {
+ return idx % (*ctx).NData;
+}
+
+///////////// import: "deep-layer.go"
+struct BurstParams {
+ ThrRel: f32,
+ ThrAbs: f32,
+ pad: f32,
+ pad1: f32,
+}
+struct CTParams {
+ GeGain: f32,
+ DecayTau: f32,
+ OFCposPT: i32,
+ DecayDt: f32,
+}
+struct PulvParams {
+ DriveScale: f32,
+ FullDriveAct: f32,
+ DriveLayIndex: i32,
+ pad: f32,
+}
+
+///////////// import: "deep-path.go"
+
+///////////// import: "enumgen.go"
+const PathGTypesN: PathGTypes = 5;
+const GlobalScalarVarsN: GlobalScalarVars = 57;
+const GlobalVectorVarsN: GlobalVectorVars = 10;
+const GPUVarsN: GPUVars = 22;
+const LayerTypesN: LayerTypes = 30;
+const LayerVarsN: LayerVars = 11;
+const ViewTimesN: ViewTimes = 7;
+const DAModTypesN: DAModTypes = 4;
+const ValenceTypesN: ValenceTypes = 3;
+const NeuronFlagsN: NeuronFlags = 9;
+const NeuronVarsN: NeuronVars = 89;
+const NeuronAvgVarsN: NeuronAvgVars = 7;
+const NeuronIndexVarsN: NeuronIndexVars = 3;
+const PathTypesN: PathTypes = 12;
+const GPLayerTypesN: GPLayerTypes = 3;
+const PoolIntVarsN: PoolIntVars = 10;
+const AvgMaxN: AvgMax = 2;
+const AvgMaxPhasesN: AvgMaxPhases = 4;
+const AvgMaxVarsN: AvgMaxVars = 7;
+const SynapseVarsN: SynapseVars = 5;
+const SynapseTraceVarsN: SynapseTraceVars = 3;
+const SynapseIndexVarsN: SynapseIndexVars = 3;
+
+///////////// import: "fsfffb-enumgen.go"
+const InhibVarsN: InhibVars = 16;
+
+///////////// import: "fsfffb-fsfffb.go"
+struct GiParams {
+ On: i32,
+ Gi: f32,
+ FB: f32,
+ FSTau: f32,
+ SS: f32,
+ SSfTau: f32,
+ SSiTau: f32,
+ FS0: f32,
+ FFAvgTau: f32,
+ FFPrv: f32,
+ ClampExtMin: f32,
+ FSDt: f32,
+ SSfDt: f32,
+ SSiDt: f32,
+ FFAvgDt: f32,
+ pad: f32,
+}
+
+///////////// import: "fsfffb-inhib.go"
+alias InhibVars = i32; //enums:enum
+const FFsRaw: InhibVars = 0;
+const FBsRaw: InhibVars = 1;
+const GeExtRaw: InhibVars = 2;
+const FFs: InhibVars = 3;
+const FBs: InhibVars = 4;
+const GeExts: InhibVars = 5;
+const FSi: InhibVars = 6;
+const SSi: InhibVars = 7;
+const SSf: InhibVars = 8;
+const FSGi: InhibVars = 9;
+const SSGi: InhibVars = 10;
+const TotalGi: InhibVars = 11;
+const GiOrig: InhibVars = 12;
+const LayGi: InhibVars = 13;
+const FFAvg: InhibVars = 14;
+const FFAvgPrv: InhibVars = 15;
+
+///////////// import: "globals.go"
+alias GlobalScalarVars = i32; //enums:enum
+const GvRew: GlobalScalarVars = 0;
+const GvHasRew: GlobalScalarVars = 1;
+const GvRewPred: GlobalScalarVars = 2;
+const GvPrevPred: GlobalScalarVars = 3;
+const GvHadRew: GlobalScalarVars = 4;
+const GvDA: GlobalScalarVars = 5;
+const GvDAtonic: GlobalScalarVars = 6;
+const GvACh: GlobalScalarVars = 7;
+const GvNE: GlobalScalarVars = 8;
+const GvSer: GlobalScalarVars = 9;
+const GvAChRaw: GlobalScalarVars = 10;
+const GvGoalMaint: GlobalScalarVars = 11;
+const GvVSMatrixJustGated: GlobalScalarVars = 12;
+const GvVSMatrixHasGated: GlobalScalarVars = 13;
+const GvCuriosityPoolGated: GlobalScalarVars = 14;
+const GvTime: GlobalScalarVars = 15;
+const GvEffort: GlobalScalarVars = 16;
+const GvUrgencyRaw: GlobalScalarVars = 17;
+const GvUrgency: GlobalScalarVars = 18;
+const GvHasPosUS: GlobalScalarVars = 19;
+const GvHadPosUS: GlobalScalarVars = 20;
+const GvNegUSOutcome: GlobalScalarVars = 21;
+const GvHadNegUSOutcome: GlobalScalarVars = 22;
+const GvPVposSum: GlobalScalarVars = 23;
+const GvPVpos: GlobalScalarVars = 24;
+const GvPVnegSum: GlobalScalarVars = 25;
+const GvPVneg: GlobalScalarVars = 26;
+const GvPVposEst: GlobalScalarVars = 27;
+const GvPVposVar: GlobalScalarVars = 28;
+const GvPVnegEst: GlobalScalarVars = 29;
+const GvPVnegVar: GlobalScalarVars = 30;
+const GvGoalDistEst: GlobalScalarVars = 31;
+const GvGoalDistPrev: GlobalScalarVars = 32;
+const GvProgressRate: GlobalScalarVars = 33;
+const GvGiveUpUtility: GlobalScalarVars = 34;
+const GvContUtility: GlobalScalarVars = 35;
+const GvGiveUpTiming: GlobalScalarVars = 36;
+const GvContTiming: GlobalScalarVars = 37;
+const GvGiveUpProgress: GlobalScalarVars = 38;
+const GvContProgress: GlobalScalarVars = 39;
+const GvGiveUpSum: GlobalScalarVars = 40;
+const GvContSum: GlobalScalarVars = 41;
+const GvGiveUpProb: GlobalScalarVars = 42;
+const GvGiveUp: GlobalScalarVars = 43;
+const GvGaveUp: GlobalScalarVars = 44;
+const GvVSPatchPos: GlobalScalarVars = 45;
+const GvVSPatchPosThr: GlobalScalarVars = 46;
+const GvVSPatchPosRPE: GlobalScalarVars = 47;
+const GvVSPatchPosSum: GlobalScalarVars = 48;
+const GvVSPatchPosPrev: GlobalScalarVars = 49;
+const GvVSPatchPosVar: GlobalScalarVars = 50;
+const GvLHbDip: GlobalScalarVars = 51;
+const GvLHbBurst: GlobalScalarVars = 52;
+const GvLHbPVDA: GlobalScalarVars = 53;
+const GvCeMpos: GlobalScalarVars = 54;
+const GvCeMneg: GlobalScalarVars = 55;
+const GvVtaDA: GlobalScalarVars = 56;
+const MaxGlobalVecN = 16;
+alias GlobalVectorVars = i32; //enums:enum
+const GvCost: GlobalVectorVars = 0;
+const GvCostRaw: GlobalVectorVars = 1;
+const GvUSneg: GlobalVectorVars = 2;
+const GvUSnegRaw: GlobalVectorVars = 3;
+const GvDrives: GlobalVectorVars = 4;
+const GvUSpos: GlobalVectorVars = 5;
+const GvVSPatchD1: GlobalVectorVars = 6;
+const GvVSPatchD2: GlobalVectorVars = 7;
+const GvOFCposPTMaint: GlobalVectorVars = 8;
+const GvVSMatrixPoolGated: GlobalVectorVars = 9;
+
+///////////// import: "hip_paths.go"
+struct HipPathParams {
+ Hebb: f32,
+ Err: f32,
+ SAvgCor: f32,
+ SAvgThr: f32,
+ SNominal: f32,
+ pad: f32,
+ pad1: f32,
+ pad2: f32,
+}
+
+///////////// import: "inhib.go"
+struct ActAvgParams {
+ Nominal: f32,
+ AdaptGi: i32,
+ Offset: f32,
+ HiTol: f32,
+ LoTol: f32,
+ AdaptRate: f32,
+ pad: f32,
+ pad1: f32,
+}
+struct InhibParams {
+ ActAvg: ActAvgParams,
+ Layer: GiParams,
+ Pool: GiParams,
+}
+
+///////////// import: "init-layer.go"
+
+///////////// import: "kinase-params.go"
+struct CaDtParams { //types:add
+ MTau: f32,
+ PTau: f32,
+ DTau: f32,
+ MDt: f32,
+ PDt: f32,
+ DDt: f32,
+ pad: i32,
+ pad1: i32,
+}
+struct NeurCaParams {
+ SpikeG: f32,
+ SynTau: f32,
+ SynDt: f32,
+ pad: i32,
+ Dt: CaDtParams,
+}
+struct SynCaParams { //types:add
+ CaScale: f32,
+ pad: i32,
+ pad1: i32,
+ pad2: i32,
+ Dt: CaDtParams,
+}
+struct BinWeights { //types:add
+ Bin0: f32,
+ Bin1: f32,
+ Bin2: f32,
+ Bin3: f32,
+ Bin4: f32,
+ Bin5: f32,
+ Bin6: f32,
+ Bin7: f32,
+}
+struct SynCaLinear { //types:add
+ CaP: BinWeights,
+ CaD: BinWeights,
+ CaGain: f32,
+ pad: f32,
+ pad1: f32,
+ pad2: f32,
+}
+
+///////////// import: "layerparams.go"
+struct LayerIndexes {
+ NPools: u32,
+ NeurSt: u32,
+ NNeurons: u32,
+ RecvSt: u32,
+ RecvN: u32,
+ SendSt: u32,
+ SendN: u32,
+ ExtsSt: u32,
+ ShpPlY: i32,
+ ShpPlX: i32,
+ ShpUnY: i32,
+ ShpUnX: i32,
+}
+struct LayerInhibIndexes {
+ Index1: i32,
+ Index2: i32,
+ Index3: i32,
+ Index4: i32,
+}
+struct LayerParams {
+ Type: LayerTypes,
+ Index: u32,
+ MaxData: u32,
+ PoolSt: u32,
+ Acts: ActParams,
+ Inhib: InhibParams,
+ LayInhib: LayerInhibIndexes,
+ Learn: LearnNeurParams,
+ Bursts: BurstParams,
+ CT: CTParams,
+ Pulv: PulvParams,
+ Matrix: MatrixParams,
+ GP: GPParams,
+ LDT: LDTParams,
+ VTA: VTAParams,
+ RWPred: RWPredParams,
+ RWDa: RWDaParams,
+ TDInteg: TDIntegParams,
+ TDDa: TDDaParams,
+ Indexes: LayerIndexes,
+}
+
+///////////// import: "layertypes.go"
+alias LayerTypes = i32; //enums:enum
+const SuperLayer: LayerTypes = 0;
+const InputLayer: LayerTypes = 1;
+const TargetLayer: LayerTypes = 2;
+const CompareLayer: LayerTypes = 3;
+const CTLayer: LayerTypes = 4;
+const PulvinarLayer: LayerTypes = 5;
+const TRNLayer: LayerTypes = 6;
+const PTMaintLayer: LayerTypes = 7;
+const PTPredLayer: LayerTypes = 8;
+const MatrixLayer: LayerTypes = 9;
+const STNLayer: LayerTypes = 10;
+const GPLayer: LayerTypes = 11;
+const BGThalLayer: LayerTypes = 12;
+const VSGatedLayer: LayerTypes = 13;
+const BLALayer: LayerTypes = 14;
+const CeMLayer: LayerTypes = 15;
+const VSPatchLayer: LayerTypes = 16;
+const LHbLayer: LayerTypes = 17;
+const DrivesLayer: LayerTypes = 18;
+const UrgencyLayer: LayerTypes = 19;
+const USLayer: LayerTypes = 20;
+const PVLayer: LayerTypes = 21;
+const LDTLayer: LayerTypes = 22;
+const VTALayer: LayerTypes = 23;
+const RewLayer: LayerTypes = 24;
+const RWPredLayer: LayerTypes = 25;
+const RWDaLayer: LayerTypes = 26;
+const TDPredLayer: LayerTypes = 27;
+const TDIntegLayer: LayerTypes = 28;
+const TDDaLayer: LayerTypes = 29;
+
+///////////// import: "layervars.go"
+alias LayerVars = i32; //enums:enum
+const LayerActMAvg: LayerVars = 0;
+const LayerActPAvg: LayerVars = 1;
+const LayerAvgMaxGeM: LayerVars = 2;
+const LayerAvgMaxGiM: LayerVars = 3;
+const LayerGiMult: LayerVars = 4;
+const LayerPhaseDiff: LayerVars = 5;
+const LayerPhaseDiffAvg: LayerVars = 6;
+const LayerPhaseDiffVar: LayerVars = 7;
+const LayerRT: LayerVars = 8;
+const LayerRewPredPos: LayerVars = 9;
+const LayerRewPredNeg: LayerVars = 10;
+
+///////////// import: "learn-layer.go"
+
+///////////// import: "learn-net.go"
+
+///////////// import: "learn-path.go"
+
+///////////// import: "learn.go"
+struct LearnCaParams {
+ Norm: f32,
+ SpkVGCC: i32,
+ SpkVgccCa: f32,
+ VgccTau: f32,
+ Dt: CaDtParams,
+ UpdateThr: f32,
+ VgccDt: f32,
+ NormInv: f32,
+ pad: i32,
+}
+struct TrgAvgActParams {
+ GiBaseInit: f32,
+ RescaleOn: i32,
+ ErrLRate: f32,
+ SynScaleRate: f32,
+ SubMean: f32,
+ Permute: i32,
+ Pool: i32,
+ pad: i32,
+ TrgRange: F32,
+}
+struct RLRateParams {
+ On: i32,
+ SigmoidLinear: i32,
+ SigmoidMin: f32,
+ Diff: i32,
+ SpkThr: f32,
+ DiffThr: f32,
+ Min: f32,
+ pad: i32,
+}
+struct LearnNeurParams {
+ CaLearn: LearnCaParams,
+ CaSpk: NeurCaParams,
+ LrnNMDA: NMDAParams,
+ TrgAvgAct: TrgAvgActParams,
+ RLRate: RLRateParams,
+ NeuroMod: NeuroModParams,
+}
+struct SWtInitParams {
+ SPct: f32,
+ Mean: f32,
+ Var: f32,
+ Sym: i32,
+}
+struct SWtAdaptParams {
+ On: i32,
+ LRate: f32,
+ SubMean: f32,
+ SigGain: f32,
+}
+struct SWtParams {
+ Init: SWtInitParams,
+ Adapt: SWtAdaptParams,
+ Limit: F32,
+}
+struct LRateParams {
+ Base: f32,
+ Sched: f32,
+ Mod: f32,
+ Eff: f32,
+}
+struct TraceParams {
+ Tau: f32,
+ SubMean: f32,
+ LearnThr: f32,
+ Dt: f32,
+}
+struct LRateMod {
+ On: i32,
+ Base: f32,
+ pad: i32,
+ pad1: i32,
+ Range: F32,
+}
+struct HebbParams {
+ On: i32,
+ Up: f32,
+ Down: f32,
+ pad: f32,
+}
+struct LearnSynParams {
+ Learn: i32,
+ pad: i32,
+ pad1: i32,
+ pad2: i32,
+ LRate: LRateParams,
+ Trace: TraceParams,
+ KinaseCa: SynCaLinear,
+ Hebb: HebbParams,
+}
+
+///////////// import: "looper.go"
+alias ViewTimes = i32; //enums:enum
+const Cycle: ViewTimes = 0;
+const FastSpike: ViewTimes = 1;
+const Gamma: ViewTimes = 2;
+const Beta: ViewTimes = 3;
+const Alpha: ViewTimes = 4;
+const Phase: ViewTimes = 5;
+const Theta: ViewTimes = 6;
+
+///////////// import: "math32-fastexp.go"
+
+///////////// import: "minmax-avgmax.go"
+const MaxFloat32: f32 = 3.402823466e+38;
+const MinFloat32: f32 = 1.175494351e-38;
+struct AvgMax32 {
+ Avg: f32,
+ Max: f32,
+ Sum: f32,
+ MaxIndex: i32,
+ N: i32,
+ pad: i32,
+ pad1: i32,
+ pad2: i32,
+}
+
+///////////// import: "minmax-minmax32.go"
+struct F32 {
+ Min: f32,
+ Max: f32,
+ pad: i32,
+ pad1: i32, // for gpu use
+}
+
+///////////// import: "network.go"
+struct NetworkIndexes {
+ MaxData: u32,
+ MaxDelay: u32,
+ NLayers: u32,
+ NNeurons: u32,
+ NPools: u32,
+ NPaths: u32,
+ NSyns: u32,
+ RubiconNPosUSs: u32,
+ RubiconNCosts: u32,
+ RubiconNNegUSs: u32,
+ GPUMaxBuffFloats: u32,
+ GPUSynCaBanks: u32,
+}
+
+///////////// import: "neuromod.go"
+alias DAModTypes = i32; //enums:enum
+const NoDAMod: DAModTypes = 0;
+const D1Mod: DAModTypes = 1;
+const D2Mod: DAModTypes = 2;
+const D1AbsMod: DAModTypes = 3;
+alias ValenceTypes = i32; //enums:enum
+const Positive: ValenceTypes = 0;
+const Negative: ValenceTypes = 1;
+const Cost: ValenceTypes = 2;
+struct NeuroModParams {
+ DAMod: DAModTypes,
+ Valence: ValenceTypes,
+ DAModGain: f32,
+ DALRateSign: i32,
+ DALRateMod: f32,
+ AChLRateMod: f32,
+ AChDisInhib: f32,
+ BurstGain: f32,
+ DipGain: f32,
+ pad: f32,
+ pad1: f32,
+ pad2: f32,
+}
+
+///////////// import: "neuron.go"
+alias NeuronFlags = i32; //enums:enum
+const NeuronOff: NeuronFlags = 1;
+const NeuronHasExt: NeuronFlags = 2;
+const NeuronHasTarg: NeuronFlags = 4;
+const NeuronHasCmpr: NeuronFlags = 8;
+alias NeuronVars = i32; //enums:enum
+const Spike: NeuronVars = 0;
+const Spiked: NeuronVars = 1;
+const Act: NeuronVars = 2;
+const ActInt: NeuronVars = 3;
+const Ge: NeuronVars = 4;
+const Gi: NeuronVars = 5;
+const Gk: NeuronVars = 6;
+const Inet: NeuronVars = 7;
+const Vm: NeuronVars = 8;
+const VmDend: NeuronVars = 9;
+const ISI: NeuronVars = 10;
+const ISIAvg: NeuronVars = 11;
+const Ext: NeuronVars = 12;
+const Target: NeuronVars = 13;
+const CaM: NeuronVars = 14;
+const CaP: NeuronVars = 15;
+const CaD: NeuronVars = 16;
+const LearnCa: NeuronVars = 17;
+const LearnCaM: NeuronVars = 18;
+const LearnCaP: NeuronVars = 19;
+const LearnCaD: NeuronVars = 20;
+const CaDiff: NeuronVars = 21;
+const RLRate: NeuronVars = 22;
+const GnmdaSyn: NeuronVars = 23;
+const Gnmda: NeuronVars = 24;
+const GnmdaLrn: NeuronVars = 25;
+const GnmdaMaint: NeuronVars = 26;
+const NmdaCa: NeuronVars = 27;
+const Gvgcc: NeuronVars = 28;
+const VgccM: NeuronVars = 29;
+const VgccH: NeuronVars = 30;
+const VgccCa: NeuronVars = 31;
+const VgccCaInt: NeuronVars = 32;
+const Burst: NeuronVars = 33;
+const BurstPrv: NeuronVars = 34;
+const CtxtGe: NeuronVars = 35;
+const CtxtGeRaw: NeuronVars = 36;
+const CtxtGeOrig: NeuronVars = 37;
+const GgabaB: NeuronVars = 38;
+const GABAB: NeuronVars = 39;
+const GABABx: NeuronVars = 40;
+const Gak: NeuronVars = 41;
+const SSGiDend: NeuronVars = 42;
+const GknaMed: NeuronVars = 43;
+const GknaSlow: NeuronVars = 44;
+const Gkir: NeuronVars = 45;
+const KirM: NeuronVars = 46;
+const Gsk: NeuronVars = 47;
+const SKCaIn: NeuronVars = 48;
+const SKCaR: NeuronVars = 49;
+const SKCaM: NeuronVars = 50;
+const Gmahp: NeuronVars = 51;
+const MahpN: NeuronVars = 52;
+const Gsahp: NeuronVars = 53;
+const SahpCa: NeuronVars = 54;
+const SahpN: NeuronVars = 55;
+const ActM: NeuronVars = 56;
+const ActP: NeuronVars = 57;
+const Beta1: NeuronVars = 58;
+const Beta2: NeuronVars = 59;
+const SpkMax: NeuronVars = 60;
+const SpkMaxCa: NeuronVars = 61;
+const SpkBin0: NeuronVars = 62;
+const SpkBin1: NeuronVars = 63;
+const SpkBin2: NeuronVars = 64;
+const SpkBin3: NeuronVars = 65;
+const SpkBin4: NeuronVars = 66;
+const SpkBin5: NeuronVars = 67;
+const SpkBin6: NeuronVars = 68;
+const SpkBin7: NeuronVars = 69;
+const SpkPrv: NeuronVars = 70;
+const GeNoise: NeuronVars = 71;
+const GeNoiseP: NeuronVars = 72;
+const GiNoise: NeuronVars = 73;
+const GiNoiseP: NeuronVars = 74;
+const GeExt: NeuronVars = 75;
+const GeRaw: NeuronVars = 76;
+const GeSyn: NeuronVars = 77;
+const GiRaw: NeuronVars = 78;
+const GiSyn: NeuronVars = 79;
+const GeInt: NeuronVars = 80;
+const GeIntNorm: NeuronVars = 81;
+const GiInt: NeuronVars = 82;
+const GModRaw: NeuronVars = 83;
+const GModSyn: NeuronVars = 84;
+const SMaintP: NeuronVars = 85;
+const GMaintRaw: NeuronVars = 86;
+const GMaintSyn: NeuronVars = 87;
+const NeurFlags: NeuronVars = 88;
+alias NeuronAvgVars = i32; //enums:enum
+const ActAvg: NeuronAvgVars = 0;
+const AvgPct: NeuronAvgVars = 1;
+const TrgAvg: NeuronAvgVars = 2;
+const DTrgAvg: NeuronAvgVars = 3;
+const AvgDif: NeuronAvgVars = 4;
+const GeBase: NeuronAvgVars = 5;
+const GiBase: NeuronAvgVars = 6;
+alias NeuronIndexVars = i32; //enums:enum
+const NrnNeurIndex: NeuronIndexVars = 0;
+const NrnLayIndex: NeuronIndexVars = 1;
+const NrnSubPool: NeuronIndexVars = 2;
+
+///////////// import: "pathparams.go"
+const StartOff: i32 = 0;
+const Nitems: i32 = 1;
+const StartNN: i32 = 2;
+struct StartN {
+ Start: u32,
+ N: u32,
+ pad: u32,
+ pad1: u32, // todo: see if we can do without these?
+}
+struct PathIndexes {
+ RecvLayer: u32,
+ RecvNeurSt: u32,
+ RecvNeurN: u32,
+ SendLayer: u32,
+ SendNeurSt: u32,
+ SendNeurN: u32,
+ SynapseSt: u32,
+ SendConSt: u32,
+ RecvConSt: u32,
+ RecvSynSt: u32,
+ NPathNeurSt: u32,
+ pad: u32,
+}
+struct GScaleValues {
+ Scale: f32,
+ Rel: f32,
+ pad: f32,
+ pad1: f32,
+}
+struct PathParams {
+ Type: PathTypes,
+ Index: u32,
+ pad: i32,
+ pad1: i32,
+ Indexes: PathIndexes,
+ Com: SynComParams,
+ PathScale: PathScaleParams,
+ SWts: SWtParams,
+ Learn: LearnSynParams,
+ GScale: GScaleValues,
+ RLPred: RLPredPathParams,
+ Matrix: MatrixPathParams,
+ BLA: BLAPathParams,
+ Hip: HipPathParams,
+}
+
+///////////// import: "pathtypes.go"
+alias PathTypes = i32; //enums:enum
+const ForwardPath: PathTypes = 0;
+const BackPath: PathTypes = 1;
+const LateralPath: PathTypes = 2;
+const InhibPath: PathTypes = 3;
+const CTCtxtPath: PathTypes = 4;
+const RWPath: PathTypes = 5;
+const TDPredPath: PathTypes = 6;
+const BLAPath: PathTypes = 7;
+const HipPath: PathTypes = 8;
+const VSPatchPath: PathTypes = 9;
+const VSMatrixPath: PathTypes = 10;
+const DSMatrixPath: PathTypes = 11;
+
+///////////// import: "pcore-layer.go"
+struct MatrixParams {
+ GateThr: f32,
+ IsVS: i32,
+ OtherMatrixIndex: i32,
+ ThalLay1Index: i32,
+ ThalLay2Index: i32,
+ ThalLay3Index: i32,
+ ThalLay4Index: i32,
+ ThalLay5Index: i32,
+ ThalLay6Index: i32,
+ pad: i32,
+ pad1: i32,
+ pad2: i32,
+}
+alias GPLayerTypes = i32; //enums:enum
+const GPePr: GPLayerTypes = 0;
+const GPeAk: GPLayerTypes = 1;
+const GPi: GPLayerTypes = 2;
+struct GPParams {
+ GPType: GPLayerTypes,
+ pad: u32,
+ pad1: u32,
+ pad2: u32,
+}
+
+///////////// import: "pcore-path.go"
+struct MatrixPathParams {
+ Credit: f32,
+ BasePF: f32,
+ Delta: f32,
+ VSRewLearn: i32,
+}
+
+///////////// import: "pool.go"
+alias PoolIntVars = i32; //enums:enum
+const PoolNeurSt: PoolIntVars = 0;
+const PoolNeurEd: PoolIntVars = 1;
+const PoolLayerIdx: PoolIntVars = 2;
+const PoolIsLayer: PoolIntVars = 3;
+const Clamped: PoolIntVars = 4;
+const PoolGated: PoolIntVars = 5;
+const FFsRawInt: PoolIntVars = 6;
+const FBsRawInt: PoolIntVars = 7;
+const GeExtRawInt: PoolIntVars = 8;
+const PoolIntAvgMaxStart: PoolIntVars = 9;
+alias AvgMax = i32; //enums:enum
+const Avg: AvgMax = 0;
+const Max: AvgMax = 1;
+alias AvgMaxPhases = i32; //enums:enum -trim-prefix AM
+const AMCycle: AvgMaxPhases = 0;
+const AMMinus: AvgMaxPhases = 1;
+const AMPlus: AvgMaxPhases = 2;
+const AMPrev: AvgMaxPhases = 3;
+alias AvgMaxVars = i32; //enums:enum -trim-prefix AM
+const AMCaP: AvgMaxVars = 0;
+const AMCaD: AvgMaxVars = 1;
+const AMSpkMax: AvgMaxVars = 2;
+const AMAct: AvgMaxVars = 3;
+const AMGeInt: AvgMaxVars = 4;
+const AMGiInt: AvgMaxVars = 5;
+const AMAvgDif: AvgMaxVars = 6;
+const poolFloatAvgMaxStart = InhibVarsN;
+const PoolVarsN = poolFloatAvgMaxStart + InhibVars(i32(AvgMaxVarsN)*i32(AvgMaxN)*i32(AvgMaxPhasesN));
+const PoolIntVarsTot = PoolIntAvgMaxStart + PoolIntVars(i32(AvgMaxVarsN)*i32(AvgMaxN));
+const avgMaxToNeuron = array(CaP, CaD, SpkMax, Act, GeInt, GiInt);
+
+///////////// import: "rand.go"
+alias RandFunIndex = u32;
+const RandFunActPGe: RandFunIndex = 0;
+const RandFunActPGi: RandFunIndex = 1;
+const RandFunActSMaintP: RandFunIndex = 2;
+const RandFunIndexN: RandFunIndex = 3;
+
+///////////// import: "rl-layer.go"
+struct RWPredParams {
+ PredRange: F32,
+}
+struct RWDaParams {
+ TonicGe: f32,
+ RWPredLayIndex: i32,
+ pad: u32,
+ pad1: u32,
+}
+struct TDIntegParams {
+ Discount: f32,
+ PredGain: f32,
+ TDPredLayIndex: i32,
+ pad: u32,
+}
+struct TDDaParams {
+ TonicGe: f32,
+ TDIntegLayIndex: i32,
+ pad: u32,
+ pad1: u32,
+}
+
+///////////// import: "rl-path.go"
+struct RLPredPathParams {
+ OppSignLRate: f32,
+ DaTol: f32,
+ pad: f32,
+ pad1: f32,
+}
+
+///////////// import: "rubicon-layer.go"
+struct LDTParams {
+ SrcThr: f32,
+ Rew: i32,
+ MaintInhib: f32,
+ SrcLay1Index: i32,
+ SrcLay2Index: i32,
+ SrcLay3Index: i32,
+ SrcLay4Index: i32,
+ pad: f32,
+}
+struct VTAParams {
+ CeMGain: f32,
+ LHbGain: f32,
+ AChThr: f32,
+ pad: f32,
+}
+
+///////////// import: "rubicon-path.go"
+struct BLAPathParams {
+ NegDeltaLRate: f32,
+ AChThr: f32,
+ USTrace: f32,
+ pad: f32,
+}
+
+///////////// import: "rubicon.go"
+
+///////////// import: "stats.go"
+
+///////////// import: "synapse.go"
+alias SynapseVars = i32; //enums:enum
+const Wt: SynapseVars = 0;
+const LWt: SynapseVars = 1;
+const SWt: SynapseVars = 2;
+const DWt: SynapseVars = 3;
+const DSWt: SynapseVars = 4;
+alias SynapseTraceVars = i32; //enums:enum
+const Tr: SynapseTraceVars = 0;
+const DTr: SynapseTraceVars = 1;
+const DiDWt: SynapseTraceVars = 2;
+alias SynapseIndexVars = i32; //enums:enum
+const SynRecvIndex: SynapseIndexVars = 0;
+const SynSendIndex: SynapseIndexVars = 1;
+const SynPathIndex: SynapseIndexVars = 2;
+
+///////////// import: "slrand.wgsl"
+fn Philox2x32round(counter: su64, key: u32) -> su64 {
+ let mul = Uint32Mul64(u32(0xD256D193), counter.x);
+ var ctr: su64;
+ ctr.x = mul.y ^ key ^ counter.y;
+ ctr.y = mul.x;
+ return ctr;
+}
+fn Philox2x32bumpkey(key: u32) -> u32 {
+ return key + u32(0x9E3779B9);
+}
+fn Philox2x32(counter: su64, key: u32) -> vec2<u32> {
+ var ctr = Philox2x32round(counter, key); // 1
+ var ky = Philox2x32bumpkey(key);
+ ctr = Philox2x32round(ctr, ky); // 2
+ ky = Philox2x32bumpkey(ky);
+ ctr = Philox2x32round(ctr, ky); // 3
+ ky = Philox2x32bumpkey(ky);
+ ctr = Philox2x32round(ctr, ky); // 4
+ ky = Philox2x32bumpkey(ky);
+ ctr = Philox2x32round(ctr, ky); // 5
+ ky = Philox2x32bumpkey(ky);
+ ctr = Philox2x32round(ctr, ky); // 6
+ ky = Philox2x32bumpkey(ky);
+ ctr = Philox2x32round(ctr, ky); // 7
+ ky = Philox2x32bumpkey(ky);
+ ctr = Philox2x32round(ctr, ky); // 8
+ ky = Philox2x32bumpkey(ky);
+ ctr = Philox2x32round(ctr, ky); // 9
+ ky = Philox2x32bumpkey(ky);
+ return Philox2x32round(ctr, ky); // 10
+}
+fn RandUint32Vec2(counter: su64, funcIndex: u32, key: u32) -> vec2<u32> {
+ return Philox2x32(Uint64Add32(counter, funcIndex), key);
+}
+fn RandUint32(counter: su64, funcIndex: u32, key: u32) -> u32 {
+ return Philox2x32(Uint64Add32(counter, funcIndex), key).x;
+}
+fn RandFloat32Vec2(counter: su64, funcIndex: u32, key: u32) -> vec2<f32> {
+ return Uint32ToFloat32Vec2(RandUint32Vec2(counter, funcIndex, key));
+}
+fn RandFloat32(counter: su64, funcIndex: u32, key: u32) -> f32 {
+ return Uint32ToFloat32(RandUint32(counter, funcIndex, key));
+}
+fn RandFloat32Range11Vec2(counter: su64, funcIndex: u32, key: u32) -> vec2<f32> {
+ return Uint32ToFloat32Vec2(RandUint32Vec2(counter, funcIndex, key));
+}
+fn RandFloat32Range11(counter: su64, funcIndex: u32, key: u32) -> f32 {
+ return Uint32ToFloat32Range11(RandUint32(counter, funcIndex, key));
+}
+fn RandBoolP(counter: su64, funcIndex: u32, key: u32, p: f32) -> bool {
+ return (RandFloat32(counter, funcIndex, key) < p);
+}
+fn sincospi(x: f32) -> vec2<f32> {
+ let PIf = 3.1415926535897932;
+ var r: vec2<f32>;
+ r.x = cos(PIf*x);
+ r.y = sin(PIf*x);
+ return r;
+}
+fn RandFloat32NormVec2(counter: su64, funcIndex: u32, key: u32) -> vec2<f32> {
+ let ur = RandUint32Vec2(counter, funcIndex, key);
+ var f = sincospi(Uint32ToFloat32Range11(ur.x));
+ let r = sqrt(-2.0 * log(Uint32ToFloat32(ur.y))); // guaranteed to avoid 0.
+ return f * r;
+}
+fn RandFloat32Norm(counter: su64, funcIndex: u32, key: u32) -> f32 {
+ return RandFloat32Vec2(counter, funcIndex, key).x;
+}
+fn RandUint32N(counter: su64, funcIndex: u32, key: u32, n: u32) -> u32 {
+ let v = RandFloat32(counter, funcIndex, key);
+ return u32(v * f32(n));
+}
+struct RandCounter {
+ Counter: su64,
+ HiSeed: u32,
+ pad: u32,
+}
+fn RandCounter_Reset(ct: ptr<function,RandCounter>) {
+ (*ct).Counter.x = u32(0);
+ (*ct).Counter.y = (*ct).HiSeed;
+}
+fn RandCounter_Seed(ct: ptr<function,RandCounter>, seed: u32) {
+ (*ct).HiSeed = seed;
+ RandCounter_Reset(ct);
+}
+fn RandCounter_Add(ct: ptr<function,RandCounter>, inc: u32) {
+ (*ct).Counter = Uint64Add32((*ct).Counter, inc);
+}
+
+///////////// import: "sltype.wgsl"
+alias su64 = vec2<u32>;
+fn Uint32Mul64(a: u32, b: u32) -> su64 {
+ let LOMASK = (((u32(1))<<16)-1);
+ var r: su64;
+ r.x = a * b; /* full low multiply */
+ let ahi = a >> 16;
+ let alo = a & LOMASK;
+ let bhi = b >> 16;
+ let blo = b & LOMASK;
+ let ahbl = ahi * blo;
+ let albh = alo * bhi;
+ let ahbl_albh = ((ahbl&LOMASK) + (albh&LOMASK));
+ var hit = ahi*bhi + (ahbl>>16) + (albh>>16);
+ hit += ahbl_albh >> 16; /* carry from the sum of lo(ahbl) + lo(albh) ) */
+ /* carry from the sum with alo*blo */
+ if ((r.x >> u32(16)) < (ahbl_albh&LOMASK)) {
+ hit += u32(1);
+ }
+ r.y = hit;
+ return r;
+}
+/*
+fn Uint32Mul64(a: u32, b: u32) -> su64 {
+ return su64(a) * su64(b);
+}
+*/
+fn Uint64Add32(a: su64, b: u32) -> su64 {
+ if (b == 0) {
+ return a;
+ }
+ var s = a;
+ if (s.x > u32(0xffffffff) - b) {
+ s.y++;
+ s.x = (b - 1) - (u32(0xffffffff) - s.x);
+ } else {
+ s.x += b;
+ }
+ return s;
+}
+fn Uint64Incr(a: su64) -> su64 {
+ var s = a;
+ if(s.x == 0xffffffff) {
+ s.y++;
+ s.x = u32(0);
+ } else {
+ s.x++;
+ }
+ return s;
+}
+fn Uint32ToFloat32(val: u32) -> f32 {
+ let factor = f32(1.0) / (f32(u32(0xffffffff)) + f32(1.0));
+ let halffactor = f32(0.5) * factor;
+ var f = f32(val) * factor + halffactor;
+ if (f == 1.0) { // exclude 1
+ return bitcast<f32>(0x3F7FFFFF);
+ }
+ return f;
+}
+fn Uint32ToFloat32Vec2(val: vec2<u32>) -> vec2<f32> {
+ var r: vec2<f32>;
+ r.x = Uint32ToFloat32(val.x);
+ r.y = Uint32ToFloat32(val.y);
+ return r;
+}
+fn Uint32ToFloat32Range11(val: u32) -> f32 {
+ let factor = f32(1.0) / (f32(i32(0x7fffffff)) + f32(1.0));
+ let halffactor = f32(0.5) * factor;
+ return (f32(val) * factor + halffactor);
+}
+fn Uint32ToFloat32Range11Vec2(val: vec2<u32>) -> vec2<f32> {
+ var r: vec2<f32>;
+ r.x = Uint32ToFloat32Range11(val.x);
+ r.y = Uint32ToFloat32Range11(val.y);
+ return r;
+}
\ No newline at end of file
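
The kernel above reduces to one per-neuron assignment, Beta1 = CaP, dispatched over all (neuron, data) pairs; Beta2Neuron below is identical except that it writes Beta2. A schematic CPU-side equivalent using plain slices (the real code indexes the packed Neurons buffer, so the slice layout here is only illustrative):

package example

// betaSnapshot copies the current fast calcium value CaP into a beta-cycle
// snapshot variable (Beta1 or Beta2) for every neuron and data-parallel index.
// caP and snap are [neuron][data] slices standing in for the packed
// Neurons[neuron, data, variable] storage used by the generated kernels.
func betaSnapshot(caP, snap [][]float32) {
	for ni := range caP { // neuron index
		for di := range caP[ni] { // data-parallel index within the neuron
			snap[ni][di] = caP[ni][di]
		}
	}
}
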
diff --git a/axon/shaders/Beta2Neuron.wgsl b/axon/shaders/Beta2Neuron.wgsl
new file mode 100644
index 000000000..95cf1fc6d
--- /dev/null
+++ b/axon/shaders/Beta2Neuron.wgsl
@@ -0,0 +1,1386 @@
+// Code generated by "gosl"; DO NOT EDIT
+// kernel: Beta2Neuron
+
+// // Layers are all the layer parameters.
+@group(0) @binding(0)
+var Layers: array;
+@group(0) @binding(1)
+var Paths: array;
+// // NetworkIxs have indexes and sizes for entire network (one only).
+@group(1) @binding(0)
+var NetworkIxs: array;
+@group(1) @binding(1)
+var NeuronIxs: array;
+@group(1) @binding(2)
+var SynapseIxs: array;
+@group(1) @binding(3)
+var PathSendCon: array;
+@group(1) @binding(4)
+var RecvPathIxs: array;
+@group(1) @binding(5)
+var PathRecvCon: array;
+@group(1) @binding(6)
+var RecvSynIxs: array;
+// // Ctx is the current context state (one only).
+@group(2) @binding(0)
+var Ctx: array;
+@group(2) @binding(1)
+var Neurons: array;
+@group(2) @binding(2)
+var NeuronAvgs: array;
+@group(2) @binding(3)
+var LayerStates: array;
+@group(2) @binding(4)
+var GlobalScalars: array;
+@group(2) @binding(5)
+var GlobalVectors: array;
+@group(2) @binding(6)
+var Exts: array;
+// // Pools are the [PoolVars] float32 state values for layer and sub-pool inhibition, // Including the float32 AvgMax values by Phase and variable: use [AvgMaxVarIndex]. // [Layer * Pools][PoolVars+AvgMax][Data]
+@group(3) @binding(0)
+var Pools: array;
+@group(3) @binding(1)
+var PoolsInt: array;
+@group(3) @binding(2)
+var PathGBuf: array;
+@group(3) @binding(3)
+var PathGSyns: array;
+@group(3) @binding(4)
+var Synapses: array;
+@group(3) @binding(5)
+var SynapseTraces: array;
+
+alias GPUVars = i32;
+
+@compute @workgroup_size(64, 1, 1)
+fn main(@builtin(global_invocation_id) idx: vec3<u32>) {
+ Beta2Neuron(idx.x);
+}
+
+fn IndexU322D(s0: u32, s1: u32, i0: u32, i1: u32) -> u32 {
+ return u32(2) + s0 * i0 + s1 * i1;
+}
+
+fn IndexU321D(s0: u32, i0: u32) -> u32 {
+ return u32(1) + s0 * i0;
+}
+
+fn IndexF323D(s0: f32, s1: f32, s2: f32, i0: u32, i1: u32, i2: u32) -> u32 {
+ return u32(3) + bitcast<u32>(s0) * i0 + bitcast<u32>(s1) * i1 + bitcast<u32>(s2) * i2;
+}
+
+fn IndexF322D(s0: f32, s1: f32, i0: u32, i1: u32) -> u32 {
+ return u32(2) + bitcast<u32>(s0) * i0 + bitcast<u32>(s1) * i1;
+}
+
+fn IndexI323D(s0: i32, s1: i32, s2: i32, i0: u32, i1: u32, i2: u32) -> u32 {
+ return u32(3) + u32(s0) * i0 + u32(s1) * i1 + u32(s2) * i2;
+}
+
+
+///////////// import: "vars.go"
+
+///////////// import: "act-layer.go"
+fn LayerParams_Beta2Neuron(ly: ptr<function,LayerParams>, ctx: ptr<function,Context>, ni: u32,di: u32) {
+ Neurons[IndexF323D(Neurons[0], Neurons[1], Neurons[2], u32(ni),u32(di),u32(Beta2))] = Neurons[IndexF323D(Neurons[0], Neurons[1], Neurons[2], u32(ni),u32(di),u32(CaP))];
+}
+
+///////////// import: "act-net.go"
+fn Beta2Neuron(i: u32) { //gosl:kernel
+ var ctx = Ctx[0];
+ var di = Context_DataIndex(&ctx, i);
+ var ni = Context_ItemIndex(&ctx, i);
+ var li = NeuronIxs[IndexU322D(NeuronIxs[0], NeuronIxs[1], u32(ni),u32(NrnLayIndex))];
+ var layers=Layers[li]; LayerParams_Beta2Neuron(&layers, &ctx, ni, di);
+ Ctx[0] = ctx;
+}
+
+///////////// import: "act-path.go"
+alias PathGTypes = i32; //enums:enum
+const ExcitatoryG: PathGTypes = 0;
+const InhibitoryG: PathGTypes = 1;
+const ModulatoryG: PathGTypes = 2;
+const MaintG: PathGTypes = 3;
+const ContextG: PathGTypes = 4;
+struct SynComParams {
+ GType: PathGTypes,
+ Delay: u32,
+ MaxDelay: u32,
+ DelLen: u32,
+}
+struct PathScaleParams {
+ Rel: f32,
+ Abs: f32,
+ pad: f32,
+ pad1: f32,
+}
+
+///////////// import: "act.go"
+struct SpikeParams {
+ Thr: f32,
+ VmR: f32,
+ Tr: i32,
+ RTau: f32,
+ Exp: i32,
+ ExpSlope: f32,
+ ExpThr: f32,
+ MaxHz: f32,
+ ISITau: f32,
+ ISIDt: f32,
+ RDt: f32,
+ pad: i32,
+}
+struct DendParams {
+ GbarExp: f32,
+ GbarR: f32,
+ SSGi: f32,
+ HasMod: i32,
+ ModGain: f32,
+ ModACh: i32,
+ ModBase: f32,
+ pad: i32,
+}
+struct ActInitParams {
+ Vm: f32,
+ Act: f32,
+ GeBase: f32,
+ GiBase: f32,
+ GeVar: f32,
+ GiVar: f32,
+ pad: i32,
+ pad1: i32,
+}
+struct DecayParams {
+ Act: f32,
+ Glong: f32,
+ AHP: f32,
+ LearnCa: f32,
+ OnRew: i32,
+ pad: f32,
+ pad1: f32,
+ pad2: f32,
+}
+struct DtParams {
+ Integ: f32,
+ VmTau: f32,
+ VmDendTau: f32,
+ VmSteps: i32,
+ GeTau: f32,
+ GiTau: f32,
+ IntTau: f32,
+ LongAvgTau: f32,
+ MaxCycStart: i32,
+ VmDt: f32,
+ VmDendDt: f32,
+ DtStep: f32,
+ GeDt: f32,
+ GiDt: f32,
+ IntDt: f32,
+ LongAvgDt: f32,
+}
+struct SpikeNoiseParams {
+ On: i32,
+ GeHz: f32,
+ Ge: f32,
+ GiHz: f32,
+ Gi: f32,
+ MaintGe: i32,
+ GeExpInt: f32,
+ GiExpInt: f32,
+}
+struct ClampParams {
+ IsInput: i32,
+ IsTarget: i32,
+ Ge: f32,
+ Add: i32,
+ ErrThr: f32,
+ pad: f32,
+ pad1: f32,
+ pad2: f32,
+}
+struct SMaintParams {
+ On: i32,
+ NNeurons: f32,
+ Gbar: f32,
+ Inhib: f32,
+ ISI: F32,
+}
+struct PopCodeParams {
+ On: i32,
+ Ge: f32,
+ Min: f32,
+ Max: f32,
+ MinAct: f32,
+ MinSigma: f32,
+ MaxSigma: f32,
+ Clip: i32,
+}
+struct ActParams {
+ Spikes: SpikeParams,
+ Dend: DendParams,
+ Init: ActInitParams,
+ Decay: DecayParams,
+ Dt: DtParams,
+ Gbar: Chans,
+ Erev: Chans,
+ Clamp: ClampParams,
+ Noise: SpikeNoiseParams,
+ VmRange: F32,
+ Mahp: MahpParams,
+ Sahp: SahpParams,
+ KNa: KNaMedSlow,
+ Kir: KirParams,
+ NMDA: NMDAParams,
+ MaintNMDA: NMDAParams,
+ GabaB: GABABParams,
+ VGCC: VGCCParams,
+ AK: AKsParams,
+ SKCa: SKCaParams,
+ SMaint: SMaintParams,
+ PopCode: PopCodeParams,
+}
+
+///////////// import: "chans-ak.go"
+struct AKsParams {
+ Gbar: f32,
+ Hf: f32,
+ Mf: f32,
+ Voff: f32,
+ Vmax: f32,
+ pad: i32,
+ pad1: i32,
+ pad2: i32,
+}
+
+///////////// import: "chans-chans.go"
+struct Chans {
+ E: f32,
+ L: f32,
+ I: f32,
+ K: f32,
+}
+
+///////////// import: "chans-gabab.go"
+struct GABABParams {
+ Gbar: f32,
+ RiseTau: f32,
+ DecayTau: f32,
+ Gbase: f32,
+ GiSpike: f32,
+ MaxTime: f32,
+ TauFact: f32,
+ RiseDt: f32,
+ DecayDt: f32,
+ pad: f32,
+ pad1: f32,
+ pad2: f32,
+}
+
+///////////// import: "chans-kir.go"
+struct KirParams {
+ Gbar: f32,
+ MinfOff: f32,
+ MinfTau: f32,
+ RiseOff: f32,
+ RiseTau: f32,
+ DecayOff: f32,
+ DecayTau: f32,
+ Mrest: f32,
+}
+
+///////////// import: "chans-kna.go"
+struct KNaParams {
+ On: i32,
+ Rise: f32,
+ Max: f32,
+ Tau: f32,
+ Dt: f32,
+ pad: i32,
+ pad1: i32,
+ pad2: i32,
+}
+struct KNaMedSlow {
+ On: i32,
+ TrialSlow: i32,
+ pad: i32,
+ pad1: i32,
+ Med: KNaParams,
+ Slow: KNaParams,
+}
+
+///////////// import: "chans-mahp.go"
+struct MahpParams {
+ Gbar: f32,
+ Voff: f32,
+ Vslope: f32,
+ TauMax: f32,
+ Tadj: f32,
+ DtMax: f32,
+ pad: i32,
+ pad2: i32,
+}
+
+///////////// import: "chans-nmda.go"
+struct NMDAParams {
+ Gbar: f32,
+ Tau: f32,
+ ITau: f32,
+ MgC: f32,
+ Voff: f32,
+ Dt: f32,
+ IDt: f32,
+ MgFact: f32,
+}
+
+///////////// import: "chans-sahp.go"
+struct SahpParams {
+ Gbar: f32,
+ CaTau: f32,
+ Off: f32,
+ Slope: f32,
+ TauMax: f32,
+ CaDt: f32,
+ DtMax: f32,
+ pad: i32,
+}
+
+///////////// import: "chans-skca.go"
+struct SKCaParams {
+ Gbar: f32,
+ C50: f32,
+ ActTau: f32,
+ DeTau: f32,
+ KCaR: f32,
+ CaRDecayTau: f32,
+ CaInThr: f32,
+ CaInTau: f32,
+ ActDt: f32,
+ DeDt: f32,
+ CaRDecayDt: f32,
+ CaInDt: f32,
+}
+
+///////////// import: "chans-vgcc.go"
+struct VGCCParams {
+ Gbar: f32,
+ Ca: f32,
+ pad: i32,
+ pad1: i32,
+}
+
+///////////// import: "context.go"
+struct Context {
+ NData: u32,
+ Mode: i32,
+ Testing: i32,
+ Phase: i32,
+ PlusPhase: i32,
+ PhaseCycle: i32,
+ Cycle: i32,
+ ThetaCycles: i32,
+ CyclesTotal: i32,
+ Time: f32,
+ TrialsTotal: i32,
+ TimePerCycle: f32,
+ SlowInterval: i32,
+ SlowCounter: i32,
+ pad: i32,
+ pad1: i32,
+ RandCounter: RandCounter,
+}
+fn Context_ItemIndex(ctx: ptr<function,Context>, idx: u32) -> u32 {
+ return idx / (*ctx).NData;
+}
+fn Context_DataIndex(ctx: ptr<function,Context>, idx: u32) -> u32 {
+ return idx % (*ctx).NData;
+}
+
+///////////// import: "deep-layer.go"
+struct BurstParams {
+ ThrRel: f32,
+ ThrAbs: f32,
+ pad: f32,
+ pad1: f32,
+}
+struct CTParams {
+ GeGain: f32,
+ DecayTau: f32,
+ OFCposPT: i32,
+ DecayDt: f32,
+}
+struct PulvParams {
+ DriveScale: f32,
+ FullDriveAct: f32,
+ DriveLayIndex: i32,
+ pad: f32,
+}
+
+///////////// import: "deep-path.go"
+
+///////////// import: "enumgen.go"
+const PathGTypesN: PathGTypes = 5;
+const GlobalScalarVarsN: GlobalScalarVars = 57;
+const GlobalVectorVarsN: GlobalVectorVars = 10;
+const GPUVarsN: GPUVars = 22;
+const LayerTypesN: LayerTypes = 30;
+const LayerVarsN: LayerVars = 11;
+const ViewTimesN: ViewTimes = 7;
+const DAModTypesN: DAModTypes = 4;
+const ValenceTypesN: ValenceTypes = 3;
+const NeuronFlagsN: NeuronFlags = 9;
+const NeuronVarsN: NeuronVars = 89;
+const NeuronAvgVarsN: NeuronAvgVars = 7;
+const NeuronIndexVarsN: NeuronIndexVars = 3;
+const PathTypesN: PathTypes = 12;
+const GPLayerTypesN: GPLayerTypes = 3;
+const PoolIntVarsN: PoolIntVars = 10;
+const AvgMaxN: AvgMax = 2;
+const AvgMaxPhasesN: AvgMaxPhases = 4;
+const AvgMaxVarsN: AvgMaxVars = 7;
+const SynapseVarsN: SynapseVars = 5;
+const SynapseTraceVarsN: SynapseTraceVars = 3;
+const SynapseIndexVarsN: SynapseIndexVars = 3;
+
+///////////// import: "fsfffb-enumgen.go"
+const InhibVarsN: InhibVars = 16;
+
+///////////// import: "fsfffb-fsfffb.go"
+struct GiParams {
+ On: i32,
+ Gi: f32,
+ FB: f32,
+ FSTau: f32,
+ SS: f32,
+ SSfTau: f32,
+ SSiTau: f32,
+ FS0: f32,
+ FFAvgTau: f32,
+ FFPrv: f32,
+ ClampExtMin: f32,
+ FSDt: f32,
+ SSfDt: f32,
+ SSiDt: f32,
+ FFAvgDt: f32,
+ pad: f32,
+}
+
+///////////// import: "fsfffb-inhib.go"
+alias InhibVars = i32; //enums:enum
+const FFsRaw: InhibVars = 0;
+const FBsRaw: InhibVars = 1;
+const GeExtRaw: InhibVars = 2;
+const FFs: InhibVars = 3;
+const FBs: InhibVars = 4;
+const GeExts: InhibVars = 5;
+const FSi: InhibVars = 6;
+const SSi: InhibVars = 7;
+const SSf: InhibVars = 8;
+const FSGi: InhibVars = 9;
+const SSGi: InhibVars = 10;
+const TotalGi: InhibVars = 11;
+const GiOrig: InhibVars = 12;
+const LayGi: InhibVars = 13;
+const FFAvg: InhibVars = 14;
+const FFAvgPrv: InhibVars = 15;
+
+///////////// import: "globals.go"
+alias GlobalScalarVars = i32; //enums:enum
+const GvRew: GlobalScalarVars = 0;
+const GvHasRew: GlobalScalarVars = 1;
+const GvRewPred: GlobalScalarVars = 2;
+const GvPrevPred: GlobalScalarVars = 3;
+const GvHadRew: GlobalScalarVars = 4;
+const GvDA: GlobalScalarVars = 5;
+const GvDAtonic: GlobalScalarVars = 6;
+const GvACh: GlobalScalarVars = 7;
+const GvNE: GlobalScalarVars = 8;
+const GvSer: GlobalScalarVars = 9;
+const GvAChRaw: GlobalScalarVars = 10;
+const GvGoalMaint: GlobalScalarVars = 11;
+const GvVSMatrixJustGated: GlobalScalarVars = 12;
+const GvVSMatrixHasGated: GlobalScalarVars = 13;
+const GvCuriosityPoolGated: GlobalScalarVars = 14;
+const GvTime: GlobalScalarVars = 15;
+const GvEffort: GlobalScalarVars = 16;
+const GvUrgencyRaw: GlobalScalarVars = 17;
+const GvUrgency: GlobalScalarVars = 18;
+const GvHasPosUS: GlobalScalarVars = 19;
+const GvHadPosUS: GlobalScalarVars = 20;
+const GvNegUSOutcome: GlobalScalarVars = 21;
+const GvHadNegUSOutcome: GlobalScalarVars = 22;
+const GvPVposSum: GlobalScalarVars = 23;
+const GvPVpos: GlobalScalarVars = 24;
+const GvPVnegSum: GlobalScalarVars = 25;
+const GvPVneg: GlobalScalarVars = 26;
+const GvPVposEst: GlobalScalarVars = 27;
+const GvPVposVar: GlobalScalarVars = 28;
+const GvPVnegEst: GlobalScalarVars = 29;
+const GvPVnegVar: GlobalScalarVars = 30;
+const GvGoalDistEst: GlobalScalarVars = 31;
+const GvGoalDistPrev: GlobalScalarVars = 32;
+const GvProgressRate: GlobalScalarVars = 33;
+const GvGiveUpUtility: GlobalScalarVars = 34;
+const GvContUtility: GlobalScalarVars = 35;
+const GvGiveUpTiming: GlobalScalarVars = 36;
+const GvContTiming: GlobalScalarVars = 37;
+const GvGiveUpProgress: GlobalScalarVars = 38;
+const GvContProgress: GlobalScalarVars = 39;
+const GvGiveUpSum: GlobalScalarVars = 40;
+const GvContSum: GlobalScalarVars = 41;
+const GvGiveUpProb: GlobalScalarVars = 42;
+const GvGiveUp: GlobalScalarVars = 43;
+const GvGaveUp: GlobalScalarVars = 44;
+const GvVSPatchPos: GlobalScalarVars = 45;
+const GvVSPatchPosThr: GlobalScalarVars = 46;
+const GvVSPatchPosRPE: GlobalScalarVars = 47;
+const GvVSPatchPosSum: GlobalScalarVars = 48;
+const GvVSPatchPosPrev: GlobalScalarVars = 49;
+const GvVSPatchPosVar: GlobalScalarVars = 50;
+const GvLHbDip: GlobalScalarVars = 51;
+const GvLHbBurst: GlobalScalarVars = 52;
+const GvLHbPVDA: GlobalScalarVars = 53;
+const GvCeMpos: GlobalScalarVars = 54;
+const GvCeMneg: GlobalScalarVars = 55;
+const GvVtaDA: GlobalScalarVars = 56;
+const MaxGlobalVecN = 16;
+alias GlobalVectorVars = i32; //enums:enum
+const GvCost: GlobalVectorVars = 0;
+const GvCostRaw: GlobalVectorVars = 1;
+const GvUSneg: GlobalVectorVars = 2;
+const GvUSnegRaw: GlobalVectorVars = 3;
+const GvDrives: GlobalVectorVars = 4;
+const GvUSpos: GlobalVectorVars = 5;
+const GvVSPatchD1: GlobalVectorVars = 6;
+const GvVSPatchD2: GlobalVectorVars = 7;
+const GvOFCposPTMaint: GlobalVectorVars = 8;
+const GvVSMatrixPoolGated: GlobalVectorVars = 9;
+
+///////////// import: "hip_paths.go"
+struct HipPathParams {
+ Hebb: f32,
+ Err: f32,
+ SAvgCor: f32,
+ SAvgThr: f32,
+ SNominal: f32,
+ pad: f32,
+ pad1: f32,
+ pad2: f32,
+}
+
+///////////// import: "inhib.go"
+struct ActAvgParams {
+ Nominal: f32,
+ AdaptGi: i32,
+ Offset: f32,
+ HiTol: f32,
+ LoTol: f32,
+ AdaptRate: f32,
+ pad: f32,
+ pad1: f32,
+}
+struct InhibParams {
+ ActAvg: ActAvgParams,
+ Layer: GiParams,
+ Pool: GiParams,
+}
+
+///////////// import: "init-layer.go"
+
+///////////// import: "kinase-params.go"
+struct CaDtParams { //types:add
+ MTau: f32,
+ PTau: f32,
+ DTau: f32,
+ MDt: f32,
+ PDt: f32,
+ DDt: f32,
+ pad: i32,
+ pad1: i32,
+}
+struct NeurCaParams {
+ SpikeG: f32,
+ SynTau: f32,
+ SynDt: f32,
+ pad: i32,
+ Dt: CaDtParams,
+}
+struct SynCaParams { //types:add
+ CaScale: f32,
+ pad: i32,
+ pad1: i32,
+ pad2: i32,
+ Dt: CaDtParams,
+}
+struct BinWeights { //types:add
+ Bin0: f32,
+ Bin1: f32,
+ Bin2: f32,
+ Bin3: f32,
+ Bin4: f32,
+ Bin5: f32,
+ Bin6: f32,
+ Bin7: f32,
+}
+struct SynCaLinear { //types:add
+ CaP: BinWeights,
+ CaD: BinWeights,
+ CaGain: f32,
+ pad: f32,
+ pad1: f32,
+ pad2: f32,
+}
+
+///////////// import: "layerparams.go"
+struct LayerIndexes {
+ NPools: u32,
+ NeurSt: u32,
+ NNeurons: u32,
+ RecvSt: u32,
+ RecvN: u32,
+ SendSt: u32,
+ SendN: u32,
+ ExtsSt: u32,
+ ShpPlY: i32,
+ ShpPlX: i32,
+ ShpUnY: i32,
+ ShpUnX: i32,
+}
+struct LayerInhibIndexes {
+ Index1: i32,
+ Index2: i32,
+ Index3: i32,
+ Index4: i32,
+}
+struct LayerParams {
+ Type: LayerTypes,
+ Index: u32,
+ MaxData: u32,
+ PoolSt: u32,
+ Acts: ActParams,
+ Inhib: InhibParams,
+ LayInhib: LayerInhibIndexes,
+ Learn: LearnNeurParams,
+ Bursts: BurstParams,
+ CT: CTParams,
+ Pulv: PulvParams,
+ Matrix: MatrixParams,
+ GP: GPParams,
+ LDT: LDTParams,
+ VTA: VTAParams,
+ RWPred: RWPredParams,
+ RWDa: RWDaParams,
+ TDInteg: TDIntegParams,
+ TDDa: TDDaParams,
+ Indexes: LayerIndexes,
+}
+
+///////////// import: "layertypes.go"
+alias LayerTypes = i32; //enums:enum
+const SuperLayer: LayerTypes = 0;
+const InputLayer: LayerTypes = 1;
+const TargetLayer: LayerTypes = 2;
+const CompareLayer: LayerTypes = 3;
+const CTLayer: LayerTypes = 4;
+const PulvinarLayer: LayerTypes = 5;
+const TRNLayer: LayerTypes = 6;
+const PTMaintLayer: LayerTypes = 7;
+const PTPredLayer: LayerTypes = 8;
+const MatrixLayer: LayerTypes = 9;
+const STNLayer: LayerTypes = 10;
+const GPLayer: LayerTypes = 11;
+const BGThalLayer: LayerTypes = 12;
+const VSGatedLayer: LayerTypes = 13;
+const BLALayer: LayerTypes = 14;
+const CeMLayer: LayerTypes = 15;
+const VSPatchLayer: LayerTypes = 16;
+const LHbLayer: LayerTypes = 17;
+const DrivesLayer: LayerTypes = 18;
+const UrgencyLayer: LayerTypes = 19;
+const USLayer: LayerTypes = 20;
+const PVLayer: LayerTypes = 21;
+const LDTLayer: LayerTypes = 22;
+const VTALayer: LayerTypes = 23;
+const RewLayer: LayerTypes = 24;
+const RWPredLayer: LayerTypes = 25;
+const RWDaLayer: LayerTypes = 26;
+const TDPredLayer: LayerTypes = 27;
+const TDIntegLayer: LayerTypes = 28;
+const TDDaLayer: LayerTypes = 29;
+
+///////////// import: "layervars.go"
+alias LayerVars = i32; //enums:enum
+const LayerActMAvg: LayerVars = 0;
+const LayerActPAvg: LayerVars = 1;
+const LayerAvgMaxGeM: LayerVars = 2;
+const LayerAvgMaxGiM: LayerVars = 3;
+const LayerGiMult: LayerVars = 4;
+const LayerPhaseDiff: LayerVars = 5;
+const LayerPhaseDiffAvg: LayerVars = 6;
+const LayerPhaseDiffVar: LayerVars = 7;
+const LayerRT: LayerVars = 8;
+const LayerRewPredPos: LayerVars = 9;
+const LayerRewPredNeg: LayerVars = 10;
+
+///////////// import: "learn-layer.go"
+
+///////////// import: "learn-net.go"
+
+///////////// import: "learn-path.go"
+
+///////////// import: "learn.go"
+struct LearnCaParams {
+ Norm: f32,
+ SpkVGCC: i32,
+ SpkVgccCa: f32,
+ VgccTau: f32,
+ Dt: CaDtParams,
+ UpdateThr: f32,
+ VgccDt: f32,
+ NormInv: f32,
+ pad: i32,
+}
+struct TrgAvgActParams {
+ GiBaseInit: f32,
+ RescaleOn: i32,
+ ErrLRate: f32,
+ SynScaleRate: f32,
+ SubMean: f32,
+ Permute: i32,
+ Pool: i32,
+ pad: i32,
+ TrgRange: F32,
+}
+struct RLRateParams {
+ On: i32,
+ SigmoidLinear: i32,
+ SigmoidMin: f32,
+ Diff: i32,
+ SpkThr: f32,
+ DiffThr: f32,
+ Min: f32,
+ pad: i32,
+}
+struct LearnNeurParams {
+ CaLearn: LearnCaParams,
+ CaSpk: NeurCaParams,
+ LrnNMDA: NMDAParams,
+ TrgAvgAct: TrgAvgActParams,
+ RLRate: RLRateParams,
+ NeuroMod: NeuroModParams,
+}
+struct SWtInitParams {
+ SPct: f32,
+ Mean: f32,
+ Var: f32,
+ Sym: i32,
+}
+struct SWtAdaptParams {
+ On: i32,
+ LRate: f32,
+ SubMean: f32,
+ SigGain: f32,
+}
+struct SWtParams {
+ Init: SWtInitParams,
+ Adapt: SWtAdaptParams,
+ Limit: F32,
+}
+struct LRateParams {
+ Base: f32,
+ Sched: f32,
+ Mod: f32,
+ Eff: f32,
+}
+struct TraceParams {
+ Tau: f32,
+ SubMean: f32,
+ LearnThr: f32,
+ Dt: f32,
+}
+struct LRateMod {
+ On: i32,
+ Base: f32,
+ pad: i32,
+ pad1: i32,
+ Range: F32,
+}
+struct HebbParams {
+ On: i32,
+ Up: f32,
+ Down: f32,
+ pad: f32,
+}
+struct LearnSynParams {
+ Learn: i32,
+ pad: i32,
+ pad1: i32,
+ pad2: i32,
+ LRate: LRateParams,
+ Trace: TraceParams,
+ KinaseCa: SynCaLinear,
+ Hebb: HebbParams,
+}
+
+///////////// import: "looper.go"
+alias ViewTimes = i32; //enums:enum
+const Cycle: ViewTimes = 0;
+const FastSpike: ViewTimes = 1;
+const Gamma: ViewTimes = 2;
+const Beta: ViewTimes = 3;
+const Alpha: ViewTimes = 4;
+const Phase: ViewTimes = 5;
+const Theta: ViewTimes = 6;
+
+///////////// import: "math32-fastexp.go"
+
+///////////// import: "minmax-avgmax.go"
+const MaxFloat32: f32 = 3.402823466e+38;
+const MinFloat32: f32 = 1.175494351e-38;
+struct AvgMax32 {
+ Avg: f32,
+ Max: f32,
+ Sum: f32,
+ MaxIndex: i32,
+ N: i32,
+ pad: i32,
+ pad1: i32,
+ pad2: i32,
+}
+
+///////////// import: "minmax-minmax32.go"
+struct F32 {
+ Min: f32,
+ Max: f32,
+ pad: i32,
+ pad1: i32, // for gpu use
+}
+
+///////////// import: "network.go"
+struct NetworkIndexes {
+ MaxData: u32,
+ MaxDelay: u32,
+ NLayers: u32,
+ NNeurons: u32,
+ NPools: u32,
+ NPaths: u32,
+ NSyns: u32,
+ RubiconNPosUSs: u32,
+ RubiconNCosts: u32,
+ RubiconNNegUSs: u32,
+ GPUMaxBuffFloats: u32,
+ GPUSynCaBanks: u32,
+}
+
+///////////// import: "neuromod.go"
+alias DAModTypes = i32; //enums:enum
+const NoDAMod: DAModTypes = 0;
+const D1Mod: DAModTypes = 1;
+const D2Mod: DAModTypes = 2;
+const D1AbsMod: DAModTypes = 3;
+alias ValenceTypes = i32; //enums:enum
+const Positive: ValenceTypes = 0;
+const Negative: ValenceTypes = 1;
+const Cost: ValenceTypes = 2;
+struct NeuroModParams {
+ DAMod: DAModTypes,
+ Valence: ValenceTypes,
+ DAModGain: f32,
+ DALRateSign: i32,
+ DALRateMod: f32,
+ AChLRateMod: f32,
+ AChDisInhib: f32,
+ BurstGain: f32,
+ DipGain: f32,
+ pad: f32,
+ pad1: f32,
+ pad2: f32,
+}
+
+///////////// import: "neuron.go"
+alias NeuronFlags = i32; //enums:enum
+const NeuronOff: NeuronFlags = 1;
+const NeuronHasExt: NeuronFlags = 2;
+const NeuronHasTarg: NeuronFlags = 4;
+const NeuronHasCmpr: NeuronFlags = 8;
+alias NeuronVars = i32; //enums:enum
+const Spike: NeuronVars = 0;
+const Spiked: NeuronVars = 1;
+const Act: NeuronVars = 2;
+const ActInt: NeuronVars = 3;
+const Ge: NeuronVars = 4;
+const Gi: NeuronVars = 5;
+const Gk: NeuronVars = 6;
+const Inet: NeuronVars = 7;
+const Vm: NeuronVars = 8;
+const VmDend: NeuronVars = 9;
+const ISI: NeuronVars = 10;
+const ISIAvg: NeuronVars = 11;
+const Ext: NeuronVars = 12;
+const Target: NeuronVars = 13;
+const CaM: NeuronVars = 14;
+const CaP: NeuronVars = 15;
+const CaD: NeuronVars = 16;
+const LearnCa: NeuronVars = 17;
+const LearnCaM: NeuronVars = 18;
+const LearnCaP: NeuronVars = 19;
+const LearnCaD: NeuronVars = 20;
+const CaDiff: NeuronVars = 21;
+const RLRate: NeuronVars = 22;
+const GnmdaSyn: NeuronVars = 23;
+const Gnmda: NeuronVars = 24;
+const GnmdaLrn: NeuronVars = 25;
+const GnmdaMaint: NeuronVars = 26;
+const NmdaCa: NeuronVars = 27;
+const Gvgcc: NeuronVars = 28;
+const VgccM: NeuronVars = 29;
+const VgccH: NeuronVars = 30;
+const VgccCa: NeuronVars = 31;
+const VgccCaInt: NeuronVars = 32;
+const Burst: NeuronVars = 33;
+const BurstPrv: NeuronVars = 34;
+const CtxtGe: NeuronVars = 35;
+const CtxtGeRaw: NeuronVars = 36;
+const CtxtGeOrig: NeuronVars = 37;
+const GgabaB: NeuronVars = 38;
+const GABAB: NeuronVars = 39;
+const GABABx: NeuronVars = 40;
+const Gak: NeuronVars = 41;
+const SSGiDend: NeuronVars = 42;
+const GknaMed: NeuronVars = 43;
+const GknaSlow: NeuronVars = 44;
+const Gkir: NeuronVars = 45;
+const KirM: NeuronVars = 46;
+const Gsk: NeuronVars = 47;
+const SKCaIn: NeuronVars = 48;
+const SKCaR: NeuronVars = 49;
+const SKCaM: NeuronVars = 50;
+const Gmahp: NeuronVars = 51;
+const MahpN: NeuronVars = 52;
+const Gsahp: NeuronVars = 53;
+const SahpCa: NeuronVars = 54;
+const SahpN: NeuronVars = 55;
+const ActM: NeuronVars = 56;
+const ActP: NeuronVars = 57;
+const Beta1: NeuronVars = 58;
+const Beta2: NeuronVars = 59;
+const SpkMax: NeuronVars = 60;
+const SpkMaxCa: NeuronVars = 61;
+const SpkBin0: NeuronVars = 62;
+const SpkBin1: NeuronVars = 63;
+const SpkBin2: NeuronVars = 64;
+const SpkBin3: NeuronVars = 65;
+const SpkBin4: NeuronVars = 66;
+const SpkBin5: NeuronVars = 67;
+const SpkBin6: NeuronVars = 68;
+const SpkBin7: NeuronVars = 69;
+const SpkPrv: NeuronVars = 70;
+const GeNoise: NeuronVars = 71;
+const GeNoiseP: NeuronVars = 72;
+const GiNoise: NeuronVars = 73;
+const GiNoiseP: NeuronVars = 74;
+const GeExt: NeuronVars = 75;
+const GeRaw: NeuronVars = 76;
+const GeSyn: NeuronVars = 77;
+const GiRaw: NeuronVars = 78;
+const GiSyn: NeuronVars = 79;
+const GeInt: NeuronVars = 80;
+const GeIntNorm: NeuronVars = 81;
+const GiInt: NeuronVars = 82;
+const GModRaw: NeuronVars = 83;
+const GModSyn: NeuronVars = 84;
+const SMaintP: NeuronVars = 85;
+const GMaintRaw: NeuronVars = 86;
+const GMaintSyn: NeuronVars = 87;
+const NeurFlags: NeuronVars = 88;
+alias NeuronAvgVars = i32; //enums:enum
+const ActAvg: NeuronAvgVars = 0;
+const AvgPct: NeuronAvgVars = 1;
+const TrgAvg: NeuronAvgVars = 2;
+const DTrgAvg: NeuronAvgVars = 3;
+const AvgDif: NeuronAvgVars = 4;
+const GeBase: NeuronAvgVars = 5;
+const GiBase: NeuronAvgVars = 6;
+alias NeuronIndexVars = i32; //enums:enum
+const NrnNeurIndex: NeuronIndexVars = 0;
+const NrnLayIndex: NeuronIndexVars = 1;
+const NrnSubPool: NeuronIndexVars = 2;
+
+///////////// import: "pathparams.go"
+const StartOff: i32 = 0;
+const Nitems: i32 = 1;
+const StartNN: i32 = 2;
+struct StartN {
+ Start: u32,
+ N: u32,
+ pad: u32,
+ pad1: u32, // todo: see if we can do without these?
+}
+struct PathIndexes {
+ RecvLayer: u32,
+ RecvNeurSt: u32,
+ RecvNeurN: u32,
+ SendLayer: u32,
+ SendNeurSt: u32,
+ SendNeurN: u32,
+ SynapseSt: u32,
+ SendConSt: u32,
+ RecvConSt: u32,
+ RecvSynSt: u32,
+ NPathNeurSt: u32,
+ pad: u32,
+}
+struct GScaleValues {
+ Scale: f32,
+ Rel: f32,
+ pad: f32,
+ pad1: f32,
+}
+struct PathParams {
+ Type: PathTypes,
+ Index: u32,
+ pad: i32,
+ pad1: i32,
+ Indexes: PathIndexes,
+ Com: SynComParams,
+ PathScale: PathScaleParams,
+ SWts: SWtParams,
+ Learn: LearnSynParams,
+ GScale: GScaleValues,
+ RLPred: RLPredPathParams,
+ Matrix: MatrixPathParams,
+ BLA: BLAPathParams,
+ Hip: HipPathParams,
+}
+
+///////////// import: "pathtypes.go"
+alias PathTypes = i32; //enums:enum
+const ForwardPath: PathTypes = 0;
+const BackPath: PathTypes = 1;
+const LateralPath: PathTypes = 2;
+const InhibPath: PathTypes = 3;
+const CTCtxtPath: PathTypes = 4;
+const RWPath: PathTypes = 5;
+const TDPredPath: PathTypes = 6;
+const BLAPath: PathTypes = 7;
+const HipPath: PathTypes = 8;
+const VSPatchPath: PathTypes = 9;
+const VSMatrixPath: PathTypes = 10;
+const DSMatrixPath: PathTypes = 11;
+
+///////////// import: "pcore-layer.go"
+struct MatrixParams {
+ GateThr: f32,
+ IsVS: i32,
+ OtherMatrixIndex: i32,
+ ThalLay1Index: i32,
+ ThalLay2Index: i32,
+ ThalLay3Index: i32,
+ ThalLay4Index: i32,
+ ThalLay5Index: i32,
+ ThalLay6Index: i32,
+ pad: i32,
+ pad1: i32,
+ pad2: i32,
+}
+alias GPLayerTypes = i32; //enums:enum
+const GPePr: GPLayerTypes = 0;
+const GPeAk: GPLayerTypes = 1;
+const GPi: GPLayerTypes = 2;
+struct GPParams {
+ GPType: GPLayerTypes,
+ pad: u32,
+ pad1: u32,
+ pad2: u32,
+}
+
+///////////// import: "pcore-path.go"
+struct MatrixPathParams {
+ Credit: f32,
+ BasePF: f32,
+ Delta: f32,
+ VSRewLearn: i32,
+}
+
+///////////// import: "pool.go"
+alias PoolIntVars = i32; //enums:enum
+const PoolNeurSt: PoolIntVars = 0;
+const PoolNeurEd: PoolIntVars = 1;
+const PoolLayerIdx: PoolIntVars = 2;
+const PoolIsLayer: PoolIntVars = 3;
+const Clamped: PoolIntVars = 4;
+const PoolGated: PoolIntVars = 5;
+const FFsRawInt: PoolIntVars = 6;
+const FBsRawInt: PoolIntVars = 7;
+const GeExtRawInt: PoolIntVars = 8;
+const PoolIntAvgMaxStart: PoolIntVars = 9;
+alias AvgMax = i32; //enums:enum
+const Avg: AvgMax = 0;
+const Max: AvgMax = 1;
+alias AvgMaxPhases = i32; //enums:enum -trim-prefix AM
+const AMCycle: AvgMaxPhases = 0;
+const AMMinus: AvgMaxPhases = 1;
+const AMPlus: AvgMaxPhases = 2;
+const AMPrev: AvgMaxPhases = 3;
+alias AvgMaxVars = i32; //enums:enum -trim-prefix AM
+const AMCaP: AvgMaxVars = 0;
+const AMCaD: AvgMaxVars = 1;
+const AMSpkMax: AvgMaxVars = 2;
+const AMAct: AvgMaxVars = 3;
+const AMGeInt: AvgMaxVars = 4;
+const AMGiInt: AvgMaxVars = 5;
+const AMAvgDif: AvgMaxVars = 6;
+const poolFloatAvgMaxStart = InhibVarsN;
+const PoolVarsN = poolFloatAvgMaxStart + InhibVars(i32(AvgMaxVarsN)*i32(AvgMaxN)*i32(AvgMaxPhasesN));
+const PoolIntVarsTot = PoolIntAvgMaxStart + PoolIntVars(i32(AvgMaxVarsN)*i32(AvgMaxN));
+const avgMaxToNeuron = array(CaP, CaD, SpkMax, Act, GeInt, GiInt);
+
+///////////// import: "rand.go"
+alias RandFunIndex = u32;
+const RandFunActPGe: RandFunIndex = 0;
+const RandFunActPGi: RandFunIndex = 1;
+const RandFunActSMaintP: RandFunIndex = 2;
+const RandFunIndexN: RandFunIndex = 3;
+
+///////////// import: "rl-layer.go"
+struct RWPredParams {
+ PredRange: F32,
+}
+struct RWDaParams {
+ TonicGe: f32,
+ RWPredLayIndex: i32,
+ pad: u32,
+ pad1: u32,
+}
+struct TDIntegParams {
+ Discount: f32,
+ PredGain: f32,
+ TDPredLayIndex: i32,
+ pad: u32,
+}
+struct TDDaParams {
+ TonicGe: f32,
+ TDIntegLayIndex: i32,
+ pad: u32,
+ pad1: u32,
+}
+
+///////////// import: "rl-path.go"
+struct RLPredPathParams {
+ OppSignLRate: f32,
+ DaTol: f32,
+ pad: f32,
+ pad1: f32,
+}
+
+///////////// import: "rubicon-layer.go"
+struct LDTParams {
+ SrcThr: f32,
+ Rew: i32,
+ MaintInhib: f32,
+ SrcLay1Index: i32,
+ SrcLay2Index: i32,
+ SrcLay3Index: i32,
+ SrcLay4Index: i32,
+ pad: f32,
+}
+struct VTAParams {
+ CeMGain: f32,
+ LHbGain: f32,
+ AChThr: f32,
+ pad: f32,
+}
+
+///////////// import: "rubicon-path.go"
+struct BLAPathParams {
+ NegDeltaLRate: f32,
+ AChThr: f32,
+ USTrace: f32,
+ pad: f32,
+}
+
+///////////// import: "rubicon.go"
+
+///////////// import: "stats.go"
+
+///////////// import: "synapse.go"
+alias SynapseVars = i32; //enums:enum
+const Wt: SynapseVars = 0;
+const LWt: SynapseVars = 1;
+const SWt: SynapseVars = 2;
+const DWt: SynapseVars = 3;
+const DSWt: SynapseVars = 4;
+alias SynapseTraceVars = i32; //enums:enum
+const Tr: SynapseTraceVars = 0;
+const DTr: SynapseTraceVars = 1;
+const DiDWt: SynapseTraceVars = 2;
+alias SynapseIndexVars = i32; //enums:enum
+const SynRecvIndex: SynapseIndexVars = 0;
+const SynSendIndex: SynapseIndexVars = 1;
+const SynPathIndex: SynapseIndexVars = 2;
+
+///////////// import: "slrand.wgsl"
+fn Philox2x32round(counter: su64, key: u32) -> su64 {
+ let mul = Uint32Mul64(u32(0xD256D193), counter.x);
+ var ctr: su64;
+ ctr.x = mul.y ^ key ^ counter.y;
+ ctr.y = mul.x;
+ return ctr;
+}
+fn Philox2x32bumpkey(key: u32) -> u32 {
+ return key + u32(0x9E3779B9);
+}
+fn Philox2x32(counter: su64, key: u32) -> vec2<u32> {
+ var ctr = Philox2x32round(counter, key); // 1
+ var ky = Philox2x32bumpkey(key);
+ ctr = Philox2x32round(ctr, ky); // 2
+ ky = Philox2x32bumpkey(ky);
+ ctr = Philox2x32round(ctr, ky); // 3
+ ky = Philox2x32bumpkey(ky);
+ ctr = Philox2x32round(ctr, ky); // 4
+ ky = Philox2x32bumpkey(ky);
+ ctr = Philox2x32round(ctr, ky); // 5
+ ky = Philox2x32bumpkey(ky);
+ ctr = Philox2x32round(ctr, ky); // 6
+ ky = Philox2x32bumpkey(ky);
+ ctr = Philox2x32round(ctr, ky); // 7
+ ky = Philox2x32bumpkey(ky);
+ ctr = Philox2x32round(ctr, ky); // 8
+ ky = Philox2x32bumpkey(ky);
+ ctr = Philox2x32round(ctr, ky); // 9
+ ky = Philox2x32bumpkey(ky);
+ return Philox2x32round(ctr, ky); // 10
+}
+fn RandUint32Vec2(counter: su64, funcIndex: u32, key: u32) -> vec2<u32> {
+ return Philox2x32(Uint64Add32(counter, funcIndex), key);
+}
+fn RandUint32(counter: su64, funcIndex: u32, key: u32) -> u32 {
+ return Philox2x32(Uint64Add32(counter, funcIndex), key).x;
+}
+fn RandFloat32Vec2(counter: su64, funcIndex: u32, key: u32) -> vec2<f32> {
+ return Uint32ToFloat32Vec2(RandUint32Vec2(counter, funcIndex, key));
+}
+fn RandFloat32(counter: su64, funcIndex: u32, key: u32) -> f32 {
+ return Uint32ToFloat32(RandUint32(counter, funcIndex, key));
+}
+fn RandFloat32Range11Vec2(counter: su64, funcIndex: u32, key: u32) -> vec2<f32> {
+ return Uint32ToFloat32Vec2(RandUint32Vec2(counter, funcIndex, key));
+}
+fn RandFloat32Range11(counter: su64, funcIndex: u32, key: u32) -> f32 {
+ return Uint32ToFloat32Range11(RandUint32(counter, funcIndex, key));
+}
+fn RandBoolP(counter: su64, funcIndex: u32, key: u32, p: f32) -> bool {
+ return (RandFloat32(counter, funcIndex, key) < p);
+}
+fn sincospi(x: f32) -> vec2<f32> {
+ let PIf = 3.1415926535897932;
+ var r: vec2<f32>;
+ r.x = cos(PIf*x);
+ r.y = sin(PIf*x);
+ return r;
+}
+fn RandFloat32NormVec2(counter: su64, funcIndex: u32, key: u32) -> vec2<f32> {
+ let ur = RandUint32Vec2(counter, funcIndex, key);
+ var f = sincospi(Uint32ToFloat32Range11(ur.x));
+ let r = sqrt(-2.0 * log(Uint32ToFloat32(ur.y))); // guaranteed to avoid 0.
+ return f * r;
+}
+fn RandFloat32Norm(counter: su64, funcIndex: u32, key: u32) -> f32 {
+ return RandFloat32NormVec2(counter, funcIndex, key).x;
+}
+fn RandUint32N(counter: su64, funcIndex: u32, key: u32, n: u32) -> u32 {
+ let v = RandFloat32(counter, funcIndex, key);
+ return u32(v * f32(n));
+}
+struct RandCounter {
+ Counter: su64,
+ HiSeed: u32,
+ pad: u32,
+}
+fn RandCounter_Reset(ct: ptr<function,RandCounter>) {
+ (*ct).Counter.x = u32(0);
+ (*ct).Counter.y = (*ct).HiSeed;
+}
+fn RandCounter_Seed(ct: ptr<function,RandCounter>, seed: u32) {
+ (*ct).HiSeed = seed;
+ RandCounter_Reset(ct);
+}
+fn RandCounter_Add(ct: ptr<function,RandCounter>, inc: u32) {
+ (*ct).Counter = Uint64Add32((*ct).Counter, inc);
+}
+
+///////////// import: "sltype.wgsl"
+alias su64 = vec2<u32>;
+fn Uint32Mul64(a: u32, b: u32) -> su64 {
+ let LOMASK = (((u32(1))<<16)-1);
+ var r: su64;
+ r.x = a * b; /* full low multiply */
+ let ahi = a >> 16;
+ let alo = a & LOMASK;
+ let bhi = b >> 16;
+ let blo = b & LOMASK;
+ let ahbl = ahi * blo;
+ let albh = alo * bhi;
+ let ahbl_albh = ((ahbl&LOMASK) + (albh&LOMASK));
+ var hit = ahi*bhi + (ahbl>>16) + (albh>>16);
+ hit += ahbl_albh >> 16; /* carry from the sum of lo(ahbl) + lo(albh) ) */
+ /* carry from the sum with alo*blo */
+ if ((r.x >> u32(16)) < (ahbl_albh&LOMASK)) {
+ hit += u32(1);
+ }
+ r.y = hit;
+ return r;
+}
+/*
+fn Uint32Mul64(a: u32, b: u32) -> su64 {
+ return su64(a) * su64(b);
+}
+*/
+fn Uint64Add32(a: su64, b: u32) -> su64 {
+ if (b == 0) {
+ return a;
+ }
+ var s = a;
+ if (s.x > u32(0xffffffff) - b) {
+ s.y++;
+ s.x = (b - 1) - (u32(0xffffffff) - s.x);
+ } else {
+ s.x += b;
+ }
+ return s;
+}
+fn Uint64Incr(a: su64) -> su64 {
+ var s = a;
+ if(s.x == 0xffffffff) {
+ s.y++;
+ s.x = u32(0);
+ } else {
+ s.x++;
+ }
+ return s;
+}
+fn Uint32ToFloat32(val: u32) -> f32 {
+ let factor = f32(1.0) / (f32(u32(0xffffffff)) + f32(1.0));
+ let halffactor = f32(0.5) * factor;
+ var f = f32(val) * factor + halffactor;
+ if (f == 1.0) { // exclude 1
+ return bitcast<f32>(0x3F7FFFFF);
+ }
+ return f;
+}
+fn Uint32ToFloat32Vec2(val: vec2<u32>) -> vec2<f32> {
+ var r: vec2<f32>;
+ r.x = Uint32ToFloat32(val.x);
+ r.y = Uint32ToFloat32(val.y);
+ return r;
+}
+fn Uint32ToFloat32Range11(val: u32) -> f32 {
+ let factor = f32(1.0) / (f32(i32(0x7fffffff)) + f32(1.0));
+ let halffactor = f32(0.5) * factor;
+ return (f32(val) * factor + halffactor);
+}
+fn Uint32ToFloat32Range11Vec2(val: vec2<u32>) -> vec2<f32> {
+ var r: vec2<f32>;
+ r.x = Uint32ToFloat32Range11(val.x);
+ r.y = Uint32ToFloat32Range11(val.y);
+ return r;
+}
\ No newline at end of file
diff --git a/axon/shaders/BetweenGi.wgsl b/axon/shaders/BetweenGi.wgsl
index fc1c07e3a..9c92f7c19 100644
--- a/axon/shaders/BetweenGi.wgsl
+++ b/axon/shaders/BetweenGi.wgsl
@@ -444,7 +444,7 @@ const ViewTimesN: ViewTimes = 7;
const DAModTypesN: DAModTypes = 4;
const ValenceTypesN: ValenceTypes = 3;
const NeuronFlagsN: NeuronFlags = 9;
-const NeuronVarsN: NeuronVars = 90;
+const NeuronVarsN: NeuronVars = 89;
const NeuronAvgVarsN: NeuronAvgVars = 7;
const NeuronIndexVarsN: NeuronIndexVars = 3;
const PathTypesN: PathTypes = 12;
@@ -747,7 +747,7 @@ const LayerRewPredNeg: LayerVars = 10;
///////////// import: "learn-path.go"
///////////// import: "learn.go"
-struct CaLrnParams {
+struct LearnCaParams {
Norm: f32,
SpkVGCC: i32,
SpkVgccCa: f32,
@@ -780,7 +780,7 @@ struct RLRateParams {
pad: i32,
}
struct LearnNeurParams {
- CaLearn: CaLrnParams,
+ CaLearn: LearnCaParams,
CaSpk: NeurCaParams,
LrnNMDA: NMDAParams,
TrgAvgAct: TrgAvgActParams,
@@ -936,82 +936,81 @@ const ISI: NeuronVars = 10;
const ISIAvg: NeuronVars = 11;
const Ext: NeuronVars = 12;
const Target: NeuronVars = 13;
-const CaSpkM: NeuronVars = 14;
-const CaSpkP: NeuronVars = 15;
-const CaSpkD: NeuronVars = 16;
-const CaSpkPM: NeuronVars = 17;
-const CaLrn: NeuronVars = 18;
-const NrnCaM: NeuronVars = 19;
-const NrnCaP: NeuronVars = 20;
-const NrnCaD: NeuronVars = 21;
-const CaDiff: NeuronVars = 22;
-const RLRate: NeuronVars = 23;
-const GnmdaSyn: NeuronVars = 24;
-const Gnmda: NeuronVars = 25;
-const GnmdaLrn: NeuronVars = 26;
-const GnmdaMaint: NeuronVars = 27;
-const NmdaCa: NeuronVars = 28;
-const Gvgcc: NeuronVars = 29;
-const VgccM: NeuronVars = 30;
-const VgccH: NeuronVars = 31;
-const VgccCa: NeuronVars = 32;
-const VgccCaInt: NeuronVars = 33;
-const Burst: NeuronVars = 34;
-const BurstPrv: NeuronVars = 35;
-const CtxtGe: NeuronVars = 36;
-const CtxtGeRaw: NeuronVars = 37;
-const CtxtGeOrig: NeuronVars = 38;
-const GgabaB: NeuronVars = 39;
-const GABAB: NeuronVars = 40;
-const GABABx: NeuronVars = 41;
-const Gak: NeuronVars = 42;
-const SSGiDend: NeuronVars = 43;
-const GknaMed: NeuronVars = 44;
-const GknaSlow: NeuronVars = 45;
-const Gkir: NeuronVars = 46;
-const KirM: NeuronVars = 47;
-const Gsk: NeuronVars = 48;
-const SKCaIn: NeuronVars = 49;
-const SKCaR: NeuronVars = 50;
-const SKCaM: NeuronVars = 51;
-const Gmahp: NeuronVars = 52;
-const MahpN: NeuronVars = 53;
-const Gsahp: NeuronVars = 54;
-const SahpCa: NeuronVars = 55;
-const SahpN: NeuronVars = 56;
-const ActM: NeuronVars = 57;
-const ActP: NeuronVars = 58;
-const SpkSt1: NeuronVars = 59;
-const SpkSt2: NeuronVars = 60;
-const SpkMax: NeuronVars = 61;
-const SpkMaxCa: NeuronVars = 62;
-const SpkBin0: NeuronVars = 63;
-const SpkBin1: NeuronVars = 64;
-const SpkBin2: NeuronVars = 65;
-const SpkBin3: NeuronVars = 66;
-const SpkBin4: NeuronVars = 67;
-const SpkBin5: NeuronVars = 68;
-const SpkBin6: NeuronVars = 69;
-const SpkBin7: NeuronVars = 70;
-const SpkPrv: NeuronVars = 71;
-const GeNoise: NeuronVars = 72;
-const GeNoiseP: NeuronVars = 73;
-const GiNoise: NeuronVars = 74;
-const GiNoiseP: NeuronVars = 75;
-const GeExt: NeuronVars = 76;
-const GeRaw: NeuronVars = 77;
-const GeSyn: NeuronVars = 78;
-const GiRaw: NeuronVars = 79;
-const GiSyn: NeuronVars = 80;
-const GeInt: NeuronVars = 81;
-const GeIntNorm: NeuronVars = 82;
-const GiInt: NeuronVars = 83;
-const GModRaw: NeuronVars = 84;
-const GModSyn: NeuronVars = 85;
-const SMaintP: NeuronVars = 86;
-const GMaintRaw: NeuronVars = 87;
-const GMaintSyn: NeuronVars = 88;
-const NeurFlags: NeuronVars = 89;
+const CaM: NeuronVars = 14;
+const CaP: NeuronVars = 15;
+const CaD: NeuronVars = 16;
+const LearnCa: NeuronVars = 17;
+const LearnCaM: NeuronVars = 18;
+const LearnCaP: NeuronVars = 19;
+const LearnCaD: NeuronVars = 20;
+const CaDiff: NeuronVars = 21;
+const RLRate: NeuronVars = 22;
+const GnmdaSyn: NeuronVars = 23;
+const Gnmda: NeuronVars = 24;
+const GnmdaLrn: NeuronVars = 25;
+const GnmdaMaint: NeuronVars = 26;
+const NmdaCa: NeuronVars = 27;
+const Gvgcc: NeuronVars = 28;
+const VgccM: NeuronVars = 29;
+const VgccH: NeuronVars = 30;
+const VgccCa: NeuronVars = 31;
+const VgccCaInt: NeuronVars = 32;
+const Burst: NeuronVars = 33;
+const BurstPrv: NeuronVars = 34;
+const CtxtGe: NeuronVars = 35;
+const CtxtGeRaw: NeuronVars = 36;
+const CtxtGeOrig: NeuronVars = 37;
+const GgabaB: NeuronVars = 38;
+const GABAB: NeuronVars = 39;
+const GABABx: NeuronVars = 40;
+const Gak: NeuronVars = 41;
+const SSGiDend: NeuronVars = 42;
+const GknaMed: NeuronVars = 43;
+const GknaSlow: NeuronVars = 44;
+const Gkir: NeuronVars = 45;
+const KirM: NeuronVars = 46;
+const Gsk: NeuronVars = 47;
+const SKCaIn: NeuronVars = 48;
+const SKCaR: NeuronVars = 49;
+const SKCaM: NeuronVars = 50;
+const Gmahp: NeuronVars = 51;
+const MahpN: NeuronVars = 52;
+const Gsahp: NeuronVars = 53;
+const SahpCa: NeuronVars = 54;
+const SahpN: NeuronVars = 55;
+const ActM: NeuronVars = 56;
+const ActP: NeuronVars = 57;
+const Beta1: NeuronVars = 58;
+const Beta2: NeuronVars = 59;
+const SpkMax: NeuronVars = 60;
+const SpkMaxCa: NeuronVars = 61;
+const SpkBin0: NeuronVars = 62;
+const SpkBin1: NeuronVars = 63;
+const SpkBin2: NeuronVars = 64;
+const SpkBin3: NeuronVars = 65;
+const SpkBin4: NeuronVars = 66;
+const SpkBin5: NeuronVars = 67;
+const SpkBin6: NeuronVars = 68;
+const SpkBin7: NeuronVars = 69;
+const SpkPrv: NeuronVars = 70;
+const GeNoise: NeuronVars = 71;
+const GeNoiseP: NeuronVars = 72;
+const GiNoise: NeuronVars = 73;
+const GiNoiseP: NeuronVars = 74;
+const GeExt: NeuronVars = 75;
+const GeRaw: NeuronVars = 76;
+const GeSyn: NeuronVars = 77;
+const GiRaw: NeuronVars = 78;
+const GiSyn: NeuronVars = 79;
+const GeInt: NeuronVars = 80;
+const GeIntNorm: NeuronVars = 81;
+const GiInt: NeuronVars = 82;
+const GModRaw: NeuronVars = 83;
+const GModSyn: NeuronVars = 84;
+const SMaintP: NeuronVars = 85;
+const GMaintRaw: NeuronVars = 86;
+const GMaintSyn: NeuronVars = 87;
+const NeurFlags: NeuronVars = 88;
alias NeuronAvgVars = i32; //enums:enum
const ActAvg: NeuronAvgVars = 0;
const AvgPct: NeuronAvgVars = 1;
@@ -1142,8 +1141,8 @@ const AMMinus: AvgMaxPhases = 1;
const AMPlus: AvgMaxPhases = 2;
const AMPrev: AvgMaxPhases = 3;
alias AvgMaxVars = i32; //enums:enum -trim-prefix AM
-const AMCaSpkP: AvgMaxVars = 0;
-const AMCaSpkD: AvgMaxVars = 1;
+const AMCaP: AvgMaxVars = 0;
+const AMCaD: AvgMaxVars = 1;
const AMSpkMax: AvgMaxVars = 2;
const AMAct: AvgMaxVars = 3;
const AMGeInt: AvgMaxVars = 4;
@@ -1152,7 +1151,7 @@ const AMAvgDif: AvgMaxVars = 6;
const poolFloatAvgMaxStart = InhibVarsN;
const PoolVarsN = poolFloatAvgMaxStart + InhibVars(i32(AvgMaxVarsN)*i32(AvgMaxN)*i32(AvgMaxPhasesN));
const PoolIntVarsTot = PoolIntAvgMaxStart + PoolIntVars(i32(AvgMaxVarsN)*i32(AvgMaxN));
-const avgMaxToNeuron = array(CaSpkP, CaSpkD, SpkMax, Act, GeInt, GiInt);
+const avgMaxToNeuron = array(CaP, CaD, SpkMax, Act, GeInt, GiInt);
///////////// import: "rand.go"
alias RandFunIndex = u32;
diff --git a/axon/shaders/CycleInc.wgsl b/axon/shaders/CycleInc.wgsl
index f6fc7f4a3..4b3aa609b 100644
--- a/axon/shaders/CycleInc.wgsl
+++ b/axon/shaders/CycleInc.wgsl
@@ -426,7 +426,7 @@ const ViewTimesN: ViewTimes = 7;
const DAModTypesN: DAModTypes = 4;
const ValenceTypesN: ValenceTypes = 3;
const NeuronFlagsN: NeuronFlags = 9;
-const NeuronVarsN: NeuronVars = 90;
+const NeuronVarsN: NeuronVars = 89;
const NeuronAvgVarsN: NeuronAvgVars = 7;
const NeuronIndexVarsN: NeuronIndexVars = 3;
const PathTypesN: PathTypes = 12;
@@ -726,7 +726,7 @@ const LayerRewPredNeg: LayerVars = 10;
///////////// import: "learn-path.go"
///////////// import: "learn.go"
-struct CaLrnParams {
+struct LearnCaParams {
Norm: f32,
SpkVGCC: i32,
SpkVgccCa: f32,
@@ -759,7 +759,7 @@ struct RLRateParams {
pad: i32,
}
struct LearnNeurParams {
- CaLearn: CaLrnParams,
+ CaLearn: LearnCaParams,
CaSpk: NeurCaParams,
LrnNMDA: NMDAParams,
TrgAvgAct: TrgAvgActParams,
@@ -915,82 +915,81 @@ const ISI: NeuronVars = 10;
const ISIAvg: NeuronVars = 11;
const Ext: NeuronVars = 12;
const Target: NeuronVars = 13;
-const CaSpkM: NeuronVars = 14;
-const CaSpkP: NeuronVars = 15;
-const CaSpkD: NeuronVars = 16;
-const CaSpkPM: NeuronVars = 17;
-const CaLrn: NeuronVars = 18;
-const NrnCaM: NeuronVars = 19;
-const NrnCaP: NeuronVars = 20;
-const NrnCaD: NeuronVars = 21;
-const CaDiff: NeuronVars = 22;
-const RLRate: NeuronVars = 23;
-const GnmdaSyn: NeuronVars = 24;
-const Gnmda: NeuronVars = 25;
-const GnmdaLrn: NeuronVars = 26;
-const GnmdaMaint: NeuronVars = 27;
-const NmdaCa: NeuronVars = 28;
-const Gvgcc: NeuronVars = 29;
-const VgccM: NeuronVars = 30;
-const VgccH: NeuronVars = 31;
-const VgccCa: NeuronVars = 32;
-const VgccCaInt: NeuronVars = 33;
-const Burst: NeuronVars = 34;
-const BurstPrv: NeuronVars = 35;
-const CtxtGe: NeuronVars = 36;
-const CtxtGeRaw: NeuronVars = 37;
-const CtxtGeOrig: NeuronVars = 38;
-const GgabaB: NeuronVars = 39;
-const GABAB: NeuronVars = 40;
-const GABABx: NeuronVars = 41;
-const Gak: NeuronVars = 42;
-const SSGiDend: NeuronVars = 43;
-const GknaMed: NeuronVars = 44;
-const GknaSlow: NeuronVars = 45;
-const Gkir: NeuronVars = 46;
-const KirM: NeuronVars = 47;
-const Gsk: NeuronVars = 48;
-const SKCaIn: NeuronVars = 49;
-const SKCaR: NeuronVars = 50;
-const SKCaM: NeuronVars = 51;
-const Gmahp: NeuronVars = 52;
-const MahpN: NeuronVars = 53;
-const Gsahp: NeuronVars = 54;
-const SahpCa: NeuronVars = 55;
-const SahpN: NeuronVars = 56;
-const ActM: NeuronVars = 57;
-const ActP: NeuronVars = 58;
-const SpkSt1: NeuronVars = 59;
-const SpkSt2: NeuronVars = 60;
-const SpkMax: NeuronVars = 61;
-const SpkMaxCa: NeuronVars = 62;
-const SpkBin0: NeuronVars = 63;
-const SpkBin1: NeuronVars = 64;
-const SpkBin2: NeuronVars = 65;
-const SpkBin3: NeuronVars = 66;
-const SpkBin4: NeuronVars = 67;
-const SpkBin5: NeuronVars = 68;
-const SpkBin6: NeuronVars = 69;
-const SpkBin7: NeuronVars = 70;
-const SpkPrv: NeuronVars = 71;
-const GeNoise: NeuronVars = 72;
-const GeNoiseP: NeuronVars = 73;
-const GiNoise: NeuronVars = 74;
-const GiNoiseP: NeuronVars = 75;
-const GeExt: NeuronVars = 76;
-const GeRaw: NeuronVars = 77;
-const GeSyn: NeuronVars = 78;
-const GiRaw: NeuronVars = 79;
-const GiSyn: NeuronVars = 80;
-const GeInt: NeuronVars = 81;
-const GeIntNorm: NeuronVars = 82;
-const GiInt: NeuronVars = 83;
-const GModRaw: NeuronVars = 84;
-const GModSyn: NeuronVars = 85;
-const SMaintP: NeuronVars = 86;
-const GMaintRaw: NeuronVars = 87;
-const GMaintSyn: NeuronVars = 88;
-const NeurFlags: NeuronVars = 89;
+const CaM: NeuronVars = 14;
+const CaP: NeuronVars = 15;
+const CaD: NeuronVars = 16;
+const LearnCa: NeuronVars = 17;
+const LearnCaM: NeuronVars = 18;
+const LearnCaP: NeuronVars = 19;
+const LearnCaD: NeuronVars = 20;
+const CaDiff: NeuronVars = 21;
+const RLRate: NeuronVars = 22;
+const GnmdaSyn: NeuronVars = 23;
+const Gnmda: NeuronVars = 24;
+const GnmdaLrn: NeuronVars = 25;
+const GnmdaMaint: NeuronVars = 26;
+const NmdaCa: NeuronVars = 27;
+const Gvgcc: NeuronVars = 28;
+const VgccM: NeuronVars = 29;
+const VgccH: NeuronVars = 30;
+const VgccCa: NeuronVars = 31;
+const VgccCaInt: NeuronVars = 32;
+const Burst: NeuronVars = 33;
+const BurstPrv: NeuronVars = 34;
+const CtxtGe: NeuronVars = 35;
+const CtxtGeRaw: NeuronVars = 36;
+const CtxtGeOrig: NeuronVars = 37;
+const GgabaB: NeuronVars = 38;
+const GABAB: NeuronVars = 39;
+const GABABx: NeuronVars = 40;
+const Gak: NeuronVars = 41;
+const SSGiDend: NeuronVars = 42;
+const GknaMed: NeuronVars = 43;
+const GknaSlow: NeuronVars = 44;
+const Gkir: NeuronVars = 45;
+const KirM: NeuronVars = 46;
+const Gsk: NeuronVars = 47;
+const SKCaIn: NeuronVars = 48;
+const SKCaR: NeuronVars = 49;
+const SKCaM: NeuronVars = 50;
+const Gmahp: NeuronVars = 51;
+const MahpN: NeuronVars = 52;
+const Gsahp: NeuronVars = 53;
+const SahpCa: NeuronVars = 54;
+const SahpN: NeuronVars = 55;
+const ActM: NeuronVars = 56;
+const ActP: NeuronVars = 57;
+const Beta1: NeuronVars = 58;
+const Beta2: NeuronVars = 59;
+const SpkMax: NeuronVars = 60;
+const SpkMaxCa: NeuronVars = 61;
+const SpkBin0: NeuronVars = 62;
+const SpkBin1: NeuronVars = 63;
+const SpkBin2: NeuronVars = 64;
+const SpkBin3: NeuronVars = 65;
+const SpkBin4: NeuronVars = 66;
+const SpkBin5: NeuronVars = 67;
+const SpkBin6: NeuronVars = 68;
+const SpkBin7: NeuronVars = 69;
+const SpkPrv: NeuronVars = 70;
+const GeNoise: NeuronVars = 71;
+const GeNoiseP: NeuronVars = 72;
+const GiNoise: NeuronVars = 73;
+const GiNoiseP: NeuronVars = 74;
+const GeExt: NeuronVars = 75;
+const GeRaw: NeuronVars = 76;
+const GeSyn: NeuronVars = 77;
+const GiRaw: NeuronVars = 78;
+const GiSyn: NeuronVars = 79;
+const GeInt: NeuronVars = 80;
+const GeIntNorm: NeuronVars = 81;
+const GiInt: NeuronVars = 82;
+const GModRaw: NeuronVars = 83;
+const GModSyn: NeuronVars = 84;
+const SMaintP: NeuronVars = 85;
+const GMaintRaw: NeuronVars = 86;
+const GMaintSyn: NeuronVars = 87;
+const NeurFlags: NeuronVars = 88;
alias NeuronAvgVars = i32; //enums:enum
const ActAvg: NeuronAvgVars = 0;
const AvgPct: NeuronAvgVars = 1;
@@ -1121,8 +1120,8 @@ const AMMinus: AvgMaxPhases = 1;
const AMPlus: AvgMaxPhases = 2;
const AMPrev: AvgMaxPhases = 3;
alias AvgMaxVars = i32; //enums:enum -trim-prefix AM
-const AMCaSpkP: AvgMaxVars = 0;
-const AMCaSpkD: AvgMaxVars = 1;
+const AMCaP: AvgMaxVars = 0;
+const AMCaD: AvgMaxVars = 1;
const AMSpkMax: AvgMaxVars = 2;
const AMAct: AvgMaxVars = 3;
const AMGeInt: AvgMaxVars = 4;
@@ -1131,7 +1130,7 @@ const AMAvgDif: AvgMaxVars = 6;
const poolFloatAvgMaxStart = InhibVarsN;
const PoolVarsN = poolFloatAvgMaxStart + InhibVars(i32(AvgMaxVarsN)*i32(AvgMaxN)*i32(AvgMaxPhasesN));
const PoolIntVarsTot = PoolIntAvgMaxStart + PoolIntVars(i32(AvgMaxVarsN)*i32(AvgMaxN));
-const avgMaxToNeuron = array(CaSpkP, CaSpkD, SpkMax, Act, GeInt, GiInt);
+const avgMaxToNeuron = array(CaP, CaD, SpkMax, Act, GeInt, GiInt);
///////////// import: "rand.go"
alias RandFunIndex = u32;
diff --git a/axon/shaders/CycleNeuron.wgsl b/axon/shaders/CycleNeuron.wgsl
index 7438a5e3a..0d8ef47b3 100644
--- a/axon/shaders/CycleNeuron.wgsl
+++ b/axon/shaders/CycleNeuron.wgsl
@@ -106,7 +106,7 @@ fn LayerParams_PulvinarDriver(ly: ptr, ctx: ptr, ctx: ptr= (*ly).Acts.Dt.MaxCycStart) {
- Neurons[IndexF323D(Neurons[0], Neurons[1], Neurons[2], u32(ni),u32(di),u32(SpkMaxCa))] += (*ly).Learn.CaSpk.Dt.PDt * (Neurons[IndexF323D(Neurons[0], Neurons[1], Neurons[2], u32(ni),u32(di),u32(CaSpkM))] - Neurons[IndexF323D(Neurons[0], Neurons[1], Neurons[2], u32(ni),u32(di),u32(SpkMaxCa))]);
+ Neurons[IndexF323D(Neurons[0], Neurons[1], Neurons[2], u32(ni),u32(di),u32(SpkMaxCa))] += (*ly).Learn.CaSpk.Dt.PDt * (Neurons[IndexF323D(Neurons[0], Neurons[1], Neurons[2], u32(ni),u32(di),u32(CaM))] - Neurons[IndexF323D(Neurons[0], Neurons[1], Neurons[2], u32(ni),u32(di),u32(SpkMaxCa))]);
var spkmax = Neurons[IndexF323D(Neurons[0], Neurons[1], Neurons[2], u32(ni),u32(di),u32(SpkMaxCa))];
if (spkmax > Neurons[IndexF323D(Neurons[0], Neurons[1], Neurons[2], u32(ni),u32(di),u32(SpkMax))]) {
Neurons[IndexF323D(Neurons[0], Neurons[1], Neurons[2], u32(ni),u32(di),u32(SpkMax))] = spkmax;
@@ -710,7 +710,7 @@ fn ActParams_GSkCaFromCa(ac: ptr, ctx: ptr
var skcar = Neurons[IndexF323D(Neurons[0], Neurons[1], Neurons[2], u32(ni),u32(di),u32(SKCaR))];
var skcain = Neurons[IndexF323D(Neurons[0], Neurons[1], Neurons[2], u32(ni),u32(di),u32(SKCaIn))];
Neurons[IndexF323D(Neurons[0], Neurons[1], Neurons[2], u32(ni),u32(di),u32(SKCaM))] = SKCaParams_MFromCa(&(*ac).SKCa, skcar, Neurons[IndexF323D(Neurons[0], Neurons[1], Neurons[2], u32(ni),u32(di),u32(SKCaM))]);
- SKCaParams_CaInRFromSpike(&(*ac).SKCa, Neurons[IndexF323D(Neurons[0], Neurons[1], Neurons[2], u32(ni),u32(di),u32(Spike))], Neurons[IndexF323D(Neurons[0], Neurons[1], Neurons[2], u32(ni),u32(di),u32(CaSpkD))], &skcain, &skcar);
+ SKCaParams_CaInRFromSpike(&(*ac).SKCa, Neurons[IndexF323D(Neurons[0], Neurons[1], Neurons[2], u32(ni),u32(di),u32(Spike))], Neurons[IndexF323D(Neurons[0], Neurons[1], Neurons[2], u32(ni),u32(di),u32(CaD))], &skcain, &skcar);
Neurons[IndexF323D(Neurons[0], Neurons[1], Neurons[2], u32(ni),u32(di),u32(SKCaR))] = skcar;
Neurons[IndexF323D(Neurons[0], Neurons[1], Neurons[2], u32(ni),u32(di),u32(SKCaIn))] = skcain;
Neurons[IndexF323D(Neurons[0], Neurons[1], Neurons[2], u32(ni),u32(di),u32(Gsk))] = (*ac).SKCa.Gbar * Neurons[IndexF323D(Neurons[0], Neurons[1], Neurons[2], u32(ni),u32(di),u32(SKCaM))];
@@ -1259,7 +1259,7 @@ const ViewTimesN: ViewTimes = 7;
const DAModTypesN: DAModTypes = 4;
const ValenceTypesN: ValenceTypes = 3;
const NeuronFlagsN: NeuronFlags = 9;
-const NeuronVarsN: NeuronVars = 90;
+const NeuronVarsN: NeuronVars = 89;
const NeuronAvgVarsN: NeuronAvgVars = 7;
const NeuronIndexVarsN: NeuronIndexVars = 3;
const PathTypesN: PathTypes = 12;
@@ -1572,7 +1572,7 @@ const LayerRewPredNeg: LayerVars = 10;
///////////// import: "learn-path.go"
///////////// import: "learn.go"
-struct CaLrnParams {
+struct LearnCaParams {
Norm: f32,
SpkVGCC: i32,
SpkVgccCa: f32,
@@ -1583,19 +1583,19 @@ struct CaLrnParams {
NormInv: f32,
pad: i32,
}
-fn CaLrnParams_VgccCaFromSpike(np: ptr, ctx: ptr, ni: u32,di: u32) {
+fn LearnCaParams_VgccCaFromSpike(np: ptr, ctx: ptr, ni: u32,di: u32) {
if ((*np).SpkVGCC == 1) {
Neurons[IndexF323D(Neurons[0], Neurons[1], Neurons[2], u32(ni),u32(di),u32(VgccCa))] = (*np).SpkVgccCa * Neurons[IndexF323D(Neurons[0], Neurons[1], Neurons[2], u32(ni),u32(di),u32(Spike))];
}
Neurons[IndexF323D(Neurons[0], Neurons[1], Neurons[2], u32(ni),u32(di),u32(VgccCaInt))] += Neurons[IndexF323D(Neurons[0], Neurons[1], Neurons[2], u32(ni),u32(di),u32(VgccCa))] - (*np).VgccDt*Neurons[IndexF323D(Neurons[0], Neurons[1], Neurons[2], u32(ni),u32(di),u32(VgccCaInt))];
}
-fn CaLrnParams_CaLrns(np: ptr, ctx: ptr, ni: u32,di: u32) {
- CaLrnParams_VgccCaFromSpike(np, ctx, ni, di);
- Neurons[IndexF323D(Neurons[0], Neurons[1], Neurons[2], u32(ni),u32(di),u32(CaLrn))] = (*np).NormInv * (Neurons[IndexF323D(Neurons[0], Neurons[1], Neurons[2], u32(ni),u32(di),u32(NmdaCa))] + Neurons[IndexF323D(Neurons[0], Neurons[1], Neurons[2], u32(ni),u32(di),u32(VgccCaInt))]);
- Neurons[IndexF323D(Neurons[0], Neurons[1], Neurons[2], u32(ni),u32(di),u32(NrnCaM))] += (*np).Dt.MDt * (Neurons[IndexF323D(Neurons[0], Neurons[1], Neurons[2], u32(ni),u32(di),u32(CaLrn))] - Neurons[IndexF323D(Neurons[0], Neurons[1], Neurons[2], u32(ni),u32(di),u32(NrnCaM))]);
- Neurons[IndexF323D(Neurons[0], Neurons[1], Neurons[2], u32(ni),u32(di),u32(NrnCaP))] += (*np).Dt.PDt * (Neurons[IndexF323D(Neurons[0], Neurons[1], Neurons[2], u32(ni),u32(di),u32(NrnCaM))] - Neurons[IndexF323D(Neurons[0], Neurons[1], Neurons[2], u32(ni),u32(di),u32(NrnCaP))]);
- Neurons[IndexF323D(Neurons[0], Neurons[1], Neurons[2], u32(ni),u32(di),u32(NrnCaD))] += (*np).Dt.DDt * (Neurons[IndexF323D(Neurons[0], Neurons[1], Neurons[2], u32(ni),u32(di),u32(NrnCaP))] - Neurons[IndexF323D(Neurons[0], Neurons[1], Neurons[2], u32(ni),u32(di),u32(NrnCaD))]);
- Neurons[IndexF323D(Neurons[0], Neurons[1], Neurons[2], u32(ni),u32(di),u32(CaDiff))] = Neurons[IndexF323D(Neurons[0], Neurons[1], Neurons[2], u32(ni),u32(di),u32(NrnCaP))] - Neurons[IndexF323D(Neurons[0], Neurons[1], Neurons[2], u32(ni),u32(di),u32(NrnCaD))];
+fn LearnCaParams_LearnCas(np: ptr, ctx: ptr, ni: u32,di: u32) {
+ LearnCaParams_VgccCaFromSpike(np, ctx, ni, di);
+ Neurons[IndexF323D(Neurons[0], Neurons[1], Neurons[2], u32(ni),u32(di),u32(LearnCa))] = (*np).NormInv * (Neurons[IndexF323D(Neurons[0], Neurons[1], Neurons[2], u32(ni),u32(di),u32(NmdaCa))] + Neurons[IndexF323D(Neurons[0], Neurons[1], Neurons[2], u32(ni),u32(di),u32(VgccCaInt))]);
+ Neurons[IndexF323D(Neurons[0], Neurons[1], Neurons[2], u32(ni),u32(di),u32(LearnCaM))] += (*np).Dt.MDt * (Neurons[IndexF323D(Neurons[0], Neurons[1], Neurons[2], u32(ni),u32(di),u32(LearnCa))] - Neurons[IndexF323D(Neurons[0], Neurons[1], Neurons[2], u32(ni),u32(di),u32(LearnCaM))]);
+ Neurons[IndexF323D(Neurons[0], Neurons[1], Neurons[2], u32(ni),u32(di),u32(LearnCaP))] += (*np).Dt.PDt * (Neurons[IndexF323D(Neurons[0], Neurons[1], Neurons[2], u32(ni),u32(di),u32(LearnCaM))] - Neurons[IndexF323D(Neurons[0], Neurons[1], Neurons[2], u32(ni),u32(di),u32(LearnCaP))]);
+ Neurons[IndexF323D(Neurons[0], Neurons[1], Neurons[2], u32(ni),u32(di),u32(LearnCaD))] += (*np).Dt.DDt * (Neurons[IndexF323D(Neurons[0], Neurons[1], Neurons[2], u32(ni),u32(di),u32(LearnCaP))] - Neurons[IndexF323D(Neurons[0], Neurons[1], Neurons[2], u32(ni),u32(di),u32(LearnCaD))]);
+ Neurons[IndexF323D(Neurons[0], Neurons[1], Neurons[2], u32(ni),u32(di),u32(CaDiff))] = Neurons[IndexF323D(Neurons[0], Neurons[1], Neurons[2], u32(ni),u32(di),u32(LearnCaP))] - Neurons[IndexF323D(Neurons[0], Neurons[1], Neurons[2], u32(ni),u32(di),u32(LearnCaD))];
}
struct TrgAvgActParams {
GiBaseInit: f32,
@@ -1619,7 +1619,7 @@ struct RLRateParams {
pad: i32,
}
struct LearnNeurParams {
- CaLearn: CaLrnParams,
+ CaLearn: LearnCaParams,
CaSpk: NeurCaParams,
LrnNMDA: NMDAParams,
TrgAvgAct: TrgAvgActParams,
@@ -1635,14 +1635,14 @@ fn LearnNeurParams_LrnNMDAFromRaw(ln: ptr, ctx: ptr, ctx: ptr, ni: u32,di: u32) {
var caSyn: f32;
- var caSpkM = Neurons[IndexF323D(Neurons[0], Neurons[1], Neurons[2], u32(ni),u32(di),u32(CaSpkM))];
- var caSpkP = Neurons[IndexF323D(Neurons[0], Neurons[1], Neurons[2], u32(ni),u32(di),u32(CaSpkP))];
- var caSpkD = Neurons[IndexF323D(Neurons[0], Neurons[1], Neurons[2], u32(ni),u32(di),u32(CaSpkD))];
+ var caSpkM = Neurons[IndexF323D(Neurons[0], Neurons[1], Neurons[2], u32(ni),u32(di),u32(CaM))];
+ var caSpkP = Neurons[IndexF323D(Neurons[0], Neurons[1], Neurons[2], u32(ni),u32(di),u32(CaP))];
+ var caSpkD = Neurons[IndexF323D(Neurons[0], Neurons[1], Neurons[2], u32(ni),u32(di),u32(CaD))];
NeurCaParams_CaFromSpike(&(*ln).CaSpk, Neurons[IndexF323D(Neurons[0], Neurons[1], Neurons[2], u32(ni),u32(di),u32(Spike))], &caSyn, &caSpkM, &caSpkP, &caSpkD);
- Neurons[IndexF323D(Neurons[0], Neurons[1], Neurons[2], u32(ni),u32(di),u32(CaSpkM))] = caSpkM;
- Neurons[IndexF323D(Neurons[0], Neurons[1], Neurons[2], u32(ni),u32(di),u32(CaSpkP))] = caSpkP;
- Neurons[IndexF323D(Neurons[0], Neurons[1], Neurons[2], u32(ni),u32(di),u32(CaSpkD))] = caSpkD;
- CaLrnParams_CaLrns(&(*ln).CaLearn, ctx, ni, di);
+ Neurons[IndexF323D(Neurons[0], Neurons[1], Neurons[2], u32(ni),u32(di),u32(CaM))] = caSpkM;
+ Neurons[IndexF323D(Neurons[0], Neurons[1], Neurons[2], u32(ni),u32(di),u32(CaP))] = caSpkP;
+ Neurons[IndexF323D(Neurons[0], Neurons[1], Neurons[2], u32(ni),u32(di),u32(CaD))] = caSpkD;
+ LearnCaParams_LearnCas(&(*ln).CaLearn, ctx, ni, di);
}
struct SWtInitParams {
SPct: f32,
@@ -1846,82 +1846,81 @@ const ISI: NeuronVars = 10;
const ISIAvg: NeuronVars = 11;
const Ext: NeuronVars = 12;
const Target: NeuronVars = 13;
-const CaSpkM: NeuronVars = 14;
-const CaSpkP: NeuronVars = 15;
-const CaSpkD: NeuronVars = 16;
-const CaSpkPM: NeuronVars = 17;
-const CaLrn: NeuronVars = 18;
-const NrnCaM: NeuronVars = 19;
-const NrnCaP: NeuronVars = 20;
-const NrnCaD: NeuronVars = 21;
-const CaDiff: NeuronVars = 22;
-const RLRate: NeuronVars = 23;
-const GnmdaSyn: NeuronVars = 24;
-const Gnmda: NeuronVars = 25;
-const GnmdaLrn: NeuronVars = 26;
-const GnmdaMaint: NeuronVars = 27;
-const NmdaCa: NeuronVars = 28;
-const Gvgcc: NeuronVars = 29;
-const VgccM: NeuronVars = 30;
-const VgccH: NeuronVars = 31;
-const VgccCa: NeuronVars = 32;
-const VgccCaInt: NeuronVars = 33;
-const Burst: NeuronVars = 34;
-const BurstPrv: NeuronVars = 35;
-const CtxtGe: NeuronVars = 36;
-const CtxtGeRaw: NeuronVars = 37;
-const CtxtGeOrig: NeuronVars = 38;
-const GgabaB: NeuronVars = 39;
-const GABAB: NeuronVars = 40;
-const GABABx: NeuronVars = 41;
-const Gak: NeuronVars = 42;
-const SSGiDend: NeuronVars = 43;
-const GknaMed: NeuronVars = 44;
-const GknaSlow: NeuronVars = 45;
-const Gkir: NeuronVars = 46;
-const KirM: NeuronVars = 47;
-const Gsk: NeuronVars = 48;
-const SKCaIn: NeuronVars = 49;
-const SKCaR: NeuronVars = 50;
-const SKCaM: NeuronVars = 51;
-const Gmahp: NeuronVars = 52;
-const MahpN: NeuronVars = 53;
-const Gsahp: NeuronVars = 54;
-const SahpCa: NeuronVars = 55;
-const SahpN: NeuronVars = 56;
-const ActM: NeuronVars = 57;
-const ActP: NeuronVars = 58;
-const SpkSt1: NeuronVars = 59;
-const SpkSt2: NeuronVars = 60;
-const SpkMax: NeuronVars = 61;
-const SpkMaxCa: NeuronVars = 62;
-const SpkBin0: NeuronVars = 63;
-const SpkBin1: NeuronVars = 64;
-const SpkBin2: NeuronVars = 65;
-const SpkBin3: NeuronVars = 66;
-const SpkBin4: NeuronVars = 67;
-const SpkBin5: NeuronVars = 68;
-const SpkBin6: NeuronVars = 69;
-const SpkBin7: NeuronVars = 70;
-const SpkPrv: NeuronVars = 71;
-const GeNoise: NeuronVars = 72;
-const GeNoiseP: NeuronVars = 73;
-const GiNoise: NeuronVars = 74;
-const GiNoiseP: NeuronVars = 75;
-const GeExt: NeuronVars = 76;
-const GeRaw: NeuronVars = 77;
-const GeSyn: NeuronVars = 78;
-const GiRaw: NeuronVars = 79;
-const GiSyn: NeuronVars = 80;
-const GeInt: NeuronVars = 81;
-const GeIntNorm: NeuronVars = 82;
-const GiInt: NeuronVars = 83;
-const GModRaw: NeuronVars = 84;
-const GModSyn: NeuronVars = 85;
-const SMaintP: NeuronVars = 86;
-const GMaintRaw: NeuronVars = 87;
-const GMaintSyn: NeuronVars = 88;
-const NeurFlags: NeuronVars = 89;
+const CaM: NeuronVars = 14;
+const CaP: NeuronVars = 15;
+const CaD: NeuronVars = 16;
+const LearnCa: NeuronVars = 17;
+const LearnCaM: NeuronVars = 18;
+const LearnCaP: NeuronVars = 19;
+const LearnCaD: NeuronVars = 20;
+const CaDiff: NeuronVars = 21;
+const RLRate: NeuronVars = 22;
+const GnmdaSyn: NeuronVars = 23;
+const Gnmda: NeuronVars = 24;
+const GnmdaLrn: NeuronVars = 25;
+const GnmdaMaint: NeuronVars = 26;
+const NmdaCa: NeuronVars = 27;
+const Gvgcc: NeuronVars = 28;
+const VgccM: NeuronVars = 29;
+const VgccH: NeuronVars = 30;
+const VgccCa: NeuronVars = 31;
+const VgccCaInt: NeuronVars = 32;
+const Burst: NeuronVars = 33;
+const BurstPrv: NeuronVars = 34;
+const CtxtGe: NeuronVars = 35;
+const CtxtGeRaw: NeuronVars = 36;
+const CtxtGeOrig: NeuronVars = 37;
+const GgabaB: NeuronVars = 38;
+const GABAB: NeuronVars = 39;
+const GABABx: NeuronVars = 40;
+const Gak: NeuronVars = 41;
+const SSGiDend: NeuronVars = 42;
+const GknaMed: NeuronVars = 43;
+const GknaSlow: NeuronVars = 44;
+const Gkir: NeuronVars = 45;
+const KirM: NeuronVars = 46;
+const Gsk: NeuronVars = 47;
+const SKCaIn: NeuronVars = 48;
+const SKCaR: NeuronVars = 49;
+const SKCaM: NeuronVars = 50;
+const Gmahp: NeuronVars = 51;
+const MahpN: NeuronVars = 52;
+const Gsahp: NeuronVars = 53;
+const SahpCa: NeuronVars = 54;
+const SahpN: NeuronVars = 55;
+const ActM: NeuronVars = 56;
+const ActP: NeuronVars = 57;
+const Beta1: NeuronVars = 58;
+const Beta2: NeuronVars = 59;
+const SpkMax: NeuronVars = 60;
+const SpkMaxCa: NeuronVars = 61;
+const SpkBin0: NeuronVars = 62;
+const SpkBin1: NeuronVars = 63;
+const SpkBin2: NeuronVars = 64;
+const SpkBin3: NeuronVars = 65;
+const SpkBin4: NeuronVars = 66;
+const SpkBin5: NeuronVars = 67;
+const SpkBin6: NeuronVars = 68;
+const SpkBin7: NeuronVars = 69;
+const SpkPrv: NeuronVars = 70;
+const GeNoise: NeuronVars = 71;
+const GeNoiseP: NeuronVars = 72;
+const GiNoise: NeuronVars = 73;
+const GiNoiseP: NeuronVars = 74;
+const GeExt: NeuronVars = 75;
+const GeRaw: NeuronVars = 76;
+const GeSyn: NeuronVars = 77;
+const GiRaw: NeuronVars = 78;
+const GiSyn: NeuronVars = 79;
+const GeInt: NeuronVars = 80;
+const GeIntNorm: NeuronVars = 81;
+const GiInt: NeuronVars = 82;
+const GModRaw: NeuronVars = 83;
+const GModSyn: NeuronVars = 84;
+const SMaintP: NeuronVars = 85;
+const GMaintRaw: NeuronVars = 86;
+const GMaintSyn: NeuronVars = 87;
+const NeurFlags: NeuronVars = 88;
alias NeuronAvgVars = i32; //enums:enum
const ActAvg: NeuronAvgVars = 0;
const AvgPct: NeuronAvgVars = 1;
@@ -2052,8 +2051,8 @@ const AMMinus: AvgMaxPhases = 1;
const AMPlus: AvgMaxPhases = 2;
const AMPrev: AvgMaxPhases = 3;
alias AvgMaxVars = i32; //enums:enum -trim-prefix AM
-const AMCaSpkP: AvgMaxVars = 0;
-const AMCaSpkD: AvgMaxVars = 1;
+const AMCaP: AvgMaxVars = 0;
+const AMCaD: AvgMaxVars = 1;
const AMSpkMax: AvgMaxVars = 2;
const AMAct: AvgMaxVars = 3;
const AMGeInt: AvgMaxVars = 4;
@@ -2062,7 +2061,7 @@ const AMAvgDif: AvgMaxVars = 6;
const poolFloatAvgMaxStart = InhibVarsN;
const PoolVarsN = poolFloatAvgMaxStart + InhibVars(i32(AvgMaxVarsN)*i32(AvgMaxN)*i32(AvgMaxPhasesN));
const PoolIntVarsTot = PoolIntAvgMaxStart + PoolIntVars(i32(AvgMaxVarsN)*i32(AvgMaxN));
-const avgMaxToNeuron = array(CaSpkP, CaSpkD, SpkMax, Act, GeInt, GiInt);
+const avgMaxToNeuron = array(CaP, CaD, SpkMax, Act, GeInt, GiInt);
fn AvgMaxVarIndex(vr: AvgMaxVars, phase: AvgMaxPhases, am: AvgMax) -> u32 {
return u32(poolFloatAvgMaxStart) + u32(vr)*u32(AvgMaxN)*u32(AvgMaxPhasesN) + u32(phase)*u32(AvgMaxN) + u32(am);
}
diff --git a/axon/shaders/CyclePost.wgsl b/axon/shaders/CyclePost.wgsl
index 806a29fa2..03f60a363 100644
--- a/axon/shaders/CyclePost.wgsl
+++ b/axon/shaders/CyclePost.wgsl
@@ -124,7 +124,7 @@ fn LayerParams_CyclePost(ly: ptr, ctx: ptr, ctx: ptr, lpi: u32,di: u32) {
- var casp = PoolAvgMax(AMCaSpkP, AMCycle, Max, lpi, di);
+ var casp = PoolAvgMax(AMCaP, AMCycle, Max, lpi, di);
if ((*ctx).Cycle >= (*ly).Acts.Dt.MaxCycStart && casp > 0.5) { // todo: param
if (LayerStates[IndexF323D(LayerStates[0], LayerStates[1], LayerStates[2], u32((*ly).Index),u32(di),u32(LayerRT))] <= 0) {
LayerStates[IndexF323D(LayerStates[0], LayerStates[1], LayerStates[2], u32((*ly).Index),u32(di),u32(LayerRT))] = f32((*ctx).Cycle);
@@ -136,7 +136,7 @@ fn LayerParams_LDTSrcLayAct(ly: ptr, layIndex: i32, di: u3
return f32(0);
}
var oly = Layers[u32(layIndex)];
- var opi = LayerParams_PoolIndex(&oly, u32(u32(0)));return PoolAvgMax(AMCaSpkP, AMCycle, Avg, opi, di);
+ var opi = LayerParams_PoolIndex(&oly, u32(u32(0)));return PoolAvgMax(AMCaP, AMCycle, Avg, opi, di);
}
fn LayerParams_CyclePostLDTLayer(ly: ptr, ctx: ptr, di: u32, srcLay1Act: f32,srcLay2Act: f32,srcLay3Act: f32,srcLay4Act: f32) {
var ach = LDTParams_ACh(&(*ly).LDT, ctx, di, srcLay1Act, srcLay2Act, srcLay3Act, srcLay4Act);
@@ -195,7 +195,7 @@ fn LayerParams_CyclePostTDDaLayer(ly: ptr, ctx: ptr, ctx: ptr, lpi: u32,di: u32) {
- var casd = PoolAvgMax(AMCaSpkD, AMCycle, Max, lpi, di);
+ var casd = PoolAvgMax(AMCaD, AMCycle, Max, lpi, di);
if ((*ly).Learn.NeuroMod.Valence == Positive) {
GlobalScalars[IndexF322D(GlobalScalars[0], GlobalScalars[1], u32(GvCeMpos),u32(di))] = casd;
} else {
@@ -207,7 +207,7 @@ fn LayerParams_CyclePostVTALayer(ly: ptr, ctx: ptr 0));
}
fn LayerParams_CyclePostVSPatchLayer(ly: ptr, ctx: ptr, pi: u32,di: u32, spi: i32) {
- var casd = PoolAvgMax(AMCaSpkD, AMCycle, Avg, pi, di);
+ var casd = PoolAvgMax(AMCaD, AMCycle, Avg, pi, di);
if ((*ly).Learn.NeuroMod.DAMod == D1Mod) {
GlobalVectors[IndexF323D(GlobalVectors[0], GlobalVectors[1], GlobalVectors[2], u32(GvVSPatchD1),u32(u32(pi - 1)),u32(di))] = casd;
} else {
@@ -557,7 +557,7 @@ const ViewTimesN: ViewTimes = 7;
const DAModTypesN: DAModTypes = 4;
const ValenceTypesN: ValenceTypes = 3;
const NeuronFlagsN: NeuronFlags = 9;
-const NeuronVarsN: NeuronVars = 90;
+const NeuronVarsN: NeuronVars = 89;
const NeuronAvgVarsN: NeuronAvgVars = 7;
const NeuronIndexVarsN: NeuronIndexVars = 3;
const PathTypesN: PathTypes = 12;
@@ -860,7 +860,7 @@ const LayerRewPredNeg: LayerVars = 10;
///////////// import: "learn-path.go"
///////////// import: "learn.go"
-struct CaLrnParams {
+struct LearnCaParams {
Norm: f32,
SpkVGCC: i32,
SpkVgccCa: f32,
@@ -893,7 +893,7 @@ struct RLRateParams {
pad: i32,
}
struct LearnNeurParams {
- CaLearn: CaLrnParams,
+ CaLearn: LearnCaParams,
CaSpk: NeurCaParams,
LrnNMDA: NMDAParams,
TrgAvgAct: TrgAvgActParams,
@@ -1049,82 +1049,81 @@ const ISI: NeuronVars = 10;
const ISIAvg: NeuronVars = 11;
const Ext: NeuronVars = 12;
const Target: NeuronVars = 13;
-const CaSpkM: NeuronVars = 14;
-const CaSpkP: NeuronVars = 15;
-const CaSpkD: NeuronVars = 16;
-const CaSpkPM: NeuronVars = 17;
-const CaLrn: NeuronVars = 18;
-const NrnCaM: NeuronVars = 19;
-const NrnCaP: NeuronVars = 20;
-const NrnCaD: NeuronVars = 21;
-const CaDiff: NeuronVars = 22;
-const RLRate: NeuronVars = 23;
-const GnmdaSyn: NeuronVars = 24;
-const Gnmda: NeuronVars = 25;
-const GnmdaLrn: NeuronVars = 26;
-const GnmdaMaint: NeuronVars = 27;
-const NmdaCa: NeuronVars = 28;
-const Gvgcc: NeuronVars = 29;
-const VgccM: NeuronVars = 30;
-const VgccH: NeuronVars = 31;
-const VgccCa: NeuronVars = 32;
-const VgccCaInt: NeuronVars = 33;
-const Burst: NeuronVars = 34;
-const BurstPrv: NeuronVars = 35;
-const CtxtGe: NeuronVars = 36;
-const CtxtGeRaw: NeuronVars = 37;
-const CtxtGeOrig: NeuronVars = 38;
-const GgabaB: NeuronVars = 39;
-const GABAB: NeuronVars = 40;
-const GABABx: NeuronVars = 41;
-const Gak: NeuronVars = 42;
-const SSGiDend: NeuronVars = 43;
-const GknaMed: NeuronVars = 44;
-const GknaSlow: NeuronVars = 45;
-const Gkir: NeuronVars = 46;
-const KirM: NeuronVars = 47;
-const Gsk: NeuronVars = 48;
-const SKCaIn: NeuronVars = 49;
-const SKCaR: NeuronVars = 50;
-const SKCaM: NeuronVars = 51;
-const Gmahp: NeuronVars = 52;
-const MahpN: NeuronVars = 53;
-const Gsahp: NeuronVars = 54;
-const SahpCa: NeuronVars = 55;
-const SahpN: NeuronVars = 56;
-const ActM: NeuronVars = 57;
-const ActP: NeuronVars = 58;
-const SpkSt1: NeuronVars = 59;
-const SpkSt2: NeuronVars = 60;
-const SpkMax: NeuronVars = 61;
-const SpkMaxCa: NeuronVars = 62;
-const SpkBin0: NeuronVars = 63;
-const SpkBin1: NeuronVars = 64;
-const SpkBin2: NeuronVars = 65;
-const SpkBin3: NeuronVars = 66;
-const SpkBin4: NeuronVars = 67;
-const SpkBin5: NeuronVars = 68;
-const SpkBin6: NeuronVars = 69;
-const SpkBin7: NeuronVars = 70;
-const SpkPrv: NeuronVars = 71;
-const GeNoise: NeuronVars = 72;
-const GeNoiseP: NeuronVars = 73;
-const GiNoise: NeuronVars = 74;
-const GiNoiseP: NeuronVars = 75;
-const GeExt: NeuronVars = 76;
-const GeRaw: NeuronVars = 77;
-const GeSyn: NeuronVars = 78;
-const GiRaw: NeuronVars = 79;
-const GiSyn: NeuronVars = 80;
-const GeInt: NeuronVars = 81;
-const GeIntNorm: NeuronVars = 82;
-const GiInt: NeuronVars = 83;
-const GModRaw: NeuronVars = 84;
-const GModSyn: NeuronVars = 85;
-const SMaintP: NeuronVars = 86;
-const GMaintRaw: NeuronVars = 87;
-const GMaintSyn: NeuronVars = 88;
-const NeurFlags: NeuronVars = 89;
+const CaM: NeuronVars = 14;
+const CaP: NeuronVars = 15;
+const CaD: NeuronVars = 16;
+const LearnCa: NeuronVars = 17;
+const LearnCaM: NeuronVars = 18;
+const LearnCaP: NeuronVars = 19;
+const LearnCaD: NeuronVars = 20;
+const CaDiff: NeuronVars = 21;
+const RLRate: NeuronVars = 22;
+const GnmdaSyn: NeuronVars = 23;
+const Gnmda: NeuronVars = 24;
+const GnmdaLrn: NeuronVars = 25;
+const GnmdaMaint: NeuronVars = 26;
+const NmdaCa: NeuronVars = 27;
+const Gvgcc: NeuronVars = 28;
+const VgccM: NeuronVars = 29;
+const VgccH: NeuronVars = 30;
+const VgccCa: NeuronVars = 31;
+const VgccCaInt: NeuronVars = 32;
+const Burst: NeuronVars = 33;
+const BurstPrv: NeuronVars = 34;
+const CtxtGe: NeuronVars = 35;
+const CtxtGeRaw: NeuronVars = 36;
+const CtxtGeOrig: NeuronVars = 37;
+const GgabaB: NeuronVars = 38;
+const GABAB: NeuronVars = 39;
+const GABABx: NeuronVars = 40;
+const Gak: NeuronVars = 41;
+const SSGiDend: NeuronVars = 42;
+const GknaMed: NeuronVars = 43;
+const GknaSlow: NeuronVars = 44;
+const Gkir: NeuronVars = 45;
+const KirM: NeuronVars = 46;
+const Gsk: NeuronVars = 47;
+const SKCaIn: NeuronVars = 48;
+const SKCaR: NeuronVars = 49;
+const SKCaM: NeuronVars = 50;
+const Gmahp: NeuronVars = 51;
+const MahpN: NeuronVars = 52;
+const Gsahp: NeuronVars = 53;
+const SahpCa: NeuronVars = 54;
+const SahpN: NeuronVars = 55;
+const ActM: NeuronVars = 56;
+const ActP: NeuronVars = 57;
+const Beta1: NeuronVars = 58;
+const Beta2: NeuronVars = 59;
+const SpkMax: NeuronVars = 60;
+const SpkMaxCa: NeuronVars = 61;
+const SpkBin0: NeuronVars = 62;
+const SpkBin1: NeuronVars = 63;
+const SpkBin2: NeuronVars = 64;
+const SpkBin3: NeuronVars = 65;
+const SpkBin4: NeuronVars = 66;
+const SpkBin5: NeuronVars = 67;
+const SpkBin6: NeuronVars = 68;
+const SpkBin7: NeuronVars = 69;
+const SpkPrv: NeuronVars = 70;
+const GeNoise: NeuronVars = 71;
+const GeNoiseP: NeuronVars = 72;
+const GiNoise: NeuronVars = 73;
+const GiNoiseP: NeuronVars = 74;
+const GeExt: NeuronVars = 75;
+const GeRaw: NeuronVars = 76;
+const GeSyn: NeuronVars = 77;
+const GiRaw: NeuronVars = 78;
+const GiSyn: NeuronVars = 79;
+const GeInt: NeuronVars = 80;
+const GeIntNorm: NeuronVars = 81;
+const GiInt: NeuronVars = 82;
+const GModRaw: NeuronVars = 83;
+const GModSyn: NeuronVars = 84;
+const SMaintP: NeuronVars = 85;
+const GMaintRaw: NeuronVars = 86;
+const GMaintSyn: NeuronVars = 87;
+const NeurFlags: NeuronVars = 88;
alias NeuronAvgVars = i32; //enums:enum
const ActAvg: NeuronAvgVars = 0;
const AvgPct: NeuronAvgVars = 1;
@@ -1283,8 +1282,8 @@ const AMMinus: AvgMaxPhases = 1;
const AMPlus: AvgMaxPhases = 2;
const AMPrev: AvgMaxPhases = 3;
alias AvgMaxVars = i32; //enums:enum -trim-prefix AM
-const AMCaSpkP: AvgMaxVars = 0;
-const AMCaSpkD: AvgMaxVars = 1;
+const AMCaP: AvgMaxVars = 0;
+const AMCaD: AvgMaxVars = 1;
const AMSpkMax: AvgMaxVars = 2;
const AMAct: AvgMaxVars = 3;
const AMGeInt: AvgMaxVars = 4;
@@ -1293,7 +1292,7 @@ const AMAvgDif: AvgMaxVars = 6;
const poolFloatAvgMaxStart = InhibVarsN;
const PoolVarsN = poolFloatAvgMaxStart + InhibVars(i32(AvgMaxVarsN)*i32(AvgMaxN)*i32(AvgMaxPhasesN));
const PoolIntVarsTot = PoolIntAvgMaxStart + PoolIntVars(i32(AvgMaxVarsN)*i32(AvgMaxN));
-const avgMaxToNeuron = array(CaSpkP, CaSpkD, SpkMax, Act, GeInt, GiInt);
+const avgMaxToNeuron = array(CaP, CaD, SpkMax, Act, GeInt, GiInt);
fn AvgMaxVarIndex(vr: AvgMaxVars, phase: AvgMaxPhases, am: AvgMax) -> u32 {
return u32(poolFloatAvgMaxStart) + u32(vr)*u32(AvgMaxN)*u32(AvgMaxPhasesN) + u32(phase)*u32(AvgMaxN) + u32(am);
}
diff --git a/axon/shaders/DWtFromDiSyn.wgsl b/axon/shaders/DWtFromDiSyn.wgsl
index f66ab3c71..46675b80f 100644
--- a/axon/shaders/DWtFromDiSyn.wgsl
+++ b/axon/shaders/DWtFromDiSyn.wgsl
@@ -411,7 +411,7 @@ const ViewTimesN: ViewTimes = 7;
const DAModTypesN: DAModTypes = 4;
const ValenceTypesN: ValenceTypes = 3;
const NeuronFlagsN: NeuronFlags = 9;
-const NeuronVarsN: NeuronVars = 90;
+const NeuronVarsN: NeuronVars = 89;
const NeuronAvgVarsN: NeuronAvgVars = 7;
const NeuronIndexVarsN: NeuronIndexVars = 3;
const PathTypesN: PathTypes = 12;
@@ -724,7 +724,7 @@ fn PathParams_DWtFromDi(pt: ptr, ctx: ptr
}
///////////// import: "learn.go"
-struct CaLrnParams {
+struct LearnCaParams {
Norm: f32,
SpkVGCC: i32,
SpkVgccCa: f32,
@@ -757,7 +757,7 @@ struct RLRateParams {
pad: i32,
}
struct LearnNeurParams {
- CaLearn: CaLrnParams,
+ CaLearn: LearnCaParams,
CaSpk: NeurCaParams,
LrnNMDA: NMDAParams,
TrgAvgAct: TrgAvgActParams,
@@ -913,82 +913,81 @@ const ISI: NeuronVars = 10;
const ISIAvg: NeuronVars = 11;
const Ext: NeuronVars = 12;
const Target: NeuronVars = 13;
-const CaSpkM: NeuronVars = 14;
-const CaSpkP: NeuronVars = 15;
-const CaSpkD: NeuronVars = 16;
-const CaSpkPM: NeuronVars = 17;
-const CaLrn: NeuronVars = 18;
-const NrnCaM: NeuronVars = 19;
-const NrnCaP: NeuronVars = 20;
-const NrnCaD: NeuronVars = 21;
-const CaDiff: NeuronVars = 22;
-const RLRate: NeuronVars = 23;
-const GnmdaSyn: NeuronVars = 24;
-const Gnmda: NeuronVars = 25;
-const GnmdaLrn: NeuronVars = 26;
-const GnmdaMaint: NeuronVars = 27;
-const NmdaCa: NeuronVars = 28;
-const Gvgcc: NeuronVars = 29;
-const VgccM: NeuronVars = 30;
-const VgccH: NeuronVars = 31;
-const VgccCa: NeuronVars = 32;
-const VgccCaInt: NeuronVars = 33;
-const Burst: NeuronVars = 34;
-const BurstPrv: NeuronVars = 35;
-const CtxtGe: NeuronVars = 36;
-const CtxtGeRaw: NeuronVars = 37;
-const CtxtGeOrig: NeuronVars = 38;
-const GgabaB: NeuronVars = 39;
-const GABAB: NeuronVars = 40;
-const GABABx: NeuronVars = 41;
-const Gak: NeuronVars = 42;
-const SSGiDend: NeuronVars = 43;
-const GknaMed: NeuronVars = 44;
-const GknaSlow: NeuronVars = 45;
-const Gkir: NeuronVars = 46;
-const KirM: NeuronVars = 47;
-const Gsk: NeuronVars = 48;
-const SKCaIn: NeuronVars = 49;
-const SKCaR: NeuronVars = 50;
-const SKCaM: NeuronVars = 51;
-const Gmahp: NeuronVars = 52;
-const MahpN: NeuronVars = 53;
-const Gsahp: NeuronVars = 54;
-const SahpCa: NeuronVars = 55;
-const SahpN: NeuronVars = 56;
-const ActM: NeuronVars = 57;
-const ActP: NeuronVars = 58;
-const SpkSt1: NeuronVars = 59;
-const SpkSt2: NeuronVars = 60;
-const SpkMax: NeuronVars = 61;
-const SpkMaxCa: NeuronVars = 62;
-const SpkBin0: NeuronVars = 63;
-const SpkBin1: NeuronVars = 64;
-const SpkBin2: NeuronVars = 65;
-const SpkBin3: NeuronVars = 66;
-const SpkBin4: NeuronVars = 67;
-const SpkBin5: NeuronVars = 68;
-const SpkBin6: NeuronVars = 69;
-const SpkBin7: NeuronVars = 70;
-const SpkPrv: NeuronVars = 71;
-const GeNoise: NeuronVars = 72;
-const GeNoiseP: NeuronVars = 73;
-const GiNoise: NeuronVars = 74;
-const GiNoiseP: NeuronVars = 75;
-const GeExt: NeuronVars = 76;
-const GeRaw: NeuronVars = 77;
-const GeSyn: NeuronVars = 78;
-const GiRaw: NeuronVars = 79;
-const GiSyn: NeuronVars = 80;
-const GeInt: NeuronVars = 81;
-const GeIntNorm: NeuronVars = 82;
-const GiInt: NeuronVars = 83;
-const GModRaw: NeuronVars = 84;
-const GModSyn: NeuronVars = 85;
-const SMaintP: NeuronVars = 86;
-const GMaintRaw: NeuronVars = 87;
-const GMaintSyn: NeuronVars = 88;
-const NeurFlags: NeuronVars = 89;
+const CaM: NeuronVars = 14;
+const CaP: NeuronVars = 15;
+const CaD: NeuronVars = 16;
+const LearnCa: NeuronVars = 17;
+const LearnCaM: NeuronVars = 18;
+const LearnCaP: NeuronVars = 19;
+const LearnCaD: NeuronVars = 20;
+const CaDiff: NeuronVars = 21;
+const RLRate: NeuronVars = 22;
+const GnmdaSyn: NeuronVars = 23;
+const Gnmda: NeuronVars = 24;
+const GnmdaLrn: NeuronVars = 25;
+const GnmdaMaint: NeuronVars = 26;
+const NmdaCa: NeuronVars = 27;
+const Gvgcc: NeuronVars = 28;
+const VgccM: NeuronVars = 29;
+const VgccH: NeuronVars = 30;
+const VgccCa: NeuronVars = 31;
+const VgccCaInt: NeuronVars = 32;
+const Burst: NeuronVars = 33;
+const BurstPrv: NeuronVars = 34;
+const CtxtGe: NeuronVars = 35;
+const CtxtGeRaw: NeuronVars = 36;
+const CtxtGeOrig: NeuronVars = 37;
+const GgabaB: NeuronVars = 38;
+const GABAB: NeuronVars = 39;
+const GABABx: NeuronVars = 40;
+const Gak: NeuronVars = 41;
+const SSGiDend: NeuronVars = 42;
+const GknaMed: NeuronVars = 43;
+const GknaSlow: NeuronVars = 44;
+const Gkir: NeuronVars = 45;
+const KirM: NeuronVars = 46;
+const Gsk: NeuronVars = 47;
+const SKCaIn: NeuronVars = 48;
+const SKCaR: NeuronVars = 49;
+const SKCaM: NeuronVars = 50;
+const Gmahp: NeuronVars = 51;
+const MahpN: NeuronVars = 52;
+const Gsahp: NeuronVars = 53;
+const SahpCa: NeuronVars = 54;
+const SahpN: NeuronVars = 55;
+const ActM: NeuronVars = 56;
+const ActP: NeuronVars = 57;
+const Beta1: NeuronVars = 58;
+const Beta2: NeuronVars = 59;
+const SpkMax: NeuronVars = 60;
+const SpkMaxCa: NeuronVars = 61;
+const SpkBin0: NeuronVars = 62;
+const SpkBin1: NeuronVars = 63;
+const SpkBin2: NeuronVars = 64;
+const SpkBin3: NeuronVars = 65;
+const SpkBin4: NeuronVars = 66;
+const SpkBin5: NeuronVars = 67;
+const SpkBin6: NeuronVars = 68;
+const SpkBin7: NeuronVars = 69;
+const SpkPrv: NeuronVars = 70;
+const GeNoise: NeuronVars = 71;
+const GeNoiseP: NeuronVars = 72;
+const GiNoise: NeuronVars = 73;
+const GiNoiseP: NeuronVars = 74;
+const GeExt: NeuronVars = 75;
+const GeRaw: NeuronVars = 76;
+const GeSyn: NeuronVars = 77;
+const GiRaw: NeuronVars = 78;
+const GiSyn: NeuronVars = 79;
+const GeInt: NeuronVars = 80;
+const GeIntNorm: NeuronVars = 81;
+const GiInt: NeuronVars = 82;
+const GModRaw: NeuronVars = 83;
+const GModSyn: NeuronVars = 84;
+const SMaintP: NeuronVars = 85;
+const GMaintRaw: NeuronVars = 86;
+const GMaintSyn: NeuronVars = 87;
+const NeurFlags: NeuronVars = 88;
alias NeuronAvgVars = i32; //enums:enum
const ActAvg: NeuronAvgVars = 0;
const AvgPct: NeuronAvgVars = 1;
@@ -1119,8 +1118,8 @@ const AMMinus: AvgMaxPhases = 1;
const AMPlus: AvgMaxPhases = 2;
const AMPrev: AvgMaxPhases = 3;
alias AvgMaxVars = i32; //enums:enum -trim-prefix AM
-const AMCaSpkP: AvgMaxVars = 0;
-const AMCaSpkD: AvgMaxVars = 1;
+const AMCaP: AvgMaxVars = 0;
+const AMCaD: AvgMaxVars = 1;
const AMSpkMax: AvgMaxVars = 2;
const AMAct: AvgMaxVars = 3;
const AMGeInt: AvgMaxVars = 4;
@@ -1129,7 +1128,7 @@ const AMAvgDif: AvgMaxVars = 6;
const poolFloatAvgMaxStart = InhibVarsN;
const PoolVarsN = poolFloatAvgMaxStart + InhibVars(i32(AvgMaxVarsN)*i32(AvgMaxN)*i32(AvgMaxPhasesN));
const PoolIntVarsTot = PoolIntAvgMaxStart + PoolIntVars(i32(AvgMaxVarsN)*i32(AvgMaxN));
-const avgMaxToNeuron = array(CaSpkP, CaSpkD, SpkMax, Act, GeInt, GiInt);
+const avgMaxToNeuron = array(CaP, CaD, SpkMax, Act, GeInt, GiInt);
///////////// import: "rand.go"
alias RandFunIndex = u32;
diff --git a/axon/shaders/DWtSubMeanNeuron.wgsl b/axon/shaders/DWtSubMeanNeuron.wgsl
index 67906b8d1..19f3007f1 100644
--- a/axon/shaders/DWtSubMeanNeuron.wgsl
+++ b/axon/shaders/DWtSubMeanNeuron.wgsl
@@ -411,7 +411,7 @@ const ViewTimesN: ViewTimes = 7;
const DAModTypesN: DAModTypes = 4;
const ValenceTypesN: ValenceTypes = 3;
const NeuronFlagsN: NeuronFlags = 9;
-const NeuronVarsN: NeuronVars = 90;
+const NeuronVarsN: NeuronVars = 89;
const NeuronAvgVarsN: NeuronAvgVars = 7;
const NeuronIndexVarsN: NeuronIndexVars = 3;
const PathTypesN: PathTypes = 12;
@@ -760,7 +760,7 @@ fn PathParams_DWtSubMean(pt: ptr, ctx: ptr, ctx: ptr, ctx: ptr, ctx: ptr, syni: u32,si: u32,ri: u32,lpi: u32,pi: u32,di: u32) {
- var rNrnCaP = Neurons[IndexF323D(Neurons[0], Neurons[1], Neurons[2], u32(ri),u32(di),u32(NrnCaP))];
- var sNrnCap = Neurons[IndexF323D(Neurons[0], Neurons[1], Neurons[2], u32(si),u32(di),u32(NrnCaP))];
+ var rLearnCaP = Neurons[IndexF323D(Neurons[0], Neurons[1], Neurons[2], u32(ri),u32(di),u32(LearnCaP))];
+ var sNrnCap = Neurons[IndexF323D(Neurons[0], Neurons[1], Neurons[2], u32(si),u32(di),u32(LearnCaP))];
var lwt = Synapses[IndexF322D(Synapses[0], Synapses[ // linear weight
1], u32(syni),u32(LWt))];
- var hebb = rNrnCaP * ((*pt).Learn.Hebb.Up*sNrnCap*(1-lwt) - (*pt).Learn.Hebb.Down*(1-sNrnCap)*lwt);
+ var hebb = rLearnCaP * ((*pt).Learn.Hebb.Up*sNrnCap*(1-lwt) - (*pt).Learn.Hebb.Down*(1-sNrnCap)*lwt);
SynapseTraces[IndexF323D(SynapseTraces[0], SynapseTraces[1], SynapseTraces[2], u32(syni),u32(DiDWt),u32(di))] = (*pt).Learn.LRate.Eff * hebb;
}
fn PathParams_DWtSynHip(pt: ptr, ctx: ptr, syni: u32,si: u32,ri: u32,lpi: u32,pi: u32,di: u32, isTarget: bool) {
@@ -861,13 +861,13 @@ fn PathParams_DWtSynHip(pt: ptr, ctx: ptr
if (Synapses[IndexF322D(Synapses[0], Synapses[1], u32(syni),u32(Wt))] == 0) {
return;
}
- var rNrnCaP = Neurons[IndexF323D(Neurons[0], Neurons[1], Neurons[2], u32(ri),u32(di),u32(NrnCaP))];
- var rNrnCaD = Neurons[IndexF323D(Neurons[0], Neurons[1], Neurons[2], u32(ri),u32(di),u32(NrnCaD))];
+ var rLearnCaP = Neurons[IndexF323D(Neurons[0], Neurons[1], Neurons[2], u32(ri),u32(di),u32(LearnCaP))];
+ var rLearnCaD = Neurons[IndexF323D(Neurons[0], Neurons[1], Neurons[2], u32(ri),u32(di),u32(LearnCaD))];
var err: f32;
if (isTarget) {
err = syCaP - syCaD; // for target layers, syn Ca drives error signal directly
} else {
- err = tr * (rNrnCaP - rNrnCaD); // hiddens: recv NMDA Ca drives error signal w/ trace credit
+ err = tr * (rLearnCaP - rLearnCaD); // hiddens: recv NMDA Ca drives error signal w/ trace credit
}
var lwt = Synapses[IndexF322D(Synapses[0], Synapses[ // linear weight
1], u32(syni),u32(LWt))];
@@ -876,10 +876,10 @@ fn PathParams_DWtSynHip(pt: ptr, ctx: ptr
} else {
err *= lwt;
}
- var sNrnCap = Neurons[IndexF323D(Neurons[0], Neurons[1], Neurons[2], u32(si),u32(di),u32(NrnCaP))];
+ var sNrnCap = Neurons[IndexF323D(Neurons[0], Neurons[1], Neurons[2], u32(si),u32(di),u32(LearnCaP))];
var savg = 0.5 + (*pt).Hip.SAvgCor*((*pt).Hip.SNominal-0.5);
savg = 0.5 / max((*pt).Hip.SAvgThr, savg); // keep this Sending Average Correction term within bounds (SAvgThr)
- var hebb = rNrnCaP * (sNrnCap*(savg-lwt) - (1-sNrnCap)*lwt);
+ var hebb = rLearnCaP * (sNrnCap*(savg-lwt) - (1-sNrnCap)*lwt);
var dwt = Neurons[IndexF323D(Neurons[0], Neurons[1], Neurons[2], u32(ri),u32(di),u32(RLRate))] * (*pt).Learn.LRate.Eff * ((*pt).Hip.Hebb*hebb + (*pt).Hip.Err*err);
SynapseTraces[IndexF323D(SynapseTraces[0], SynapseTraces[1],
SynapseTraces[2], u32(syni),u32(DiDWt),u32(di))] = dwt;
@@ -889,14 +889,14 @@ fn PathParams_DWtSynBLA(pt: ptr, ctx: ptr
var ach = GlobalScalars[IndexF322D(GlobalScalars[0], GlobalScalars[1], u32(GvACh),u32(di))];
if (GlobalScalars[IndexF322D(GlobalScalars[0], GlobalScalars[ // learn and reset
1], u32(GvHasRew),u32(di))] > 0) {
- var ract = Neurons[IndexF323D(Neurons[0], Neurons[1], Neurons[2], u32(ri),u32(di),u32(CaSpkD))];
+ var ract = Neurons[IndexF323D(Neurons[0], Neurons[1], Neurons[2], u32(ri),u32(di),u32(CaD))];
if (ract < (*pt).Learn.Trace.LearnThr) {
ract = f32(0);
}
var tr = SynapseTraces[IndexF323D(SynapseTraces[0], SynapseTraces[1], SynapseTraces[2], u32(syni),u32(Tr),u32(di))];
var ustr = (*pt).BLA.USTrace;
tr = ustr*Neurons[IndexF323D(Neurons[0], Neurons[1], Neurons[2], u32(si),u32(di),u32(Burst))] + (1.0-ustr)*tr;
- var delta = Neurons[IndexF323D(Neurons[0], Neurons[1], Neurons[2], u32(ri),u32(di),u32(CaSpkP))] - Neurons[IndexF323D(Neurons[0], Neurons[1], Neurons[2], u32(ri),u32(di),u32(SpkPrv))];
+ var delta = Neurons[IndexF323D(Neurons[0], Neurons[1], Neurons[2], u32(ri),u32(di),u32(CaP))] - Neurons[IndexF323D(Neurons[0], Neurons[1], Neurons[2], u32(ri),u32(di),u32(SpkPrv))];
if (delta < 0) { // neg delta learns slower in Acq, not Ext
delta *= (*pt).BLA.NegDeltaLRate;
}
@@ -946,7 +946,7 @@ fn PathParams_DWtSynRWPred(pt: ptr, ctx: ptr, ctx: ptr (*pt).Learn.Trace.LearnThr) { // key: prevents learning if < threshold
dtr += ach * ((*pt).Matrix.Credit * sact * rminus);
@@ -1012,9 +1012,9 @@ fn PathParams_DWtSynDSMatrix(pt: ptr, ctx: ptr (*pt).Learn.Trace.LearnThr) { // key: prevents learning if < threshold
dtr += rlr * ((*pt).Matrix.Credit * pfmod * sact * rminus);
@@ -1038,7 +1038,7 @@ fn PathParams_DWtSynVSPatch(pt: ptr, ctx: ptr u32 { return u32(PoolIntAvgMaxStart) + u32(vr)*u32(AvgMaxN) + u32(am); }
fn PoolNNeurons(pi: u32) -> i32 { return PoolsInt[IndexI323D(PoolsInt[0], PoolsInt[1], PoolsInt[2], u32(pi),u32(0),u32(PoolNeurEd))] - PoolsInt[IndexI323D(PoolsInt[0], PoolsInt[1], PoolsInt[
2], u32(pi),u32(0),u32(PoolNeurSt))]; }
@@ -1246,8 +1245,8 @@ fn PoolAvgMaxUpdateVar(vr: AvgMaxVars, pi: u32,di: u32, val: f32) {
atomicMax(&PoolsInt[IndexI323D(PoolsInt[0], PoolsInt[1], PoolsInt[2], u32(pi),u32(di),u32(vim))], i32(val*floatToInt));
}
fn PoolAvgMaxUpdate(pi: u32,di: u32,ni: u32) {
- PoolAvgMaxUpdateVar(AMCaSpkP, pi, di, abs(Neurons[IndexF323D(Neurons[0], Neurons[1], Neurons[2], u32(ni),u32(di),u32(avgMaxToNeuron[AMCaSpkP]))]));
- PoolAvgMaxUpdateVar(AMCaSpkD, pi, di, abs(Neurons[IndexF323D(Neurons[0], Neurons[1], Neurons[2], u32(ni),u32(di),u32(avgMaxToNeuron[AMCaSpkD]))]));
+ PoolAvgMaxUpdateVar(AMCaP, pi, di, abs(Neurons[IndexF323D(Neurons[0], Neurons[1], Neurons[2], u32(ni),u32(di),u32(avgMaxToNeuron[AMCaP]))]));
+ PoolAvgMaxUpdateVar(AMCaD, pi, di, abs(Neurons[IndexF323D(Neurons[0], Neurons[1], Neurons[2], u32(ni),u32(di),u32(avgMaxToNeuron[AMCaD]))]));
PoolAvgMaxUpdateVar(AMSpkMax, pi, di, abs(Neurons[IndexF323D(Neurons[0], Neurons[1], Neurons[2], u32(ni),u32(di),u32(avgMaxToNeuron[AMSpkMax]))]));
PoolAvgMaxUpdateVar(AMAct, pi, di, abs(Neurons[IndexF323D(Neurons[0], Neurons[1], Neurons[2], u32(ni),u32(di),u32(avgMaxToNeuron[AMAct]))]));
PoolAvgMaxUpdateVar(AMGeInt, pi, di, abs(Neurons[IndexF323D(Neurons[0], Neurons[1], Neurons[2], u32(ni),u32(di),u32(avgMaxToNeuron[AMGeInt]))]));
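The `avgMaxToNeuron` table changed in this hunk pairs each pool average/max slot with the neuron variable it summarizes under the new names. A minimal Go sketch of that pairing for reference (illustrative names only; the slot-5 name `AMGiInt` is assumed from the `GiInt` array entry, and this is not the generated axon/WGSL code):

```go
package main

import "fmt"

// avgMaxToNeuron pairs each pool average/max variable with the neuron
// variable it summarizes, using the renamed identifiers from this patch.
var avgMaxToNeuron = map[string]string{
	"AMCaP":    "CaP",    // was AMCaSpkP -> CaSpkP
	"AMCaD":    "CaD",    // was AMCaSpkD -> CaSpkD
	"AMSpkMax": "SpkMax",
	"AMAct":    "Act",
	"AMGeInt":  "GeInt",
	"AMGiInt":  "GiInt", // assumed name for slot 5
}

func main() {
	fmt.Println(avgMaxToNeuron["AMCaP"]) // CaP
}
```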
diff --git a/axon/shaders/InitGBuffsPath.wgsl b/axon/shaders/InitGBuffsPath.wgsl
index bafb76cb3..eeb538eaa 100644
--- a/axon/shaders/InitGBuffsPath.wgsl
+++ b/axon/shaders/InitGBuffsPath.wgsl
@@ -431,7 +431,7 @@ const ViewTimesN: ViewTimes = 7;
const DAModTypesN: DAModTypes = 4;
const ValenceTypesN: ValenceTypes = 3;
const NeuronFlagsN: NeuronFlags = 9;
-const NeuronVarsN: NeuronVars = 90;
+const NeuronVarsN: NeuronVars = 89;
const NeuronAvgVarsN: NeuronAvgVars = 7;
const NeuronIndexVarsN: NeuronIndexVars = 3;
const PathTypesN: PathTypes = 12;
@@ -731,7 +731,7 @@ const LayerRewPredNeg: LayerVars = 10;
///////////// import: "learn-path.go"
///////////// import: "learn.go"
-struct CaLrnParams {
+struct LearnCaParams {
Norm: f32,
SpkVGCC: i32,
SpkVgccCa: f32,
@@ -764,7 +764,7 @@ struct RLRateParams {
pad: i32,
}
struct LearnNeurParams {
- CaLearn: CaLrnParams,
+ CaLearn: LearnCaParams,
CaSpk: NeurCaParams,
LrnNMDA: NMDAParams,
TrgAvgAct: TrgAvgActParams,
@@ -920,82 +920,81 @@ const ISI: NeuronVars = 10;
const ISIAvg: NeuronVars = 11;
const Ext: NeuronVars = 12;
const Target: NeuronVars = 13;
-const CaSpkM: NeuronVars = 14;
-const CaSpkP: NeuronVars = 15;
-const CaSpkD: NeuronVars = 16;
-const CaSpkPM: NeuronVars = 17;
-const CaLrn: NeuronVars = 18;
-const NrnCaM: NeuronVars = 19;
-const NrnCaP: NeuronVars = 20;
-const NrnCaD: NeuronVars = 21;
-const CaDiff: NeuronVars = 22;
-const RLRate: NeuronVars = 23;
-const GnmdaSyn: NeuronVars = 24;
-const Gnmda: NeuronVars = 25;
-const GnmdaLrn: NeuronVars = 26;
-const GnmdaMaint: NeuronVars = 27;
-const NmdaCa: NeuronVars = 28;
-const Gvgcc: NeuronVars = 29;
-const VgccM: NeuronVars = 30;
-const VgccH: NeuronVars = 31;
-const VgccCa: NeuronVars = 32;
-const VgccCaInt: NeuronVars = 33;
-const Burst: NeuronVars = 34;
-const BurstPrv: NeuronVars = 35;
-const CtxtGe: NeuronVars = 36;
-const CtxtGeRaw: NeuronVars = 37;
-const CtxtGeOrig: NeuronVars = 38;
-const GgabaB: NeuronVars = 39;
-const GABAB: NeuronVars = 40;
-const GABABx: NeuronVars = 41;
-const Gak: NeuronVars = 42;
-const SSGiDend: NeuronVars = 43;
-const GknaMed: NeuronVars = 44;
-const GknaSlow: NeuronVars = 45;
-const Gkir: NeuronVars = 46;
-const KirM: NeuronVars = 47;
-const Gsk: NeuronVars = 48;
-const SKCaIn: NeuronVars = 49;
-const SKCaR: NeuronVars = 50;
-const SKCaM: NeuronVars = 51;
-const Gmahp: NeuronVars = 52;
-const MahpN: NeuronVars = 53;
-const Gsahp: NeuronVars = 54;
-const SahpCa: NeuronVars = 55;
-const SahpN: NeuronVars = 56;
-const ActM: NeuronVars = 57;
-const ActP: NeuronVars = 58;
-const SpkSt1: NeuronVars = 59;
-const SpkSt2: NeuronVars = 60;
-const SpkMax: NeuronVars = 61;
-const SpkMaxCa: NeuronVars = 62;
-const SpkBin0: NeuronVars = 63;
-const SpkBin1: NeuronVars = 64;
-const SpkBin2: NeuronVars = 65;
-const SpkBin3: NeuronVars = 66;
-const SpkBin4: NeuronVars = 67;
-const SpkBin5: NeuronVars = 68;
-const SpkBin6: NeuronVars = 69;
-const SpkBin7: NeuronVars = 70;
-const SpkPrv: NeuronVars = 71;
-const GeNoise: NeuronVars = 72;
-const GeNoiseP: NeuronVars = 73;
-const GiNoise: NeuronVars = 74;
-const GiNoiseP: NeuronVars = 75;
-const GeExt: NeuronVars = 76;
-const GeRaw: NeuronVars = 77;
-const GeSyn: NeuronVars = 78;
-const GiRaw: NeuronVars = 79;
-const GiSyn: NeuronVars = 80;
-const GeInt: NeuronVars = 81;
-const GeIntNorm: NeuronVars = 82;
-const GiInt: NeuronVars = 83;
-const GModRaw: NeuronVars = 84;
-const GModSyn: NeuronVars = 85;
-const SMaintP: NeuronVars = 86;
-const GMaintRaw: NeuronVars = 87;
-const GMaintSyn: NeuronVars = 88;
-const NeurFlags: NeuronVars = 89;
+const CaM: NeuronVars = 14;
+const CaP: NeuronVars = 15;
+const CaD: NeuronVars = 16;
+const LearnCa: NeuronVars = 17;
+const LearnCaM: NeuronVars = 18;
+const LearnCaP: NeuronVars = 19;
+const LearnCaD: NeuronVars = 20;
+const CaDiff: NeuronVars = 21;
+const RLRate: NeuronVars = 22;
+const GnmdaSyn: NeuronVars = 23;
+const Gnmda: NeuronVars = 24;
+const GnmdaLrn: NeuronVars = 25;
+const GnmdaMaint: NeuronVars = 26;
+const NmdaCa: NeuronVars = 27;
+const Gvgcc: NeuronVars = 28;
+const VgccM: NeuronVars = 29;
+const VgccH: NeuronVars = 30;
+const VgccCa: NeuronVars = 31;
+const VgccCaInt: NeuronVars = 32;
+const Burst: NeuronVars = 33;
+const BurstPrv: NeuronVars = 34;
+const CtxtGe: NeuronVars = 35;
+const CtxtGeRaw: NeuronVars = 36;
+const CtxtGeOrig: NeuronVars = 37;
+const GgabaB: NeuronVars = 38;
+const GABAB: NeuronVars = 39;
+const GABABx: NeuronVars = 40;
+const Gak: NeuronVars = 41;
+const SSGiDend: NeuronVars = 42;
+const GknaMed: NeuronVars = 43;
+const GknaSlow: NeuronVars = 44;
+const Gkir: NeuronVars = 45;
+const KirM: NeuronVars = 46;
+const Gsk: NeuronVars = 47;
+const SKCaIn: NeuronVars = 48;
+const SKCaR: NeuronVars = 49;
+const SKCaM: NeuronVars = 50;
+const Gmahp: NeuronVars = 51;
+const MahpN: NeuronVars = 52;
+const Gsahp: NeuronVars = 53;
+const SahpCa: NeuronVars = 54;
+const SahpN: NeuronVars = 55;
+const ActM: NeuronVars = 56;
+const ActP: NeuronVars = 57;
+const Beta1: NeuronVars = 58;
+const Beta2: NeuronVars = 59;
+const SpkMax: NeuronVars = 60;
+const SpkMaxCa: NeuronVars = 61;
+const SpkBin0: NeuronVars = 62;
+const SpkBin1: NeuronVars = 63;
+const SpkBin2: NeuronVars = 64;
+const SpkBin3: NeuronVars = 65;
+const SpkBin4: NeuronVars = 66;
+const SpkBin5: NeuronVars = 67;
+const SpkBin6: NeuronVars = 68;
+const SpkBin7: NeuronVars = 69;
+const SpkPrv: NeuronVars = 70;
+const GeNoise: NeuronVars = 71;
+const GeNoiseP: NeuronVars = 72;
+const GiNoise: NeuronVars = 73;
+const GiNoiseP: NeuronVars = 74;
+const GeExt: NeuronVars = 75;
+const GeRaw: NeuronVars = 76;
+const GeSyn: NeuronVars = 77;
+const GiRaw: NeuronVars = 78;
+const GiSyn: NeuronVars = 79;
+const GeInt: NeuronVars = 80;
+const GeIntNorm: NeuronVars = 81;
+const GiInt: NeuronVars = 82;
+const GModRaw: NeuronVars = 83;
+const GModSyn: NeuronVars = 84;
+const SMaintP: NeuronVars = 85;
+const GMaintRaw: NeuronVars = 86;
+const GMaintSyn: NeuronVars = 87;
+const NeurFlags: NeuronVars = 88;
alias NeuronAvgVars = i32; //enums:enum
const ActAvg: NeuronAvgVars = 0;
const AvgPct: NeuronAvgVars = 1;
@@ -1126,8 +1125,8 @@ const AMMinus: AvgMaxPhases = 1;
const AMPlus: AvgMaxPhases = 2;
const AMPrev: AvgMaxPhases = 3;
alias AvgMaxVars = i32; //enums:enum -trim-prefix AM
-const AMCaSpkP: AvgMaxVars = 0;
-const AMCaSpkD: AvgMaxVars = 1;
+const AMCaP: AvgMaxVars = 0;
+const AMCaD: AvgMaxVars = 1;
const AMSpkMax: AvgMaxVars = 2;
const AMAct: AvgMaxVars = 3;
const AMGeInt: AvgMaxVars = 4;
@@ -1136,7 +1135,7 @@ const AMAvgDif: AvgMaxVars = 6;
const poolFloatAvgMaxStart = InhibVarsN;
const PoolVarsN = poolFloatAvgMaxStart + InhibVars(i32(AvgMaxVarsN)*i32(AvgMaxN)*i32(AvgMaxPhasesN));
const PoolIntVarsTot = PoolIntAvgMaxStart + PoolIntVars(i32(AvgMaxVarsN)*i32(AvgMaxN));
-const avgMaxToNeuron = array(CaSpkP, CaSpkD, SpkMax, Act, GeInt, GiInt);
+const avgMaxToNeuron = array(CaP, CaD, SpkMax, Act, GeInt, GiInt);
///////////// import: "rand.go"
alias RandFunIndex = u32;
diff --git a/axon/shaders/LayerGi.wgsl b/axon/shaders/LayerGi.wgsl
index f0fb6bb75..fb5cfaae6 100644
--- a/axon/shaders/LayerGi.wgsl
+++ b/axon/shaders/LayerGi.wgsl
@@ -435,7 +435,7 @@ const ViewTimesN: ViewTimes = 7;
const DAModTypesN: DAModTypes = 4;
const ValenceTypesN: ValenceTypes = 3;
const NeuronFlagsN: NeuronFlags = 9;
-const NeuronVarsN: NeuronVars = 90;
+const NeuronVarsN: NeuronVars = 89;
const NeuronAvgVarsN: NeuronAvgVars = 7;
const NeuronIndexVarsN: NeuronIndexVars = 3;
const PathTypesN: PathTypes = 12;
@@ -820,7 +820,7 @@ const LayerRewPredNeg: LayerVars = 10;
///////////// import: "learn-path.go"
///////////// import: "learn.go"
-struct CaLrnParams {
+struct LearnCaParams {
Norm: f32,
SpkVGCC: i32,
SpkVgccCa: f32,
@@ -853,7 +853,7 @@ struct RLRateParams {
pad: i32,
}
struct LearnNeurParams {
- CaLearn: CaLrnParams,
+ CaLearn: LearnCaParams,
CaSpk: NeurCaParams,
LrnNMDA: NMDAParams,
TrgAvgAct: TrgAvgActParams,
@@ -1009,82 +1009,81 @@ const ISI: NeuronVars = 10;
const ISIAvg: NeuronVars = 11;
const Ext: NeuronVars = 12;
const Target: NeuronVars = 13;
-const CaSpkM: NeuronVars = 14;
-const CaSpkP: NeuronVars = 15;
-const CaSpkD: NeuronVars = 16;
-const CaSpkPM: NeuronVars = 17;
-const CaLrn: NeuronVars = 18;
-const NrnCaM: NeuronVars = 19;
-const NrnCaP: NeuronVars = 20;
-const NrnCaD: NeuronVars = 21;
-const CaDiff: NeuronVars = 22;
-const RLRate: NeuronVars = 23;
-const GnmdaSyn: NeuronVars = 24;
-const Gnmda: NeuronVars = 25;
-const GnmdaLrn: NeuronVars = 26;
-const GnmdaMaint: NeuronVars = 27;
-const NmdaCa: NeuronVars = 28;
-const Gvgcc: NeuronVars = 29;
-const VgccM: NeuronVars = 30;
-const VgccH: NeuronVars = 31;
-const VgccCa: NeuronVars = 32;
-const VgccCaInt: NeuronVars = 33;
-const Burst: NeuronVars = 34;
-const BurstPrv: NeuronVars = 35;
-const CtxtGe: NeuronVars = 36;
-const CtxtGeRaw: NeuronVars = 37;
-const CtxtGeOrig: NeuronVars = 38;
-const GgabaB: NeuronVars = 39;
-const GABAB: NeuronVars = 40;
-const GABABx: NeuronVars = 41;
-const Gak: NeuronVars = 42;
-const SSGiDend: NeuronVars = 43;
-const GknaMed: NeuronVars = 44;
-const GknaSlow: NeuronVars = 45;
-const Gkir: NeuronVars = 46;
-const KirM: NeuronVars = 47;
-const Gsk: NeuronVars = 48;
-const SKCaIn: NeuronVars = 49;
-const SKCaR: NeuronVars = 50;
-const SKCaM: NeuronVars = 51;
-const Gmahp: NeuronVars = 52;
-const MahpN: NeuronVars = 53;
-const Gsahp: NeuronVars = 54;
-const SahpCa: NeuronVars = 55;
-const SahpN: NeuronVars = 56;
-const ActM: NeuronVars = 57;
-const ActP: NeuronVars = 58;
-const SpkSt1: NeuronVars = 59;
-const SpkSt2: NeuronVars = 60;
-const SpkMax: NeuronVars = 61;
-const SpkMaxCa: NeuronVars = 62;
-const SpkBin0: NeuronVars = 63;
-const SpkBin1: NeuronVars = 64;
-const SpkBin2: NeuronVars = 65;
-const SpkBin3: NeuronVars = 66;
-const SpkBin4: NeuronVars = 67;
-const SpkBin5: NeuronVars = 68;
-const SpkBin6: NeuronVars = 69;
-const SpkBin7: NeuronVars = 70;
-const SpkPrv: NeuronVars = 71;
-const GeNoise: NeuronVars = 72;
-const GeNoiseP: NeuronVars = 73;
-const GiNoise: NeuronVars = 74;
-const GiNoiseP: NeuronVars = 75;
-const GeExt: NeuronVars = 76;
-const GeRaw: NeuronVars = 77;
-const GeSyn: NeuronVars = 78;
-const GiRaw: NeuronVars = 79;
-const GiSyn: NeuronVars = 80;
-const GeInt: NeuronVars = 81;
-const GeIntNorm: NeuronVars = 82;
-const GiInt: NeuronVars = 83;
-const GModRaw: NeuronVars = 84;
-const GModSyn: NeuronVars = 85;
-const SMaintP: NeuronVars = 86;
-const GMaintRaw: NeuronVars = 87;
-const GMaintSyn: NeuronVars = 88;
-const NeurFlags: NeuronVars = 89;
+const CaM: NeuronVars = 14;
+const CaP: NeuronVars = 15;
+const CaD: NeuronVars = 16;
+const LearnCa: NeuronVars = 17;
+const LearnCaM: NeuronVars = 18;
+const LearnCaP: NeuronVars = 19;
+const LearnCaD: NeuronVars = 20;
+const CaDiff: NeuronVars = 21;
+const RLRate: NeuronVars = 22;
+const GnmdaSyn: NeuronVars = 23;
+const Gnmda: NeuronVars = 24;
+const GnmdaLrn: NeuronVars = 25;
+const GnmdaMaint: NeuronVars = 26;
+const NmdaCa: NeuronVars = 27;
+const Gvgcc: NeuronVars = 28;
+const VgccM: NeuronVars = 29;
+const VgccH: NeuronVars = 30;
+const VgccCa: NeuronVars = 31;
+const VgccCaInt: NeuronVars = 32;
+const Burst: NeuronVars = 33;
+const BurstPrv: NeuronVars = 34;
+const CtxtGe: NeuronVars = 35;
+const CtxtGeRaw: NeuronVars = 36;
+const CtxtGeOrig: NeuronVars = 37;
+const GgabaB: NeuronVars = 38;
+const GABAB: NeuronVars = 39;
+const GABABx: NeuronVars = 40;
+const Gak: NeuronVars = 41;
+const SSGiDend: NeuronVars = 42;
+const GknaMed: NeuronVars = 43;
+const GknaSlow: NeuronVars = 44;
+const Gkir: NeuronVars = 45;
+const KirM: NeuronVars = 46;
+const Gsk: NeuronVars = 47;
+const SKCaIn: NeuronVars = 48;
+const SKCaR: NeuronVars = 49;
+const SKCaM: NeuronVars = 50;
+const Gmahp: NeuronVars = 51;
+const MahpN: NeuronVars = 52;
+const Gsahp: NeuronVars = 53;
+const SahpCa: NeuronVars = 54;
+const SahpN: NeuronVars = 55;
+const ActM: NeuronVars = 56;
+const ActP: NeuronVars = 57;
+const Beta1: NeuronVars = 58;
+const Beta2: NeuronVars = 59;
+const SpkMax: NeuronVars = 60;
+const SpkMaxCa: NeuronVars = 61;
+const SpkBin0: NeuronVars = 62;
+const SpkBin1: NeuronVars = 63;
+const SpkBin2: NeuronVars = 64;
+const SpkBin3: NeuronVars = 65;
+const SpkBin4: NeuronVars = 66;
+const SpkBin5: NeuronVars = 67;
+const SpkBin6: NeuronVars = 68;
+const SpkBin7: NeuronVars = 69;
+const SpkPrv: NeuronVars = 70;
+const GeNoise: NeuronVars = 71;
+const GeNoiseP: NeuronVars = 72;
+const GiNoise: NeuronVars = 73;
+const GiNoiseP: NeuronVars = 74;
+const GeExt: NeuronVars = 75;
+const GeRaw: NeuronVars = 76;
+const GeSyn: NeuronVars = 77;
+const GiRaw: NeuronVars = 78;
+const GiSyn: NeuronVars = 79;
+const GeInt: NeuronVars = 80;
+const GeIntNorm: NeuronVars = 81;
+const GiInt: NeuronVars = 82;
+const GModRaw: NeuronVars = 83;
+const GModSyn: NeuronVars = 84;
+const SMaintP: NeuronVars = 85;
+const GMaintRaw: NeuronVars = 86;
+const GMaintSyn: NeuronVars = 87;
+const NeurFlags: NeuronVars = 88;
alias NeuronAvgVars = i32; //enums:enum
const ActAvg: NeuronAvgVars = 0;
const AvgPct: NeuronAvgVars = 1;
@@ -1215,8 +1214,8 @@ const AMMinus: AvgMaxPhases = 1;
const AMPlus: AvgMaxPhases = 2;
const AMPrev: AvgMaxPhases = 3;
alias AvgMaxVars = i32; //enums:enum -trim-prefix AM
-const AMCaSpkP: AvgMaxVars = 0;
-const AMCaSpkD: AvgMaxVars = 1;
+const AMCaP: AvgMaxVars = 0;
+const AMCaD: AvgMaxVars = 1;
const AMSpkMax: AvgMaxVars = 2;
const AMAct: AvgMaxVars = 3;
const AMGeInt: AvgMaxVars = 4;
@@ -1225,7 +1224,7 @@ const AMAvgDif: AvgMaxVars = 6;
const poolFloatAvgMaxStart = InhibVarsN;
const PoolVarsN = poolFloatAvgMaxStart + InhibVars(i32(AvgMaxVarsN)*i32(AvgMaxN)*i32(AvgMaxPhasesN));
const PoolIntVarsTot = PoolIntAvgMaxStart + PoolIntVars(i32(AvgMaxVarsN)*i32(AvgMaxN));
-const avgMaxToNeuron = array(CaSpkP, CaSpkD, SpkMax, Act, GeInt, GiInt);
+const avgMaxToNeuron = array(CaP, CaD, SpkMax, Act, GeInt, GiInt);
fn AvgMaxVarIndex(vr: AvgMaxVars, phase: AvgMaxPhases, am: AvgMax) -> u32 {
return u32(poolFloatAvgMaxStart) + u32(vr)*u32(AvgMaxN)*u32(AvgMaxPhasesN) + u32(phase)*u32(AvgMaxN) + u32(am);
}
diff --git a/axon/shaders/MinusPhaseNeuron.wgsl b/axon/shaders/MinusPhaseNeuron.wgsl
index 583af42b2..27748af1a 100644
--- a/axon/shaders/MinusPhaseNeuron.wgsl
+++ b/axon/shaders/MinusPhaseNeuron.wgsl
@@ -83,7 +83,6 @@ fn IndexI323D(s0: i32, s1: i32, s2: i32, i0: u32, i1: u32, i2: u32) -> u32 {
///////////// import: "act-layer.go"
fn LayerParams_MinusPhaseNeuron(ly: ptr, ctx: ptr, ni: u32,di: u32) {
Neurons[IndexF323D(Neurons[0], Neurons[1], Neurons[2], u32(ni),u32(di),u32(ActM))] = Neurons[IndexF323D(Neurons[0], Neurons[1], Neurons[2], u32(ni),u32(di),u32(ActInt))];
- Neurons[IndexF323D(Neurons[0], Neurons[1], Neurons[2], u32(ni),u32(di),u32(CaSpkPM))] = Neurons[IndexF323D(Neurons[0], Neurons[1], Neurons[2], u32(ni),u32(di),u32(CaSpkP))];
}
///////////// import: "act-net.go"
@@ -429,7 +428,7 @@ const ViewTimesN: ViewTimes = 7;
const DAModTypesN: DAModTypes = 4;
const ValenceTypesN: ValenceTypes = 3;
const NeuronFlagsN: NeuronFlags = 9;
-const NeuronVarsN: NeuronVars = 90;
+const NeuronVarsN: NeuronVars = 89;
const NeuronAvgVarsN: NeuronAvgVars = 7;
const NeuronIndexVarsN: NeuronIndexVars = 3;
const PathTypesN: PathTypes = 12;
@@ -729,7 +728,7 @@ const LayerRewPredNeg: LayerVars = 10;
///////////// import: "learn-path.go"
///////////// import: "learn.go"
-struct CaLrnParams {
+struct LearnCaParams {
Norm: f32,
SpkVGCC: i32,
SpkVgccCa: f32,
@@ -762,7 +761,7 @@ struct RLRateParams {
pad: i32,
}
struct LearnNeurParams {
- CaLearn: CaLrnParams,
+ CaLearn: LearnCaParams,
CaSpk: NeurCaParams,
LrnNMDA: NMDAParams,
TrgAvgAct: TrgAvgActParams,
@@ -918,82 +917,81 @@ const ISI: NeuronVars = 10;
const ISIAvg: NeuronVars = 11;
const Ext: NeuronVars = 12;
const Target: NeuronVars = 13;
-const CaSpkM: NeuronVars = 14;
-const CaSpkP: NeuronVars = 15;
-const CaSpkD: NeuronVars = 16;
-const CaSpkPM: NeuronVars = 17;
-const CaLrn: NeuronVars = 18;
-const NrnCaM: NeuronVars = 19;
-const NrnCaP: NeuronVars = 20;
-const NrnCaD: NeuronVars = 21;
-const CaDiff: NeuronVars = 22;
-const RLRate: NeuronVars = 23;
-const GnmdaSyn: NeuronVars = 24;
-const Gnmda: NeuronVars = 25;
-const GnmdaLrn: NeuronVars = 26;
-const GnmdaMaint: NeuronVars = 27;
-const NmdaCa: NeuronVars = 28;
-const Gvgcc: NeuronVars = 29;
-const VgccM: NeuronVars = 30;
-const VgccH: NeuronVars = 31;
-const VgccCa: NeuronVars = 32;
-const VgccCaInt: NeuronVars = 33;
-const Burst: NeuronVars = 34;
-const BurstPrv: NeuronVars = 35;
-const CtxtGe: NeuronVars = 36;
-const CtxtGeRaw: NeuronVars = 37;
-const CtxtGeOrig: NeuronVars = 38;
-const GgabaB: NeuronVars = 39;
-const GABAB: NeuronVars = 40;
-const GABABx: NeuronVars = 41;
-const Gak: NeuronVars = 42;
-const SSGiDend: NeuronVars = 43;
-const GknaMed: NeuronVars = 44;
-const GknaSlow: NeuronVars = 45;
-const Gkir: NeuronVars = 46;
-const KirM: NeuronVars = 47;
-const Gsk: NeuronVars = 48;
-const SKCaIn: NeuronVars = 49;
-const SKCaR: NeuronVars = 50;
-const SKCaM: NeuronVars = 51;
-const Gmahp: NeuronVars = 52;
-const MahpN: NeuronVars = 53;
-const Gsahp: NeuronVars = 54;
-const SahpCa: NeuronVars = 55;
-const SahpN: NeuronVars = 56;
-const ActM: NeuronVars = 57;
-const ActP: NeuronVars = 58;
-const SpkSt1: NeuronVars = 59;
-const SpkSt2: NeuronVars = 60;
-const SpkMax: NeuronVars = 61;
-const SpkMaxCa: NeuronVars = 62;
-const SpkBin0: NeuronVars = 63;
-const SpkBin1: NeuronVars = 64;
-const SpkBin2: NeuronVars = 65;
-const SpkBin3: NeuronVars = 66;
-const SpkBin4: NeuronVars = 67;
-const SpkBin5: NeuronVars = 68;
-const SpkBin6: NeuronVars = 69;
-const SpkBin7: NeuronVars = 70;
-const SpkPrv: NeuronVars = 71;
-const GeNoise: NeuronVars = 72;
-const GeNoiseP: NeuronVars = 73;
-const GiNoise: NeuronVars = 74;
-const GiNoiseP: NeuronVars = 75;
-const GeExt: NeuronVars = 76;
-const GeRaw: NeuronVars = 77;
-const GeSyn: NeuronVars = 78;
-const GiRaw: NeuronVars = 79;
-const GiSyn: NeuronVars = 80;
-const GeInt: NeuronVars = 81;
-const GeIntNorm: NeuronVars = 82;
-const GiInt: NeuronVars = 83;
-const GModRaw: NeuronVars = 84;
-const GModSyn: NeuronVars = 85;
-const SMaintP: NeuronVars = 86;
-const GMaintRaw: NeuronVars = 87;
-const GMaintSyn: NeuronVars = 88;
-const NeurFlags: NeuronVars = 89;
+const CaM: NeuronVars = 14;
+const CaP: NeuronVars = 15;
+const CaD: NeuronVars = 16;
+const LearnCa: NeuronVars = 17;
+const LearnCaM: NeuronVars = 18;
+const LearnCaP: NeuronVars = 19;
+const LearnCaD: NeuronVars = 20;
+const CaDiff: NeuronVars = 21;
+const RLRate: NeuronVars = 22;
+const GnmdaSyn: NeuronVars = 23;
+const Gnmda: NeuronVars = 24;
+const GnmdaLrn: NeuronVars = 25;
+const GnmdaMaint: NeuronVars = 26;
+const NmdaCa: NeuronVars = 27;
+const Gvgcc: NeuronVars = 28;
+const VgccM: NeuronVars = 29;
+const VgccH: NeuronVars = 30;
+const VgccCa: NeuronVars = 31;
+const VgccCaInt: NeuronVars = 32;
+const Burst: NeuronVars = 33;
+const BurstPrv: NeuronVars = 34;
+const CtxtGe: NeuronVars = 35;
+const CtxtGeRaw: NeuronVars = 36;
+const CtxtGeOrig: NeuronVars = 37;
+const GgabaB: NeuronVars = 38;
+const GABAB: NeuronVars = 39;
+const GABABx: NeuronVars = 40;
+const Gak: NeuronVars = 41;
+const SSGiDend: NeuronVars = 42;
+const GknaMed: NeuronVars = 43;
+const GknaSlow: NeuronVars = 44;
+const Gkir: NeuronVars = 45;
+const KirM: NeuronVars = 46;
+const Gsk: NeuronVars = 47;
+const SKCaIn: NeuronVars = 48;
+const SKCaR: NeuronVars = 49;
+const SKCaM: NeuronVars = 50;
+const Gmahp: NeuronVars = 51;
+const MahpN: NeuronVars = 52;
+const Gsahp: NeuronVars = 53;
+const SahpCa: NeuronVars = 54;
+const SahpN: NeuronVars = 55;
+const ActM: NeuronVars = 56;
+const ActP: NeuronVars = 57;
+const Beta1: NeuronVars = 58;
+const Beta2: NeuronVars = 59;
+const SpkMax: NeuronVars = 60;
+const SpkMaxCa: NeuronVars = 61;
+const SpkBin0: NeuronVars = 62;
+const SpkBin1: NeuronVars = 63;
+const SpkBin2: NeuronVars = 64;
+const SpkBin3: NeuronVars = 65;
+const SpkBin4: NeuronVars = 66;
+const SpkBin5: NeuronVars = 67;
+const SpkBin6: NeuronVars = 68;
+const SpkBin7: NeuronVars = 69;
+const SpkPrv: NeuronVars = 70;
+const GeNoise: NeuronVars = 71;
+const GeNoiseP: NeuronVars = 72;
+const GiNoise: NeuronVars = 73;
+const GiNoiseP: NeuronVars = 74;
+const GeExt: NeuronVars = 75;
+const GeRaw: NeuronVars = 76;
+const GeSyn: NeuronVars = 77;
+const GiRaw: NeuronVars = 78;
+const GiSyn: NeuronVars = 79;
+const GeInt: NeuronVars = 80;
+const GeIntNorm: NeuronVars = 81;
+const GiInt: NeuronVars = 82;
+const GModRaw: NeuronVars = 83;
+const GModSyn: NeuronVars = 84;
+const SMaintP: NeuronVars = 85;
+const GMaintRaw: NeuronVars = 86;
+const GMaintSyn: NeuronVars = 87;
+const NeurFlags: NeuronVars = 88;
alias NeuronAvgVars = i32; //enums:enum
const ActAvg: NeuronAvgVars = 0;
const AvgPct: NeuronAvgVars = 1;
@@ -1124,8 +1122,8 @@ const AMMinus: AvgMaxPhases = 1;
const AMPlus: AvgMaxPhases = 2;
const AMPrev: AvgMaxPhases = 3;
alias AvgMaxVars = i32; //enums:enum -trim-prefix AM
-const AMCaSpkP: AvgMaxVars = 0;
-const AMCaSpkD: AvgMaxVars = 1;
+const AMCaP: AvgMaxVars = 0;
+const AMCaD: AvgMaxVars = 1;
const AMSpkMax: AvgMaxVars = 2;
const AMAct: AvgMaxVars = 3;
const AMGeInt: AvgMaxVars = 4;
@@ -1134,7 +1132,7 @@ const AMAvgDif: AvgMaxVars = 6;
const poolFloatAvgMaxStart = InhibVarsN;
const PoolVarsN = poolFloatAvgMaxStart + InhibVars(i32(AvgMaxVarsN)*i32(AvgMaxN)*i32(AvgMaxPhasesN));
const PoolIntVarsTot = PoolIntAvgMaxStart + PoolIntVars(i32(AvgMaxVarsN)*i32(AvgMaxN));
-const avgMaxToNeuron = array(CaSpkP, CaSpkD, SpkMax, Act, GeInt, GiInt);
+const avgMaxToNeuron = array(CaP, CaD, SpkMax, Act, GeInt, GiInt);
///////////// import: "rand.go"
alias RandFunIndex = u32;
diff --git a/axon/shaders/MinusPhasePool.wgsl b/axon/shaders/MinusPhasePool.wgsl
index 0db714055..200b17ced 100644
--- a/axon/shaders/MinusPhasePool.wgsl
+++ b/axon/shaders/MinusPhasePool.wgsl
@@ -445,7 +445,7 @@ const ViewTimesN: ViewTimes = 7;
const DAModTypesN: DAModTypes = 4;
const ValenceTypesN: ValenceTypes = 3;
const NeuronFlagsN: NeuronFlags = 9;
-const NeuronVarsN: NeuronVars = 90;
+const NeuronVarsN: NeuronVars = 89;
const NeuronAvgVarsN: NeuronAvgVars = 7;
const NeuronIndexVarsN: NeuronIndexVars = 3;
const PathTypesN: PathTypes = 12;
@@ -745,7 +745,7 @@ const LayerRewPredNeg: LayerVars = 10;
///////////// import: "learn-path.go"
///////////// import: "learn.go"
-struct CaLrnParams {
+struct LearnCaParams {
Norm: f32,
SpkVGCC: i32,
SpkVgccCa: f32,
@@ -778,7 +778,7 @@ struct RLRateParams {
pad: i32,
}
struct LearnNeurParams {
- CaLearn: CaLrnParams,
+ CaLearn: LearnCaParams,
CaSpk: NeurCaParams,
LrnNMDA: NMDAParams,
TrgAvgAct: TrgAvgActParams,
@@ -934,82 +934,81 @@ const ISI: NeuronVars = 10;
const ISIAvg: NeuronVars = 11;
const Ext: NeuronVars = 12;
const Target: NeuronVars = 13;
-const CaSpkM: NeuronVars = 14;
-const CaSpkP: NeuronVars = 15;
-const CaSpkD: NeuronVars = 16;
-const CaSpkPM: NeuronVars = 17;
-const CaLrn: NeuronVars = 18;
-const NrnCaM: NeuronVars = 19;
-const NrnCaP: NeuronVars = 20;
-const NrnCaD: NeuronVars = 21;
-const CaDiff: NeuronVars = 22;
-const RLRate: NeuronVars = 23;
-const GnmdaSyn: NeuronVars = 24;
-const Gnmda: NeuronVars = 25;
-const GnmdaLrn: NeuronVars = 26;
-const GnmdaMaint: NeuronVars = 27;
-const NmdaCa: NeuronVars = 28;
-const Gvgcc: NeuronVars = 29;
-const VgccM: NeuronVars = 30;
-const VgccH: NeuronVars = 31;
-const VgccCa: NeuronVars = 32;
-const VgccCaInt: NeuronVars = 33;
-const Burst: NeuronVars = 34;
-const BurstPrv: NeuronVars = 35;
-const CtxtGe: NeuronVars = 36;
-const CtxtGeRaw: NeuronVars = 37;
-const CtxtGeOrig: NeuronVars = 38;
-const GgabaB: NeuronVars = 39;
-const GABAB: NeuronVars = 40;
-const GABABx: NeuronVars = 41;
-const Gak: NeuronVars = 42;
-const SSGiDend: NeuronVars = 43;
-const GknaMed: NeuronVars = 44;
-const GknaSlow: NeuronVars = 45;
-const Gkir: NeuronVars = 46;
-const KirM: NeuronVars = 47;
-const Gsk: NeuronVars = 48;
-const SKCaIn: NeuronVars = 49;
-const SKCaR: NeuronVars = 50;
-const SKCaM: NeuronVars = 51;
-const Gmahp: NeuronVars = 52;
-const MahpN: NeuronVars = 53;
-const Gsahp: NeuronVars = 54;
-const SahpCa: NeuronVars = 55;
-const SahpN: NeuronVars = 56;
-const ActM: NeuronVars = 57;
-const ActP: NeuronVars = 58;
-const SpkSt1: NeuronVars = 59;
-const SpkSt2: NeuronVars = 60;
-const SpkMax: NeuronVars = 61;
-const SpkMaxCa: NeuronVars = 62;
-const SpkBin0: NeuronVars = 63;
-const SpkBin1: NeuronVars = 64;
-const SpkBin2: NeuronVars = 65;
-const SpkBin3: NeuronVars = 66;
-const SpkBin4: NeuronVars = 67;
-const SpkBin5: NeuronVars = 68;
-const SpkBin6: NeuronVars = 69;
-const SpkBin7: NeuronVars = 70;
-const SpkPrv: NeuronVars = 71;
-const GeNoise: NeuronVars = 72;
-const GeNoiseP: NeuronVars = 73;
-const GiNoise: NeuronVars = 74;
-const GiNoiseP: NeuronVars = 75;
-const GeExt: NeuronVars = 76;
-const GeRaw: NeuronVars = 77;
-const GeSyn: NeuronVars = 78;
-const GiRaw: NeuronVars = 79;
-const GiSyn: NeuronVars = 80;
-const GeInt: NeuronVars = 81;
-const GeIntNorm: NeuronVars = 82;
-const GiInt: NeuronVars = 83;
-const GModRaw: NeuronVars = 84;
-const GModSyn: NeuronVars = 85;
-const SMaintP: NeuronVars = 86;
-const GMaintRaw: NeuronVars = 87;
-const GMaintSyn: NeuronVars = 88;
-const NeurFlags: NeuronVars = 89;
+const CaM: NeuronVars = 14;
+const CaP: NeuronVars = 15;
+const CaD: NeuronVars = 16;
+const LearnCa: NeuronVars = 17;
+const LearnCaM: NeuronVars = 18;
+const LearnCaP: NeuronVars = 19;
+const LearnCaD: NeuronVars = 20;
+const CaDiff: NeuronVars = 21;
+const RLRate: NeuronVars = 22;
+const GnmdaSyn: NeuronVars = 23;
+const Gnmda: NeuronVars = 24;
+const GnmdaLrn: NeuronVars = 25;
+const GnmdaMaint: NeuronVars = 26;
+const NmdaCa: NeuronVars = 27;
+const Gvgcc: NeuronVars = 28;
+const VgccM: NeuronVars = 29;
+const VgccH: NeuronVars = 30;
+const VgccCa: NeuronVars = 31;
+const VgccCaInt: NeuronVars = 32;
+const Burst: NeuronVars = 33;
+const BurstPrv: NeuronVars = 34;
+const CtxtGe: NeuronVars = 35;
+const CtxtGeRaw: NeuronVars = 36;
+const CtxtGeOrig: NeuronVars = 37;
+const GgabaB: NeuronVars = 38;
+const GABAB: NeuronVars = 39;
+const GABABx: NeuronVars = 40;
+const Gak: NeuronVars = 41;
+const SSGiDend: NeuronVars = 42;
+const GknaMed: NeuronVars = 43;
+const GknaSlow: NeuronVars = 44;
+const Gkir: NeuronVars = 45;
+const KirM: NeuronVars = 46;
+const Gsk: NeuronVars = 47;
+const SKCaIn: NeuronVars = 48;
+const SKCaR: NeuronVars = 49;
+const SKCaM: NeuronVars = 50;
+const Gmahp: NeuronVars = 51;
+const MahpN: NeuronVars = 52;
+const Gsahp: NeuronVars = 53;
+const SahpCa: NeuronVars = 54;
+const SahpN: NeuronVars = 55;
+const ActM: NeuronVars = 56;
+const ActP: NeuronVars = 57;
+const Beta1: NeuronVars = 58;
+const Beta2: NeuronVars = 59;
+const SpkMax: NeuronVars = 60;
+const SpkMaxCa: NeuronVars = 61;
+const SpkBin0: NeuronVars = 62;
+const SpkBin1: NeuronVars = 63;
+const SpkBin2: NeuronVars = 64;
+const SpkBin3: NeuronVars = 65;
+const SpkBin4: NeuronVars = 66;
+const SpkBin5: NeuronVars = 67;
+const SpkBin6: NeuronVars = 68;
+const SpkBin7: NeuronVars = 69;
+const SpkPrv: NeuronVars = 70;
+const GeNoise: NeuronVars = 71;
+const GeNoiseP: NeuronVars = 72;
+const GiNoise: NeuronVars = 73;
+const GiNoiseP: NeuronVars = 74;
+const GeExt: NeuronVars = 75;
+const GeRaw: NeuronVars = 76;
+const GeSyn: NeuronVars = 77;
+const GiRaw: NeuronVars = 78;
+const GiSyn: NeuronVars = 79;
+const GeInt: NeuronVars = 80;
+const GeIntNorm: NeuronVars = 81;
+const GiInt: NeuronVars = 82;
+const GModRaw: NeuronVars = 83;
+const GModSyn: NeuronVars = 84;
+const SMaintP: NeuronVars = 85;
+const GMaintRaw: NeuronVars = 86;
+const GMaintSyn: NeuronVars = 87;
+const NeurFlags: NeuronVars = 88;
alias NeuronAvgVars = i32; //enums:enum
const ActAvg: NeuronAvgVars = 0;
const AvgPct: NeuronAvgVars = 1;
@@ -1140,8 +1139,8 @@ const AMMinus: AvgMaxPhases = 1;
const AMPlus: AvgMaxPhases = 2;
const AMPrev: AvgMaxPhases = 3;
alias AvgMaxVars = i32; //enums:enum -trim-prefix AM
-const AMCaSpkP: AvgMaxVars = 0;
-const AMCaSpkD: AvgMaxVars = 1;
+const AMCaP: AvgMaxVars = 0;
+const AMCaD: AvgMaxVars = 1;
const AMSpkMax: AvgMaxVars = 2;
const AMAct: AvgMaxVars = 3;
const AMGeInt: AvgMaxVars = 4;
@@ -1150,7 +1149,7 @@ const AMAvgDif: AvgMaxVars = 6;
const poolFloatAvgMaxStart = InhibVarsN;
const PoolVarsN = poolFloatAvgMaxStart + InhibVars(i32(AvgMaxVarsN)*i32(AvgMaxN)*i32(AvgMaxPhasesN));
const PoolIntVarsTot = PoolIntAvgMaxStart + PoolIntVars(i32(AvgMaxVarsN)*i32(AvgMaxN));
-const avgMaxToNeuron = array(CaSpkP, CaSpkD, SpkMax, Act, GeInt, GiInt);
+const avgMaxToNeuron = array(CaP, CaD, SpkMax, Act, GeInt, GiInt);
fn AvgMaxVarIndex(vr: AvgMaxVars, phase: AvgMaxPhases, am: AvgMax) -> u32 {
return u32(poolFloatAvgMaxStart) + u32(vr)*u32(AvgMaxN)*u32(AvgMaxPhasesN) + u32(phase)*u32(AvgMaxN) + u32(am);
}
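Because the NeuronVars indices shift in this patch (CaSpkPM, SpkSt1, and SpkSt2 are dropped; Beta1 and Beta2 are added), external analysis or logging code that recorded variables by name needs the old-to-new rename applied. A small hypothetical Go helper illustrating that mapping, which is not part of the patch itself:

```go
package main

import "fmt"

// renamedNeuronVars maps the old variable names replaced in this patch to
// their new names; names with no entry are unchanged. CaSpkPM, SpkSt1, and
// SpkSt2 were removed without a successor; Beta1 and Beta2 are new.
var renamedNeuronVars = map[string]string{
	"CaSpkM": "CaM",
	"CaSpkP": "CaP",
	"CaSpkD": "CaD",
	"CaLrn":  "LearnCa",
	"NrnCaM": "LearnCaM",
	"NrnCaP": "LearnCaP",
	"NrnCaD": "LearnCaD",
}

// newName returns the post-rename variable name for an old name.
func newName(old string) string {
	if nw, ok := renamedNeuronVars[old]; ok {
		return nw
	}
	return old
}

func main() {
	fmt.Println(newName("CaSpkP"), newName("GeInt")) // CaP GeInt
}
```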
diff --git a/axon/shaders/MinusPhasePost.wgsl b/axon/shaders/MinusPhasePost.wgsl
index 561cafbb7..606fa5d34 100644
--- a/axon/shaders/MinusPhasePost.wgsl
+++ b/axon/shaders/MinusPhasePost.wgsl
@@ -268,13 +268,13 @@ fn ActParams_DecayLearnCa(ac: ptr, ctx: ptr u32 {
return u32(poolFloatAvgMaxStart) + u32(vr)*u32(AvgMaxN)*u32(AvgMaxPhasesN) + u32(phase)*u32(AvgMaxN) + u32(am);
}
diff --git a/axon/shaders/NewStateNeuron.wgsl b/axon/shaders/NewStateNeuron.wgsl
index 48f5ea191..3fe952f18 100644
--- a/axon/shaders/NewStateNeuron.wgsl
+++ b/axon/shaders/NewStateNeuron.wgsl
@@ -83,7 +83,7 @@ fn IndexI323D(s0: i32, s1: i32, s2: i32, i0: u32, i1: u32, i2: u32) -> u32 {
///////////// import: "act-layer.go"
fn LayerParams_NewStateNeuron(ly: ptr, ctx: ptr, ni: u32,di: u32) {
Neurons[IndexF323D(Neurons[0], Neurons[1], Neurons[2], u32(ni),u32(di),u32(BurstPrv))] = Neurons[IndexF323D(Neurons[0], Neurons[1], Neurons[2], u32(ni),u32(di),u32(Burst))];
- Neurons[IndexF323D(Neurons[0], Neurons[1], Neurons[2], u32(ni),u32(di),u32(SpkPrv))] = Neurons[IndexF323D(Neurons[0], Neurons[1], Neurons[2], u32(ni),u32(di),u32(CaSpkD))];
+ Neurons[IndexF323D(Neurons[0], Neurons[1], Neurons[2], u32(ni),u32(di),u32(SpkPrv))] = Neurons[IndexF323D(Neurons[0], Neurons[1], Neurons[2], u32(ni),u32(di),u32(CaD))];
Neurons[IndexF323D(Neurons[0], Neurons[1], Neurons[2], u32(ni),u32(di),u32(SpkMax))] = 0.0;
Neurons[IndexF323D(Neurons[0], Neurons[1], Neurons[2], u32(ni),u32(di),u32(SpkMaxCa))] = 0.0;
ActParams_DecayState(&(*ly).Acts, ctx, ni, di, (*ly).Acts.Decay.Act, (*ly).Acts.Decay.Glong, (*ly).Acts.Decay.AHP);
@@ -249,13 +249,13 @@ fn ActParams_DecayLearnCa(ac: ptr, ctx: ptr, ctx: ptr 0;
switch ((*ly).Type) {
case BLALayer: {
- dlr = RLRateParams_RLRateDiff(&(*ly).Learn.RLRate, nrnCaSpkP, Neurons[IndexF323D(Neurons[0], Neurons[1], Neurons[ // delta on previous trial
- 2], u32(ni),u32(di),u32(SpkPrv))]);
+ dlr = RLRateParams_RLRateDiff(&(*ly).Learn.RLRate, nrnCaP, Neurons[IndexF323D(Neurons[0], Neurons[1], Neurons[2], // delta on previous trial
+ u32(ni),u32(di),u32(SpkPrv))]);
if (!NeuroModParams_IsBLAExt(&(*ly).Learn.NeuroMod) && PoolsInt[IndexI323D(PoolsInt[0], PoolsInt[1], PoolsInt[2], u32(pi),u32(0),u32(PoolNeurSt))] == 0) { // first pool
dlr = f32(0); // first pool is novelty / curiosity -- no learn
}
@@ -116,7 +116,7 @@ fn LayerParams_PlusPhaseNeuron(ly: ptr, ctx: ptr, ctx: ptr, scap: f32,scad: f32)
}return (*rl).Min;
}
struct LearnNeurParams {
- CaLearn: CaLrnParams,
+ CaLearn: LearnCaParams,
CaSpk: NeurCaParams,
LrnNMDA: NMDAParams,
TrgAvgAct: TrgAvgActParams,
@@ -1051,82 +1051,81 @@ const ISI: NeuronVars = 10;
const ISIAvg: NeuronVars = 11;
const Ext: NeuronVars = 12;
const Target: NeuronVars = 13;
-const CaSpkM: NeuronVars = 14;
-const CaSpkP: NeuronVars = 15;
-const CaSpkD: NeuronVars = 16;
-const CaSpkPM: NeuronVars = 17;
-const CaLrn: NeuronVars = 18;
-const NrnCaM: NeuronVars = 19;
-const NrnCaP: NeuronVars = 20;
-const NrnCaD: NeuronVars = 21;
-const CaDiff: NeuronVars = 22;
-const RLRate: NeuronVars = 23;
-const GnmdaSyn: NeuronVars = 24;
-const Gnmda: NeuronVars = 25;
-const GnmdaLrn: NeuronVars = 26;
-const GnmdaMaint: NeuronVars = 27;
-const NmdaCa: NeuronVars = 28;
-const Gvgcc: NeuronVars = 29;
-const VgccM: NeuronVars = 30;
-const VgccH: NeuronVars = 31;
-const VgccCa: NeuronVars = 32;
-const VgccCaInt: NeuronVars = 33;
-const Burst: NeuronVars = 34;
-const BurstPrv: NeuronVars = 35;
-const CtxtGe: NeuronVars = 36;
-const CtxtGeRaw: NeuronVars = 37;
-const CtxtGeOrig: NeuronVars = 38;
-const GgabaB: NeuronVars = 39;
-const GABAB: NeuronVars = 40;
-const GABABx: NeuronVars = 41;
-const Gak: NeuronVars = 42;
-const SSGiDend: NeuronVars = 43;
-const GknaMed: NeuronVars = 44;
-const GknaSlow: NeuronVars = 45;
-const Gkir: NeuronVars = 46;
-const KirM: NeuronVars = 47;
-const Gsk: NeuronVars = 48;
-const SKCaIn: NeuronVars = 49;
-const SKCaR: NeuronVars = 50;
-const SKCaM: NeuronVars = 51;
-const Gmahp: NeuronVars = 52;
-const MahpN: NeuronVars = 53;
-const Gsahp: NeuronVars = 54;
-const SahpCa: NeuronVars = 55;
-const SahpN: NeuronVars = 56;
-const ActM: NeuronVars = 57;
-const ActP: NeuronVars = 58;
-const SpkSt1: NeuronVars = 59;
-const SpkSt2: NeuronVars = 60;
-const SpkMax: NeuronVars = 61;
-const SpkMaxCa: NeuronVars = 62;
-const SpkBin0: NeuronVars = 63;
-const SpkBin1: NeuronVars = 64;
-const SpkBin2: NeuronVars = 65;
-const SpkBin3: NeuronVars = 66;
-const SpkBin4: NeuronVars = 67;
-const SpkBin5: NeuronVars = 68;
-const SpkBin6: NeuronVars = 69;
-const SpkBin7: NeuronVars = 70;
-const SpkPrv: NeuronVars = 71;
-const GeNoise: NeuronVars = 72;
-const GeNoiseP: NeuronVars = 73;
-const GiNoise: NeuronVars = 74;
-const GiNoiseP: NeuronVars = 75;
-const GeExt: NeuronVars = 76;
-const GeRaw: NeuronVars = 77;
-const GeSyn: NeuronVars = 78;
-const GiRaw: NeuronVars = 79;
-const GiSyn: NeuronVars = 80;
-const GeInt: NeuronVars = 81;
-const GeIntNorm: NeuronVars = 82;
-const GiInt: NeuronVars = 83;
-const GModRaw: NeuronVars = 84;
-const GModSyn: NeuronVars = 85;
-const SMaintP: NeuronVars = 86;
-const GMaintRaw: NeuronVars = 87;
-const GMaintSyn: NeuronVars = 88;
-const NeurFlags: NeuronVars = 89;
+const CaM: NeuronVars = 14;
+const CaP: NeuronVars = 15;
+const CaD: NeuronVars = 16;
+const LearnCa: NeuronVars = 17;
+const LearnCaM: NeuronVars = 18;
+const LearnCaP: NeuronVars = 19;
+const LearnCaD: NeuronVars = 20;
+const CaDiff: NeuronVars = 21;
+const RLRate: NeuronVars = 22;
+const GnmdaSyn: NeuronVars = 23;
+const Gnmda: NeuronVars = 24;
+const GnmdaLrn: NeuronVars = 25;
+const GnmdaMaint: NeuronVars = 26;
+const NmdaCa: NeuronVars = 27;
+const Gvgcc: NeuronVars = 28;
+const VgccM: NeuronVars = 29;
+const VgccH: NeuronVars = 30;
+const VgccCa: NeuronVars = 31;
+const VgccCaInt: NeuronVars = 32;
+const Burst: NeuronVars = 33;
+const BurstPrv: NeuronVars = 34;
+const CtxtGe: NeuronVars = 35;
+const CtxtGeRaw: NeuronVars = 36;
+const CtxtGeOrig: NeuronVars = 37;
+const GgabaB: NeuronVars = 38;
+const GABAB: NeuronVars = 39;
+const GABABx: NeuronVars = 40;
+const Gak: NeuronVars = 41;
+const SSGiDend: NeuronVars = 42;
+const GknaMed: NeuronVars = 43;
+const GknaSlow: NeuronVars = 44;
+const Gkir: NeuronVars = 45;
+const KirM: NeuronVars = 46;
+const Gsk: NeuronVars = 47;
+const SKCaIn: NeuronVars = 48;
+const SKCaR: NeuronVars = 49;
+const SKCaM: NeuronVars = 50;
+const Gmahp: NeuronVars = 51;
+const MahpN: NeuronVars = 52;
+const Gsahp: NeuronVars = 53;
+const SahpCa: NeuronVars = 54;
+const SahpN: NeuronVars = 55;
+const ActM: NeuronVars = 56;
+const ActP: NeuronVars = 57;
+const Beta1: NeuronVars = 58;
+const Beta2: NeuronVars = 59;
+const SpkMax: NeuronVars = 60;
+const SpkMaxCa: NeuronVars = 61;
+const SpkBin0: NeuronVars = 62;
+const SpkBin1: NeuronVars = 63;
+const SpkBin2: NeuronVars = 64;
+const SpkBin3: NeuronVars = 65;
+const SpkBin4: NeuronVars = 66;
+const SpkBin5: NeuronVars = 67;
+const SpkBin6: NeuronVars = 68;
+const SpkBin7: NeuronVars = 69;
+const SpkPrv: NeuronVars = 70;
+const GeNoise: NeuronVars = 71;
+const GeNoiseP: NeuronVars = 72;
+const GiNoise: NeuronVars = 73;
+const GiNoiseP: NeuronVars = 74;
+const GeExt: NeuronVars = 75;
+const GeRaw: NeuronVars = 76;
+const GeSyn: NeuronVars = 77;
+const GiRaw: NeuronVars = 78;
+const GiSyn: NeuronVars = 79;
+const GeInt: NeuronVars = 80;
+const GeIntNorm: NeuronVars = 81;
+const GiInt: NeuronVars = 82;
+const GModRaw: NeuronVars = 83;
+const GModSyn: NeuronVars = 84;
+const SMaintP: NeuronVars = 85;
+const GMaintRaw: NeuronVars = 86;
+const GMaintSyn: NeuronVars = 87;
+const NeurFlags: NeuronVars = 88;
alias NeuronAvgVars = i32; //enums:enum
const ActAvg: NeuronAvgVars = 0;
const AvgPct: NeuronAvgVars = 1;
@@ -1257,8 +1256,8 @@ const AMMinus: AvgMaxPhases = 1;
const AMPlus: AvgMaxPhases = 2;
const AMPrev: AvgMaxPhases = 3;
alias AvgMaxVars = i32; //enums:enum -trim-prefix AM
-const AMCaSpkP: AvgMaxVars = 0;
-const AMCaSpkD: AvgMaxVars = 1;
+const AMCaP: AvgMaxVars = 0;
+const AMCaD: AvgMaxVars = 1;
const AMSpkMax: AvgMaxVars = 2;
const AMAct: AvgMaxVars = 3;
const AMGeInt: AvgMaxVars = 4;
@@ -1267,7 +1266,7 @@ const AMAvgDif: AvgMaxVars = 6;
const poolFloatAvgMaxStart = InhibVarsN;
const PoolVarsN = poolFloatAvgMaxStart + InhibVars(i32(AvgMaxVarsN)*i32(AvgMaxN)*i32(AvgMaxPhasesN));
const PoolIntVarsTot = PoolIntAvgMaxStart + PoolIntVars(i32(AvgMaxVarsN)*i32(AvgMaxN));
-const avgMaxToNeuron = array(CaSpkP, CaSpkD, SpkMax, Act, GeInt, GiInt);
+const avgMaxToNeuron = array(CaP, CaD, SpkMax, Act, GeInt, GiInt);
fn AvgMaxVarIndex(vr: AvgMaxVars, phase: AvgMaxPhases, am: AvgMax) -> u32 {
return u32(poolFloatAvgMaxStart) + u32(vr)*u32(AvgMaxN)*u32(AvgMaxPhasesN) + u32(phase)*u32(AvgMaxN) + u32(am);
}
diff --git a/axon/shaders/PlusPhasePool.wgsl b/axon/shaders/PlusPhasePool.wgsl
index a65ab4611..94025fe57 100644
--- a/axon/shaders/PlusPhasePool.wgsl
+++ b/axon/shaders/PlusPhasePool.wgsl
@@ -428,7 +428,7 @@ const ViewTimesN: ViewTimes = 7;
const DAModTypesN: DAModTypes = 4;
const ValenceTypesN: ValenceTypes = 3;
const NeuronFlagsN: NeuronFlags = 9;
-const NeuronVarsN: NeuronVars = 90;
+const NeuronVarsN: NeuronVars = 89;
const NeuronAvgVarsN: NeuronAvgVars = 7;
const NeuronIndexVarsN: NeuronIndexVars = 3;
const PathTypesN: PathTypes = 12;
@@ -728,7 +728,7 @@ const LayerRewPredNeg: LayerVars = 10;
///////////// import: "learn-path.go"
///////////// import: "learn.go"
-struct CaLrnParams {
+struct LearnCaParams {
Norm: f32,
SpkVGCC: i32,
SpkVgccCa: f32,
@@ -761,7 +761,7 @@ struct RLRateParams {
pad: i32,
}
struct LearnNeurParams {
- CaLearn: CaLrnParams,
+ CaLearn: LearnCaParams,
CaSpk: NeurCaParams,
LrnNMDA: NMDAParams,
TrgAvgAct: TrgAvgActParams,
@@ -917,82 +917,81 @@ const ISI: NeuronVars = 10;
const ISIAvg: NeuronVars = 11;
const Ext: NeuronVars = 12;
const Target: NeuronVars = 13;
-const CaSpkM: NeuronVars = 14;
-const CaSpkP: NeuronVars = 15;
-const CaSpkD: NeuronVars = 16;
-const CaSpkPM: NeuronVars = 17;
-const CaLrn: NeuronVars = 18;
-const NrnCaM: NeuronVars = 19;
-const NrnCaP: NeuronVars = 20;
-const NrnCaD: NeuronVars = 21;
-const CaDiff: NeuronVars = 22;
-const RLRate: NeuronVars = 23;
-const GnmdaSyn: NeuronVars = 24;
-const Gnmda: NeuronVars = 25;
-const GnmdaLrn: NeuronVars = 26;
-const GnmdaMaint: NeuronVars = 27;
-const NmdaCa: NeuronVars = 28;
-const Gvgcc: NeuronVars = 29;
-const VgccM: NeuronVars = 30;
-const VgccH: NeuronVars = 31;
-const VgccCa: NeuronVars = 32;
-const VgccCaInt: NeuronVars = 33;
-const Burst: NeuronVars = 34;
-const BurstPrv: NeuronVars = 35;
-const CtxtGe: NeuronVars = 36;
-const CtxtGeRaw: NeuronVars = 37;
-const CtxtGeOrig: NeuronVars = 38;
-const GgabaB: NeuronVars = 39;
-const GABAB: NeuronVars = 40;
-const GABABx: NeuronVars = 41;
-const Gak: NeuronVars = 42;
-const SSGiDend: NeuronVars = 43;
-const GknaMed: NeuronVars = 44;
-const GknaSlow: NeuronVars = 45;
-const Gkir: NeuronVars = 46;
-const KirM: NeuronVars = 47;
-const Gsk: NeuronVars = 48;
-const SKCaIn: NeuronVars = 49;
-const SKCaR: NeuronVars = 50;
-const SKCaM: NeuronVars = 51;
-const Gmahp: NeuronVars = 52;
-const MahpN: NeuronVars = 53;
-const Gsahp: NeuronVars = 54;
-const SahpCa: NeuronVars = 55;
-const SahpN: NeuronVars = 56;
-const ActM: NeuronVars = 57;
-const ActP: NeuronVars = 58;
-const SpkSt1: NeuronVars = 59;
-const SpkSt2: NeuronVars = 60;
-const SpkMax: NeuronVars = 61;
-const SpkMaxCa: NeuronVars = 62;
-const SpkBin0: NeuronVars = 63;
-const SpkBin1: NeuronVars = 64;
-const SpkBin2: NeuronVars = 65;
-const SpkBin3: NeuronVars = 66;
-const SpkBin4: NeuronVars = 67;
-const SpkBin5: NeuronVars = 68;
-const SpkBin6: NeuronVars = 69;
-const SpkBin7: NeuronVars = 70;
-const SpkPrv: NeuronVars = 71;
-const GeNoise: NeuronVars = 72;
-const GeNoiseP: NeuronVars = 73;
-const GiNoise: NeuronVars = 74;
-const GiNoiseP: NeuronVars = 75;
-const GeExt: NeuronVars = 76;
-const GeRaw: NeuronVars = 77;
-const GeSyn: NeuronVars = 78;
-const GiRaw: NeuronVars = 79;
-const GiSyn: NeuronVars = 80;
-const GeInt: NeuronVars = 81;
-const GeIntNorm: NeuronVars = 82;
-const GiInt: NeuronVars = 83;
-const GModRaw: NeuronVars = 84;
-const GModSyn: NeuronVars = 85;
-const SMaintP: NeuronVars = 86;
-const GMaintRaw: NeuronVars = 87;
-const GMaintSyn: NeuronVars = 88;
-const NeurFlags: NeuronVars = 89;
+const CaM: NeuronVars = 14;
+const CaP: NeuronVars = 15;
+const CaD: NeuronVars = 16;
+const LearnCa: NeuronVars = 17;
+const LearnCaM: NeuronVars = 18;
+const LearnCaP: NeuronVars = 19;
+const LearnCaD: NeuronVars = 20;
+const CaDiff: NeuronVars = 21;
+const RLRate: NeuronVars = 22;
+const GnmdaSyn: NeuronVars = 23;
+const Gnmda: NeuronVars = 24;
+const GnmdaLrn: NeuronVars = 25;
+const GnmdaMaint: NeuronVars = 26;
+const NmdaCa: NeuronVars = 27;
+const Gvgcc: NeuronVars = 28;
+const VgccM: NeuronVars = 29;
+const VgccH: NeuronVars = 30;
+const VgccCa: NeuronVars = 31;
+const VgccCaInt: NeuronVars = 32;
+const Burst: NeuronVars = 33;
+const BurstPrv: NeuronVars = 34;
+const CtxtGe: NeuronVars = 35;
+const CtxtGeRaw: NeuronVars = 36;
+const CtxtGeOrig: NeuronVars = 37;
+const GgabaB: NeuronVars = 38;
+const GABAB: NeuronVars = 39;
+const GABABx: NeuronVars = 40;
+const Gak: NeuronVars = 41;
+const SSGiDend: NeuronVars = 42;
+const GknaMed: NeuronVars = 43;
+const GknaSlow: NeuronVars = 44;
+const Gkir: NeuronVars = 45;
+const KirM: NeuronVars = 46;
+const Gsk: NeuronVars = 47;
+const SKCaIn: NeuronVars = 48;
+const SKCaR: NeuronVars = 49;
+const SKCaM: NeuronVars = 50;
+const Gmahp: NeuronVars = 51;
+const MahpN: NeuronVars = 52;
+const Gsahp: NeuronVars = 53;
+const SahpCa: NeuronVars = 54;
+const SahpN: NeuronVars = 55;
+const ActM: NeuronVars = 56;
+const ActP: NeuronVars = 57;
+const Beta1: NeuronVars = 58;
+const Beta2: NeuronVars = 59;
+const SpkMax: NeuronVars = 60;
+const SpkMaxCa: NeuronVars = 61;
+const SpkBin0: NeuronVars = 62;
+const SpkBin1: NeuronVars = 63;
+const SpkBin2: NeuronVars = 64;
+const SpkBin3: NeuronVars = 65;
+const SpkBin4: NeuronVars = 66;
+const SpkBin5: NeuronVars = 67;
+const SpkBin6: NeuronVars = 68;
+const SpkBin7: NeuronVars = 69;
+const SpkPrv: NeuronVars = 70;
+const GeNoise: NeuronVars = 71;
+const GeNoiseP: NeuronVars = 72;
+const GiNoise: NeuronVars = 73;
+const GiNoiseP: NeuronVars = 74;
+const GeExt: NeuronVars = 75;
+const GeRaw: NeuronVars = 76;
+const GeSyn: NeuronVars = 77;
+const GiRaw: NeuronVars = 78;
+const GiSyn: NeuronVars = 79;
+const GeInt: NeuronVars = 80;
+const GeIntNorm: NeuronVars = 81;
+const GiInt: NeuronVars = 82;
+const GModRaw: NeuronVars = 83;
+const GModSyn: NeuronVars = 84;
+const SMaintP: NeuronVars = 85;
+const GMaintRaw: NeuronVars = 86;
+const GMaintSyn: NeuronVars = 87;
+const NeurFlags: NeuronVars = 88;
alias NeuronAvgVars = i32; //enums:enum
const ActAvg: NeuronAvgVars = 0;
const AvgPct: NeuronAvgVars = 1;
@@ -1123,8 +1122,8 @@ const AMMinus: AvgMaxPhases = 1;
const AMPlus: AvgMaxPhases = 2;
const AMPrev: AvgMaxPhases = 3;
alias AvgMaxVars = i32; //enums:enum -trim-prefix AM
-const AMCaSpkP: AvgMaxVars = 0;
-const AMCaSpkD: AvgMaxVars = 1;
+const AMCaP: AvgMaxVars = 0;
+const AMCaD: AvgMaxVars = 1;
const AMSpkMax: AvgMaxVars = 2;
const AMAct: AvgMaxVars = 3;
const AMGeInt: AvgMaxVars = 4;
@@ -1133,7 +1132,7 @@ const AMAvgDif: AvgMaxVars = 6;
const poolFloatAvgMaxStart = InhibVarsN;
const PoolVarsN = poolFloatAvgMaxStart + InhibVars(i32(AvgMaxVarsN)*i32(AvgMaxN)*i32(AvgMaxPhasesN));
const PoolIntVarsTot = PoolIntAvgMaxStart + PoolIntVars(i32(AvgMaxVarsN)*i32(AvgMaxN));
-const avgMaxToNeuron = array(CaSpkP, CaSpkD, SpkMax, Act, GeInt, GiInt);
+const avgMaxToNeuron = array(CaP, CaD, SpkMax, Act, GeInt, GiInt);
fn AvgMaxVarIndex(vr: AvgMaxVars, phase: AvgMaxPhases, am: AvgMax) -> u32 {
return u32(poolFloatAvgMaxStart) + u32(vr)*u32(AvgMaxN)*u32(AvgMaxPhasesN) + u32(phase)*u32(AvgMaxN) + u32(am);
}
diff --git a/axon/shaders/PlusPhasePost.wgsl b/axon/shaders/PlusPhasePost.wgsl
index 7e34edcf9..390b772d9 100644
--- a/axon/shaders/PlusPhasePost.wgsl
+++ b/axon/shaders/PlusPhasePost.wgsl
@@ -99,7 +99,7 @@ fn LayerParams_PlusPhasePost(ly: ptr, ctx: ptr, ctx: ptr, ctx: ptr u32 {
return u32(poolFloatAvgMaxStart) + u32(vr)*u32(AvgMaxN)*u32(AvgMaxPhasesN) + u32(phase)*u32(AvgMaxN) + u32(am);
}
diff --git a/axon/shaders/PlusPhaseStartNeuron.wgsl b/axon/shaders/PlusPhaseStartNeuron.wgsl
index f0cc2b40d..d8bce84dc 100644
--- a/axon/shaders/PlusPhaseStartNeuron.wgsl
+++ b/axon/shaders/PlusPhaseStartNeuron.wgsl
@@ -441,7 +441,7 @@ const ViewTimesN: ViewTimes = 7;
const DAModTypesN: DAModTypes = 4;
const ValenceTypesN: ValenceTypes = 3;
const NeuronFlagsN: NeuronFlags = 9;
-const NeuronVarsN: NeuronVars = 90;
+const NeuronVarsN: NeuronVars = 89;
const NeuronAvgVarsN: NeuronAvgVars = 7;
const NeuronIndexVarsN: NeuronIndexVars = 3;
const PathTypesN: PathTypes = 12;
@@ -741,7 +741,7 @@ const LayerRewPredNeg: LayerVars = 10;
///////////// import: "learn-path.go"
///////////// import: "learn.go"
-struct CaLrnParams {
+struct LearnCaParams {
Norm: f32,
SpkVGCC: i32,
SpkVgccCa: f32,
@@ -774,7 +774,7 @@ struct RLRateParams {
pad: i32,
}
struct LearnNeurParams {
- CaLearn: CaLrnParams,
+ CaLearn: LearnCaParams,
CaSpk: NeurCaParams,
LrnNMDA: NMDAParams,
TrgAvgAct: TrgAvgActParams,
@@ -930,82 +930,81 @@ const ISI: NeuronVars = 10;
const ISIAvg: NeuronVars = 11;
const Ext: NeuronVars = 12;
const Target: NeuronVars = 13;
-const CaSpkM: NeuronVars = 14;
-const CaSpkP: NeuronVars = 15;
-const CaSpkD: NeuronVars = 16;
-const CaSpkPM: NeuronVars = 17;
-const CaLrn: NeuronVars = 18;
-const NrnCaM: NeuronVars = 19;
-const NrnCaP: NeuronVars = 20;
-const NrnCaD: NeuronVars = 21;
-const CaDiff: NeuronVars = 22;
-const RLRate: NeuronVars = 23;
-const GnmdaSyn: NeuronVars = 24;
-const Gnmda: NeuronVars = 25;
-const GnmdaLrn: NeuronVars = 26;
-const GnmdaMaint: NeuronVars = 27;
-const NmdaCa: NeuronVars = 28;
-const Gvgcc: NeuronVars = 29;
-const VgccM: NeuronVars = 30;
-const VgccH: NeuronVars = 31;
-const VgccCa: NeuronVars = 32;
-const VgccCaInt: NeuronVars = 33;
-const Burst: NeuronVars = 34;
-const BurstPrv: NeuronVars = 35;
-const CtxtGe: NeuronVars = 36;
-const CtxtGeRaw: NeuronVars = 37;
-const CtxtGeOrig: NeuronVars = 38;
-const GgabaB: NeuronVars = 39;
-const GABAB: NeuronVars = 40;
-const GABABx: NeuronVars = 41;
-const Gak: NeuronVars = 42;
-const SSGiDend: NeuronVars = 43;
-const GknaMed: NeuronVars = 44;
-const GknaSlow: NeuronVars = 45;
-const Gkir: NeuronVars = 46;
-const KirM: NeuronVars = 47;
-const Gsk: NeuronVars = 48;
-const SKCaIn: NeuronVars = 49;
-const SKCaR: NeuronVars = 50;
-const SKCaM: NeuronVars = 51;
-const Gmahp: NeuronVars = 52;
-const MahpN: NeuronVars = 53;
-const Gsahp: NeuronVars = 54;
-const SahpCa: NeuronVars = 55;
-const SahpN: NeuronVars = 56;
-const ActM: NeuronVars = 57;
-const ActP: NeuronVars = 58;
-const SpkSt1: NeuronVars = 59;
-const SpkSt2: NeuronVars = 60;
-const SpkMax: NeuronVars = 61;
-const SpkMaxCa: NeuronVars = 62;
-const SpkBin0: NeuronVars = 63;
-const SpkBin1: NeuronVars = 64;
-const SpkBin2: NeuronVars = 65;
-const SpkBin3: NeuronVars = 66;
-const SpkBin4: NeuronVars = 67;
-const SpkBin5: NeuronVars = 68;
-const SpkBin6: NeuronVars = 69;
-const SpkBin7: NeuronVars = 70;
-const SpkPrv: NeuronVars = 71;
-const GeNoise: NeuronVars = 72;
-const GeNoiseP: NeuronVars = 73;
-const GiNoise: NeuronVars = 74;
-const GiNoiseP: NeuronVars = 75;
-const GeExt: NeuronVars = 76;
-const GeRaw: NeuronVars = 77;
-const GeSyn: NeuronVars = 78;
-const GiRaw: NeuronVars = 79;
-const GiSyn: NeuronVars = 80;
-const GeInt: NeuronVars = 81;
-const GeIntNorm: NeuronVars = 82;
-const GiInt: NeuronVars = 83;
-const GModRaw: NeuronVars = 84;
-const GModSyn: NeuronVars = 85;
-const SMaintP: NeuronVars = 86;
-const GMaintRaw: NeuronVars = 87;
-const GMaintSyn: NeuronVars = 88;
-const NeurFlags: NeuronVars = 89;
+const CaM: NeuronVars = 14;
+const CaP: NeuronVars = 15;
+const CaD: NeuronVars = 16;
+const LearnCa: NeuronVars = 17;
+const LearnCaM: NeuronVars = 18;
+const LearnCaP: NeuronVars = 19;
+const LearnCaD: NeuronVars = 20;
+const CaDiff: NeuronVars = 21;
+const RLRate: NeuronVars = 22;
+const GnmdaSyn: NeuronVars = 23;
+const Gnmda: NeuronVars = 24;
+const GnmdaLrn: NeuronVars = 25;
+const GnmdaMaint: NeuronVars = 26;
+const NmdaCa: NeuronVars = 27;
+const Gvgcc: NeuronVars = 28;
+const VgccM: NeuronVars = 29;
+const VgccH: NeuronVars = 30;
+const VgccCa: NeuronVars = 31;
+const VgccCaInt: NeuronVars = 32;
+const Burst: NeuronVars = 33;
+const BurstPrv: NeuronVars = 34;
+const CtxtGe: NeuronVars = 35;
+const CtxtGeRaw: NeuronVars = 36;
+const CtxtGeOrig: NeuronVars = 37;
+const GgabaB: NeuronVars = 38;
+const GABAB: NeuronVars = 39;
+const GABABx: NeuronVars = 40;
+const Gak: NeuronVars = 41;
+const SSGiDend: NeuronVars = 42;
+const GknaMed: NeuronVars = 43;
+const GknaSlow: NeuronVars = 44;
+const Gkir: NeuronVars = 45;
+const KirM: NeuronVars = 46;
+const Gsk: NeuronVars = 47;
+const SKCaIn: NeuronVars = 48;
+const SKCaR: NeuronVars = 49;
+const SKCaM: NeuronVars = 50;
+const Gmahp: NeuronVars = 51;
+const MahpN: NeuronVars = 52;
+const Gsahp: NeuronVars = 53;
+const SahpCa: NeuronVars = 54;
+const SahpN: NeuronVars = 55;
+const ActM: NeuronVars = 56;
+const ActP: NeuronVars = 57;
+const Beta1: NeuronVars = 58;
+const Beta2: NeuronVars = 59;
+const SpkMax: NeuronVars = 60;
+const SpkMaxCa: NeuronVars = 61;
+const SpkBin0: NeuronVars = 62;
+const SpkBin1: NeuronVars = 63;
+const SpkBin2: NeuronVars = 64;
+const SpkBin3: NeuronVars = 65;
+const SpkBin4: NeuronVars = 66;
+const SpkBin5: NeuronVars = 67;
+const SpkBin6: NeuronVars = 68;
+const SpkBin7: NeuronVars = 69;
+const SpkPrv: NeuronVars = 70;
+const GeNoise: NeuronVars = 71;
+const GeNoiseP: NeuronVars = 72;
+const GiNoise: NeuronVars = 73;
+const GiNoiseP: NeuronVars = 74;
+const GeExt: NeuronVars = 75;
+const GeRaw: NeuronVars = 76;
+const GeSyn: NeuronVars = 77;
+const GiRaw: NeuronVars = 78;
+const GiSyn: NeuronVars = 79;
+const GeInt: NeuronVars = 80;
+const GeIntNorm: NeuronVars = 81;
+const GiInt: NeuronVars = 82;
+const GModRaw: NeuronVars = 83;
+const GModSyn: NeuronVars = 84;
+const SMaintP: NeuronVars = 85;
+const GMaintRaw: NeuronVars = 86;
+const GMaintSyn: NeuronVars = 87;
+const NeurFlags: NeuronVars = 88;
alias NeuronAvgVars = i32; //enums:enum
const ActAvg: NeuronAvgVars = 0;
const AvgPct: NeuronAvgVars = 1;
@@ -1136,8 +1135,8 @@ const AMMinus: AvgMaxPhases = 1;
const AMPlus: AvgMaxPhases = 2;
const AMPrev: AvgMaxPhases = 3;
alias AvgMaxVars = i32; //enums:enum -trim-prefix AM
-const AMCaSpkP: AvgMaxVars = 0;
-const AMCaSpkD: AvgMaxVars = 1;
+const AMCaP: AvgMaxVars = 0;
+const AMCaD: AvgMaxVars = 1;
const AMSpkMax: AvgMaxVars = 2;
const AMAct: AvgMaxVars = 3;
const AMGeInt: AvgMaxVars = 4;
@@ -1146,7 +1145,7 @@ const AMAvgDif: AvgMaxVars = 6;
const poolFloatAvgMaxStart = InhibVarsN;
const PoolVarsN = poolFloatAvgMaxStart + InhibVars(i32(AvgMaxVarsN)*i32(AvgMaxN)*i32(AvgMaxPhasesN));
const PoolIntVarsTot = PoolIntAvgMaxStart + PoolIntVars(i32(AvgMaxVarsN)*i32(AvgMaxN));
-const avgMaxToNeuron = array(CaSpkP, CaSpkD, SpkMax, Act, GeInt, GiInt);
+const avgMaxToNeuron = array(CaP, CaD, SpkMax, Act, GeInt, GiInt);
///////////// import: "rand.go"
alias RandFunIndex = u32;
diff --git a/axon/shaders/PoolGi.wgsl b/axon/shaders/PoolGi.wgsl
index f46af327a..07a8e55cf 100644
--- a/axon/shaders/PoolGi.wgsl
+++ b/axon/shaders/PoolGi.wgsl
@@ -434,7 +434,7 @@ const ViewTimesN: ViewTimes = 7;
const DAModTypesN: DAModTypes = 4;
const ValenceTypesN: ValenceTypes = 3;
const NeuronFlagsN: NeuronFlags = 9;
-const NeuronVarsN: NeuronVars = 90;
+const NeuronVarsN: NeuronVars = 89;
const NeuronAvgVarsN: NeuronAvgVars = 7;
const NeuronIndexVarsN: NeuronIndexVars = 3;
const PathTypesN: PathTypes = 12;
@@ -826,7 +826,7 @@ const LayerRewPredNeg: LayerVars = 10;
///////////// import: "learn-path.go"
///////////// import: "learn.go"
-struct CaLrnParams {
+struct LearnCaParams {
Norm: f32,
SpkVGCC: i32,
SpkVgccCa: f32,
@@ -859,7 +859,7 @@ struct RLRateParams {
pad: i32,
}
struct LearnNeurParams {
- CaLearn: CaLrnParams,
+ CaLearn: LearnCaParams,
CaSpk: NeurCaParams,
LrnNMDA: NMDAParams,
TrgAvgAct: TrgAvgActParams,
@@ -1015,82 +1015,81 @@ const ISI: NeuronVars = 10;
const ISIAvg: NeuronVars = 11;
const Ext: NeuronVars = 12;
const Target: NeuronVars = 13;
-const CaSpkM: NeuronVars = 14;
-const CaSpkP: NeuronVars = 15;
-const CaSpkD: NeuronVars = 16;
-const CaSpkPM: NeuronVars = 17;
-const CaLrn: NeuronVars = 18;
-const NrnCaM: NeuronVars = 19;
-const NrnCaP: NeuronVars = 20;
-const NrnCaD: NeuronVars = 21;
-const CaDiff: NeuronVars = 22;
-const RLRate: NeuronVars = 23;
-const GnmdaSyn: NeuronVars = 24;
-const Gnmda: NeuronVars = 25;
-const GnmdaLrn: NeuronVars = 26;
-const GnmdaMaint: NeuronVars = 27;
-const NmdaCa: NeuronVars = 28;
-const Gvgcc: NeuronVars = 29;
-const VgccM: NeuronVars = 30;
-const VgccH: NeuronVars = 31;
-const VgccCa: NeuronVars = 32;
-const VgccCaInt: NeuronVars = 33;
-const Burst: NeuronVars = 34;
-const BurstPrv: NeuronVars = 35;
-const CtxtGe: NeuronVars = 36;
-const CtxtGeRaw: NeuronVars = 37;
-const CtxtGeOrig: NeuronVars = 38;
-const GgabaB: NeuronVars = 39;
-const GABAB: NeuronVars = 40;
-const GABABx: NeuronVars = 41;
-const Gak: NeuronVars = 42;
-const SSGiDend: NeuronVars = 43;
-const GknaMed: NeuronVars = 44;
-const GknaSlow: NeuronVars = 45;
-const Gkir: NeuronVars = 46;
-const KirM: NeuronVars = 47;
-const Gsk: NeuronVars = 48;
-const SKCaIn: NeuronVars = 49;
-const SKCaR: NeuronVars = 50;
-const SKCaM: NeuronVars = 51;
-const Gmahp: NeuronVars = 52;
-const MahpN: NeuronVars = 53;
-const Gsahp: NeuronVars = 54;
-const SahpCa: NeuronVars = 55;
-const SahpN: NeuronVars = 56;
-const ActM: NeuronVars = 57;
-const ActP: NeuronVars = 58;
-const SpkSt1: NeuronVars = 59;
-const SpkSt2: NeuronVars = 60;
-const SpkMax: NeuronVars = 61;
-const SpkMaxCa: NeuronVars = 62;
-const SpkBin0: NeuronVars = 63;
-const SpkBin1: NeuronVars = 64;
-const SpkBin2: NeuronVars = 65;
-const SpkBin3: NeuronVars = 66;
-const SpkBin4: NeuronVars = 67;
-const SpkBin5: NeuronVars = 68;
-const SpkBin6: NeuronVars = 69;
-const SpkBin7: NeuronVars = 70;
-const SpkPrv: NeuronVars = 71;
-const GeNoise: NeuronVars = 72;
-const GeNoiseP: NeuronVars = 73;
-const GiNoise: NeuronVars = 74;
-const GiNoiseP: NeuronVars = 75;
-const GeExt: NeuronVars = 76;
-const GeRaw: NeuronVars = 77;
-const GeSyn: NeuronVars = 78;
-const GiRaw: NeuronVars = 79;
-const GiSyn: NeuronVars = 80;
-const GeInt: NeuronVars = 81;
-const GeIntNorm: NeuronVars = 82;
-const GiInt: NeuronVars = 83;
-const GModRaw: NeuronVars = 84;
-const GModSyn: NeuronVars = 85;
-const SMaintP: NeuronVars = 86;
-const GMaintRaw: NeuronVars = 87;
-const GMaintSyn: NeuronVars = 88;
-const NeurFlags: NeuronVars = 89;
+const CaM: NeuronVars = 14;
+const CaP: NeuronVars = 15;
+const CaD: NeuronVars = 16;
+const LearnCa: NeuronVars = 17;
+const LearnCaM: NeuronVars = 18;
+const LearnCaP: NeuronVars = 19;
+const LearnCaD: NeuronVars = 20;
+const CaDiff: NeuronVars = 21;
+const RLRate: NeuronVars = 22;
+const GnmdaSyn: NeuronVars = 23;
+const Gnmda: NeuronVars = 24;
+const GnmdaLrn: NeuronVars = 25;
+const GnmdaMaint: NeuronVars = 26;
+const NmdaCa: NeuronVars = 27;
+const Gvgcc: NeuronVars = 28;
+const VgccM: NeuronVars = 29;
+const VgccH: NeuronVars = 30;
+const VgccCa: NeuronVars = 31;
+const VgccCaInt: NeuronVars = 32;
+const Burst: NeuronVars = 33;
+const BurstPrv: NeuronVars = 34;
+const CtxtGe: NeuronVars = 35;
+const CtxtGeRaw: NeuronVars = 36;
+const CtxtGeOrig: NeuronVars = 37;
+const GgabaB: NeuronVars = 38;
+const GABAB: NeuronVars = 39;
+const GABABx: NeuronVars = 40;
+const Gak: NeuronVars = 41;
+const SSGiDend: NeuronVars = 42;
+const GknaMed: NeuronVars = 43;
+const GknaSlow: NeuronVars = 44;
+const Gkir: NeuronVars = 45;
+const KirM: NeuronVars = 46;
+const Gsk: NeuronVars = 47;
+const SKCaIn: NeuronVars = 48;
+const SKCaR: NeuronVars = 49;
+const SKCaM: NeuronVars = 50;
+const Gmahp: NeuronVars = 51;
+const MahpN: NeuronVars = 52;
+const Gsahp: NeuronVars = 53;
+const SahpCa: NeuronVars = 54;
+const SahpN: NeuronVars = 55;
+const ActM: NeuronVars = 56;
+const ActP: NeuronVars = 57;
+const Beta1: NeuronVars = 58;
+const Beta2: NeuronVars = 59;
+const SpkMax: NeuronVars = 60;
+const SpkMaxCa: NeuronVars = 61;
+const SpkBin0: NeuronVars = 62;
+const SpkBin1: NeuronVars = 63;
+const SpkBin2: NeuronVars = 64;
+const SpkBin3: NeuronVars = 65;
+const SpkBin4: NeuronVars = 66;
+const SpkBin5: NeuronVars = 67;
+const SpkBin6: NeuronVars = 68;
+const SpkBin7: NeuronVars = 69;
+const SpkPrv: NeuronVars = 70;
+const GeNoise: NeuronVars = 71;
+const GeNoiseP: NeuronVars = 72;
+const GiNoise: NeuronVars = 73;
+const GiNoiseP: NeuronVars = 74;
+const GeExt: NeuronVars = 75;
+const GeRaw: NeuronVars = 76;
+const GeSyn: NeuronVars = 77;
+const GiRaw: NeuronVars = 78;
+const GiSyn: NeuronVars = 79;
+const GeInt: NeuronVars = 80;
+const GeIntNorm: NeuronVars = 81;
+const GiInt: NeuronVars = 82;
+const GModRaw: NeuronVars = 83;
+const GModSyn: NeuronVars = 84;
+const SMaintP: NeuronVars = 85;
+const GMaintRaw: NeuronVars = 86;
+const GMaintSyn: NeuronVars = 87;
+const NeurFlags: NeuronVars = 88;
alias NeuronAvgVars = i32; //enums:enum
const ActAvg: NeuronAvgVars = 0;
const AvgPct: NeuronAvgVars = 1;
@@ -1221,8 +1220,8 @@ const AMMinus: AvgMaxPhases = 1;
const AMPlus: AvgMaxPhases = 2;
const AMPrev: AvgMaxPhases = 3;
alias AvgMaxVars = i32; //enums:enum -trim-prefix AM
-const AMCaSpkP: AvgMaxVars = 0;
-const AMCaSpkD: AvgMaxVars = 1;
+const AMCaP: AvgMaxVars = 0;
+const AMCaD: AvgMaxVars = 1;
const AMSpkMax: AvgMaxVars = 2;
const AMAct: AvgMaxVars = 3;
const AMGeInt: AvgMaxVars = 4;
@@ -1231,7 +1230,7 @@ const AMAvgDif: AvgMaxVars = 6;
const poolFloatAvgMaxStart = InhibVarsN;
const PoolVarsN = poolFloatAvgMaxStart + InhibVars(i32(AvgMaxVarsN)*i32(AvgMaxN)*i32(AvgMaxPhasesN));
const PoolIntVarsTot = PoolIntAvgMaxStart + PoolIntVars(i32(AvgMaxVarsN)*i32(AvgMaxN));
-const avgMaxToNeuron = array(CaSpkP, CaSpkD, SpkMax, Act, GeInt, GiInt);
+const avgMaxToNeuron = array(CaP, CaD, SpkMax, Act, GeInt, GiInt);
fn AvgMaxVarIndex(vr: AvgMaxVars, phase: AvgMaxPhases, am: AvgMax) -> u32 {
return u32(poolFloatAvgMaxStart) + u32(vr)*u32(AvgMaxN)*u32(AvgMaxPhasesN) + u32(phase)*u32(AvgMaxN) + u32(am);
}
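The AvgMaxVarIndex function above computes where a given pool average/max statistic lives in the flat pool-variable block, which starts right after the inhibition variables and is laid out as [variable][phase][avg|max]. Below is a minimal Go sketch of the same arithmetic with one worked index; the concrete counts (InhibVarsN and the Avg/Max slot order) are assumptions for illustration only, not the generated values.

// avgMaxVarIndex mirrors the WGSL AvgMaxVarIndex arithmetic: pool float
// statistics are packed as [variable][phase][avg|max] after the InhibVars
// block. The constants below are illustrative assumptions, not generated values.
package main

import "fmt"

const (
	poolFloatAvgMaxStart = 8 // assumed InhibVarsN for this sketch
	avgMaxN              = 2 // assumed slots: Avg = 0, Max = 1
	avgMaxPhasesN        = 4 // AMCycle, AMMinus, AMPlus, AMPrev
)

func avgMaxVarIndex(vr, phase, am uint32) uint32 {
	return poolFloatAvgMaxStart + vr*avgMaxN*avgMaxPhasesN + phase*avgMaxN + am
}

func main() {
	// AMCaP (vr = 0) in the plus phase (AMPlus = 2), Max slot (am = 1):
	// 8 + 0*8 + 2*2 + 1 = 13
	fmt.Println(avgMaxVarIndex(0, 2, 1)) // prints 13
}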
diff --git a/axon/shaders/SendSpike.wgsl b/axon/shaders/SendSpike.wgsl
index 4e110d68d..ef617a51e 100644
--- a/axon/shaders/SendSpike.wgsl
+++ b/axon/shaders/SendSpike.wgsl
@@ -92,7 +92,7 @@ fn LayerParams_SendSpike(ly: ptr, ctx: ptr, ctx: ptr, lpi: u32,pi: u32,ni: u32,di: u32) {
- Neurons[IndexF323D(Neurons[0], Neurons[1], Neurons[2], u32(ni),u32(di),u32(Burst))] = Neurons[IndexF323D(Neurons[0], Neurons[1], Neurons[2], u32(ni),u32(di),u32(CaSpkP))];
+ Neurons[IndexF323D(Neurons[0], Neurons[1], Neurons[2], u32(ni),u32(di),u32(Burst))] = Neurons[IndexF323D(Neurons[0], Neurons[1], Neurons[2], u32(ni),u32(di),u32(CaP))];
var li = (*ly).Index;
var pil = pi - (*ly).PoolSt; // 0-n pool index
var pnn = u32(PoolNNeurons(pi));
@@ -101,10 +101,10 @@ fn LayerParams_PostSpikeSpecial(ly: ptr, ctx: ptr, ctx: ptr
if (u32((*ctx).Cycle) != u32((*ctx).ThetaCycles)-1-(*pt).Com.DelLen) {
return;
}
- sendVal *= Neurons[IndexF323D(Neurons[0], Neurons[1], Neurons[ // Burst is regular CaSpkP for all non-SuperLayer neurons
+ sendVal *= Neurons[IndexF323D(Neurons[0], Neurons[1], Neurons[ // Burst is regular CaP for all non-SuperLayer neurons
2], u32(ni),u32(di),u32(Burst))];
} else {
if (Neurons[IndexF323D(Neurons[0], Neurons[1], Neurons[2], u32(ni),u32(di),u32(Spike))] == 0) {
@@ -655,7 +655,7 @@ const ViewTimesN: ViewTimes = 7;
const DAModTypesN: DAModTypes = 4;
const ValenceTypesN: ValenceTypes = 3;
const NeuronFlagsN: NeuronFlags = 9;
-const NeuronVarsN: NeuronVars = 90;
+const NeuronVarsN: NeuronVars = 89;
const NeuronAvgVarsN: NeuronAvgVars = 7;
const NeuronIndexVarsN: NeuronIndexVars = 3;
const PathTypesN: PathTypes = 12;
@@ -958,7 +958,7 @@ const LayerRewPredNeg: LayerVars = 10;
///////////// import: "learn-path.go"
///////////// import: "learn.go"
-struct CaLrnParams {
+struct LearnCaParams {
Norm: f32,
SpkVGCC: i32,
SpkVgccCa: f32,
@@ -991,7 +991,7 @@ struct RLRateParams {
pad: i32,
}
struct LearnNeurParams {
- CaLearn: CaLrnParams,
+ CaLearn: LearnCaParams,
CaSpk: NeurCaParams,
LrnNMDA: NMDAParams,
TrgAvgAct: TrgAvgActParams,
@@ -1163,82 +1163,81 @@ const ISI: NeuronVars = 10;
const ISIAvg: NeuronVars = 11;
const Ext: NeuronVars = 12;
const Target: NeuronVars = 13;
-const CaSpkM: NeuronVars = 14;
-const CaSpkP: NeuronVars = 15;
-const CaSpkD: NeuronVars = 16;
-const CaSpkPM: NeuronVars = 17;
-const CaLrn: NeuronVars = 18;
-const NrnCaM: NeuronVars = 19;
-const NrnCaP: NeuronVars = 20;
-const NrnCaD: NeuronVars = 21;
-const CaDiff: NeuronVars = 22;
-const RLRate: NeuronVars = 23;
-const GnmdaSyn: NeuronVars = 24;
-const Gnmda: NeuronVars = 25;
-const GnmdaLrn: NeuronVars = 26;
-const GnmdaMaint: NeuronVars = 27;
-const NmdaCa: NeuronVars = 28;
-const Gvgcc: NeuronVars = 29;
-const VgccM: NeuronVars = 30;
-const VgccH: NeuronVars = 31;
-const VgccCa: NeuronVars = 32;
-const VgccCaInt: NeuronVars = 33;
-const Burst: NeuronVars = 34;
-const BurstPrv: NeuronVars = 35;
-const CtxtGe: NeuronVars = 36;
-const CtxtGeRaw: NeuronVars = 37;
-const CtxtGeOrig: NeuronVars = 38;
-const GgabaB: NeuronVars = 39;
-const GABAB: NeuronVars = 40;
-const GABABx: NeuronVars = 41;
-const Gak: NeuronVars = 42;
-const SSGiDend: NeuronVars = 43;
-const GknaMed: NeuronVars = 44;
-const GknaSlow: NeuronVars = 45;
-const Gkir: NeuronVars = 46;
-const KirM: NeuronVars = 47;
-const Gsk: NeuronVars = 48;
-const SKCaIn: NeuronVars = 49;
-const SKCaR: NeuronVars = 50;
-const SKCaM: NeuronVars = 51;
-const Gmahp: NeuronVars = 52;
-const MahpN: NeuronVars = 53;
-const Gsahp: NeuronVars = 54;
-const SahpCa: NeuronVars = 55;
-const SahpN: NeuronVars = 56;
-const ActM: NeuronVars = 57;
-const ActP: NeuronVars = 58;
-const SpkSt1: NeuronVars = 59;
-const SpkSt2: NeuronVars = 60;
-const SpkMax: NeuronVars = 61;
-const SpkMaxCa: NeuronVars = 62;
-const SpkBin0: NeuronVars = 63;
-const SpkBin1: NeuronVars = 64;
-const SpkBin2: NeuronVars = 65;
-const SpkBin3: NeuronVars = 66;
-const SpkBin4: NeuronVars = 67;
-const SpkBin5: NeuronVars = 68;
-const SpkBin6: NeuronVars = 69;
-const SpkBin7: NeuronVars = 70;
-const SpkPrv: NeuronVars = 71;
-const GeNoise: NeuronVars = 72;
-const GeNoiseP: NeuronVars = 73;
-const GiNoise: NeuronVars = 74;
-const GiNoiseP: NeuronVars = 75;
-const GeExt: NeuronVars = 76;
-const GeRaw: NeuronVars = 77;
-const GeSyn: NeuronVars = 78;
-const GiRaw: NeuronVars = 79;
-const GiSyn: NeuronVars = 80;
-const GeInt: NeuronVars = 81;
-const GeIntNorm: NeuronVars = 82;
-const GiInt: NeuronVars = 83;
-const GModRaw: NeuronVars = 84;
-const GModSyn: NeuronVars = 85;
-const SMaintP: NeuronVars = 86;
-const GMaintRaw: NeuronVars = 87;
-const GMaintSyn: NeuronVars = 88;
-const NeurFlags: NeuronVars = 89;
+const CaM: NeuronVars = 14;
+const CaP: NeuronVars = 15;
+const CaD: NeuronVars = 16;
+const LearnCa: NeuronVars = 17;
+const LearnCaM: NeuronVars = 18;
+const LearnCaP: NeuronVars = 19;
+const LearnCaD: NeuronVars = 20;
+const CaDiff: NeuronVars = 21;
+const RLRate: NeuronVars = 22;
+const GnmdaSyn: NeuronVars = 23;
+const Gnmda: NeuronVars = 24;
+const GnmdaLrn: NeuronVars = 25;
+const GnmdaMaint: NeuronVars = 26;
+const NmdaCa: NeuronVars = 27;
+const Gvgcc: NeuronVars = 28;
+const VgccM: NeuronVars = 29;
+const VgccH: NeuronVars = 30;
+const VgccCa: NeuronVars = 31;
+const VgccCaInt: NeuronVars = 32;
+const Burst: NeuronVars = 33;
+const BurstPrv: NeuronVars = 34;
+const CtxtGe: NeuronVars = 35;
+const CtxtGeRaw: NeuronVars = 36;
+const CtxtGeOrig: NeuronVars = 37;
+const GgabaB: NeuronVars = 38;
+const GABAB: NeuronVars = 39;
+const GABABx: NeuronVars = 40;
+const Gak: NeuronVars = 41;
+const SSGiDend: NeuronVars = 42;
+const GknaMed: NeuronVars = 43;
+const GknaSlow: NeuronVars = 44;
+const Gkir: NeuronVars = 45;
+const KirM: NeuronVars = 46;
+const Gsk: NeuronVars = 47;
+const SKCaIn: NeuronVars = 48;
+const SKCaR: NeuronVars = 49;
+const SKCaM: NeuronVars = 50;
+const Gmahp: NeuronVars = 51;
+const MahpN: NeuronVars = 52;
+const Gsahp: NeuronVars = 53;
+const SahpCa: NeuronVars = 54;
+const SahpN: NeuronVars = 55;
+const ActM: NeuronVars = 56;
+const ActP: NeuronVars = 57;
+const Beta1: NeuronVars = 58;
+const Beta2: NeuronVars = 59;
+const SpkMax: NeuronVars = 60;
+const SpkMaxCa: NeuronVars = 61;
+const SpkBin0: NeuronVars = 62;
+const SpkBin1: NeuronVars = 63;
+const SpkBin2: NeuronVars = 64;
+const SpkBin3: NeuronVars = 65;
+const SpkBin4: NeuronVars = 66;
+const SpkBin5: NeuronVars = 67;
+const SpkBin6: NeuronVars = 68;
+const SpkBin7: NeuronVars = 69;
+const SpkPrv: NeuronVars = 70;
+const GeNoise: NeuronVars = 71;
+const GeNoiseP: NeuronVars = 72;
+const GiNoise: NeuronVars = 73;
+const GiNoiseP: NeuronVars = 74;
+const GeExt: NeuronVars = 75;
+const GeRaw: NeuronVars = 76;
+const GeSyn: NeuronVars = 77;
+const GiRaw: NeuronVars = 78;
+const GiSyn: NeuronVars = 79;
+const GeInt: NeuronVars = 80;
+const GeIntNorm: NeuronVars = 81;
+const GiInt: NeuronVars = 82;
+const GModRaw: NeuronVars = 83;
+const GModSyn: NeuronVars = 84;
+const SMaintP: NeuronVars = 85;
+const GMaintRaw: NeuronVars = 86;
+const GMaintSyn: NeuronVars = 87;
+const NeurFlags: NeuronVars = 88;
alias NeuronAvgVars = i32; //enums:enum
const ActAvg: NeuronAvgVars = 0;
const AvgPct: NeuronAvgVars = 1;
@@ -1369,8 +1368,8 @@ const AMMinus: AvgMaxPhases = 1;
const AMPlus: AvgMaxPhases = 2;
const AMPrev: AvgMaxPhases = 3;
alias AvgMaxVars = i32; //enums:enum -trim-prefix AM
-const AMCaSpkP: AvgMaxVars = 0;
-const AMCaSpkD: AvgMaxVars = 1;
+const AMCaP: AvgMaxVars = 0;
+const AMCaD: AvgMaxVars = 1;
const AMSpkMax: AvgMaxVars = 2;
const AMAct: AvgMaxVars = 3;
const AMGeInt: AvgMaxVars = 4;
@@ -1379,7 +1378,7 @@ const AMAvgDif: AvgMaxVars = 6;
const poolFloatAvgMaxStart = InhibVarsN;
const PoolVarsN = poolFloatAvgMaxStart + InhibVars(i32(AvgMaxVarsN)*i32(AvgMaxN)*i32(AvgMaxPhasesN));
const PoolIntVarsTot = PoolIntAvgMaxStart + PoolIntVars(i32(AvgMaxVarsN)*i32(AvgMaxN));
-const avgMaxToNeuron = array(CaSpkP, CaSpkD, SpkMax, Act, GeInt, GiInt);
+const avgMaxToNeuron = array(CaP, CaD, SpkMax, Act, GeInt, GiInt);
fn AvgMaxVarIndex(vr: AvgMaxVars, phase: AvgMaxPhases, am: AvgMax) -> u32 {
return u32(poolFloatAvgMaxStart) + u32(vr)*u32(AvgMaxN)*u32(AvgMaxPhasesN) + u32(phase)*u32(AvgMaxN) + u32(am);
}
diff --git a/axon/shaders/SlowAdaptLayer.wgsl b/axon/shaders/SlowAdaptLayer.wgsl
index 1d05cd2f0..03b78b613 100644
--- a/axon/shaders/SlowAdaptLayer.wgsl
+++ b/axon/shaders/SlowAdaptLayer.wgsl
@@ -427,7 +427,7 @@ const ViewTimesN: ViewTimes = 7;
const DAModTypesN: DAModTypes = 4;
const ValenceTypesN: ValenceTypes = 3;
const NeuronFlagsN: NeuronFlags = 9;
-const NeuronVarsN: NeuronVars = 90;
+const NeuronVarsN: NeuronVars = 89;
const NeuronAvgVarsN: NeuronAvgVars = 7;
const NeuronIndexVarsN: NeuronIndexVars = 3;
const PathTypesN: PathTypes = 12;
@@ -821,7 +821,7 @@ fn SlowAdaptLayer(li: u32) { //gosl:kernel
///////////// import: "learn-path.go"
///////////// import: "learn.go"
-struct CaLrnParams {
+struct LearnCaParams {
Norm: f32,
SpkVGCC: i32,
SpkVgccCa: f32,
@@ -854,7 +854,7 @@ struct RLRateParams {
pad: i32,
}
struct LearnNeurParams {
- CaLearn: CaLrnParams,
+ CaLearn: LearnCaParams,
CaSpk: NeurCaParams,
LrnNMDA: NMDAParams,
TrgAvgAct: TrgAvgActParams,
@@ -1010,82 +1010,81 @@ const ISI: NeuronVars = 10;
const ISIAvg: NeuronVars = 11;
const Ext: NeuronVars = 12;
const Target: NeuronVars = 13;
-const CaSpkM: NeuronVars = 14;
-const CaSpkP: NeuronVars = 15;
-const CaSpkD: NeuronVars = 16;
-const CaSpkPM: NeuronVars = 17;
-const CaLrn: NeuronVars = 18;
-const NrnCaM: NeuronVars = 19;
-const NrnCaP: NeuronVars = 20;
-const NrnCaD: NeuronVars = 21;
-const CaDiff: NeuronVars = 22;
-const RLRate: NeuronVars = 23;
-const GnmdaSyn: NeuronVars = 24;
-const Gnmda: NeuronVars = 25;
-const GnmdaLrn: NeuronVars = 26;
-const GnmdaMaint: NeuronVars = 27;
-const NmdaCa: NeuronVars = 28;
-const Gvgcc: NeuronVars = 29;
-const VgccM: NeuronVars = 30;
-const VgccH: NeuronVars = 31;
-const VgccCa: NeuronVars = 32;
-const VgccCaInt: NeuronVars = 33;
-const Burst: NeuronVars = 34;
-const BurstPrv: NeuronVars = 35;
-const CtxtGe: NeuronVars = 36;
-const CtxtGeRaw: NeuronVars = 37;
-const CtxtGeOrig: NeuronVars = 38;
-const GgabaB: NeuronVars = 39;
-const GABAB: NeuronVars = 40;
-const GABABx: NeuronVars = 41;
-const Gak: NeuronVars = 42;
-const SSGiDend: NeuronVars = 43;
-const GknaMed: NeuronVars = 44;
-const GknaSlow: NeuronVars = 45;
-const Gkir: NeuronVars = 46;
-const KirM: NeuronVars = 47;
-const Gsk: NeuronVars = 48;
-const SKCaIn: NeuronVars = 49;
-const SKCaR: NeuronVars = 50;
-const SKCaM: NeuronVars = 51;
-const Gmahp: NeuronVars = 52;
-const MahpN: NeuronVars = 53;
-const Gsahp: NeuronVars = 54;
-const SahpCa: NeuronVars = 55;
-const SahpN: NeuronVars = 56;
-const ActM: NeuronVars = 57;
-const ActP: NeuronVars = 58;
-const SpkSt1: NeuronVars = 59;
-const SpkSt2: NeuronVars = 60;
-const SpkMax: NeuronVars = 61;
-const SpkMaxCa: NeuronVars = 62;
-const SpkBin0: NeuronVars = 63;
-const SpkBin1: NeuronVars = 64;
-const SpkBin2: NeuronVars = 65;
-const SpkBin3: NeuronVars = 66;
-const SpkBin4: NeuronVars = 67;
-const SpkBin5: NeuronVars = 68;
-const SpkBin6: NeuronVars = 69;
-const SpkBin7: NeuronVars = 70;
-const SpkPrv: NeuronVars = 71;
-const GeNoise: NeuronVars = 72;
-const GeNoiseP: NeuronVars = 73;
-const GiNoise: NeuronVars = 74;
-const GiNoiseP: NeuronVars = 75;
-const GeExt: NeuronVars = 76;
-const GeRaw: NeuronVars = 77;
-const GeSyn: NeuronVars = 78;
-const GiRaw: NeuronVars = 79;
-const GiSyn: NeuronVars = 80;
-const GeInt: NeuronVars = 81;
-const GeIntNorm: NeuronVars = 82;
-const GiInt: NeuronVars = 83;
-const GModRaw: NeuronVars = 84;
-const GModSyn: NeuronVars = 85;
-const SMaintP: NeuronVars = 86;
-const GMaintRaw: NeuronVars = 87;
-const GMaintSyn: NeuronVars = 88;
-const NeurFlags: NeuronVars = 89;
+const CaM: NeuronVars = 14;
+const CaP: NeuronVars = 15;
+const CaD: NeuronVars = 16;
+const LearnCa: NeuronVars = 17;
+const LearnCaM: NeuronVars = 18;
+const LearnCaP: NeuronVars = 19;
+const LearnCaD: NeuronVars = 20;
+const CaDiff: NeuronVars = 21;
+const RLRate: NeuronVars = 22;
+const GnmdaSyn: NeuronVars = 23;
+const Gnmda: NeuronVars = 24;
+const GnmdaLrn: NeuronVars = 25;
+const GnmdaMaint: NeuronVars = 26;
+const NmdaCa: NeuronVars = 27;
+const Gvgcc: NeuronVars = 28;
+const VgccM: NeuronVars = 29;
+const VgccH: NeuronVars = 30;
+const VgccCa: NeuronVars = 31;
+const VgccCaInt: NeuronVars = 32;
+const Burst: NeuronVars = 33;
+const BurstPrv: NeuronVars = 34;
+const CtxtGe: NeuronVars = 35;
+const CtxtGeRaw: NeuronVars = 36;
+const CtxtGeOrig: NeuronVars = 37;
+const GgabaB: NeuronVars = 38;
+const GABAB: NeuronVars = 39;
+const GABABx: NeuronVars = 40;
+const Gak: NeuronVars = 41;
+const SSGiDend: NeuronVars = 42;
+const GknaMed: NeuronVars = 43;
+const GknaSlow: NeuronVars = 44;
+const Gkir: NeuronVars = 45;
+const KirM: NeuronVars = 46;
+const Gsk: NeuronVars = 47;
+const SKCaIn: NeuronVars = 48;
+const SKCaR: NeuronVars = 49;
+const SKCaM: NeuronVars = 50;
+const Gmahp: NeuronVars = 51;
+const MahpN: NeuronVars = 52;
+const Gsahp: NeuronVars = 53;
+const SahpCa: NeuronVars = 54;
+const SahpN: NeuronVars = 55;
+const ActM: NeuronVars = 56;
+const ActP: NeuronVars = 57;
+const Beta1: NeuronVars = 58;
+const Beta2: NeuronVars = 59;
+const SpkMax: NeuronVars = 60;
+const SpkMaxCa: NeuronVars = 61;
+const SpkBin0: NeuronVars = 62;
+const SpkBin1: NeuronVars = 63;
+const SpkBin2: NeuronVars = 64;
+const SpkBin3: NeuronVars = 65;
+const SpkBin4: NeuronVars = 66;
+const SpkBin5: NeuronVars = 67;
+const SpkBin6: NeuronVars = 68;
+const SpkBin7: NeuronVars = 69;
+const SpkPrv: NeuronVars = 70;
+const GeNoise: NeuronVars = 71;
+const GeNoiseP: NeuronVars = 72;
+const GiNoise: NeuronVars = 73;
+const GiNoiseP: NeuronVars = 74;
+const GeExt: NeuronVars = 75;
+const GeRaw: NeuronVars = 76;
+const GeSyn: NeuronVars = 77;
+const GiRaw: NeuronVars = 78;
+const GiSyn: NeuronVars = 79;
+const GeInt: NeuronVars = 80;
+const GeIntNorm: NeuronVars = 81;
+const GiInt: NeuronVars = 82;
+const GModRaw: NeuronVars = 83;
+const GModSyn: NeuronVars = 84;
+const SMaintP: NeuronVars = 85;
+const GMaintRaw: NeuronVars = 86;
+const GMaintSyn: NeuronVars = 87;
+const NeurFlags: NeuronVars = 88;
alias NeuronAvgVars = i32; //enums:enum
const ActAvg: NeuronAvgVars = 0;
const AvgPct: NeuronAvgVars = 1;
@@ -1216,8 +1215,8 @@ const AMMinus: AvgMaxPhases = 1;
const AMPlus: AvgMaxPhases = 2;
const AMPrev: AvgMaxPhases = 3;
alias AvgMaxVars = i32; //enums:enum -trim-prefix AM
-const AMCaSpkP: AvgMaxVars = 0;
-const AMCaSpkD: AvgMaxVars = 1;
+const AMCaP: AvgMaxVars = 0;
+const AMCaD: AvgMaxVars = 1;
const AMSpkMax: AvgMaxVars = 2;
const AMAct: AvgMaxVars = 3;
const AMGeInt: AvgMaxVars = 4;
@@ -1226,7 +1225,7 @@ const AMAvgDif: AvgMaxVars = 6;
const poolFloatAvgMaxStart = InhibVarsN;
const PoolVarsN = poolFloatAvgMaxStart + InhibVars(i32(AvgMaxVarsN)*i32(AvgMaxN)*i32(AvgMaxPhasesN));
const PoolIntVarsTot = PoolIntAvgMaxStart + PoolIntVars(i32(AvgMaxVarsN)*i32(AvgMaxN));
-const avgMaxToNeuron = array(CaSpkP, CaSpkD, SpkMax, Act, GeInt, GiInt);
+const avgMaxToNeuron = array(CaP, CaD, SpkMax, Act, GeInt, GiInt);
fn AvgMaxVarIndex(vr: AvgMaxVars, phase: AvgMaxPhases, am: AvgMax) -> u32 {
return u32(poolFloatAvgMaxStart) + u32(vr)*u32(AvgMaxN)*u32(AvgMaxPhasesN) + u32(phase)*u32(AvgMaxN) + u32(am);
}
diff --git a/axon/shaders/SlowAdaptNeuron.wgsl b/axon/shaders/SlowAdaptNeuron.wgsl
index 95804d025..3cb53e159 100644
--- a/axon/shaders/SlowAdaptNeuron.wgsl
+++ b/axon/shaders/SlowAdaptNeuron.wgsl
@@ -429,7 +429,7 @@ const ViewTimesN: ViewTimes = 7;
const DAModTypesN: DAModTypes = 4;
const ValenceTypesN: ValenceTypes = 3;
const NeuronFlagsN: NeuronFlags = 9;
-const NeuronVarsN: NeuronVars = 90;
+const NeuronVarsN: NeuronVars = 89;
const NeuronAvgVarsN: NeuronAvgVars = 7;
const NeuronIndexVarsN: NeuronIndexVars = 3;
const PathTypesN: PathTypes = 12;
@@ -807,7 +807,7 @@ fn PathParams_SynScale(pt: ptr, ctx: ptr,
}
///////////// import: "learn.go"
-struct CaLrnParams {
+struct LearnCaParams {
Norm: f32,
SpkVGCC: i32,
SpkVgccCa: f32,
@@ -840,7 +840,7 @@ struct RLRateParams {
pad: i32,
}
struct LearnNeurParams {
- CaLearn: CaLrnParams,
+ CaLearn: LearnCaParams,
CaSpk: NeurCaParams,
LrnNMDA: NMDAParams,
TrgAvgAct: TrgAvgActParams,
@@ -1060,82 +1060,81 @@ const ISI: NeuronVars = 10;
const ISIAvg: NeuronVars = 11;
const Ext: NeuronVars = 12;
const Target: NeuronVars = 13;
-const CaSpkM: NeuronVars = 14;
-const CaSpkP: NeuronVars = 15;
-const CaSpkD: NeuronVars = 16;
-const CaSpkPM: NeuronVars = 17;
-const CaLrn: NeuronVars = 18;
-const NrnCaM: NeuronVars = 19;
-const NrnCaP: NeuronVars = 20;
-const NrnCaD: NeuronVars = 21;
-const CaDiff: NeuronVars = 22;
-const RLRate: NeuronVars = 23;
-const GnmdaSyn: NeuronVars = 24;
-const Gnmda: NeuronVars = 25;
-const GnmdaLrn: NeuronVars = 26;
-const GnmdaMaint: NeuronVars = 27;
-const NmdaCa: NeuronVars = 28;
-const Gvgcc: NeuronVars = 29;
-const VgccM: NeuronVars = 30;
-const VgccH: NeuronVars = 31;
-const VgccCa: NeuronVars = 32;
-const VgccCaInt: NeuronVars = 33;
-const Burst: NeuronVars = 34;
-const BurstPrv: NeuronVars = 35;
-const CtxtGe: NeuronVars = 36;
-const CtxtGeRaw: NeuronVars = 37;
-const CtxtGeOrig: NeuronVars = 38;
-const GgabaB: NeuronVars = 39;
-const GABAB: NeuronVars = 40;
-const GABABx: NeuronVars = 41;
-const Gak: NeuronVars = 42;
-const SSGiDend: NeuronVars = 43;
-const GknaMed: NeuronVars = 44;
-const GknaSlow: NeuronVars = 45;
-const Gkir: NeuronVars = 46;
-const KirM: NeuronVars = 47;
-const Gsk: NeuronVars = 48;
-const SKCaIn: NeuronVars = 49;
-const SKCaR: NeuronVars = 50;
-const SKCaM: NeuronVars = 51;
-const Gmahp: NeuronVars = 52;
-const MahpN: NeuronVars = 53;
-const Gsahp: NeuronVars = 54;
-const SahpCa: NeuronVars = 55;
-const SahpN: NeuronVars = 56;
-const ActM: NeuronVars = 57;
-const ActP: NeuronVars = 58;
-const SpkSt1: NeuronVars = 59;
-const SpkSt2: NeuronVars = 60;
-const SpkMax: NeuronVars = 61;
-const SpkMaxCa: NeuronVars = 62;
-const SpkBin0: NeuronVars = 63;
-const SpkBin1: NeuronVars = 64;
-const SpkBin2: NeuronVars = 65;
-const SpkBin3: NeuronVars = 66;
-const SpkBin4: NeuronVars = 67;
-const SpkBin5: NeuronVars = 68;
-const SpkBin6: NeuronVars = 69;
-const SpkBin7: NeuronVars = 70;
-const SpkPrv: NeuronVars = 71;
-const GeNoise: NeuronVars = 72;
-const GeNoiseP: NeuronVars = 73;
-const GiNoise: NeuronVars = 74;
-const GiNoiseP: NeuronVars = 75;
-const GeExt: NeuronVars = 76;
-const GeRaw: NeuronVars = 77;
-const GeSyn: NeuronVars = 78;
-const GiRaw: NeuronVars = 79;
-const GiSyn: NeuronVars = 80;
-const GeInt: NeuronVars = 81;
-const GeIntNorm: NeuronVars = 82;
-const GiInt: NeuronVars = 83;
-const GModRaw: NeuronVars = 84;
-const GModSyn: NeuronVars = 85;
-const SMaintP: NeuronVars = 86;
-const GMaintRaw: NeuronVars = 87;
-const GMaintSyn: NeuronVars = 88;
-const NeurFlags: NeuronVars = 89;
+const CaM: NeuronVars = 14;
+const CaP: NeuronVars = 15;
+const CaD: NeuronVars = 16;
+const LearnCa: NeuronVars = 17;
+const LearnCaM: NeuronVars = 18;
+const LearnCaP: NeuronVars = 19;
+const LearnCaD: NeuronVars = 20;
+const CaDiff: NeuronVars = 21;
+const RLRate: NeuronVars = 22;
+const GnmdaSyn: NeuronVars = 23;
+const Gnmda: NeuronVars = 24;
+const GnmdaLrn: NeuronVars = 25;
+const GnmdaMaint: NeuronVars = 26;
+const NmdaCa: NeuronVars = 27;
+const Gvgcc: NeuronVars = 28;
+const VgccM: NeuronVars = 29;
+const VgccH: NeuronVars = 30;
+const VgccCa: NeuronVars = 31;
+const VgccCaInt: NeuronVars = 32;
+const Burst: NeuronVars = 33;
+const BurstPrv: NeuronVars = 34;
+const CtxtGe: NeuronVars = 35;
+const CtxtGeRaw: NeuronVars = 36;
+const CtxtGeOrig: NeuronVars = 37;
+const GgabaB: NeuronVars = 38;
+const GABAB: NeuronVars = 39;
+const GABABx: NeuronVars = 40;
+const Gak: NeuronVars = 41;
+const SSGiDend: NeuronVars = 42;
+const GknaMed: NeuronVars = 43;
+const GknaSlow: NeuronVars = 44;
+const Gkir: NeuronVars = 45;
+const KirM: NeuronVars = 46;
+const Gsk: NeuronVars = 47;
+const SKCaIn: NeuronVars = 48;
+const SKCaR: NeuronVars = 49;
+const SKCaM: NeuronVars = 50;
+const Gmahp: NeuronVars = 51;
+const MahpN: NeuronVars = 52;
+const Gsahp: NeuronVars = 53;
+const SahpCa: NeuronVars = 54;
+const SahpN: NeuronVars = 55;
+const ActM: NeuronVars = 56;
+const ActP: NeuronVars = 57;
+const Beta1: NeuronVars = 58;
+const Beta2: NeuronVars = 59;
+const SpkMax: NeuronVars = 60;
+const SpkMaxCa: NeuronVars = 61;
+const SpkBin0: NeuronVars = 62;
+const SpkBin1: NeuronVars = 63;
+const SpkBin2: NeuronVars = 64;
+const SpkBin3: NeuronVars = 65;
+const SpkBin4: NeuronVars = 66;
+const SpkBin5: NeuronVars = 67;
+const SpkBin6: NeuronVars = 68;
+const SpkBin7: NeuronVars = 69;
+const SpkPrv: NeuronVars = 70;
+const GeNoise: NeuronVars = 71;
+const GeNoiseP: NeuronVars = 72;
+const GiNoise: NeuronVars = 73;
+const GiNoiseP: NeuronVars = 74;
+const GeExt: NeuronVars = 75;
+const GeRaw: NeuronVars = 76;
+const GeSyn: NeuronVars = 77;
+const GiRaw: NeuronVars = 78;
+const GiSyn: NeuronVars = 79;
+const GeInt: NeuronVars = 80;
+const GeIntNorm: NeuronVars = 81;
+const GiInt: NeuronVars = 82;
+const GModRaw: NeuronVars = 83;
+const GModSyn: NeuronVars = 84;
+const SMaintP: NeuronVars = 85;
+const GMaintRaw: NeuronVars = 86;
+const GMaintSyn: NeuronVars = 87;
+const NeurFlags: NeuronVars = 88;
alias NeuronAvgVars = i32; //enums:enum
const ActAvg: NeuronAvgVars = 0;
const AvgPct: NeuronAvgVars = 1;
@@ -1269,8 +1268,8 @@ const AMMinus: AvgMaxPhases = 1;
const AMPlus: AvgMaxPhases = 2;
const AMPrev: AvgMaxPhases = 3;
alias AvgMaxVars = i32; //enums:enum -trim-prefix AM
-const AMCaSpkP: AvgMaxVars = 0;
-const AMCaSpkD: AvgMaxVars = 1;
+const AMCaP: AvgMaxVars = 0;
+const AMCaD: AvgMaxVars = 1;
const AMSpkMax: AvgMaxVars = 2;
const AMAct: AvgMaxVars = 3;
const AMGeInt: AvgMaxVars = 4;
@@ -1279,7 +1278,7 @@ const AMAvgDif: AvgMaxVars = 6;
const poolFloatAvgMaxStart = InhibVarsN;
const PoolVarsN = poolFloatAvgMaxStart + InhibVars(i32(AvgMaxVarsN)*i32(AvgMaxN)*i32(AvgMaxPhasesN));
const PoolIntVarsTot = PoolIntAvgMaxStart + PoolIntVars(i32(AvgMaxVarsN)*i32(AvgMaxN));
-const avgMaxToNeuron = array(CaSpkP, CaSpkD, SpkMax, Act, GeInt, GiInt);
+const avgMaxToNeuron = array(CaP, CaD, SpkMax, Act, GeInt, GiInt);
///////////// import: "rand.go"
alias RandFunIndex = u32;
diff --git a/axon/shaders/WtFromDWtLayer.wgsl b/axon/shaders/WtFromDWtLayer.wgsl
index 276dea7d2..bec9a8b2d 100644
--- a/axon/shaders/WtFromDWtLayer.wgsl
+++ b/axon/shaders/WtFromDWtLayer.wgsl
@@ -427,7 +427,7 @@ const ViewTimesN: ViewTimes = 7;
const DAModTypesN: DAModTypes = 4;
const ValenceTypesN: ValenceTypes = 3;
const NeuronFlagsN: NeuronFlags = 9;
-const NeuronVarsN: NeuronVars = 90;
+const NeuronVarsN: NeuronVars = 89;
const NeuronAvgVarsN: NeuronAvgVars = 7;
const NeuronIndexVarsN: NeuronIndexVars = 3;
const PathTypesN: PathTypes = 12;
@@ -820,7 +820,7 @@ fn WtFromDWtLayer(li: u32) { //gosl:kernel
///////////// import: "learn-path.go"
///////////// import: "learn.go"
-struct CaLrnParams {
+struct LearnCaParams {
Norm: f32,
SpkVGCC: i32,
SpkVgccCa: f32,
@@ -853,7 +853,7 @@ struct RLRateParams {
pad: i32,
}
struct LearnNeurParams {
- CaLearn: CaLrnParams,
+ CaLearn: LearnCaParams,
CaSpk: NeurCaParams,
LrnNMDA: NMDAParams,
TrgAvgAct: TrgAvgActParams,
@@ -1017,82 +1017,81 @@ const ISI: NeuronVars = 10;
const ISIAvg: NeuronVars = 11;
const Ext: NeuronVars = 12;
const Target: NeuronVars = 13;
-const CaSpkM: NeuronVars = 14;
-const CaSpkP: NeuronVars = 15;
-const CaSpkD: NeuronVars = 16;
-const CaSpkPM: NeuronVars = 17;
-const CaLrn: NeuronVars = 18;
-const NrnCaM: NeuronVars = 19;
-const NrnCaP: NeuronVars = 20;
-const NrnCaD: NeuronVars = 21;
-const CaDiff: NeuronVars = 22;
-const RLRate: NeuronVars = 23;
-const GnmdaSyn: NeuronVars = 24;
-const Gnmda: NeuronVars = 25;
-const GnmdaLrn: NeuronVars = 26;
-const GnmdaMaint: NeuronVars = 27;
-const NmdaCa: NeuronVars = 28;
-const Gvgcc: NeuronVars = 29;
-const VgccM: NeuronVars = 30;
-const VgccH: NeuronVars = 31;
-const VgccCa: NeuronVars = 32;
-const VgccCaInt: NeuronVars = 33;
-const Burst: NeuronVars = 34;
-const BurstPrv: NeuronVars = 35;
-const CtxtGe: NeuronVars = 36;
-const CtxtGeRaw: NeuronVars = 37;
-const CtxtGeOrig: NeuronVars = 38;
-const GgabaB: NeuronVars = 39;
-const GABAB: NeuronVars = 40;
-const GABABx: NeuronVars = 41;
-const Gak: NeuronVars = 42;
-const SSGiDend: NeuronVars = 43;
-const GknaMed: NeuronVars = 44;
-const GknaSlow: NeuronVars = 45;
-const Gkir: NeuronVars = 46;
-const KirM: NeuronVars = 47;
-const Gsk: NeuronVars = 48;
-const SKCaIn: NeuronVars = 49;
-const SKCaR: NeuronVars = 50;
-const SKCaM: NeuronVars = 51;
-const Gmahp: NeuronVars = 52;
-const MahpN: NeuronVars = 53;
-const Gsahp: NeuronVars = 54;
-const SahpCa: NeuronVars = 55;
-const SahpN: NeuronVars = 56;
-const ActM: NeuronVars = 57;
-const ActP: NeuronVars = 58;
-const SpkSt1: NeuronVars = 59;
-const SpkSt2: NeuronVars = 60;
-const SpkMax: NeuronVars = 61;
-const SpkMaxCa: NeuronVars = 62;
-const SpkBin0: NeuronVars = 63;
-const SpkBin1: NeuronVars = 64;
-const SpkBin2: NeuronVars = 65;
-const SpkBin3: NeuronVars = 66;
-const SpkBin4: NeuronVars = 67;
-const SpkBin5: NeuronVars = 68;
-const SpkBin6: NeuronVars = 69;
-const SpkBin7: NeuronVars = 70;
-const SpkPrv: NeuronVars = 71;
-const GeNoise: NeuronVars = 72;
-const GeNoiseP: NeuronVars = 73;
-const GiNoise: NeuronVars = 74;
-const GiNoiseP: NeuronVars = 75;
-const GeExt: NeuronVars = 76;
-const GeRaw: NeuronVars = 77;
-const GeSyn: NeuronVars = 78;
-const GiRaw: NeuronVars = 79;
-const GiSyn: NeuronVars = 80;
-const GeInt: NeuronVars = 81;
-const GeIntNorm: NeuronVars = 82;
-const GiInt: NeuronVars = 83;
-const GModRaw: NeuronVars = 84;
-const GModSyn: NeuronVars = 85;
-const SMaintP: NeuronVars = 86;
-const GMaintRaw: NeuronVars = 87;
-const GMaintSyn: NeuronVars = 88;
-const NeurFlags: NeuronVars = 89;
+const CaM: NeuronVars = 14;
+const CaP: NeuronVars = 15;
+const CaD: NeuronVars = 16;
+const LearnCa: NeuronVars = 17;
+const LearnCaM: NeuronVars = 18;
+const LearnCaP: NeuronVars = 19;
+const LearnCaD: NeuronVars = 20;
+const CaDiff: NeuronVars = 21;
+const RLRate: NeuronVars = 22;
+const GnmdaSyn: NeuronVars = 23;
+const Gnmda: NeuronVars = 24;
+const GnmdaLrn: NeuronVars = 25;
+const GnmdaMaint: NeuronVars = 26;
+const NmdaCa: NeuronVars = 27;
+const Gvgcc: NeuronVars = 28;
+const VgccM: NeuronVars = 29;
+const VgccH: NeuronVars = 30;
+const VgccCa: NeuronVars = 31;
+const VgccCaInt: NeuronVars = 32;
+const Burst: NeuronVars = 33;
+const BurstPrv: NeuronVars = 34;
+const CtxtGe: NeuronVars = 35;
+const CtxtGeRaw: NeuronVars = 36;
+const CtxtGeOrig: NeuronVars = 37;
+const GgabaB: NeuronVars = 38;
+const GABAB: NeuronVars = 39;
+const GABABx: NeuronVars = 40;
+const Gak: NeuronVars = 41;
+const SSGiDend: NeuronVars = 42;
+const GknaMed: NeuronVars = 43;
+const GknaSlow: NeuronVars = 44;
+const Gkir: NeuronVars = 45;
+const KirM: NeuronVars = 46;
+const Gsk: NeuronVars = 47;
+const SKCaIn: NeuronVars = 48;
+const SKCaR: NeuronVars = 49;
+const SKCaM: NeuronVars = 50;
+const Gmahp: NeuronVars = 51;
+const MahpN: NeuronVars = 52;
+const Gsahp: NeuronVars = 53;
+const SahpCa: NeuronVars = 54;
+const SahpN: NeuronVars = 55;
+const ActM: NeuronVars = 56;
+const ActP: NeuronVars = 57;
+const Beta1: NeuronVars = 58;
+const Beta2: NeuronVars = 59;
+const SpkMax: NeuronVars = 60;
+const SpkMaxCa: NeuronVars = 61;
+const SpkBin0: NeuronVars = 62;
+const SpkBin1: NeuronVars = 63;
+const SpkBin2: NeuronVars = 64;
+const SpkBin3: NeuronVars = 65;
+const SpkBin4: NeuronVars = 66;
+const SpkBin5: NeuronVars = 67;
+const SpkBin6: NeuronVars = 68;
+const SpkBin7: NeuronVars = 69;
+const SpkPrv: NeuronVars = 70;
+const GeNoise: NeuronVars = 71;
+const GeNoiseP: NeuronVars = 72;
+const GiNoise: NeuronVars = 73;
+const GiNoiseP: NeuronVars = 74;
+const GeExt: NeuronVars = 75;
+const GeRaw: NeuronVars = 76;
+const GeSyn: NeuronVars = 77;
+const GiRaw: NeuronVars = 78;
+const GiSyn: NeuronVars = 79;
+const GeInt: NeuronVars = 80;
+const GeIntNorm: NeuronVars = 81;
+const GiInt: NeuronVars = 82;
+const GModRaw: NeuronVars = 83;
+const GModSyn: NeuronVars = 84;
+const SMaintP: NeuronVars = 85;
+const GMaintRaw: NeuronVars = 86;
+const GMaintSyn: NeuronVars = 87;
+const NeurFlags: NeuronVars = 88;
alias NeuronAvgVars = i32; //enums:enum
const ActAvg: NeuronAvgVars = 0;
const AvgPct: NeuronAvgVars = 1;
@@ -1223,8 +1222,8 @@ const AMMinus: AvgMaxPhases = 1;
const AMPlus: AvgMaxPhases = 2;
const AMPrev: AvgMaxPhases = 3;
alias AvgMaxVars = i32; //enums:enum -trim-prefix AM
-const AMCaSpkP: AvgMaxVars = 0;
-const AMCaSpkD: AvgMaxVars = 1;
+const AMCaP: AvgMaxVars = 0;
+const AMCaD: AvgMaxVars = 1;
const AMSpkMax: AvgMaxVars = 2;
const AMAct: AvgMaxVars = 3;
const AMGeInt: AvgMaxVars = 4;
@@ -1233,7 +1232,7 @@ const AMAvgDif: AvgMaxVars = 6;
const poolFloatAvgMaxStart = InhibVarsN;
const PoolVarsN = poolFloatAvgMaxStart + InhibVars(i32(AvgMaxVarsN)*i32(AvgMaxN)*i32(AvgMaxPhasesN));
const PoolIntVarsTot = PoolIntAvgMaxStart + PoolIntVars(i32(AvgMaxVarsN)*i32(AvgMaxN));
-const avgMaxToNeuron = array(CaSpkP, CaSpkD, SpkMax, Act, GeInt, GiInt);
+const avgMaxToNeuron = array(CaP, CaD, SpkMax, Act, GeInt, GiInt);
///////////// import: "rand.go"
alias RandFunIndex = u32;
diff --git a/axon/shaders/WtFromDWtSyn.wgsl b/axon/shaders/WtFromDWtSyn.wgsl
index ebbd3cb61..0d4fdb1d8 100644
--- a/axon/shaders/WtFromDWtSyn.wgsl
+++ b/axon/shaders/WtFromDWtSyn.wgsl
@@ -411,7 +411,7 @@ const ViewTimesN: ViewTimes = 7;
const DAModTypesN: DAModTypes = 4;
const ValenceTypesN: ValenceTypes = 3;
const NeuronFlagsN: NeuronFlags = 9;
-const NeuronVarsN: NeuronVars = 90;
+const NeuronVarsN: NeuronVars = 89;
const NeuronAvgVarsN: NeuronAvgVars = 7;
const NeuronIndexVarsN: NeuronIndexVars = 3;
const PathTypesN: PathTypes = 12;
@@ -760,7 +760,7 @@ fn PathParams_WtFromDWtSynNoLimits(pt: ptr, ctx: ptr 0, decays over time so intrinsic circuit dynamics have to take over. For single-step copy-based cases, set to 0, while longer-time-scale dynamics should use 50 (80 for 280 cycles)"}, {Name: "OFCposPT", Doc: "OFCposPT is set for the OFCposPT PTMaintLayer, which sets the\nGvOFCposPTMaint global variable."}, {Name: "DecayDt", Doc: "1 / tau"}}})
-var _ = types.AddType(&types.Type{Name: "github.com/emer/axon/v2/axon.PulvParams", IDName: "pulv-params", Doc: "PulvParams provides parameters for how the plus-phase (outcome)\nstate of Pulvinar thalamic relay cell neurons is computed from\nthe corresponding driver neuron Burst activation (or CaSpkP if not Super)", Fields: []types.Field{{Name: "DriveScale", Doc: "multiplier on driver input strength, multiplies CaSpkP from driver layer to produce Ge excitatory input to Pulv unit."}, {Name: "FullDriveAct", Doc: "Level of Max driver layer CaSpkP at which the drivers fully drive the burst phase activation. If there is weaker driver input, then (Max/FullDriveAct) proportion of the non-driver inputs remain and this critically prevents the network from learning to turn activation off, which is difficult and severely degrades learning."}, {Name: "DriveLayIndex", Doc: "index of layer that generates the driving activity into this one -- set via SetBuildConfig(DriveLayName) setting"}, {Name: "pad"}}})
+var _ = types.AddType(&types.Type{Name: "github.com/emer/axon/v2/axon.PulvParams", IDName: "pulv-params", Doc: "PulvParams provides parameters for how the plus-phase (outcome)\nstate of Pulvinar thalamic relay cell neurons is computed from\nthe corresponding driver neuron Burst activation (or CaP if not Super)", Fields: []types.Field{{Name: "DriveScale", Doc: "multiplier on driver input strength, multiplies CaP from driver layer to produce Ge excitatory input to Pulv unit."}, {Name: "FullDriveAct", Doc: "Level of Max driver layer CaP at which the drivers fully drive the burst phase activation. If there is weaker driver input, then (Max/FullDriveAct) proportion of the non-driver inputs remain and this critically prevents the network from learning to turn activation off, which is difficult and severely degrades learning."}, {Name: "DriveLayIndex", Doc: "index of layer that generates the driving activity into this one -- set via SetBuildConfig(DriveLayName) setting"}, {Name: "pad"}}})
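The PulvParams doc above describes the plus-phase drive in words: the driver layer's Burst (or CaP for non-Super drivers) is multiplied by DriveScale to produce the Ge input to the Pulvinar unit, and as the driver layer's max CaP approaches FullDriveAct, proportionally less of the regular non-driver input is allowed through. A hedged Go sketch of that relationship follows; the function and argument names are illustrative, not the axon API.

// pulvinarDrive sketches the PulvParams relationship described above:
// the Ge contribution from the driver, and the proportion of non-driver
// input that remains when the driver is weak. Illustrative names only.
func pulvinarDrive(driverBurst, driverMaxCaP, driveScale, fullDriveAct float32) (driveGe, nonDrivePct float32) {
	driveGe = driveScale * driverBurst
	nonDrivePct = 1 - min(1, driverMaxCaP/fullDriveAct) // weaker drivers leave more non-driver input
	return
}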
var _ = types.AddType(&types.Type{Name: "github.com/emer/axon/v2/axon.GlobalScalarVars", IDName: "global-scalar-vars", Doc: "GlobalScalarVars are network-wide scalar variables, such as neuromodulators,\nreward, etc including the state for the Rubicon phasic dopamine model.\nThese are stored in the Network.GlobalScalars tensor and corresponding global variable."})
@@ -62,19 +62,19 @@ var _ = types.AddType(&types.Type{Name: "github.com/emer/axon/v2/axon.LayerIndex
var _ = types.AddType(&types.Type{Name: "github.com/emer/axon/v2/axon.LayerInhibIndexes", IDName: "layer-inhib-indexes", Doc: "LayerInhibIndexes contains indexes of layers for between-layer inhibition.", Fields: []types.Field{{Name: "Index1", Doc: "idx of Layer to get layer-level inhibition from -- set during Build from BuildConfig LayInhib1Name if present -- -1 if not used"}, {Name: "Index2", Doc: "idx of Layer to get layer-level inhibition from -- set during Build from BuildConfig LayInhib2Name if present -- -1 if not used"}, {Name: "Index3", Doc: "idx of Layer to get layer-level inhibition from -- set during Build from BuildConfig LayInhib3Name if present -- -1 if not used"}, {Name: "Index4", Doc: "idx of Layer to geta layer-level inhibition from -- set during Build from BuildConfig LayInhib4Name if present -- -1 if not used"}}})
-var _ = types.AddType(&types.Type{Name: "github.com/emer/axon/v2/axon.LayerParams", IDName: "layer-params", Doc: "LayerParams contains all of the layer parameters.\nThese values must remain constant over the course of computation.\nOn the GPU, they are loaded into a uniform.", Fields: []types.Field{{Name: "Type", Doc: "Type is the functional type of layer, which determines the code path\nfor specialized layer types, and is synchronized with [Layer.Type]."}, {Name: "Index", Doc: "Index of this layer in [Layers] list."}, {Name: "MaxData", Doc: "MaxData is the maximum number of data parallel elements."}, {Name: "PoolSt", Doc: "PoolSt is the start of pools for this layer; first one is always the layer-wide pool."}, {Name: "Acts", Doc: "Activation parameters and methods for computing activations"}, {Name: "Inhib", Doc: "Inhibition parameters and methods for computing layer-level inhibition"}, {Name: "LayInhib", Doc: "LayInhib has indexes of layers that contribute between-layer inhibition\n to this layer. Set these indexes via BuildConfig LayInhibXName (X = 1, 2...)."}, {Name: "Learn", Doc: "Learn has learning parameters and methods that operate at the neuron level."}, {Name: "Bursts", Doc: "Bursts has [BurstParams] that determine how the 5IB Burst activation\nis computed from CaSpkP integrated spiking values in Super layers."}, {Name: "CT", Doc: "CT has params for the CT corticothalamic layer and PTPred layer that\ngenerates predictions over the Pulvinar using context. Uses the CtxtGe\nexcitatory input plus stronger NMDA channels to maintain context trace."}, {Name: "Pulv", Doc: "Pulv has parameters for how the plus-phase (outcome) state of Pulvinar\nthalamic relay cell neurons is computed from the corresponding driver\nneuron Burst activation (or CaSpkP if not Super)."}, {Name: "Matrix", Doc: "Matrix has parameters for BG Striatum Matrix MSN layers, which are\nthe main Go / NoGo gating units in BG. GateThr also used in BGThal."}, {Name: "GP", Doc: "GP has params for GP (globus pallidus) of the BG layers."}, {Name: "LDT", Doc: "LDT has parameters for laterodorsal tegmentum ACh salience neuromodulatory\nsignal, driven by superior colliculus stimulus novelty, US input / absence,\nand OFC / ACC inhibition."}, {Name: "VTA", Doc: "VTA has parameters for ventral tegmental area dopamine (DA) based on\nLHb PVDA (primary value -- at US time, computed at start of each trial\nand stored in LHbPVDA global value) and Amygdala (CeM) CS / learned\nvalue (LV) activations, which update every cycle."}, {Name: "RWPred", Doc: "RWPred has parameters for reward prediction using a simple Rescorla-Wagner\nlearning rule (i.e., PV learning in the Rubicon framework)."}, {Name: "RWDa", Doc: "RWDa has parameters for reward prediction dopamine using a simple\nRescorla-Wagner learning rule (i.e., PV learning in the Rubicon framework)."}, {Name: "TDInteg", Doc: "TDInteg has parameters for temporal differences (TD) reward integration layer."}, {Name: "TDDa", Doc: "TDDa has parameters for dopamine (DA) signal as the temporal difference\n(TD) between the TDIntegLayer activations in the minus and plus phase."}, {Name: "Indexes", Doc: "Indexes has recv and send pathway array access info."}}})
+var _ = types.AddType(&types.Type{Name: "github.com/emer/axon/v2/axon.LayerParams", IDName: "layer-params", Doc: "LayerParams contains all of the layer parameters.\nThese values must remain constant over the course of computation.\nOn the GPU, they are loaded into a uniform.", Fields: []types.Field{{Name: "Type", Doc: "Type is the functional type of layer, which determines the code path\nfor specialized layer types, and is synchronized with [Layer.Type]."}, {Name: "Index", Doc: "Index of this layer in [Layers] list."}, {Name: "MaxData", Doc: "MaxData is the maximum number of data parallel elements."}, {Name: "PoolSt", Doc: "PoolSt is the start of pools for this layer; first one is always the layer-wide pool."}, {Name: "Acts", Doc: "Activation parameters and methods for computing activations"}, {Name: "Inhib", Doc: "Inhibition parameters and methods for computing layer-level inhibition"}, {Name: "LayInhib", Doc: "LayInhib has indexes of layers that contribute between-layer inhibition\n to this layer. Set these indexes via BuildConfig LayInhibXName (X = 1, 2...)."}, {Name: "Learn", Doc: "Learn has learning parameters and methods that operate at the neuron level."}, {Name: "Bursts", Doc: "Bursts has [BurstParams] that determine how the 5IB Burst activation\nis computed from CaP integrated spiking values in Super layers."}, {Name: "CT", Doc: "CT has params for the CT corticothalamic layer and PTPred layer that\ngenerates predictions over the Pulvinar using context. Uses the CtxtGe\nexcitatory input plus stronger NMDA channels to maintain context trace."}, {Name: "Pulv", Doc: "Pulv has parameters for how the plus-phase (outcome) state of Pulvinar\nthalamic relay cell neurons is computed from the corresponding driver\nneuron Burst activation (or CaP if not Super)."}, {Name: "Matrix", Doc: "Matrix has parameters for BG Striatum Matrix MSN layers, which are\nthe main Go / NoGo gating units in BG. GateThr also used in BGThal."}, {Name: "GP", Doc: "GP has params for GP (globus pallidus) of the BG layers."}, {Name: "LDT", Doc: "LDT has parameters for laterodorsal tegmentum ACh salience neuromodulatory\nsignal, driven by superior colliculus stimulus novelty, US input / absence,\nand OFC / ACC inhibition."}, {Name: "VTA", Doc: "VTA has parameters for ventral tegmental area dopamine (DA) based on\nLHb PVDA (primary value -- at US time, computed at start of each trial\nand stored in LHbPVDA global value) and Amygdala (CeM) CS / learned\nvalue (LV) activations, which update every cycle."}, {Name: "RWPred", Doc: "RWPred has parameters for reward prediction using a simple Rescorla-Wagner\nlearning rule (i.e., PV learning in the Rubicon framework)."}, {Name: "RWDa", Doc: "RWDa has parameters for reward prediction dopamine using a simple\nRescorla-Wagner learning rule (i.e., PV learning in the Rubicon framework)."}, {Name: "TDInteg", Doc: "TDInteg has parameters for temporal differences (TD) reward integration layer."}, {Name: "TDDa", Doc: "TDDa has parameters for dopamine (DA) signal as the temporal difference\n(TD) between the TDIntegLayer activations in the minus and plus phase."}, {Name: "Indexes", Doc: "Indexes has recv and send pathway array access info."}}})
var _ = types.AddType(&types.Type{Name: "github.com/emer/axon/v2/axon.LayerTypes", IDName: "layer-types", Doc: "LayerTypes enumerates all the different types of layers,\nfor the different algorithm types supported.\nClass parameter styles automatically key off of these types."})
var _ = types.AddType(&types.Type{Name: "github.com/emer/axon/v2/axon.LayerVars", IDName: "layer-vars", Doc: "LayerVars are layer-level state values."})
-var _ = types.AddType(&types.Type{Name: "github.com/emer/axon/v2/axon.CaLrnParams", IDName: "ca-lrn-params", Doc: "CaLrnParams parameterizes the neuron-level calcium signals driving learning:\nCaLrn = NMDA + VGCC Ca sources, where VGCC can be simulated from spiking or\nuse the more complex and dynamic VGCC channel directly.\nCaLrn is then integrated in a cascading manner at multiple time scales:\nCaM (as in calmodulin), CaP (ltP, CaMKII, plus phase), CaD (ltD, DAPK1, minus phase).", Directives: []types.Directive{{Tool: "gosl", Directive: "start"}, {Tool: "gosl", Directive: "import", Args: []string{"github.com/emer/axon/v2/kinase"}}}, Fields: []types.Field{{Name: "Norm", Doc: "denomenator used for normalizing CaLrn, so the max is roughly 1 - 1.5 or so, which works best in terms of previous standard learning rules, and overall learning performance"}, {Name: "SpkVGCC", Doc: "use spikes to generate VGCC instead of actual VGCC current -- see SpkVGCCa for calcium contribution from each spike"}, {Name: "SpkVgccCa", Doc: "multiplier on spike for computing Ca contribution to CaLrn in SpkVGCC mode"}, {Name: "VgccTau", Doc: "time constant of decay for VgccCa calcium -- it is highly transient around spikes, so decay and diffusion factors are more important than for long-lasting NMDA factor. VgccCa is integrated separately int VgccCaInt prior to adding into NMDA Ca in CaLrn"}, {Name: "Dt", Doc: "time constants for integrating CaLrn across M, P and D cascading levels"}, {Name: "UpdateThr", Doc: "Threshold on CaSpkP CaSpkD value for updating synapse-level Ca values (SynCa) -- this is purely a performance optimization that excludes random infrequent spikes -- 0.05 works well on larger networks but not smaller, which require the .01 default."}, {Name: "VgccDt", Doc: "rate = 1 / tau"}, {Name: "NormInv", Doc: "= 1 / Norm"}, {Name: "pad"}}})
+var _ = types.AddType(&types.Type{Name: "github.com/emer/axon/v2/axon.LearnCaParams", IDName: "learn-ca-params", Doc: "LearnCaParams parameterizes the neuron-level calcium signals driving learning:\nLearnCa = NMDA + VGCC Ca sources, where VGCC can be simulated from spiking or\nuse the more complex and dynamic VGCC channel directly.\nLearnCa is then integrated in a cascading manner at multiple time scales:\nCaM (as in calmodulin), CaP (ltP, CaMKII, plus phase), CaD (ltD, DAPK1, minus phase).", Directives: []types.Directive{{Tool: "gosl", Directive: "start"}, {Tool: "gosl", Directive: "import", Args: []string{"github.com/emer/axon/v2/kinase"}}}, Fields: []types.Field{{Name: "Norm", Doc: "denominator used for normalizing LearnCa, so the max is roughly 1 - 1.5 or so, which works best in terms of previous standard learning rules, and overall learning performance"}, {Name: "SpkVGCC", Doc: "use spikes to generate VGCC instead of actual VGCC current -- see SpkVGCCa for calcium contribution from each spike"}, {Name: "SpkVgccCa", Doc: "multiplier on spike for computing Ca contribution to LearnCa in SpkVGCC mode"}, {Name: "VgccTau", Doc: "time constant of decay for VgccCa calcium -- it is highly transient around spikes, so decay and diffusion factors are more important than for long-lasting NMDA factor. VgccCa is integrated separately into VgccCaInt prior to adding into NMDA Ca in LearnCa"}, {Name: "Dt", Doc: "time constants for integrating LearnCa across M, P and D cascading levels"}, {Name: "UpdateThr", Doc: "Threshold on CaP, CaD value for updating synapse-level Ca values (SynCa) -- this is purely a performance optimization that excludes random infrequent spikes -- 0.05 works well on larger networks but not smaller, which require the .01 default."}, {Name: "VgccDt", Doc: "rate = 1 / tau"}, {Name: "NormInv", Doc: "= 1 / Norm"}, {Name: "pad"}}})
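The cascading integration named in the LearnCaParams doc (LearnCa -> CaM -> CaP -> CaD) is a chain of exponential integrators, each driven by the previous stage. A rough Go sketch of that cascade, with the 1/tau rate constants passed in; this illustrates the description above, not the axon kernel.

// learnCaCascade sketches the cascaded integration described for LearnCaParams:
// the normalized NMDA+VGCC calcium (learnCa) feeds CaM, which feeds CaP, which
// feeds CaD. dtM, dtP, dtD are 1/tau rate constants. Illustrative only.
func learnCaCascade(learnCa float32, caM, caP, caD *float32, dtM, dtP, dtD float32) {
	*caM += dtM * (learnCa - *caM) // fast, calmodulin-like stage
	*caP += dtP * (*caM - *caP)    // plus-phase / LTP proxy (CaMKII-like)
	*caD += dtD * (*caP - *caD)    // minus-phase / LTD proxy (DAPK1-like)
}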
var _ = types.AddType(&types.Type{Name: "github.com/emer/axon/v2/axon.TrgAvgActParams", IDName: "trg-avg-act-params", Doc: "TrgAvgActParams govern the target and actual long-term average activity in neurons.\nTarget value is adapted by neuron-wise error and difference in actual vs. target.\ndrives synaptic scaling at a slow timescale (Network.SlowInterval).", Fields: []types.Field{{Name: "GiBaseInit", Doc: "if this is > 0, then each neuron's GiBase is initialized as this proportion of TrgRange.Max - TrgAvg -- gives neurons differences in intrinsic inhibition / leak as a starting bias. This is independent of using the target values to scale synaptic weights."}, {Name: "RescaleOn", Doc: "whether to use target average activity mechanism to rescale synaptic weights, so that activity tracks the target values"}, {Name: "ErrLRate", Doc: "learning rate for adjustments to Trg value based on unit-level error signal. Population TrgAvg values are renormalized to fixed overall average in TrgRange. Generally, deviating from the default doesn't make much difference."}, {Name: "SynScaleRate", Doc: "rate parameter for how much to scale synaptic weights in proportion to the AvgDif between target and actual proportion activity -- this determines the effective strength of the constraint, and larger models may need more than the weaker default value."}, {Name: "SubMean", Doc: "amount of mean trg change to subtract -- 1 = full zero sum. 1 works best in general -- but in some cases it may be better to start with 0 and then increase using network SetSubMean method at a later point."}, {Name: "Permute", Doc: "permute the order of TrgAvg values within layer -- otherwise they are just assigned in order from highest to lowest for easy visualization -- generally must be true if any topographic weights are being used"}, {Name: "Pool", Doc: "use pool-level target values if pool-level inhibition and 4D pooled layers are present -- if pool sizes are relatively small, then may not be useful to distribute targets just within pool"}, {Name: "pad"}, {Name: "TrgRange", Doc: "range of target normalized average activations -- individual neurons are assigned values within this range to TrgAvg, and clamped within this range."}}})
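TrgAvgActParams above describes slow synaptic scaling: weights are rescaled in proportion to the difference between a neuron's target and actual average activity, at the strength set by SynScaleRate. The schematic below shows that proportional update only; the real mechanism is more involved (e.g., the SubMean zero-sum constraint noted in the doc), so this is purely illustrative.

// trgAvgScale sketches the proportional synaptic scaling described for
// TrgAvgActParams: a weight is nudged up when the neuron is below its target
// average activity and down when above. Schematic, not the axon implementation.
func trgAvgScale(wt, trgAvg, actAvg, synScaleRate float32) float32 {
	avgDif := trgAvg - actAvg
	return wt * (1 + synScaleRate*avgDif)
}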
-var _ = types.AddType(&types.Type{Name: "github.com/emer/axon/v2/axon.RLRateParams", IDName: "rl-rate-params", Doc: "RLRateParams are recv neuron learning rate modulation parameters.\nHas two factors: the derivative of the sigmoid based on CaSpkD\nactivity levels, and based on the phase-wise differences in activity (Diff).", Fields: []types.Field{{Name: "On", Doc: "use learning rate modulation"}, {Name: "SigmoidLinear", Doc: "use a linear sigmoid function: if act > .5: 1-act; else act\notherwise use the actual sigmoid derivative which is squared: a(1-a)"}, {Name: "SigmoidMin", Doc: "minimum learning rate multiplier for sigmoidal act (1-act) factor,\nwhich prevents lrate from going too low for extreme values.\nSet to 1 to disable Sigmoid derivative factor, which is default for Target layers."}, {Name: "Diff", Doc: "modulate learning rate as a function of plus - minus differences"}, {Name: "SpkThr", Doc: "threshold on Max(CaSpkP, CaSpkD) below which Min lrate applies.\nmust be > 0 to prevent div by zero."}, {Name: "DiffThr", Doc: "threshold on recv neuron error delta, i.e., |CaSpkP - CaSpkD| below which lrate is at Min value"}, {Name: "Min", Doc: "for Diff component, minimum learning rate value when below ActDiffThr"}, {Name: "pad"}}})
+var _ = types.AddType(&types.Type{Name: "github.com/emer/axon/v2/axon.RLRateParams", IDName: "rl-rate-params", Doc: "RLRateParams are recv neuron learning rate modulation parameters.\nHas two factors: the derivative of the sigmoid based on CaD\nactivity levels, and based on the phase-wise differences in activity (Diff).", Fields: []types.Field{{Name: "On", Doc: "use learning rate modulation"}, {Name: "SigmoidLinear", Doc: "use a linear sigmoid function: if act > .5: 1-act; else act\notherwise use the actual sigmoid derivative which is squared: a(1-a)"}, {Name: "SigmoidMin", Doc: "minimum learning rate multiplier for sigmoidal act (1-act) factor,\nwhich prevents lrate from going too low for extreme values.\nSet to 1 to disable Sigmoid derivative factor, which is default for Target layers."}, {Name: "Diff", Doc: "modulate learning rate as a function of plus - minus differences"}, {Name: "SpkThr", Doc: "threshold on Max(CaP, CaD) below which Min lrate applies.\nmust be > 0 to prevent div by zero."}, {Name: "DiffThr", Doc: "threshold on recv neuron error delta, i.e., |CaP - CaD| below which lrate is at Min value"}, {Name: "Min", Doc: "for Diff component, minimum learning rate value when below ActDiffThr"}, {Name: "pad"}}})
-var _ = types.AddType(&types.Type{Name: "github.com/emer/axon/v2/axon.LearnNeurParams", IDName: "learn-neur-params", Doc: "axon.LearnNeurParams manages learning-related parameters at the neuron-level.\nThis is mainly the running average activations that drive learning", Fields: []types.Field{{Name: "CaLearn", Doc: "parameterizes the neuron-level calcium signals driving learning: CaLrn = NMDA + VGCC Ca sources, where VGCC can be simulated from spiking or use the more complex and dynamic VGCC channel directly. CaLrn is then integrated in a cascading manner at multiple time scales: CaM (as in calmodulin), CaP (ltP, CaMKII, plus phase), CaD (ltD, DAPK1, minus phase)."}, {Name: "CaSpk", Doc: "parameterizes the neuron-level spike-driven calcium signals, starting with CaSyn that is integrated at the neuron level, and drives synapse-level, pre * post Ca integration, which provides the Tr trace that multiplies error signals, and drives learning directly for Target layers. CaSpk* values are integrated separately at the Neuron level and used for UpdateThr and RLRate as a proxy for the activation (spiking) based learning signal."}, {Name: "LrnNMDA", Doc: "NMDA channel parameters used for learning, vs. the ones driving activation -- allows exploration of learning parameters independent of their effects on active maintenance contributions of NMDA, and may be supported by different receptor subtypes"}, {Name: "TrgAvgAct", Doc: "synaptic scaling parameters for regulating overall average activity compared to neuron's own target level"}, {Name: "RLRate", Doc: "recv neuron learning rate modulation params -- an additional error-based modulation of learning for receiver side: RLRate = |CaSpkP - CaSpkD| / Max(CaSpkP, CaSpkD)"}, {Name: "NeuroMod", Doc: "neuromodulation effects on learning rate and activity, as a function of layer-level DA and ACh values, which are updated from global Context values, and computed from reinforcement learning algorithms"}}})
+var _ = types.AddType(&types.Type{Name: "github.com/emer/axon/v2/axon.LearnNeurParams", IDName: "learn-neur-params", Doc: "axon.LearnNeurParams manages learning-related parameters at the neuron-level.\nThis is mainly the running average activations that drive learning", Fields: []types.Field{{Name: "CaLearn", Doc: "parameterizes the neuron-level calcium signals driving learning: LearnCa = NMDA + VGCC Ca sources, where VGCC can be simulated from spiking or use the more complex and dynamic VGCC channel directly. LearnCa is then integrated in a cascading manner at multiple time scales: CaM (as in calmodulin), CaP (ltP, CaMKII, plus phase), CaD (ltD, DAPK1, minus phase)."}, {Name: "CaSpk", Doc: "parameterizes the neuron-level spike-driven calcium signals, starting with CaSyn that is integrated at the neuron level, and drives synapse-level, pre * post Ca integration, which provides the Tr trace that multiplies error signals, and drives learning directly for Target layers. CaSpk* values are integrated separately at the Neuron level and used for UpdateThr and RLRate as a proxy for the activation (spiking) based learning signal."}, {Name: "LrnNMDA", Doc: "NMDA channel parameters used for learning, vs. the ones driving activation -- allows exploration of learning parameters independent of their effects on active maintenance contributions of NMDA, and may be supported by different receptor subtypes"}, {Name: "TrgAvgAct", Doc: "synaptic scaling parameters for regulating overall average activity compared to neuron's own target level"}, {Name: "RLRate", Doc: "recv neuron learning rate modulation params -- an additional error-based modulation of learning for receiver side: RLRate = |CaP - CaD| / Max(CaP, CaD)"}, {Name: "NeuroMod", Doc: "neuromodulation effects on learning rate and activity, as a function of layer-level DA and ACh values, which are updated from global Context values, and computed from reinforcement learning algorithms"}}})
var _ = types.AddType(&types.Type{Name: "github.com/emer/axon/v2/axon.SWtInitParams", IDName: "s-wt-init-params", Doc: "SWtInitParams for initial SWt values", Fields: []types.Field{{Name: "SPct", Doc: "how much of the initial random weights are captured in the SWt values -- rest goes into the LWt values. 1 gives the strongest initial biasing effect, for larger models that need more structural support. 0.5 should work for most models where stronger constraints are not needed."}, {Name: "Mean", Doc: "target mean weight values across receiving neuron's pathway -- the mean SWt values are constrained to remain at this value. some pathways may benefit from lower mean of .4"}, {Name: "Var", Doc: "initial variance in weight values, prior to constraints."}, {Name: "Sym", Doc: "symmetrize the initial weight values with those in reciprocal pathway -- typically true for bidirectional excitatory connections"}}})
@@ -164,7 +164,7 @@ var _ = types.AddType(&types.Type{Name: "github.com/emer/axon/v2/axon.LDTParams"
var _ = types.AddType(&types.Type{Name: "github.com/emer/axon/v2/axon.VTAParams", IDName: "vta-params", Doc: "VTAParams are for computing overall VTA DA based on LHb PVDA\n(primary value -- at US time, computed at start of each trial\nand stored in LHbPVDA global value)\nand Amygdala (CeM) CS / learned value (LV) activations, which update\nevery cycle.", Fields: []types.Field{{Name: "CeMGain", Doc: "gain on CeM activity difference (CeMPos - CeMNeg) for generating LV CS-driven dopamine values"}, {Name: "LHbGain", Doc: "gain on computed LHb DA (Burst - Dip) -- for controlling DA levels"}, {Name: "AChThr", Doc: "threshold on ACh level required to generate LV CS-driven dopamine burst"}, {Name: "pad"}}})
-var _ = types.AddType(&types.Type{Name: "github.com/emer/axon/v2/axon.BLAPathParams", IDName: "bla-path-params", Doc: "BLAPathParams has parameters for basolateral amygdala learning.\nLearning is driven by the Tr trace as function of ACh * Send Act\nrecorded prior to US, and at US, recv unit delta: CaSpkP - SpkPrv\ntimes normalized GeIntNorm for recv unit credit assignment.\nThe Learn.Trace.Tau time constant determines trace updating over trials\nwhen ACh is above threshold -- this determines strength of second-order\nconditioning -- default of 1 means none, but can be increased as needed.", Directives: []types.Directive{{Tool: "gosl", Directive: "start", Args: []string{"rubicon_paths"}}}, Fields: []types.Field{{Name: "NegDeltaLRate", Doc: "use 0.01 for acquisition (don't unlearn) and 1 for extinction -- negative delta learning rate multiplier"}, {Name: "AChThr", Doc: "threshold on this layer's ACh level for trace learning updates"}, {Name: "USTrace", Doc: "proportion of US time stimulus activity to use for the trace component of"}, {Name: "pad"}}})
+var _ = types.AddType(&types.Type{Name: "github.com/emer/axon/v2/axon.BLAPathParams", IDName: "bla-path-params", Doc: "BLAPathParams has parameters for basolateral amygdala learning.\nLearning is driven by the Tr trace as function of ACh * Send Act\nrecorded prior to US, and at US, recv unit delta: CaP - SpkPrv\ntimes normalized GeIntNorm for recv unit credit assignment.\nThe Learn.Trace.Tau time constant determines trace updating over trials\nwhen ACh is above threshold -- this determines strength of second-order\nconditioning -- default of 1 means none, but can be increased as needed.", Directives: []types.Directive{{Tool: "gosl", Directive: "start", Args: []string{"rubicon_paths"}}}, Fields: []types.Field{{Name: "NegDeltaLRate", Doc: "use 0.01 for acquisition (don't unlearn) and 1 for extinction -- negative delta learning rate multiplier"}, {Name: "AChThr", Doc: "threshold on this layer's ACh level for trace learning updates"}, {Name: "USTrace", Doc: "proportion of US time stimulus activity to use for the trace component of"}, {Name: "pad"}}})
var _ = types.AddType(&types.Type{Name: "github.com/emer/axon/v2/axon.DriveParams", IDName: "drive-params", Doc: "DriveParams manages the drive parameters for computing and updating drive state.\nMost of the params are for optional case where drives are automatically\nupdated based on US consumption (which satisfies drives) and time passing\n(which increases drives).", Fields: []types.Field{{Name: "DriveMin", Doc: "minimum effective drive value, which is an automatic baseline ensuring\nthat a positive US results in at least some minimal level of reward.\nUnlike Base values, this is not reflected in the activity of the drive\nvalues, and applies at the time of reward calculation as a minimum baseline."}, {Name: "Base", Doc: "baseline levels for each drive, which is what they naturally trend toward\nin the absence of any input. Set inactive drives to 0 baseline,\nactive ones typically elevated baseline (0-1 range)."}, {Name: "Tau", Doc: "time constants in ThetaCycle (trial) units for natural update toward\nBase values. 0 values means no natural update (can be updated externally)."}, {Name: "Satisfaction", Doc: "decrement in drive value when US is consumed, thus partially satisfying\nthe drive. Positive values are subtracted from current Drive value."}, {Name: "Dt", Doc: "1/Tau"}}})
diff --git a/chans/skca.go b/chans/skca.go
index 1c6806e04..9382a3f63 100644
--- a/chans/skca.go
+++ b/chans/skca.go
@@ -41,7 +41,7 @@ type SKCaParams struct {
// SKCaR released calcium decay time constant
CaRDecayTau float32 `default:"150,200"`
- // level of time-integrated spiking activity (CaSpkD) below which CaIn intracelluar stores are replenished -- a low threshold can be used to require minimal activity to recharge -- set to a high value (e.g., 10) for constant recharge.
+ // level of time-integrated spiking activity (CaD) below which CaIn intracellular stores are replenished -- a low threshold can be used to require minimal activity to recharge -- set to a high value (e.g., 10) for constant recharge.
CaInThr float32 `default:"0.01"`
// time constant in msec for storing CaIn when activity is below CaInThr
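
The CaInThr comment above, together with the time-constant field it documents, amounts to a simple gated recharge rule. A sketch of the assumed form follows; this is not the chans.SKCaParams code itself, and the values in main are only illustrative.

package main

import "fmt"

// caInUpdate sketches the assumed recharge rule: when time-integrated spiking
// activity (CaD) is below CaInThr, the intracellular CaIn store recovers toward
// its maximum of 1 with time constant caInTau (msec). Not the chans code itself.
func caInUpdate(caIn, caD, caInThr, caInTau float32) float32 {
	if caD < caInThr {
		caIn += (1.0 / caInTau) * (1.0 - caIn)
	}
	return caIn
}

func main() {
	caIn := float32(0.2)
	for t := 0; t < 300; t++ {
		caIn = caInUpdate(caIn, 0, 0.01, 150) // silent neuron: stores recharge
	}
	fmt.Printf("CaIn after 300 ms of silence: %.2f\n", caIn)
}
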
diff --git a/examples/bench_lvis/bench_lvis.go b/examples/bench_lvis/bench_lvis.go
index c1cba9108..9deff22bb 100644
--- a/examples/bench_lvis/bench_lvis.go
+++ b/examples/bench_lvis/bench_lvis.go
@@ -225,7 +225,6 @@ func TrainNet(ctx *axon.Context, net *axon.Network, pats, epcLog *table.Table, p
}
if qtr == 2 {
net.MinusPhase()
- ctx.NewPhase(true)
net.PlusPhaseStart()
}
}
diff --git a/examples/choose/choose.go b/examples/choose/choose.go
index 6d46d1ca8..fce6abeb7 100644
--- a/examples/choose/choose.go
+++ b/examples/choose/choose.go
@@ -602,7 +602,7 @@ func (ss *Sim) TakeAction(net *axon.Network) {
// DecodeAct decodes the VL ActM state to find closest action pattern
func (ss *Sim) DecodeAct(ev *armaze.Env, di int) armaze.Actions {
- vt := ss.Stats.SetLayerTensor(ss.Net, "VL", "CaSpkP", di) // was "Act"
+ vt := ss.Stats.SetLayerTensor(ss.Net, "VL", "CaP", di) // was "Act"
return ev.DecodeAct(vt)
}
@@ -789,7 +789,7 @@ func (ss *Sim) TrialStats(di int) {
axon.GlobalScalars[axon.GvPVnegVar, diu] = nan
}
- ss.Stats.SetFloat32("SC", ss.Net.LayerByName("SC").Pool(0, 0).AvgMax.CaSpkD.Cycle.Max)
+ ss.Stats.SetFloat32("SC", ss.Net.LayerByName("SC").Pool(0, 0).AvgMax.CaD.Cycle.Max)
var allGood float64
agN := 0
@@ -1304,7 +1304,7 @@ func (ss *Sim) UpdateEnvGUI(mode etime.Modes) {
drv := axon.GlbUSposV(ctx, diu, axon.GvDrives, i)
us := axon.GlbUSposV(ctx, diu, axon.GvUSpos, i)
ofcP := ofcPosUS.Pool(i+1, diu)
- ofc := ofcP.AvgMax.CaSpkD.Plus.Avg * ofcmul
+ ofc := ofcP.AvgMax.CaD.Plus.Avg * ofcmul
dp.SetFloat("Drive", int(i), float64(drv))
dp.SetFloat("USin", int(i), float64(us))
dp.SetFloat("OFC", int(i), float64(ofc))
@@ -1315,7 +1315,7 @@ func (ss *Sim) UpdateEnvGUI(mode etime.Modes) {
for i := uint32(0); i < nn; i++ {
us := axon.GlbUSnegV(ctx, diu, axon.GvUSneg, i)
ofcP := ofcNegUS.Pool(i+1, diu)
- ofc := ofcP.AvgMax.CaSpkD.Plus.Avg * ofcmul
+ ofc := ofcP.AvgMax.CaD.Plus.Avg * ofcmul
dn.SetFloat("USin", int(i), float64(us))
dn.SetFloat("OFC", int(i), float64(ofc))
}
diff --git a/examples/deep_fsa/config.go b/examples/deep_fsa/config.go
index 63fa9f3db..535425e9d 100644
--- a/examples/deep_fsa/config.go
+++ b/examples/deep_fsa/config.go
@@ -97,7 +97,7 @@ type RunConfig struct {
// Trials is the total number of trials per epoch.
// Should be an even multiple of NData.
- Trials int `default:"32"`
+ Trials int `default:"196"`
// Cycles is the total number of cycles per trial: at least 200.
Cycles int `default:"200"`
@@ -136,6 +136,18 @@ type LogConfig struct {
// Config is a standard Sim config -- use as a starting point.
type Config struct {
+ // Name is the short name of the sim.
+ Name string `default:"DeepFSA"`
+
+ // Title is the longer title of the sim.
+ Title string `default:"DeepAxon Finite State Automaton"`
+
+ // URL is a link to the online README or other documentation for this sim.
+ URL string `default:"https://github.com/emer/axon/blob/main/examples/deep_fsa/README.md"`
+
+ // Doc is brief documentation of the sim.
+ Doc string `default:"This demonstrates a basic deep predictive learning Axon model on the Finite State Automaton problem (e.g., the Reber grammar). The network learns the underlying grammar that generates partially ambiguous observable state tokens, strictly through errors in predicting the sequences of these tokens."`
+
// Includes has a list of additional config files to include.
// After configuration, it contains list of include files added.
Includes []string
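
These metadata fields replace strings that were previously hard-coded in each sim's main() and ConfigGUI(). As the deep_fsa.go and ra25.go hunks below show, defaults are applied to the Config first, so the CLI options (and later the GUI) can be built from them. In outline, within the sim package and reusing the Config and RunSim defined there:

func main() {
	cfg := &Config{}
	cli.SetFromDefaults(cfg) // fill Name, Title, URL, Doc from the struct tags above
	opts := cli.DefaultOptions(cfg.Name, cfg.Title)
	opts.DefaultFiles = append(opts.DefaultFiles, "config.toml")
	cli.Run(opts, cfg, RunSim)
}
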
diff --git a/examples/deep_fsa/deep_fsa.go b/examples/deep_fsa/deep_fsa.go
index c4409b744..788e9a9bc 100644
--- a/examples/deep_fsa/deep_fsa.go
+++ b/examples/deep_fsa/deep_fsa.go
@@ -27,13 +27,15 @@ import (
"github.com/emer/emergent/v2/egui"
"github.com/emer/emergent/v2/env"
"github.com/emer/emergent/v2/looper"
+ "github.com/emer/emergent/v2/netview"
"github.com/emer/emergent/v2/paths"
)
func main() {
- opts := cli.DefaultOptions("deep_fsa", "Deep FSA")
- opts.DefaultFiles = append(opts.DefaultFiles, "config.toml")
cfg := &Config{}
+ cli.SetFromDefaults(cfg)
+ opts := cli.DefaultOptions(cfg.Name, cfg.Title)
+ opts.DefaultFiles = append(opts.DefaultFiles, "config.toml")
cli.Run(opts, cfg, RunSim)
}
@@ -123,7 +125,7 @@ func RunSim(cfg *Config) error {
func (ss *Sim) Run() {
ss.Root, _ = tensorfs.NewDir("Root")
- ss.Net = axon.NewNetwork("RA25")
+ ss.Net = axon.NewNetwork(ss.Config.Name)
ss.Params.Config(LayerParams, PathParams, ss.Config.Params.Sheet, ss.Config.Params.Tag)
ss.RandSeeds.Init(100) // max 100 runs
ss.InitRandSeed(0)
@@ -532,7 +534,7 @@ func (ss *Sim) ConfigStats() {
// up to a point, it is good to use loops over stats in one function,
// to reduce repetition of boilerplate.
- statNames := []string{"CorSim", "UnitErr", "Err", "NZero", "FirstZero", "LastZero"}
+ statNames := []string{"CorSim", "UnitErr", "Err", "Output", "NZero", "FirstZero", "LastZero"}
ss.AddStat(func(mode Modes, level Levels, phase StatsPhase) {
for _, name := range statNames {
if name == "NZero" && (mode != Train || level == Trial) {
@@ -543,7 +545,8 @@ func (ss *Sim) ConfigStats() {
levelDir := modeDir.RecycleDir(level.String())
subDir := modeDir.RecycleDir((level - 1).String()) // note: will fail for Cycle
tsr := levelDir.Float64(name)
- ndata := int(ss.Net.Context().NData)
+ ctx := ss.Net.Context()
+ ndata := int(ctx.NData)
var stat float64
if phase == Start {
tsr.SetNumRows(0)
@@ -552,7 +555,7 @@ func (ss *Sim) ConfigStats() {
s.Range.SetMin(0).SetMax(1)
s.On = true
switch name {
- case "NZero":
+ case "NZero", "UnitErr", "Output":
s.On = false
case "FirstZero", "LastZero":
if level < Run {
@@ -577,19 +580,25 @@ func (ss *Sim) ConfigStats() {
switch level {
case Trial:
out := ss.Net.LayerByName("InputP")
+ trg := ss.Net.LayerByName("Targets")
for di := range ndata {
var stat float64
switch name {
case "CorSim":
stat = 1.0 - float64(axon.LayerStates.Value(int(out.Index), int(di), int(axon.LayerPhaseDiff)))
case "UnitErr":
- stat = out.PctUnitErr(ss.Net.Context())[di]
+ stat = out.PctUnitErr(ctx)[di]
case "Err":
- uniterr := curModeDir.Float64("UnitErr", ndata).Float1D(di)
+ _, minusIndexes, _ := out.LocalistErr4D(ctx)
+ minusIndex := minusIndexes[di]
+ trgExt := axon.Neurons.Value(int(trg.NeurStIndex+uint32(minusIndex)), di, int(axon.Ext))
+ curModeDir.Float64("Output", ndata).SetFloat1D(float64(minusIndex), di)
stat = 1.0
- if uniterr == 0 {
+ if trgExt > 0.5 {
stat = 0
}
+ case "Output":
+ stat = curModeDir.Float64("Output", ndata).Float1D(di)
}
curModeDir.Float64(name, ndata).SetFloat1D(stat, di)
tsr.AppendRowFloat(stat)
@@ -683,10 +692,27 @@ func (ss *Sim) StatCounters(mode, level enums.Enum) string {
//////// GUI
+func (ss *Sim) ConfigNetView(nv *netview.NetView) {
+ // nv.ViewDefaults()
+ // nv.Scene().Camera.Pose.Pos.Set(0, 1.5, 3.0) // more "head on" than default which is more "top down"
+ // nv.Scene().Camera.LookAt(math32.Vec3(0, 0, 0), math32.Vec3(0, 1, 0))
+
+ nv.ConfigLabels(ss.Config.Env.InputNames)
+
+ ly := nv.LayerByName("Targets")
+ for li, lnm := range ss.Config.Env.InputNames {
+ lbl := nv.LabelByName(lnm)
+ lbl.Pose = ly.Pose
+ lbl.Pose.Pos.Y += .2
+ lbl.Pose.Pos.Z += .02
+ lbl.Pose.Pos.X += 0.05 + float32(li)*.06
+ lbl.Pose.Scale.SetMul(math32.Vec3(0.6, 0.4, 0.5))
+ }
+}
+
// ConfigGUI configures the Cogent Core GUI interface for this simulation.
func (ss *Sim) ConfigGUI() {
- title := "Axon Random Associator"
- ss.GUI.MakeBody(ss, "ra25", title, `This demonstrates a basic Axon model. See emergent on GitHub.
-`)
+ ss.GUI.MakeBody(ss, ss.Config.Name, ss.Config.Title, ss.Config.Doc)
ss.GUI.FS = ss.Root
ss.GUI.DataRoot = "Root"
ss.GUI.CycleUpdateInterval = 10
@@ -698,18 +724,15 @@ func (ss *Sim) ConfigGUI() {
ss.TestUpdate.Config(nv, axon.Phase, ss.StatCounters)
ss.GUI.OnStop = func(mode, level enums.Enum) {
vu := ss.NetViewUpdater(mode)
- vu.UpdateWhenStopped(mode, level) // todo: carry this all the way through
+ vu.UpdateWhenStopped(mode, level)
}
-
- nv.SceneXYZ().Camera.Pose.Pos.Set(0, 1, 2.75) // more "head on" than default which is more "top down"
- nv.SceneXYZ().Camera.LookAt(math32.Vec3(0, 0, 0), math32.Vec3(0, 1, 0))
+ ss.ConfigNetView(nv)
ss.GUI.UpdateFiles()
ss.InitStats()
ss.GUI.FinalizeGUI(false)
}
-// todo: persistent run log
func (ss *Sim) MakeToolbar(p *tree.Plan) {
ss.GUI.AddLooperCtrl(p, ss.Loops)
@@ -729,7 +752,7 @@ func (ss *Sim) MakeToolbar(p *tree.Plan) {
Tooltip: "Opens your browser on the README file that contains instructions for how to run this model.",
Active: egui.ActiveAlways,
Func: func() {
- core.TheApp.OpenURL("https://github.com/emer/axon/blob/main/examples/ra25/README.md")
+ core.TheApp.OpenURL(ss.Config.URL)
},
})
}
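
The Trial-level Err and Output statistics introduced above replace the earlier UnitErr-based check: they decode the most active InputP unit in the minus phase and count an error whenever the corresponding Targets unit was not externally clamped. Condensed into one helper for reference (names follow the hunk above; treat it as a sketch rather than a drop-in function):

// errFromLocalist condenses the Trial "Err"/"Output" logic above: LocalistErr4D
// yields the most active InputP unit per data-parallel index in the minus phase;
// the trial is correct if the matching Targets unit had external input (Ext > 0.5).
func errFromLocalist(out, trg *axon.Layer, ctx *axon.Context, di int) (errVal, outIndex float64) {
	_, minusIndexes, _ := out.LocalistErr4D(ctx)
	minusIndex := minusIndexes[di]
	trgExt := axon.Neurons.Value(int(trg.NeurStIndex+uint32(minusIndex)), di, int(axon.Ext))
	errVal = 1
	if trgExt > 0.5 {
		errVal = 0
	}
	return errVal, float64(minusIndex)
}
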
diff --git a/examples/deep_fsa/params.go b/examples/deep_fsa/params.go
index 03b753c5c..f5fb7474c 100644
--- a/examples/deep_fsa/params.go
+++ b/examples/deep_fsa/params.go
@@ -82,7 +82,7 @@ var LayerParams = axon.LayerSheets{
ly.Acts.Decay.Act = 0.0
ly.Acts.Decay.Glong = 0.0 // clear long
ly.Acts.Decay.AHP = 0.0 // clear ahp
- ly.Learn.RLRate.SigmoidMin = 1.0 // 1 > 0.05 with CaSpkD as var
+ ly.Learn.RLRate.SigmoidMin = 1.0 // 1 > 0.05 with CaD as var
}},
},
}
diff --git a/examples/dls/dls.go b/examples/dls/dls.go
index 0510af8e3..05260e83c 100644
--- a/examples/dls/dls.go
+++ b/examples/dls/dls.go
@@ -525,7 +525,7 @@ func (ss *Sim) TakeAction(net *axon.Network) {
// DecodeAct decodes the VL ActM state to find closest action pattern
func (ss *Sim) DecodeAct(ev *armaze.Env, di int) armaze.Actions {
- vt := ss.Stats.SetLayerTensor(ss.Net, "VL", "CaSpkD", di) // was "Act"
+ vt := ss.Stats.SetLayerTensor(ss.Net, "VL", "CaD", di) // was "Act"
return armaze.Actions(ss.SoftMaxChoose(ev, vt))
}
@@ -951,7 +951,7 @@ func (ss *Sim) UpdateEnvGUI(mode etime.Modes) {
drv := axon.GlbUSposV(ctx, diu, axon.GvDrives, i)
us := axon.GlbUSposV(ctx, diu, axon.GvUSpos, i)
ofcP := ofcPosUS.Pool(i+1, diu)
- ofc := ofcP.AvgMax.CaSpkD.Plus.Avg * ofcmul
+ ofc := ofcP.AvgMax.CaD.Plus.Avg * ofcmul
dp.SetFloat("Drive", int(i), float64(drv))
dp.SetFloat("USin", int(i), float64(us))
dp.SetFloat("OFC", int(i), float64(ofc))
@@ -961,7 +961,7 @@ func (ss *Sim) UpdateEnvGUI(mode etime.Modes) {
for i := uint32(0); i < nn; i++ {
us := axon.GlbUSneg(ctx, diu, axon.GvUSneg, i)
ofcP := ofcNegUS.Pool(i+1, diu)
- ofc := ofcP.AvgMax.CaSpkD.Plus.Avg * ofcmul
+ ofc := ofcP.AvgMax.CaD.Plus.Avg * ofcmul
dn.SetFloat("USin", int(i), float64(us))
dn.SetFloat("OFC", int(i), float64(ofc))
}
diff --git a/examples/pcore_ds/pcore_ds.go b/examples/pcore_ds/pcore_ds.go
index 44858dd7f..358c1542c 100644
--- a/examples/pcore_ds/pcore_ds.go
+++ b/examples/pcore_ds/pcore_ds.go
@@ -502,7 +502,7 @@ func (ss *Sim) TakeAction(net *axon.Network) {
// DecodeAct decodes the VL ActM state to find closest action pattern
func (ss *Sim) DecodeAct(ev *MotorSeqEnv, di int) int {
- vt := ss.Stats.SetLayerTensor(ss.Net, "MotorBS", "CaSpkPM", di)
+ vt := ss.Stats.SetLayerTensor(ss.Net, "MotorBS", "CaPM", di)
return ss.SoftMaxChoose4D(vt)
// return ss.HardChoose4D(vt)
}
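
DecodeAct above hands the decoded "CaPM" layer tensor to SoftMaxChoose4D. For reference, the general shape of that selection step, as a generic sketch over a flat activity slice; the gain value and RNG handling are assumptions, not the sim's actual implementation.

package main

import (
	"fmt"
	"math"
	"math/rand"
)

// softMaxChoose samples an action index from unit activities via a Boltzmann
// (softmax) distribution -- a generic sketch of the selection step performed
// on the decoded tensor above.
func softMaxChoose(acts []float32, gain float64, rnd *rand.Rand) int {
	probs := make([]float64, len(acts))
	var sum float64
	for i, a := range acts {
		p := math.Exp(gain * float64(a))
		probs[i] = p
		sum += p
	}
	r := rnd.Float64() * sum
	for i, p := range probs {
		r -= p
		if r <= 0 {
			return i
		}
	}
	return len(acts) - 1
}

func main() {
	rnd := rand.New(rand.NewSource(1))
	acts := []float32{0.1, 0.8, 0.2, 0.05} // e.g., pooled CaPM activities per action
	fmt.Println("chosen action:", softMaxChoose(acts, 3, rnd))
}
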
diff --git a/examples/pvlv/pvlv.go b/examples/pvlv/pvlv.go
index 30f3b0923..d9a369874 100644
--- a/examples/pvlv/pvlv.go
+++ b/examples/pvlv/pvlv.go
@@ -515,7 +515,7 @@ func (ss *Sim) TrialStats() {
ss.Stats.SetFloat32("Gated", axon.GlobalScalars[axon.GvVSMatrixJustGated), diu]
ss.Stats.SetFloat32("Time", axon.GlobalScalars[axon.GvTime), diu]
ss.Stats.SetFloat32("GiveUp", axon.GlobalScalars[axon.GvGiveUp), diu]
- ss.Stats.SetFloat32("SC", ss.Net.LayerByName("SC").Pool(0, 0).AvgMax.CaSpkD.Cycle.Max)
+ ss.Stats.SetFloat32("SC", ss.Net.LayerByName("SC").Pool(0, 0).AvgMax.CaD.Cycle.Max)
}
//////////////////////////////////////////////////////////////////////////////
diff --git a/examples/ra25/ra25.go b/examples/ra25/ra25.go
index bc9725a95..e7ba31c2d 100644
--- a/examples/ra25/ra25.go
+++ b/examples/ra25/ra25.go
@@ -38,9 +38,10 @@ import (
)
func main() {
- opts := cli.DefaultOptions("ra25", "Random associator.")
- opts.DefaultFiles = append(opts.DefaultFiles, "config.toml")
cfg := &Config{}
+ cli.SetFromDefaults(cfg)
+ opts := cli.DefaultOptions(cfg.Name, cfg.Title)
+ opts.DefaultFiles = append(opts.DefaultFiles, "config.toml")
cli.Run(opts, cfg, RunSim)
}
@@ -165,6 +166,18 @@ type LogConfig struct {
// Config is a standard Sim config -- use as a starting point.
type Config struct {
+ // Name is the short name of the sim.
+ Name string `default:"RA25"`
+
+ // Title is the longer title of the sim.
+ Title string `default:"Axon random associator"`
+
+ // URL is a link to the online README or other documentation for this sim.
+ URL string `default:"https://github.com/emer/axon/blob/main/examples/ra25/README.md"`
+
+ // Doc is brief documentation of the sim.
+ Doc string `width:"60" default:"This demonstrates a basic Axon model and provides a template for creating new models. It has a random-associator four-layer axon network that uses the standard supervised learning paradigm to learn mappings between 25 random input / output patterns defined over 5x5 input / output layers (i.e., 25 units)."`
+
// Includes has a list of additional config files to include.
// After configuration, it contains list of include files added.
Includes []string
@@ -248,7 +261,7 @@ func RunSim(cfg *Config) error {
func (ss *Sim) Run() {
ss.Root, _ = tensorfs.NewDir("Root")
- ss.Net = axon.NewNetwork("RA25")
+ ss.Net = axon.NewNetwork(ss.Config.Name)
ss.Params.Config(LayerParams, PathParams, ss.Config.Params.Sheet, ss.Config.Params.Tag)
ss.RandSeeds.Init(100) // max 100 runs
ss.InitRandSeed(0)
@@ -808,8 +821,7 @@ func (ss *Sim) StatCounters(mode, level enums.Enum) string {
// ConfigGUI configures the Cogent Core GUI interface for this simulation.
func (ss *Sim) ConfigGUI() {
- title := "Axon Random Associator"
- ss.GUI.MakeBody(ss, "ra25", title, `This demonstrates a basic Axon model. See emergent on GitHub.`)
+ ss.GUI.MakeBody(ss, ss.Config.Name, ss.Config.Title, ss.Config.Doc)
ss.GUI.FS = ss.Root
ss.GUI.DataRoot = "Root"
ss.GUI.CycleUpdateInterval = 10
@@ -821,7 +833,7 @@ func (ss *Sim) ConfigGUI() {
ss.TestUpdate.Config(nv, axon.Phase, ss.StatCounters)
ss.GUI.OnStop = func(mode, level enums.Enum) {
vu := ss.NetViewUpdater(mode)
- vu.UpdateWhenStopped(mode, level) // todo: carry this all the way through
+ vu.UpdateWhenStopped(mode, level)
}
nv.SceneXYZ().Camera.Pose.Pos.Set(0, 1, 2.75) // more "head on" than default which is more "top down"
@@ -832,7 +844,6 @@ func (ss *Sim) ConfigGUI() {
ss.GUI.FinalizeGUI(false)
}
-// todo: persistent run log
func (ss *Sim) MakeToolbar(p *tree.Plan) {
ss.GUI.AddLooperCtrl(p, ss.Loops)
@@ -852,7 +863,7 @@ func (ss *Sim) MakeToolbar(p *tree.Plan) {
Tooltip: "Opens your browser on the README file that contains instructions for how to run this model.",
Active: egui.ActiveAlways,
Func: func() {
- core.TheApp.OpenURL("https://github.com/emer/axon/blob/main/examples/ra25/README.md")
+ core.TheApp.OpenURL(ss.Config.URL)
},
})
}
diff --git a/kinase/linear/linear.go b/kinase/linear/linear.go
index b0decb034..7911179a6 100644
--- a/kinase/linear/linear.go
+++ b/kinase/linear/linear.go
@@ -141,7 +141,7 @@ type Neuron struct {
CaSyn float32
// neuron-level spike-driven Ca integration
- CaSpkM, CaSpkP, CaSpkD float32
+ CaSpkM, CaP, CaD float32
TotalSpikes float32
@@ -154,8 +154,8 @@ func (kn *Neuron) Init() {
kn.SpikeP = 1
kn.CaSyn = 0
kn.CaSpkM = 0
- kn.CaSpkP = 0
- kn.CaSpkD = 0
+ kn.CaP = 0
+ kn.CaD = 0
kn.StartTrial()
}
@@ -180,7 +180,7 @@ func (ls *Linear) Cycle(nr *Neuron, expInt float32, cyc int) {
nr.SpikeBins[bin] += 1
}
}
- ls.Neuron.CaFromSpike(nr.Spike, &nr.CaSyn, &nr.CaSpkM, &nr.CaSpkP, &nr.CaSpkD)
+ ls.Neuron.CaFromSpike(nr.Spike, &nr.CaSyn, &nr.CaSpkM, &nr.CaP, &nr.CaD)
}
// Synapse has Synapse state
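
For analysis scripts or saved configurations that still reference the variable names removed in this patch, a small lookup table covering the renames visible in these hunks can ease migration. This is a hypothetical helper, not part of the axon package.

package main

import "fmt"

// oldToNew maps neuron-variable names removed in this patch to their
// replacements, as they appear in the hunks above.
var oldToNew = map[string]string{
	"CaSpkP":  "CaP",
	"CaSpkD":  "CaD",
	"CaSpkPM": "CaPM",
	"CaLrn":   "LearnCa",
}

// migrateVarName returns the current name for a possibly-renamed variable.
func migrateVarName(name string) string {
	if nw, ok := oldToNew[name]; ok {
		return nw
	}
	return name
}

func main() {
	fmt.Println(migrateVarName("CaSpkP")) // CaP
}
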