
Commit

channel plots all updated to just generate the plot data; added example/equations to actually view; now compatible with web docs.
rcoreilly committed Dec 20, 2024
1 parent bd940b9 commit 957c15b
Showing 186 changed files with 21,198 additions and 21,775 deletions.
7 changes: 6 additions & 1 deletion axon/shaders/CycleNeuron.wgsl
@@ -987,9 +987,14 @@ struct KirParams {
fn KirParams_Minf(kp: ptr<function,KirParams>, vbio: f32) -> f32 {
	return 1.0 / (1.0 + FastExp((vbio-((*kp).MinfOff))/(*kp).MinfTau));
}
+fn KirParams_MTau(kp: ptr<function,KirParams>, vbio: f32) -> f32 {
+	var alpha = 0.1 * FastExp((vbio-((*kp).RiseOff))/(-(*kp).RiseTau));
+	var beta = 0.27 / (1.0 + FastExp((vbio-((*kp).DecayOff))/(-(*kp).DecayTau)));
+	var sum = alpha + beta;
+	return 1.0 / sum;
+}
fn KirParams_DM(kp: ptr<function,KirParams>, vbio: f32, m: f32) -> f32 {
	var minf = KirParams_Minf(kp, vbio);
-	var mtau = f32(4.0);
+	var mtau = KirParams_MTau(kp, vbio);
	var dm = (minf - m) / (mtau * 3); // 3 = Q10
	return dm;
}
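In equation form, this change replaces the previous fixed gating time constant of 4 ms with the voltage dependent form computed by KirParams_MTau above. Transcribing the code (with V = vbio, the biological membrane potential in mV):

$$m_\infty(V) = \frac{1}{1 + e^{(V - \mathrm{MinfOff})/\mathrm{MinfTau}}}$$

$$\alpha(V) = 0.1\, e^{-(V - \mathrm{RiseOff})/\mathrm{RiseTau}}, \qquad \beta(V) = \frac{0.27}{1 + e^{-(V - \mathrm{DecayOff})/\mathrm{DecayTau}}}$$

$$m_\tau(V) = \frac{1}{\alpha(V) + \beta(V)}, \qquad \Delta m = \frac{m_\infty(V) - m}{3\, m_\tau(V)}$$

where the factor of 3 is the Q10 temperature adjustment noted in the code comment.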
64 changes: 32 additions & 32 deletions axon/simstats.go
@@ -24,12 +24,12 @@ import (

// StatsNode returns tensorfs Dir Node for given mode, level.
func StatsNode(statsDir *tensorfs.Node, mode, level enums.Enum) *tensorfs.Node {
-modeDir := statsDir.RecycleDir(mode.String())
-return modeDir.RecycleDir(level.String())
+modeDir := statsDir.Dir(mode.String())
+return modeDir.Dir(level.String())
}

func StatsLayerValues(net *Network, curDir *tensorfs.Node, mode enums.Enum, di int, layName, varName string) *tensor.Float32 {
-curModeDir := curDir.RecycleDir(mode.String())
+curModeDir := curDir.Dir(mode.String())
ly := net.LayerByName(layName)
tsr := curModeDir.Float32(layName+"_"+varName, ly.Shape.Sizes...)
ly.UnitValuesTensor(tsr, varName, di)
@@ -147,9 +147,9 @@ func StatLoopCounters(statsDir, currentDir *tensorfs.Node, ls *looper.Stacks, ne
}
name := prefix + lev.String() // name of stat = level
ndata := int(net.Context().NData)
-modeDir := statsDir.RecycleDir(mode.String())
-curModeDir := currentDir.RecycleDir(mode.String())
-levelDir := modeDir.RecycleDir(level.String())
+modeDir := statsDir.Dir(mode.String())
+curModeDir := currentDir.Dir(mode.String())
+levelDir := modeDir.Dir(level.String())
tsr := levelDir.Int(name)
if start {
tsr.SetNumRows(0)
@@ -198,8 +198,8 @@ func StatLoopCounters(statsDir, currentDir *tensorfs.Node, ls *looper.Stacks, ne
func StatRunName(statsDir, currentDir *tensorfs.Node, ls *looper.Stacks, net *Network, trialLevel enums.Enum, exclude ...enums.Enum) func(mode, level enums.Enum, start bool) {
return func(mode, level enums.Enum, start bool) {
name := "RunName"
-modeDir := statsDir.RecycleDir(mode.String())
-levelDir := modeDir.RecycleDir(level.String())
+modeDir := statsDir.Dir(mode.String())
+levelDir := modeDir.Dir(level.String())
tsr := levelDir.StringValue(name)
ndata := int(net.Context().NData)
runNm := currentDir.StringValue(name, 1).String1D(0)
@@ -227,9 +227,9 @@ func StatTrialName(statsDir, currentDir *tensorfs.Node, ls *looper.Stacks, net *
return
}
name := "TrialName"
-modeDir := statsDir.RecycleDir(mode.String())
-curModeDir := currentDir.RecycleDir(mode.String())
-levelDir := modeDir.RecycleDir(level.String())
+modeDir := statsDir.Dir(mode.String())
+curModeDir := currentDir.Dir(mode.String())
+levelDir := modeDir.Dir(level.String())
tsr := levelDir.StringValue(name)
ndata := int(net.Context().NData)
if start {
@@ -257,8 +257,8 @@ func StatPerTrialMSec(statsDir *tensorfs.Node, trainMode enums.Enum, trialLevel
}
levels[levi] = level
name := "PerTrialMSec"
-modeDir := statsDir.RecycleDir(mode.String())
-levelDir := modeDir.RecycleDir(level.String())
+modeDir := statsDir.Dir(mode.String())
+levelDir := modeDir.Dir(level.String())
tsr := levelDir.Float64(name)
if start {
tsr.SetNumRows(0)
@@ -270,14 +270,14 @@
switch levi {
case 1:
epcTimer.Stop()
-subd := modeDir.RecycleDir(levels[0].String())
+subd := modeDir.Dir(levels[0].String())
trls := errors.Ignore1(subd.Values())[0] // must be a stat
epcTimer.N = trls.Len()
pertrl := float64(epcTimer.Avg()) / float64(time.Millisecond)
tsr.AppendRowFloat(pertrl)
epcTimer.ResetStart()
default:
-subd := modeDir.RecycleDir(levels[levi-1].String())
+subd := modeDir.Dir(levels[levi-1].String())
stat := stats.StatMean.Call(subd.Value(name))
tsr.AppendRow(stat)
}
@@ -298,8 +298,8 @@ func StatLayerActGe(statsDir *tensorfs.Node, net *Network, trainMode, trialLevel
return
}
levels[levi] = level
-modeDir := statsDir.RecycleDir(mode.String())
-levelDir := modeDir.RecycleDir(level.String())
+modeDir := statsDir.Dir(mode.String())
+levelDir := modeDir.Dir(level.String())
ndata := net.Context().NData
for _, lnm := range layerNames {
for si, statName := range statNames {
@@ -329,7 +329,7 @@ func StatLayerActGe(statsDir *tensorfs.Node, net *Network, trainMode, trialLevel
tsr.AppendRowFloat(float64(stat))
}
default:
-subd := modeDir.RecycleDir(levels[levi-1].String())
+subd := modeDir.Dir(levels[levi-1].String())
stat := stats.StatMean.Call(subd.Value(name))
tsr.AppendRow(stat)
}
@@ -347,8 +347,8 @@ func StatLayerState(statsDir *tensorfs.Node, net *Network, smode, slevel enums.E
if mode.Int64() != smode.Int64() || level.Int64() != slevel.Int64() {
return
}
-modeDir := statsDir.RecycleDir(mode.String())
-levelDir := modeDir.RecycleDir(level.String())
+modeDir := statsDir.Dir(mode.String())
+levelDir := modeDir.Dir(level.String())
ndata := int(net.Context().NData)
if !isTrialLevel {
ndata = 1
@@ -393,10 +393,10 @@ func StatPCA(statsDir, currentDir *tensorfs.Node, net *Network, interval int, tr
return
}
levels[levi] = level
-modeDir := statsDir.RecycleDir(mode.String())
-curModeDir := currentDir.RecycleDir(mode.String())
-pcaDir := statsDir.RecycleDir("PCA")
-levelDir := modeDir.RecycleDir(level.String())
+modeDir := statsDir.Dir(mode.String())
+curModeDir := currentDir.Dir(mode.String())
+pcaDir := statsDir.Dir("PCA")
+levelDir := modeDir.Dir(level.String())
ndata := int(net.Context().NData)
for _, lnm := range layerNames {
ly := net.LayerByName(lnm)
@@ -470,7 +470,7 @@ func StatPCA(statsDir, currentDir *tensorfs.Node, net *Network, interval int, tr
}
tsr.AppendRowFloat(float64(stat))
default:
-subd := modeDir.RecycleDir(levels[levi-1].String())
+subd := modeDir.Dir(levels[levi-1].String())
stat := stats.StatMean.Call(subd.Value(name))
tsr.AppendRow(stat)
}
@@ -491,9 +491,9 @@ func StatPrevCorSim(statsDir, currentDir *tensorfs.Node, net *Network, trialLeve
return
}
levels[levi] = level
-modeDir := statsDir.RecycleDir(mode.String())
-curModeDir := currentDir.RecycleDir(mode.String())
-levelDir := modeDir.RecycleDir(level.String())
+modeDir := statsDir.Dir(mode.String())
+curModeDir := currentDir.Dir(mode.String())
+levelDir := modeDir.Dir(level.String())
ndata := int(net.Context().NData)
for _, lnm := range layerNames {
for si, statName := range statNames {
@@ -533,7 +533,7 @@ func StatPrevCorSim(statsDir, currentDir *tensorfs.Node, net *Network, trialLeve
tsr.AppendRowFloat(stat)
}
default:
-subd := modeDir.RecycleDir(levels[levi-1].String())
+subd := modeDir.Dir(levels[levi-1].String())
stat := stats.StatMean.Call(subd.Value(name))
tsr.AppendRow(stat)
}
@@ -551,9 +551,9 @@ func StatLevelAll(statsDir *tensorfs.Node, srcMode, srcLevel enums.Enum, styleFu
if srcMode.Int64() != mode.Int64() || srcLevel.Int64() != level.Int64() {
return
}
-modeDir := statsDir.RecycleDir(mode.String())
-levelDir := modeDir.RecycleDir(level.String())
-allDir := modeDir.RecycleDir(level.String() + "All")
+modeDir := statsDir.Dir(mode.String())
+levelDir := modeDir.Dir(level.String())
+allDir := modeDir.Dir(level.String() + "All")
cols := levelDir.NodesFunc(nil) // all nodes
for _, cl := range cols {
clv := cl.Tensor.(tensor.Values)
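The simstats.go changes above are all one mechanical rename: the tensorfs directory accessor RecycleDir becomes Dir, with the same get-or-create semantics. A minimal sketch of the resulting usage pattern follows; it uses only calls that appear in the diff, except that the tensorfs.NewDir root constructor and the import path are assumptions, not verified from this commit.

```go
package main

import (
	"fmt"

	"cogentcore.org/lab/tensorfs" // assumed import path for the tensorfs package
)

func main() {
	// Assumed root constructor for a tensorfs tree.
	statsDir, _ := tensorfs.NewDir("Stats")

	// Dir gets or creates a subdirectory node; this commit renames
	// the older RecycleDir to Dir throughout simstats.go.
	levelDir := statsDir.Dir("Train").Dir("Epoch")

	// Float64 gets or creates a float64 tensor value in the directory.
	tsr := levelDir.Float64("PerTrialMSec")
	tsr.SetNumRows(0)        // reset rows at the start of a run
	tsr.AppendRowFloat(12.5) // append one row per epoch
	fmt.Println(tsr.Len())   // 1
}
```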
6 changes: 4 additions & 2 deletions chans/README.md
@@ -26,9 +26,9 @@ This package implements two complementary conductances, GABA-B / GIRK and NMDA,

## GABA-B / GIRK

-GABA-B is an inhibitory channel activated by the usual GABA inhibitory neurotransmitter, which is coupled to the GIRK *G-protein coupled inwardly rectifying potassium (K) channel*. It is ubiquitous in the brain, and is likely essential for basic neural function (especially in spiking networks from a computational perspective). The inward rectification is caused by a Mg+ ion block *from the inside* of the neuron, which means that these channels are most open when the neuron is hyperpolarized (inactive), and thus it serves to *keep inactive neurons inactive*.
+GABA-B is an inhibitory channel activated by the usual GABA inhibitory neurotransmitter, which is coupled to the GIRK *G-protein coupled inwardly rectifying potassium (K) channel*. It is ubiquitous in the brain, and is likely essential for basic neural function (especially in spiking networks from a computational perspective). The inward rectification is caused by a Mg+ ion block *from the inside* of the neuron, which means that these channels are most open when the neuron is hyperpolarized (inactive), and thus it serves to *keep inactive neurons inactive*. Implementation based on [Thomson & Destexhe, 1999](#references).

-In standard Leabra rate-code neurons using FFFB inhibition, the continuous nature of the GABA-A type inhibition serves this function already, so these GABA-B channels have not been as important, but whenever a discrete spiking function has been used along with FFFB inhibition or direct interneuron inhibition, there is a strong tendency for every neuron to fire at some point, in a rolling fashion, because neurons that are initially inhibited during the first round of firing can just pop back up once that initial wave of associated GABA-A inhibition passes. This is especially problematic for untrained networks where excitatory connections are not well differentiated, and neurons are receiving very similar levels of excitatory input. In this case, learning does not have the ability to further differentiate the neurons, and does not work effectively.
+In the original Leabra rate-code neurons using FFFB inhibition, the continuous nature of the GABA-A type inhibition serves this function already, so these GABA-B channels have not been as important, but whenever a discrete spiking function has been used along with FFFB inhibition or direct interneuron inhibition, there is a strong tendency for every neuron to fire at some point, in a rolling fashion, because neurons that are initially inhibited during the first round of firing can just pop back up once that initial wave of associated GABA-A inhibition passes. This is especially problematic for untrained networks where excitatory connections are not well differentiated, and neurons are receiving very similar levels of excitatory input. In this case, learning does not have the ability to further differentiate the neurons, and does not work effectively.
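For orientation, the Thomson & Destexhe style GABA-B model cited above is conventionally written as a two stage G protein cascade; the following is a sketch of the standard published form, not a transcription of this package's code:

$$\frac{dR}{dt} = K_1 [T](1 - R) - K_2 R, \qquad \frac{dG}{dt} = K_3 R - K_4 G$$

$$I_{\mathrm{GABA_B}} = \bar{g}\,\frac{G^4}{G^4 + K_d}\,(V - E_K)$$

where [T] is the transmitter concentration, R the fraction of activated receptor, and G the concentration of activated G protein that gates the GIRK channel.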

## NMDA

@@ -225,6 +225,8 @@ Magee98: Overall, Ih acts to dampen dendritic excitability, but its largest impa

* Sanders, H., Berends, M., Major, G., Goldman, M. S., & Lisman, J. E. (2013). NMDA and GABAB (KIR) Conductances: The “Perfect Couple” for Bistability. Journal of Neuroscience, 33(2), 424–429. https://doi.org/10.1523/JNEUROSCI.1854-12.2013

+* Thomson, A. M., & Destexhe, A. (1999). Dual intracellular recordings and computational models of slow inhibitory postsynaptic potentials in rat neocortical and hippocampal slices. *Neuroscience, 92,* 1193–1215.

* Urakubo, H., Honda, M., Froemke, R. C., & Kuroda, S. (2008). Requirement of an allosteric kinetics of NMDA receptors for spike timing-dependent plasticity. *The Journal of Neuroscience, 28(13),* 3310–3323. http://www.ncbi.nlm.nih.gov/pubmed/18367598

* Wang, B., Jaffe, D. B., & Brenner, R. (2014). Current understanding of iberiotoxin-resistant BK channels in the nervous system. Frontiers in Physiology, 5. https://www.frontiersin.org/articles/10.3389/fphys.2014.00382