-
Notifications
You must be signed in to change notification settings - Fork 1
/
neural.go
62 lines (50 loc) · 1.4 KB
/
neural.go
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
package neural
import (
"math/rand"
"time"
)
// init seeds the global math/rand source with the current time so that
// randomized weight initialization differs between runs.
//
// NOTE(review): rand.Seed is deprecated as of Go 1.20 (the global source
// is auto-seeded there) — if the module targets Go >= 1.20 this init can
// be removed; confirm against go.mod.
func init() {
rand.Seed(time.Now().UnixNano())
}
// TrainExample represents input-output pair of signals to train on or verify the training.
type TrainExample struct {
// Input is the signal fed to the network's first layer.
Input []float64
// Output is the expected signal from the network's last layer.
Output []float64
}
// Evaluator wraps main tasks of NN, evaluate input data.
type Evaluator interface {
// Evaluate calculates the network's output signal for the given input signal.
Evaluate(input []float64) []float64
// Layers exposes the ordered list of layers within the network (used in training).
Layers() []Layer
}
// network is the default Evaluator implementation: an ordered chain of layers
// through which an input signal is propagated.
type network struct {
// layers are applied in order during Evaluate; layer i maps a signal of
// width neurons[i] to width neurons[i+1] (see NewNeuralNetwork).
layers []Layer
}
// NewNeuralNetwork initializes neural network structure of neurons (counts) and layer factories.
//
// neurons lists the signal widths at each layer boundary: layer i is built by
// layersFactories[i] with neurons[i] inputs and neurons[i+1] outputs, so a
// network of N layers requires N+1 neuron counts. The function panics when the
// counts and factories do not line up — that is a programming error at wiring
// time, not a runtime condition.
func NewNeuralNetwork(neurons []int, layersFactories ...LayerFactory) Evaluator {
	if len(neurons)-1 != len(layersFactories) {
		panic("Neuron counts does not match layers count")
	}
	// Length equals capacity here; the redundant cap argument was dropped.
	layers := make([]Layer, len(layersFactories))
	for i, factory := range layersFactories {
		layers[i] = factory(neurons[i], neurons[i+1])
	}
	return &network{
		layers: layers,
	}
}
// Evaluate calculates network answer for given input signal.
//
// The signal is pushed through the layers in order: each layer first computes
// its raw potentials from the previous output via Forward, then its activator
// maps the potentials into the next output signal. The final output has the
// width of the last layer.
func (n *network) Evaluate(input []float64) []float64 {
	output := input
	for _, layer := range n.layers {
		// Renamed from "n" to avoid shadowing the method receiver; the
		// first shape dimension is the layer's output width.
		outSize, _, _ := layer.Shapes()
		potentials := make([]float64, outSize)
		layer.Forward(potentials, output)
		output = make([]float64, outSize)
		layer.Activator().Activation(output, potentials)
	}
	return output
}
// Layers exposes list of layers within network. Used in training only.
// The returned slice is the network's own backing slice, not a copy, so
// trainers mutate the live layers directly.
func (n *network) Layers() []Layer {
return n.layers
}