/*****************************************************************************
Copyright 2018 The TensorFlow.NET Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
******************************************************************************/
using Tensorflow.NumPy;
using static Tensorflow.Binding;
using static Tensorflow.KerasApi;
namespace TensorFlowNET.Examples
{
    /// <summary>
    /// A linear regression example using the TensorFlow.NET library in eager
    /// mode: fits pred = W * x + b to the training data by gradient descent.
    /// https://github.com/aymericdamien/TensorFlow-Examples/blob/master/tensorflow_v2/notebooks/2_BasicModels/linear_regression.ipynb
    /// </summary>
    public class LinearRegressionEager : SciSharpExample, IExample
    {
        // Training hyperparameters.
        int training_steps = 1000;
        float learning_rate = 0.01f;
        int display_step = 100;

        // Training data.
        NDArray train_X, train_Y;
        int n_samples;
        public ExampleConfig InitConfig()
            => Config = new ExampleConfig
            {
                Name = "Linear Regression (Eager)",
                Enabled = true,
                IsImportingGraph = false
            };
        public bool Run()
        {
            tf.enable_eager_execution();
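            // Eager execution evaluates operations immediately and returns
            // concrete values, so no Session or graph-building step is needed.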
            // Training data.
            PrepareData();
            Train();

            return true;
        }
        public override void Train()
        {
            // Set the model weights. Fixed initial values make runs easy to
            // reproduce and debug; random draws (e.g. rng.randn<float>())
            // could be used instead.
            var W = tf.Variable(-0.06f, name: "weight");
            var b = tf.Variable(-0.73f, name: "bias");

            var optimizer = keras.optimizers.SGD(learning_rate);

            // Run training for the given number of steps. Each step wraps the
            // forward pass in a GradientTape for automatic differentiation,
            // then updates W and b.
            foreach (var step in range(1, training_steps + 1))
            {
                using var g = tf.GradientTape();
                // Linear regression: pred = W * x + b.
                var pred = W * train_X + b;

                // Loss: halved mean squared error,
                // sum((pred - y)^2) / (2 * n_samples).
                var loss = tf.reduce_sum(tf.pow(pred - train_Y, 2)) / (2 * n_samples);
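                // For this loss the gradients have the closed form
                //   dL/dW = (1/n) * sum((pred - y) * x)
                //   dL/db = (1/n) * sum(pred - y)
                // which the tape computes automatically below.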
                // Recording should stop here; the tape is only needed while
                // the forward pass above is traced.

                // Compute gradients of the loss with respect to W and b.
                var gradients = g.gradient(loss, (W, b));

                // Update W and b following the gradients.
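                // (Plain SGD: W <- W - learning_rate * dL/dW, and likewise
                // for b.)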
                optimizer.apply_gradients(zip(gradients, (W, b)));
                if (step % display_step == 0)
                {
                    pred = W * train_X + b;
                    loss = tf.reduce_sum(tf.pow(pred - train_Y, 2)) / (2 * n_samples);
                    print($"step: {step}, loss: {loss.numpy()}, W: {W.numpy()}, b: {b.numpy()}");
                }
            }
        }
        public override void PrepareData()
        {
            train_X = np.array(3.3f, 4.4f, 5.5f, 6.71f, 6.93f, 4.168f, 9.779f, 6.182f, 7.59f, 2.167f,
                               7.042f, 10.791f, 5.313f, 7.997f, 5.654f, 9.27f, 3.1f);
            train_Y = np.array(1.7f, 2.76f, 2.09f, 3.19f, 1.694f, 1.573f, 3.366f, 2.596f, 2.53f, 1.221f,
                               2.827f, 3.465f, 1.65f, 2.904f, 2.42f, 2.94f, 1.3f);

            // Number of training samples.
            n_samples = (int)train_X.shape[0];
        }
    }
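    // Minimal driver sketch (a hypothetical addition, not part of the original
    // example): the repository normally launches examples through its console
    // runner, but the class can also be driven directly like this.
    internal static class LinearRegressionEagerDemo
    {
        public static void RunDemo()
        {
            var example = new LinearRegressionEager();
            example.InitConfig();
            example.Run();
        }
    }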
}