models.py (forked from davidgjiang/smart-pixels-ml)
import tensorflow as tf
from tensorflow.keras.layers import Input, Flatten, AveragePooling2D
from tensorflow.keras.models import Model
from qkeras import QDense, QConv2D, QSeparableConv2D, QActivation, quantized_bits
def var_network(var, hidden=10, output=2):
    # Dense head: flatten, then two 8-bit quantized hidden layers with
    # quantized-tanh activations, ending in a quantized linear output layer.
    var = Flatten()(var)
    var = QDense(
        hidden,
        kernel_quantizer=quantized_bits(8, 0, alpha=1),
        bias_quantizer=quantized_bits(8, 0, alpha=1),
        kernel_regularizer=tf.keras.regularizers.L1L2(0.01),
        activity_regularizer=tf.keras.regularizers.L2(0.01),
    )(var)
    var = QActivation("quantized_tanh(8, 0, 1)")(var)
    var = QDense(
        hidden,
        kernel_quantizer=quantized_bits(8, 0, alpha=1),
        bias_quantizer=quantized_bits(8, 0, alpha=1),
        kernel_regularizer=tf.keras.regularizers.L1L2(0.01),
        activity_regularizer=tf.keras.regularizers.L2(0.01),
    )(var)
    var = QActivation("quantized_tanh(8, 0, 1)")(var)
    return QDense(
        output,
        kernel_quantizer=quantized_bits(8, 0, alpha=1),
        bias_quantizer=quantized_bits(8, 0, alpha=1),
        kernel_regularizer=tf.keras.regularizers.L1L2(0.01),
    )(var)
def conv_network(var, n_filters=5, kernel_size=3):
    # Convolutional front end: a 4-bit quantized separable convolution
    # followed by a 1x1 quantized convolution, each with a quantized-tanh
    # activation.
    var = QSeparableConv2D(
        n_filters, kernel_size,
        depthwise_quantizer=quantized_bits(4, 0, 1, alpha=1),
        pointwise_quantizer=quantized_bits(4, 0, 1, alpha=1),
        bias_quantizer=quantized_bits(4, 0, alpha=1),
        depthwise_regularizer=tf.keras.regularizers.L1L2(0.01),
        pointwise_regularizer=tf.keras.regularizers.L1L2(0.01),
        activity_regularizer=tf.keras.regularizers.L2(0.01),
    )(var)
    var = QActivation("quantized_tanh(4, 0, 1)")(var)
    var = QConv2D(
        n_filters, 1,
        kernel_quantizer=quantized_bits(4, 0, alpha=1),
        bias_quantizer=quantized_bits(4, 0, alpha=1),
        kernel_regularizer=tf.keras.regularizers.L1L2(0.01),
        activity_regularizer=tf.keras.regularizers.L2(0.01),
    )(var)
    var = QActivation("quantized_tanh(4, 0, 1)")(var)
    return var
def CreateModel(shape, n_filters, pool_size):
    # Full model: quantized conv stack -> average pooling -> 8-bit
    # requantization -> dense head producing 14 outputs.
    x_base = x_in = Input(shape)
    # Forward n_filters so the argument is not silently ignored.
    stack = conv_network(x_base, n_filters=n_filters)
    stack = AveragePooling2D(
        pool_size=(pool_size, pool_size),
        strides=None,
        padding="valid",
        data_format=None,
    )(stack)
    stack = QActivation("quantized_bits(8, 0, alpha=1)")(stack)
    stack = var_network(stack, hidden=16, output=14)
    model = Model(inputs=x_in, outputs=stack)
    return model
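# Minimal usage sketch: the input shape, filter count, pool size, optimizer,
# and loss below are illustrative placeholders, not values taken from the
# training pipeline; substitute the dimensions of your own inputs.
if __name__ == "__main__":
    example_shape = (13, 21, 2)  # (height, width, channels), placeholder only
    model = CreateModel(shape=example_shape, n_filters=5, pool_size=3)
    model.compile(optimizer="adam", loss="mse")
    model.summary()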