Implement contrib functions in Python
vbkaisetsu committed Aug 4, 2018
1 parent aebd0fb commit 692e87c
Showing 18 changed files with 127 additions and 115 deletions.
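
In outline, the commit removes several convenience functions (selu, list-based sum and mean, zeros, ones, dropout, and the batch mean/normalize helpers) from the Cython extern declarations and reimplements them as plain-Python "contrib" functions built on the remaining primitives, while the extern blocks switch to the new primitiv/core/... header paths. The sketch below is a minimal, self-contained illustration of the same pattern in plain Python (no primitiv dependency; all names are illustrative only, not the library API):

def sum_list(xs):
    # Fold a list by repeated addition, mirroring the new contrib sum_list().
    if not xs:
        raise TypeError("No values to sum.")
    ret = xs[0]
    for x in xs[1:]:
        ret = ret + x
    return ret

def mean_list(xs):
    # Mean of a list expressed through sum_list(), as the contrib mean() does.
    return sum_list(xs) / len(xs)

def dropout(x, rate, enabled=True):
    # Inverted dropout: the kept value is rescaled by 1/(1 - rate) so the
    # expectation is unchanged. Here x is a scalar and the Bernoulli mask is
    # fixed to 1; the real contrib function samples the mask per element.
    if not enabled:
        return x
    if rate == 1.0:
        return 0.0 * x
    p = 1.0 - rate
    keep = 1.0
    return (1.0 / p) * x * keep

print(sum_list([1.0, 2.0, 3.0]))   # 6.0
print(mean_list([1.0, 2.0, 3.0]))  # 2.0
print(dropout(4.0, 0.5))           # 8.0
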
2 changes: 1 addition & 1 deletion primitiv-core
Submodule primitiv-core updated 331 files
2 changes: 1 addition & 1 deletion primitiv/_device.pxd
@@ -1,4 +1,4 @@
cdef extern from "primitiv/device.h":
cdef extern from "primitiv/core/device.h":
cdef cppclass CppDevice "primitiv::Device":
void dump_description() except +

18 changes: 3 additions & 15 deletions primitiv/_function.pxd
@@ -9,7 +9,7 @@ from primitiv._shape cimport CppShape
from primitiv._parameter cimport CppParameter


cdef extern from "primitiv/functions.h":
cdef extern from "primitiv/core/basic_functions.h":
CppTensor func_input_tensor "primitiv::functions::input_tensor" (const CppShape &shape, const vector[float] &data, CppDevice *dev) except +
CppNode func_input_node "primitiv::functions::input_node" (const CppShape &shape, const vector[float] &data, CppDevice *dev, CppGraph *g) except +
CppTensor func_parameter_tensor "primitiv::functions::parameter_tensor" (CppParameter &param) except +
@@ -40,12 +40,7 @@ cdef extern from "primitiv/functions.h":
Var func_prelu "primitiv::functions::prelu" [Var](const Var &x, float a) except +
Var func_elu "primitiv::functions::elu" [Var](const Var &x, float a) except +
Var func_selu "primitiv::functions::selu" [Var](const Var &x, float a, float s) except +
CppNode func_sum "primitiv::functions::sum" (const vector[CppNode] &xs) except +
CppTensor func_sum "primitiv::functions::sum" (const vector[CppTensor] &xs) except +
Var func_sum "primitiv::functions::sum" [Var](const Var &x, unsigned dim) except +
CppNode func_mean "primitiv::functions::mean" (const vector[CppNode] &xs) except +
CppTensor func_mean "primitiv::functions::mean" (const vector[CppTensor] &xs) except +
Var func_mean "primitiv::functions::mean" [Var](const Var &x, unsigned dim) except +
Var func_broadcast "primitiv::functions::broadcast" [Var](const Var &x, unsigned dim, unsigned size) except +
Var func_logsumexp "primitiv::functions::logsumexp" [Var](const Var &x, unsigned dim) except +
Var func_log_softmax "primitiv::functions::log_softmax" [Var](const Var &x, unsigned dim) except +
@@ -58,13 +53,8 @@ cdef extern from "primitiv/functions.h":

CppTensor func_constant_tensor "primitiv::functions::constant_tensor" (const CppShape &shape, float k, CppDevice *dev) except +
CppNode func_constant_node "primitiv::functions::constant_node" (const CppShape &shape, float k, CppDevice *dev, CppGraph *g) except +
CppTensor func_zeros_tensor "primitiv::functions::zeros_tensor" (const CppShape &shape, CppDevice *dev) except +
CppNode func_zeros_node "primitiv::functions::zeros_node" (const CppShape &shape, CppDevice *dev, CppGraph *g) except +
CppTensor func_ones_tensor "primitiv::functions::ones_tensor" (const CppShape &shape, CppDevice *dev) except +
CppNode func_ones_node "primitiv::functions::ones_node" (const CppShape &shape, CppDevice *dev, CppGraph *g) except +
CppTensor func_identity_tensor "primitiv::functions::identity_tensor" (unsigned size, CppDevice *dev) except +
CppNode func_identity_node "primitiv::functions::identity_node" (unsigned size, CppDevice *dev, CppGraph *g) except +
Var func_dropout "primitiv::functions::dropout" [Var](const Var &x, float rate, bool enabled) except +

Var func_positive "primitiv::functions::positive" [Var](const Var &x) except +
Var func_negative "primitiv::functions::negative" [Var](const Var &x) except +
@@ -82,13 +72,11 @@ cdef extern from "primitiv/functions.h":
Var func_divide "primitiv::functions::divide" [Var](const Var &a, const Var &b) except +


cdef extern from "primitiv/functions.h":
cdef extern from "primitiv/core/basic_functions.h":
Var func_batch_sum "primitiv::functions::batch::sum" [Var](const Var &x) except +
Var func_batch_mean "primitiv::functions::batch::mean" [Var](const Var &x) except +
Var func_batch_normalize "primitiv::functions::batch::normalize" [Var](const Var &x) except +


cdef extern from "primitiv/functions.h":
cdef extern from "primitiv/core/basic_functions.h":

CppNode func_random_bernoulli_node "primitiv::functions::random::bernoulli_node" (const CppShape &shape, float p, CppDevice *dev, CppGraph *g) except +
CppTensor func_random_bernoulli_tensor "primitiv::functions::random::bernoulli_tensor" (const CppShape &shape, float p, CppDevice *dev) except +
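
The declarations above use two less common Cython features: the quoted string after each Python-visible name is the underlying C++ symbol (a "cname"), and the bracketed [Var] parameter declares a C++ template argument, so a single line binds both the CppNode and CppTensor overloads of the same primitiv function. A stripped-down .pxd-style sketch of that pattern (func_tanh is used only as an example here and is not part of this diff):

cdef extern from "primitiv/core/basic_functions.h":
    # Quoted string = real C++ name; [Var] = C++ template parameter, so this
    # one declaration covers both the Node and the Tensor overload.
    Var func_tanh "primitiv::functions::tanh" [Var](const Var &x) except +
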
189 changes: 107 additions & 82 deletions primitiv/_function.pyx
@@ -158,32 +158,13 @@ class functions:
def elu(Node x, float a):
return wrapNode(func_elu(x.wrapped, a))

@staticmethod
def selu(Node x, float a, float s):
return wrapNode(func_selu(x.wrapped, a, s))

@staticmethod
def sum(x, dim = None):
cdef vector[CppNode] xs
cdef Node node
if isinstance(x, list):
for node in x:
xs.push_back(node.wrapped)
return wrapNode(func_sum(xs))
return functions.sum_list(x)
else:
return wrapNode(func_sum((<Node> x).wrapped, <unsigned> dim))

@staticmethod
def mean(x, dim = None):
cdef vector[CppNode] xs
cdef Node node
if isinstance(x, list):
for node in x:
xs.push_back(node.wrapped)
return wrapNode(func_mean(xs))
else:
return wrapNode(func_mean((<Node> x).wrapped, <unsigned> dim))

@staticmethod
def broadcast(Node x, unsigned dim, unsigned size):
return wrapNode(func_broadcast(x.wrapped, dim, size))
@@ -243,43 +224,76 @@ class functions:
get_cpp_device(device), get_cpp_graph(graph)))

@staticmethod
def zeros(shape, Device device = None, Graph graph = None):
def identity(unsigned size, Device device = None, Graph graph = None):
if device is None:
device = Device.get_default()
if graph is None:
graph = Graph.get_default()
return wrapNode(func_zeros_node(normShape(shape).wrapped,
get_cpp_device(device), get_cpp_graph(graph)))
return wrapNode(func_identity_node(size, get_cpp_device(device), get_cpp_graph(graph)))

# contrib functions

@staticmethod
def ones(shape, Device device = None, Graph graph = None):
if device is None:
device = Device.get_default()
if graph is None:
graph = Graph.get_default()
return wrapNode(func_ones_node(normShape(shape).wrapped,
get_cpp_device(device), get_cpp_graph(graph)))
def selu(Node x, float a=1.6732632423543772848170429916717, float s=1.0507009873554804934193349852946):
return s * functions.elu(x, a)

@staticmethod
def identity(unsigned size, Device device = None, Graph graph = None):
if device is None:
device = Device.get_default()
if graph is None:
graph = Graph.get_default()
return wrapNode(func_identity_node(size, get_cpp_device(device), get_cpp_graph(graph)))
def sum_list(list xs):
if not xs:
raise TypeError("No nodes to sum.")
ret = xs[0]
for x in xs[1:]:
ret = ret + x
return ret

@staticmethod
def mean(x, dim = None):
if isinstance(x, list):
return functions.sum_list(x) / len(x)
else:
return functions.sum(x, dim) / x.shape()[dim]

@staticmethod
def zeros(shape, Device dev = None, Graph g = None):
return functions.constant(shape, 0.0, dev, g)

@staticmethod
def ones(shape, Device dev = None, Graph g = None):
return functions.constant(shape, 1.0, dev, g)

@staticmethod
def dropout(Node x, float rate, bool enabled):
if not enabled:
return x
if rate == 1.0:
return 0.0 * x
p = 1.0 - rate
return (1.0 / p) * x * functions.random.bernoulli(x.shape(), p, x.device())

# end contrib functions

class batch:
@staticmethod
def sum(Node x):
return wrapNode(func_batch_sum[CppNode](x.wrapped))

# contrib functions

@staticmethod
def mean(Node x):
return wrapNode(func_batch_mean[CppNode](x.wrapped))
return functions.batch.sum(x) / x.shape().batch()

@staticmethod
def normalize(Node x):
return wrapNode(func_batch_normalize[CppNode](x.wrapped))
if not x.shape().has_batch():
return x
b = x.shape().batch()
scale = b / (b - 1)
m = functions.batch.mean(x)
v = scale * (functions.batch.mean(x * x) - m * m)
return (x - m) / functions.sqrt(v + 1e-8)

# end contrib functions

class random:
@staticmethod
@@ -327,10 +341,6 @@ class functions:
return wrapNode(func_random_gumbel_node(normShape(shape).wrapped, mu, beta,
get_cpp_device(device), get_cpp_graph(graph)))

@staticmethod
def dropout(Node x, float rate, bool enabled):
return wrapNode(func_dropout(x.wrapped, rate, enabled))


class tensor_functions:

Expand Down Expand Up @@ -472,32 +482,13 @@ class tensor_functions:
def elu(Tensor x, float a):
return Tensor.get_wrapper_with_new(new CppTensor(func_elu(x.wrapped[0], a)))

@staticmethod
def selu(Tensor x, float a, float s):
return Tensor.get_wrapper_with_new(new CppTensor(func_selu(x.wrapped[0], a, s)))

@staticmethod
def sum(x, dim = None):
cdef vector[CppTensor] xs
cdef Tensor t
if isinstance(x, list):
for t in x:
xs.push_back(t.wrapped[0])
return Tensor.get_wrapper_with_new(new CppTensor(func_sum(xs)))
return tensor_functions.sum_list(x)
else:
return Tensor.get_wrapper_with_new(new CppTensor(func_sum((<Tensor> x).wrapped[0], <unsigned> dim)))

@staticmethod
def mean(x, dim = None):
cdef vector[CppTensor] xs
cdef Tensor t
if isinstance(x, list):
for t in x:
xs.push_back(t.wrapped[0])
return Tensor.get_wrapper_with_new(new CppTensor(func_mean(xs)))
else:
return Tensor.get_wrapper_with_new(new CppTensor(func_mean((<Tensor> x).wrapped[0], <unsigned> dim)))

@staticmethod
def broadcast(Tensor x, unsigned dim, unsigned size):
return Tensor.get_wrapper_with_new(new CppTensor(func_broadcast(x.wrapped[0], dim, size)))
@@ -554,38 +545,76 @@ class tensor_functions:
return Tensor.get_wrapper_with_new(new CppTensor(func_constant_tensor(normShape(shape).wrapped, k,
get_cpp_device(device))))


@staticmethod
def zeros(shape, Device device = None):
def identity(unsigned size, Device device = None):
if device is None:
device = Device.get_default()
return Tensor.get_wrapper_with_new(new CppTensor(func_zeros_tensor(normShape(shape).wrapped,
get_cpp_device(device))))
return Tensor.get_wrapper_with_new(new CppTensor(func_identity_tensor(size, get_cpp_device(device))))

# contrib functions

@staticmethod
def ones(shape, Device device = None):
if device is None:
device = Device.get_default()
return Tensor.get_wrapper_with_new(new CppTensor(func_ones_tensor(normShape(shape).wrapped,
get_cpp_device(device))))
def selu(Tensor x, float a=1.6732632423543772848170429916717, float s=1.0507009873554804934193349852946):
return s * tensor_functions.elu(x, a)

@staticmethod
def identity(unsigned size, Device device = None):
if device is None:
device = Device.get_default()
return Tensor.get_wrapper_with_new(new CppTensor(func_identity_tensor(size, get_cpp_device(device))))
def sum_list(list xs):
if not xs:
raise TypeError("No tensors to sum.")
ret = xs[0]
for x in xs[1:]:
ret = ret + x
return ret

@staticmethod
def mean(x, dim = None):
if isinstance(x, list):
return tensor_functions.sum_list(x) / len(x)
else:
return tensor_functions.sum(x, dim) / x.shape()[dim]

@staticmethod
def zeros(shape, Device dev = None):
return tensor_functions.constant(shape, 0.0, dev)

@staticmethod
def ones(shape, Device dev = None):
return tensor_functions.constant(shape, 1.0, dev)

@staticmethod
def dropout(Tensor x, float rate, bool enabled):
if not enabled:
return x
if rate == 1.0:
return 0.0 * x
p = 1.0 - rate
return (1.0 / p) * x * tensor_functions.random.bernoulli(x.shape(), p, x.device())

# end contrib functions

class batch:
@staticmethod
def sum(Tensor x):
return Tensor.get_wrapper_with_new(new CppTensor(func_batch_sum[CppTensor](x.wrapped[0])))

# contrib functions

@staticmethod
def mean(Tensor x):
return Tensor.get_wrapper_with_new(new CppTensor(func_batch_mean[CppTensor](x.wrapped[0])))
def mean(Tensor x):
return tensor_functions.batch.sum(x) / x.shape().batch()

@staticmethod
def normalize(Tensor x):
return Tensor.get_wrapper_with_new(new CppTensor(func_batch_normalize[CppTensor](x.wrapped[0])))
def normalize(Tensor x):
if not x.shape().has_batch():
return x
b = x.shape().batch()
scale = b / (b - 1)
m = tensor_functions.batch.mean(x)
v = scale * (tensor_functions.batch.mean(x * x) - m * m)
return (x - m) / tensor_functions.sqrt(v + 1e-8)

# end contrib functions

class random:
@staticmethod
@@ -622,7 +651,3 @@ class tensor_functions:
device = Device.get_default()
return Tensor.get_wrapper_with_new(new CppTensor(func_random_gumbel_tensor(normShape(shape).wrapped, mu, beta,
get_cpp_device(device))))

@staticmethod
def dropout(Tensor x, float rate, bool enabled):
return Tensor.get_wrapper_with_new(new CppTensor(func_dropout(x.wrapped[0], rate, enabled)))
4 changes: 2 additions & 2 deletions primitiv/_graph.pxd
@@ -6,7 +6,7 @@ from primitiv._shape cimport CppShape
from primitiv._tensor cimport CppTensor


cdef extern from "primitiv/graph.h" nogil:
cdef extern from "primitiv/core/graph.h" nogil:
cdef cppclass CppNode "primitiv::Node":
CppNode(CppNode &&src) except +
CppNode() except +
@@ -23,7 +23,7 @@ cdef extern from "primitiv/graph.h" nogil:
void backward() except +


cdef extern from "primitiv/graph.h" nogil:
cdef extern from "primitiv/core/graph.h" nogil:
cdef cppclass CppGraph "primitiv::Graph":
CppGraph() except +
void clear() except +
2 changes: 1 addition & 1 deletion primitiv/_initializer.pxd
@@ -1,7 +1,7 @@
from primitiv._tensor cimport CppTensor


cdef extern from "primitiv/initializer.h":
cdef extern from "primitiv/core/initializer.h":
cdef cppclass CppInitializer "primitiv::Initializer":
CppInitializer() except +
void apply(CppTensor &x) except +
2 changes: 1 addition & 1 deletion primitiv/_model.pxd
@@ -7,7 +7,7 @@ from primitiv._device cimport CppDevice
from primitiv._parameter cimport CppParameter


cdef extern from "primitiv/model.h":
cdef extern from "primitiv/core/model.h":
cdef cppclass CppModel "primitiv::Model":
CppModel() except +
void load(string &path, bool with_stats, CppDevice *device) except +
2 changes: 1 addition & 1 deletion primitiv/_optimizer.pxd
@@ -10,7 +10,7 @@ from primitiv._parameter cimport CppParameter, Parameter
from primitiv._shape cimport CppShape


cdef extern from "primitiv/optimizer.h":
cdef extern from "primitiv/core/optimizer.h":
cdef cppclass CppOptimizer "primitiv::Optimizer":
CppOptimizer(CppOptimizer &&) except +
CppOptimizer() except +
2 changes: 1 addition & 1 deletion primitiv/_parameter.pxd
@@ -9,7 +9,7 @@ from primitiv._device cimport CppDevice
from primitiv._initializer cimport CppInitializer, Initializer


cdef extern from "primitiv/parameter.h":
cdef extern from "primitiv/core/parameter.h":
cdef cppclass CppParameter "primitiv::Parameter":
CppParameter() except +
CppParameter(const CppShape &shape, const vector[float] &value, CppDevice *device) except +
2 changes: 1 addition & 1 deletion primitiv/_shape.pxd
@@ -3,7 +3,7 @@ from libcpp.string cimport string
from libcpp cimport bool


cdef extern from "primitiv/shape.h":
cdef extern from "primitiv/core/shape.h":
cdef cppclass CppShape "primitiv::Shape":
CppShape() except +
CppShape(vector[unsigned] &dims, unsigned batch) except +
(Diffs for the remaining changed files were not loaded.)
