towards unified interface
lehner committed Nov 6, 2023
1 parent db50c64 commit a3e30cc
Showing 16 changed files with 219 additions and 106 deletions.
2 changes: 1 addition & 1 deletion lib/gpt/ad/forward/__init__.py
@@ -19,4 +19,4 @@
 from gpt.ad.forward.infinitesimal import infinitesimal
 from gpt.ad.forward.landau import landau
 from gpt.ad.forward.series import series
-from gpt.ad.forward.transform import norm2, inner_product, cshift
+import gpt.ad.forward.foundation
@@ -19,13 +19,22 @@
 import gpt as g
 
 
-def inner_product(sx, sy):
-    return sx.distribute2(sy, lambda a, b: g.inner_product(a, b))
+def inner_product(sx, sy, use_accelerator):
+    assert len(sx) == 1 and len(sy) == 1
+    sx = sx[0]
+    sy = sy[0]
+    return {(0, 0): sx.distribute2(sy, lambda a, b: g.inner_product(a, b, use_accelerator))}
 
 
 def norm2(sx):
-    return inner_product(sx, sx)
+    assert len(sx) == 1
+    return [inner_product(sx, sx, True)[0, 0]]
 
 
-def cshift(sx, mu, disp):
+def cshift(sx, mu, disp, none=None):
+    assert none is None
     return sx.distribute1(lambda a: g.cshift(a, mu, disp))
+
+
+def trace(sx, t):
+    return sx.distribute1(lambda a: g.trace(a, t))
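
The forward-mode helpers now follow the list-in, dictionary-out convention that the rest of this commit standardizes on: inner_product takes lists of operands and returns results keyed by index pairs, and norm2 reads off the diagonal. A self-contained toy sketch of that convention, with plain Python lists standing in for fields and hypothetical toy_* names (not the gpt API):

def toy_inner_product(xs, ys):
    # lists of operands in, dict of pairwise inner products keyed by (i, j) out
    return {
        (i, j): sum(a * b for a, b in zip(x, y))
        for i, x in enumerate(xs)
        for j, y in enumerate(ys)
    }


def toy_norm2(xs):
    # norm2 only needs the diagonal entries
    ip = toy_inner_product(xs, xs)
    return [ip[i, i] for i in range(len(xs))]


print(toy_norm2([[1.0, 2.0], [3.0, 4.0]]))  # [5.0, 25.0]
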
6 changes: 5 additions & 1 deletion lib/gpt/ad/forward/series.py
@@ -18,6 +18,8 @@
 #
 import gpt as g
 from gpt.ad.forward import infinitesimal
+from gpt.ad.forward import foundation
+from gpt.core.foundation import base
 
 
 def promote(other, landau_O):
@@ -28,7 +30,9 @@ def promote(other, landau_O):
     return other
 
 
-class series:
+class series(base):
+    foundation = foundation
+
     def __init__(self, terms, landau_O):
        self.landau_O = landau_O
        if not isinstance(terms, dict):
3 changes: 2 additions & 1 deletion lib/gpt/ad/reverse/__init__.py
@@ -17,4 +17,5 @@
 # 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
 #
 from gpt.ad.reverse.node import node, node_base
-from gpt.ad.reverse.transform import inner_product, relu, norm2, cshift
+import gpt.ad.reverse.transform
+import gpt.ad.reverse.foundation
60 changes: 60 additions & 0 deletions lib/gpt/ad/reverse/foundation.py
@@ -0,0 +1,60 @@
+#
+# GPT - Grid Python Toolkit
+# Copyright (C) 2023 Christoph Lehner ([email protected], https://github.com/lehner/gpt)
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+#
+import gpt as g
+from gpt.ad.reverse.util import accumulate_gradient
+
+
+def inner_product(x, y):
+    def _forward():
+        return g.inner_product(x.value, y.value)
+
+    # not allowed to capture z, otherwise have reference loop!
+    def _backward(z):
+        if x.with_gradient:
+            accumulate_gradient(x, y.value * g.adj(z.gradient))
+        if y.with_gradient:
+            accumulate_gradient(y, x.value * g.adj(z.gradient))
+
+    return g.ad.reverse.node_base(_forward, _backward, (x, y))
+
+
+def norm2(x):
+    assert len(x) == 1
+    return [inner_product(x[0], x[0])]
+
+
+def cshift(x, direction, displacement, none):
+    assert none is None
+
+    def _forward():
+        return g.cshift(x.value, direction, displacement)
+
+    # not allowed to capture z, otherwise have reference loop!
+    def _backward(z):
+        if x.with_gradient:
+            accumulate_gradient(x, g.cshift(z.gradient, direction, -displacement))
+
+    return g.ad.reverse.node_base(_forward, _backward, (x,))
+
+
+def component_simple_map(operator, numpy_operator, extra_params, first, second):
+    if operator == "relu":
+        assert second is None
+        return g.ad.reverse.transform.relu(first, a=extra_params["a"])
+    raise Exception(f"component-wise operator {operator} not implemented in rev-AD")
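
Both builders here follow the same shape: a _forward closure that produces the value and a _backward closure that receives the output node z as an argument instead of capturing it, which avoids a reference cycle between a node and its own backward function. A toy scalar version of the pattern, with hypothetical names and eager evaluation for brevity (the real node_base defers _forward):

class toy_node:
    # minimal stand-in for node_base: _forward produces the value, _backward(z)
    # accumulates gradients into the children given the output node z
    def __init__(self, forward, backward=lambda z: None, children=()):
        self.forward = forward
        self.backward = backward
        self.children = children
        self.value = forward()  # eager for brevity; the real class defers this
        self.gradient = 0.0


def toy_mul(x, y):
    def _forward():
        return x.value * y.value

    # z is passed in rather than captured, mirroring the comment above
    def _backward(z):
        x.gradient += y.value * z.gradient
        y.gradient += x.value * z.gradient

    return toy_node(_forward, _backward, (x, y))


a = toy_node(lambda: 3.0)
b = toy_node(lambda: 4.0)
c = toy_mul(a, b)
c.gradient = 1.0
c.backward(c)
print(a.gradient, b.gradient)  # 4.0 3.0
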
7 changes: 5 additions & 2 deletions lib/gpt/ad/reverse/node.py
@@ -18,7 +18,8 @@
 #
 import gpt as g
 from gpt.ad.reverse.util import accumulate_gradient
-
+from gpt.ad.reverse import foundation
+from gpt.core.foundation import base
 
 verbose_memory = g.default.is_verbose("ad_memory")
 
@@ -75,7 +76,9 @@ def gradient(self, fields, dfields):
 # gctr = 0
 
 
-class node_base:
+class node_base(base):
+    foundation = foundation
+
     def __init__(self, _forward, _backward=lambda z: None, _children=(), with_gradient=True):
         # global gctr
         # gctr+=1
30 changes: 0 additions & 30 deletions lib/gpt/ad/reverse/transform.py
@@ -21,24 +21,6 @@
 from gpt.ad.reverse.util import accumulate_gradient
 
 
-def inner_product(x, y):
-    def _forward():
-        return g.inner_product(x.value, y.value)
-
-    # not allowed to capture z, otherwise have reference loop!
-    def _backward(z):
-        if x.with_gradient:
-            accumulate_gradient(x, y.value * g.adj(z.gradient))
-        if y.with_gradient:
-            accumulate_gradient(y, x.value * g.adj(z.gradient))
-
-    return node_base(_forward, _backward, (x, y))
-
-
-def norm2(x):
-    return inner_product(x, x)
-
-
 def relu(x, a=0.0):
     def _forward():
         return g.component.relu(a)(x.value)
@@ -50,15 +32,3 @@ def _backward(z):
             accumulate_gradient(x, g.component.multiply(active, z.gradient))
 
     return node_base(_forward, _backward, (x,))
-
-
-def cshift(x, direction, displacement):
-    def _forward():
-        return g.cshift(x.value, direction, displacement)
-
-    # not allowed to capture z, otherwise have reference loop!
-    def _backward(z):
-        if x.with_gradient:
-            accumulate_gradient(x, g.cshift(z.gradient, direction, -displacement))
-
-    return node_base(_forward, _backward, (x,))
21 changes: 7 additions & 14 deletions lib/gpt/core/component.py
@@ -25,20 +25,13 @@ def _simple_map(operator, numpy_operator=None, extra_params={}):
     def _mat(first, second=None):
         if isinstance(first, list):
             return [_mat(x) for x in first]
-        if second is not None:
-            dst = first
-            src = gpt.eval(second)
-        else:
-            if isinstance(first, gpt.tensor):
-                assert numpy_operator is not None
-                res = first.new()
-                res.array = numpy_operator(first.array)
-                return res
-            src = gpt.eval(first)
-            dst = gpt.lattice(src)
-        for i in dst.otype.v_idx:
-            cgpt.unary(dst.v_obj[i], src.v_obj[i], {**{"operator": operator}, **extra_params})
-        return dst
+        if isinstance(first, gpt.expr):
+            first = gpt(first)
+        if isinstance(second, gpt.expr):
+            second = gpt(second)
+        return first.__class__.foundation.component_simple_map(
+            operator, numpy_operator, extra_params, first, second
+        )
 
     return _mat
 
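
With this change _mat no longer contains per-type logic; it normalizes expression arguments and forwards to the foundation module attached to the argument's class. A toy sketch of that dispatch with stand-in classes (the expression-normalization step is omitted and the names are hypothetical, not the gpt classes):

import numpy


class toy_tensor_foundation:
    @staticmethod
    def component_simple_map(operator, numpy_operator, extra_params, first, second):
        # element-wise application via the numpy operator, as in foundation/tensor.py
        res = first.new()
        res.array = numpy_operator(first.array)
        return res


class toy_tensor:
    foundation = toy_tensor_foundation

    def __init__(self, array):
        self.array = array

    def new(self):
        return toy_tensor(numpy.zeros_like(self.array))


def toy_simple_map(operator, numpy_operator=None):
    def _mat(first, second=None):
        # dispatch on the argument's class, as in the new _mat above
        return first.__class__.foundation.component_simple_map(
            operator, numpy_operator, {}, first, second
        )

    return _mat


toy_sqrt = toy_simple_map("sqrt", numpy.sqrt)
print(toy_sqrt(toy_tensor(numpy.array([1.0, 4.0, 9.0]))).array)  # [1. 2. 3.]
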
5 changes: 5 additions & 0 deletions lib/gpt/core/foundation/__init__.py
@@ -18,3 +18,8 @@
 #
 import gpt.core.foundation.lattice
 import gpt.core.foundation.tensor
+
+
+# base class
+class base:
+    pass
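
The empty base class acts as a marker: anything derived from it promises that its class carries a foundation module, so generic routines can dispatch without type-specific branches. A minimal self-contained sketch of the pattern with two toy types (hypothetical names, not the gpt classes):

class base:  # the marker class from this file
    pass


class dense_foundation:
    @staticmethod
    def norm2(xs):
        return [sum(abs(v) ** 2 for v in x.data) for x in xs]


class sparse_foundation:
    @staticmethod
    def norm2(xs):
        return [sum(abs(v) ** 2 for v in x.entries.values()) for x in xs]


class dense_vec(base):
    foundation = dense_foundation

    def __init__(self, data):
        self.data = data


class sparse_vec(base):
    foundation = sparse_foundation

    def __init__(self, entries):
        self.entries = entries


def norm2(x):
    # one generic entry point serves every foundation-backed type
    return x.__class__.foundation.norm2([x])[0]


print(norm2(dense_vec([3.0, 4.0])))          # 25.0
print(norm2(sparse_vec({7: 2.0, 42: 1.0})))  # 5.0
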
48 changes: 46 additions & 2 deletions lib/gpt/core/foundation/lattice.py
@@ -18,15 +18,59 @@
 #
 import gpt
 import cgpt
+import numpy
 
 
 def rank_inner_product(a, b, use_accelerator):
-    a = [gpt.eval(x) for x in a]
-    b = [gpt.eval(x) for x in b]
     otype = a[0].otype
     assert len(otype.v_idx) == len(b[0].otype.v_idx)
     return cgpt.lattice_rank_inner_product(a, b, use_accelerator)
 
 
 def inner_product(a, b, use_accelerator):
     return a[0].grid.globalsum(rank_inner_product(a, b, use_accelerator))
+
+
+def norm2(l):
+    return (
+        l[0]
+        .grid.globalsum(
+            numpy.array(
+                [rank_inner_product([x], [x], True)[0, 0] for x in l], dtype=numpy.complex128
+            )
+        )
+        .real
+    )
+
+
+def cshift(first, second, third, fourth):
+    if fourth is not None:
+        l = second
+        d = third
+        o = fourth
+        t = first
+    else:
+        l = first
+        d = second
+        o = third
+        t = gpt.lattice(l)
+
+    for i in t.otype.v_idx:
+        cgpt.cshift(t.v_obj[i], l.v_obj[i], d, o)
+    return t
+
+
+def trace(l, t):
+    return gpt.expr(l, t)
+
+
+def component_simple_map(operator, numpy_operator, extra_params, first, second):
+    if second is not None:
+        dst = first
+        src = second
+    else:
+        src = first
+        dst = gpt.lattice(src)
+    for i in dst.otype.v_idx:
+        cgpt.unary(dst.v_obj[i], src.v_obj[i], {**{"operator": operator}, **extra_params})
+    return dst
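
The unified cshift accepts either three arguments (source, dimension, displacement) or four, in which case the first argument is the destination. A toy sketch of that argument handling, with a plain list rotation standing in for cgpt.cshift (hypothetical toy_* names):

def toy_cshift(first, second, third, fourth=None):
    if fourth is not None:
        # four arguments: first is the destination, the rest are source, dim, disp
        l, d, o, t = second, third, fourth, first
    else:
        # three arguments: allocate a fresh "lattice" for the result
        l, d, o, t = first, second, third, [None] * len(first)
    # plain 1-d rotation standing in for cgpt.cshift (the dimension d is ignored here)
    t[:] = l[o:] + l[:o]
    return t


print(toy_cshift([1, 2, 3, 4], 0, 1))  # [2, 3, 4, 1]
dst = [0, 0, 0, 0]
toy_cshift(dst, [1, 2, 3, 4], 0, 1)
print(dst)                             # [2, 3, 4, 1], written in place
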
20 changes: 20 additions & 0 deletions lib/gpt/core/foundation/tensor.py
@@ -26,3 +26,23 @@ def rank_inner_product(a, b, use_accelerator):
 
 def inner_product(a, b, use_accelerator):
     return rank_inner_product(a, b, use_accelerator)
+
+
+def norm2(a):
+    res = inner_product(a, a, True).real
+    ip = numpy.ndarray(dtype=numpy.float64, shape=(len(a),))
+    for i in range(len(a)):
+        ip[i] = res[i, i]
+    return ip
+
+
+def trace(a, t):
+    return a.trace(t)
+
+
+def component_simple_map(operator, numpy_operator, extra_params, first, second):
+    assert second is None
+    assert numpy_operator is not None
+    res = first.new()
+    res.array = numpy_operator(first.array)
+    return res
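
The tensor norm2 reduces the pairwise inner-product matrix to its real diagonal, one entry per tensor in the list. A short plain-numpy sketch of that reduction (the 2x2 matrix is made up for illustration):

import numpy

# made-up pairwise matrix standing in for inner_product(a, a, True)
res = numpy.array([[5.0 + 0j, 1.0 + 0j], [1.0 + 0j, 25.0 + 0j]]).real
ip = numpy.ndarray(dtype=numpy.float64, shape=(2,))
for i in range(2):
    ip[i] = res[i, i]
print(ip)  # [ 5. 25.]
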
4 changes: 2 additions & 2 deletions lib/gpt/core/lattice.py
@@ -20,7 +20,7 @@
 from gpt.default import is_verbose
 from gpt.core.expr import factor
 from gpt.core.mem import host
-from gpt.core.foundation import lattice as foundation
+from gpt.core.foundation import lattice as foundation, base as foundation_base
 
 mem_book = {}
 verbose_lattice_creation = is_verbose("lattice_creation")
@@ -51,7 +51,7 @@ def unpack_cache_key(key):
 
 
 # lattice class
-class lattice(factor):
+class lattice(factor, foundation_base):
     __array_priority__ = 1000000
     cache = {}
     foundation = foundation
7 changes: 4 additions & 3 deletions lib/gpt/core/operator/unary.py
@@ -94,11 +94,12 @@ def apply_expr_unary(l):
 def trace(l, t=None):
     if t is None:
         t = gpt.expr_unary.BIT_SPINTRACE | gpt.expr_unary.BIT_COLORTRACE
-    if isinstance(l, gpt.tensor):
-        return l.trace(t)
+    if isinstance(l, gpt.core.foundation.base):
+        return l.__class__.foundation.trace(l, t)
     elif gpt.util.is_num(l):
         return l
-    return gpt.expr(l, t)
+    else:
+        return gpt.expr(l, t)
 
 
 def spin_trace(l):
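
trace now has three branches: foundation-backed objects dispatch to their class's foundation module, numbers are returned unchanged, and everything else becomes a trace expression. A toy sketch of that control flow with stand-in classes (a tuple tag stands in for gpt.expr):

class base:  # stand-in for gpt.core.foundation.base
    pass


class toy_foundation:
    @staticmethod
    def trace(l, t):
        return sum(l.diag)


class toy_matrix(base):
    foundation = toy_foundation

    def __init__(self, diag):
        self.diag = diag


def toy_trace(l, t=None):
    if isinstance(l, base):
        return l.__class__.foundation.trace(l, t)  # foundation dispatch
    elif isinstance(l, (int, float, complex)):
        return l                                   # a number is its own trace
    else:
        return ("expr", l, t)                      # tuple tag standing in for gpt.expr(l, t)


print(toy_trace(toy_matrix([1.0, 2.0])))  # 3.0
print(toy_trace(5.0))                     # 5.0
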
4 changes: 2 additions & 2 deletions lib/gpt/core/tensor.py
@@ -19,10 +19,10 @@
 import cgpt
 import gpt
 import numpy as np
-from gpt.core.foundation import tensor as foundation
+from gpt.core.foundation import tensor as foundation, base as foundation_base
 
 
-class tensor:
+class tensor(foundation_base):
     foundation = foundation
 
     def __init__(self, first, second=None):