Commit

object oriented foundation module will allow for uniform application of algorithms

lehner committed Nov 6, 2023
1 parent 1764f00 commit db50c64
Showing 10 changed files with 118 additions and 33 deletions.
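
The pattern this commit introduces: each container class (gpt.lattice, gpt.tensor) carries a foundation class attribute pointing at a module that implements the primitive numerical kernels for that type, so generic routines in transform.py can dispatch through a[0].__class__.foundation instead of branching on isinstance. A minimal self-contained sketch of the dispatch pattern, with stand-in classes and a numpy kernel rather than the real gpt types:

import numpy


class tensor_foundation:
    @staticmethod
    def inner_product(a, b, use_accelerator):
        # batched inner products <a_i, b_j>; numpy.vdot conjugates its first argument
        return numpy.array([[numpy.vdot(x.data, y.data) for y in b] for x in a])


class tensor:
    foundation = tensor_foundation

    def __init__(self, data):
        self.data = numpy.asarray(data, dtype=numpy.complex128)


def inner_product(a, b):
    # uniform dispatch: the element type selects its own kernel
    return a[0].__class__.foundation.inner_product(a, b, True)


x = tensor([1.0, 2.0j])
print(inner_product([x], [x]))  # [[5.+0.j]]

Adding a new container type then only requires a new foundation module with the same function signatures; no generic algorithm needs to change.
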
1 change: 1 addition & 0 deletions lib/gpt/core/__init__.py
@@ -17,6 +17,7 @@
 # with this program; if not, write to the Free Software Foundation, Inc.,
 # 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
 #
+import gpt.core.foundation
 from gpt.core.quadruple_precision import qfloat, qfloat_array, qcomplex, qcomplex_array
 from gpt.core.grid import grid, grid_from_description, full, redblack, general
 from gpt.core.precision import single, double, double_quadruple, precision, str_to_precision
20 changes: 20 additions & 0 deletions lib/gpt/core/foundation/__init__.py
@@ -0,0 +1,20 @@
#
# GPT - Grid Python Toolkit
# Copyright (C) 2023 Christoph Lehner ([email protected], https://github.com/lehner/gpt)
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
import gpt.core.foundation.lattice
import gpt.core.foundation.tensor
32 changes: 32 additions & 0 deletions lib/gpt/core/foundation/lattice.py
@@ -0,0 +1,32 @@
#
# GPT - Grid Python Toolkit
# Copyright (C) 2023 Christoph Lehner ([email protected], https://github.com/lehner/gpt)
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
import gpt
import cgpt


def rank_inner_product(a, b, use_accelerator):
    a = [gpt.eval(x) for x in a]
    b = [gpt.eval(x) for x in b]
    otype = a[0].otype
    assert len(otype.v_idx) == len(b[0].otype.v_idx)
    return cgpt.lattice_rank_inner_product(a, b, use_accelerator)


def inner_product(a, b, use_accelerator):
    return a[0].grid.globalsum(rank_inner_product(a, b, use_accelerator))
28 changes: 28 additions & 0 deletions lib/gpt/core/foundation/tensor.py
@@ -0,0 +1,28 @@
#
# GPT - Grid Python Toolkit
# Copyright (C) 2023 Christoph Lehner ([email protected], https://github.com/lehner/gpt)
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
import gpt
import numpy


def rank_inner_product(a, b, use_accelerator):
    return numpy.array([[gpt.trace(gpt.adj(x) * y) for y in b] for x in a], dtype=numpy.complex128)


def inner_product(a, b, use_accelerator):
    return rank_inner_product(a, b, use_accelerator)
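
For matrix-valued tensors, gpt.trace(gpt.adj(x) * y) is the Frobenius inner product, the sum of conj(x_ij) * y_ij over all entries. A quick cross-check of that identity in plain numpy, independent of gpt:

import numpy

rng = numpy.random.default_rng(0)
x = rng.normal(size=(3, 3)) + 1j * rng.normal(size=(3, 3))
y = rng.normal(size=(3, 3)) + 1j * rng.normal(size=(3, 3))

# trace(adj(x) @ y) equals vdot(x, y) = sum of conj(x_ij) * y_ij
assert numpy.allclose(numpy.trace(x.conj().T @ y), numpy.vdot(x, y))
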
2 changes: 2 additions & 0 deletions lib/gpt/core/lattice.py
@@ -20,6 +20,7 @@
 from gpt.default import is_verbose
 from gpt.core.expr import factor
 from gpt.core.mem import host
+from gpt.core.foundation import lattice as foundation

 mem_book = {}
 verbose_lattice_creation = is_verbose("lattice_creation")
@@ -53,6 +54,7 @@ def unpack_cache_key(key):
 class lattice(factor):
     __array_priority__ = 1000000
     cache = {}
+    foundation = foundation

     def __init__(self, first, second=None, third=None):
         self.metadata = {}
2 changes: 2 additions & 0 deletions lib/gpt/core/operator/unary.py
@@ -96,6 +96,8 @@ def trace(l, t=None):
         t = gpt.expr_unary.BIT_SPINTRACE | gpt.expr_unary.BIT_COLORTRACE
     if isinstance(l, gpt.tensor):
         return l.trace(t)
+    elif gpt.util.is_num(l):
+        return l
     return gpt.expr(l, t)

3 changes: 3 additions & 0 deletions lib/gpt/core/quadruple_precision/qcomplex_array.py
@@ -48,6 +48,9 @@ def __repr__(self):
     def __array__(self, dtype=None):
         return NotImplemented

+    def __getitem__(self, index):
+        return g.qcomplex(self.real[index], self.imag[index])
+
     def __array_ufunc__(self, ufunc, method, *inputs, **kwargs):
         if ufunc not in HANDLED_FUNCTIONS or method != "__call__":
             return NotImplemented
5 changes: 1 addition & 4 deletions lib/gpt/core/quadruple_precision/qfloat_array.py
@@ -54,10 +54,7 @@ def __array__(self, dtype=None):
         return NotImplemented

     def __getitem__(self, index):
-        if isinstance(index, int):
-            return g.qfloat(self.x[index], self.y[index])
-        else:
-            raise NotImplementedError(f"Array slicing not yet implemented: {index}")
+        return g.qfloat(self.x[index], self.y[index])

     def __float__(self):
         if self.x.shape != (1,):
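
Dropping the isinstance check lets __getitem__ accept anything the underlying numpy arrays accept, so slices and index arrays now work rather than raising NotImplementedError. The numpy indexing behavior being relied on, shown on a plain array standing in for the x/y component arrays:

import numpy

x = numpy.array([1.0, 2.0, 3.0, 4.0])
print(x[2])       # single element: 3.0
print(x[1:3])     # slice: [2. 3.]
print(x[[0, 3]])  # index array: [1. 4.]
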
3 changes: 3 additions & 0 deletions lib/gpt/core/tensor.py
@@ -19,9 +19,12 @@
 import cgpt
 import gpt
 import numpy as np
+from gpt.core.foundation import tensor as foundation


 class tensor:
+    foundation = foundation
+
     def __init__(self, first, second=None):
         if second is not None:
             array, otype = first, second
55 changes: 26 additions & 29 deletions lib/gpt/core/transform.py
Expand Up @@ -59,50 +59,47 @@ def copy(first, second=None):
return t


-def rank_inner_product(a, b, use_accelerator=True):
+def call_binary_aa_num(functional, a, b):
     return_list = (isinstance(a, list)) or (isinstance(b, list))
     a = gpt.util.to_list(a)
     b = gpt.util.to_list(b)
-    if isinstance(a[0], gpt.tensor) and isinstance(b[0], gpt.tensor):
-        res = numpy.array([[gpt.adj(x) * y for y in b] for x in a], dtype=numpy.complex128)
-    else:
-        a = [gpt.eval(x) for x in a]
-        b = [gpt.eval(x) for x in b]
-        otype = a[0].otype
-        assert len(otype.v_idx) == len(b[0].otype.v_idx)
-        res = cgpt.lattice_rank_inner_product(a, b, use_accelerator)
+    res = functional(a, b)
     if return_list:
         return res
     return gpt.util.to_num(res[0, 0])


-def inner_product(a, b):
-    if isinstance(a, gpt.tensor):
-        return gpt.trace(gpt.adj(a) * b)
-    grid = gpt.util.to_list(a)[0].grid
-    return grid.globalsum(rank_inner_product(a, b))
+def rank_inner_product(a, b, use_accelerator=True):
+    return call_binary_aa_num(
+        lambda la, lb: la[0].__class__.foundation.rank_inner_product(la, lb, use_accelerator), a, b
+    )
+
+
+def inner_product(a, b, use_accelerator=True):
+    return call_binary_aa_num(
+        lambda la, lb: la[0].__class__.foundation.inner_product(la, lb, use_accelerator), a, b
+    )


 def norm2(l):
     l = gpt.eval(l)
     return_list = isinstance(l, list)
     l = gpt.util.to_list(l)
-    l_lattices = [(i, l[i]) for i in range(len(l)) if isinstance(l[i], gpt.lattice)]
-    l_tensors = [(i, l[i]) for i in range(len(l)) if isinstance(l[i], gpt.tensor)]
-    if len(l_lattices) > 0:
-        ip_l = (
-            l_lattices[0][1]
-            .grid.globalsum(
-                numpy.array([rank_inner_product(x, x) for i, x in l_lattices], dtype=numpy.complex128)
-            )
-            .real
-        )
-    ip_t = [x.norm2().real for i, x in l_tensors]
+    objects = {}
+    indices = {}
+    for n, x in enumerate(l):
+        fnd = x.foundation
+        if fnd not in objects:
+            objects[fnd] = []
+            indices[fnd] = []
+        objects[fnd].append(x)
+        indices[fnd].append(n)
     ip = numpy.ndarray(dtype=numpy.float64, shape=(len(l),))
-    for i, j in enumerate(l_lattices):
-        ip[j[0]] = ip_l[i]
-    for i, j in enumerate(l_tensors):
-        ip[j[0]] = ip_t[i]
+    for fnd in objects:
+        obj = objects[fnd]
+        ip_o = fnd.inner_product(obj, obj, True).real
+        for n, i in enumerate(indices[fnd]):
+            ip[i] = ip_o[n]
     if return_list:
         return ip
     return gpt.util.to_num(ip[0])
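
The rewritten norm2 groups its inputs by foundation module, makes one batched call per group, and scatters the results back to the original positions, so mixed lists of lattices and tensors evaluate in input order. The same group-then-scatter pattern in isolation, with a stand-in key function and batched call in place of the gpt API:

def batched_apply(items, key, batched_call):
    # group positions and members by key, preserving input order
    groups = {}
    for n, x in enumerate(items):
        idx, members = groups.setdefault(key(x), ([], []))
        idx.append(n)
        members.append(x)
    out = [None] * len(items)
    for idx, members in groups.values():
        results = batched_call(members)  # one batched call per group
        for n, i in enumerate(idx):
            out[i] = results[n]
    return out


# ints and floats form separate groups; results land in input order
print(batched_apply([1, 2.5, 3, 0.5], type, lambda ms: [m * m for m in ms]))
# [1, 6.25, 9, 0.25]
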
