From db50c6469e7dbcd5ec5d04d6969e525e5c1985b6 Mon Sep 17 00:00:00 2001
From: Christoph Lehner <christoph.lehner@ur.de>
Date: Mon, 6 Nov 2023 09:55:26 +0100
Subject: [PATCH] object-oriented foundation module will allow for uniform
 application of algorithms

---
 lib/gpt/core/__init__.py                      |  1 +
 lib/gpt/core/foundation/__init__.py           | 20 +++++++
 lib/gpt/core/foundation/lattice.py            | 32 +++++++++++
 lib/gpt/core/foundation/tensor.py             | 28 ++++++++++
 lib/gpt/core/lattice.py                       |  2 +
 lib/gpt/core/operator/unary.py                |  2 +
 .../quadruple_precision/qcomplex_array.py     |  3 +
 .../core/quadruple_precision/qfloat_array.py  |  5 +-
 lib/gpt/core/tensor.py                        |  3 +
 lib/gpt/core/transform.py                     | 55 +++++++++----------
 10 files changed, 118 insertions(+), 33 deletions(-)
 create mode 100644 lib/gpt/core/foundation/__init__.py
 create mode 100644 lib/gpt/core/foundation/lattice.py
 create mode 100644 lib/gpt/core/foundation/tensor.py

diff --git a/lib/gpt/core/__init__.py b/lib/gpt/core/__init__.py
index d232d2e5..6fb436de 100644
--- a/lib/gpt/core/__init__.py
+++ b/lib/gpt/core/__init__.py
@@ -17,6 +17,7 @@
 # with this program; if not, write to the Free Software Foundation, Inc.,
 # 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
 #
+import gpt.core.foundation
 from gpt.core.quadruple_precision import qfloat, qfloat_array, qcomplex, qcomplex_array
 from gpt.core.grid import grid, grid_from_description, full, redblack, general
 from gpt.core.precision import single, double, double_quadruple, precision, str_to_precision
diff --git a/lib/gpt/core/foundation/__init__.py b/lib/gpt/core/foundation/__init__.py
new file mode 100644
index 00000000..0846e79d
--- /dev/null
+++ b/lib/gpt/core/foundation/__init__.py
@@ -0,0 +1,20 @@
+#
+# GPT - Grid Python Toolkit
+# Copyright (C) 2023 Christoph Lehner (christoph.lehner@ur.de, https://github.com/lehner/gpt)
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+#
+import gpt.core.foundation.lattice
+import gpt.core.foundation.tensor
diff --git a/lib/gpt/core/foundation/lattice.py b/lib/gpt/core/foundation/lattice.py
new file mode 100644
index 00000000..2578fc5b
--- /dev/null
+++ b/lib/gpt/core/foundation/lattice.py
@@ -0,0 +1,32 @@
+#
+# GPT - Grid Python Toolkit
+# Copyright (C) 2023 Christoph Lehner (christoph.lehner@ur.de, https://github.com/lehner/gpt)
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+#
+import gpt
+import cgpt
+
+
+def rank_inner_product(a, b, use_accelerator):
+    a = [gpt.eval(x) for x in a]
+    b = [gpt.eval(x) for x in b]
+    otype = a[0].otype
+    assert len(otype.v_idx) == len(b[0].otype.v_idx)
+    return cgpt.lattice_rank_inner_product(a, b, use_accelerator)
+
+
+def inner_product(a, b, use_accelerator):
+    return a[0].grid.globalsum(rank_inner_product(a, b, use_accelerator))
diff --git a/lib/gpt/core/foundation/tensor.py b/lib/gpt/core/foundation/tensor.py
new file mode 100644
index 00000000..dbc4f3be
--- /dev/null
+++ b/lib/gpt/core/foundation/tensor.py
@@ -0,0 +1,28 @@
+#
+# GPT - Grid Python Toolkit
+# Copyright (C) 2023 Christoph Lehner (christoph.lehner@ur.de, https://github.com/lehner/gpt)
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+#
+import gpt
+import numpy
+
+
+def rank_inner_product(a, b, use_accelerator):
+    return numpy.array([[gpt.trace(gpt.adj(x) * y) for y in b] for x in a], dtype=numpy.complex128)
+
+
+def inner_product(a, b, use_accelerator):
+    return rank_inner_product(a, b, use_accelerator)
diff --git a/lib/gpt/core/lattice.py b/lib/gpt/core/lattice.py
index e484ef45..9200c8e6 100644
--- a/lib/gpt/core/lattice.py
+++ b/lib/gpt/core/lattice.py
@@ -20,6 +20,7 @@
 from gpt.default import is_verbose
 from gpt.core.expr import factor
 from gpt.core.mem import host
+from gpt.core.foundation import lattice as foundation
 
 mem_book = {}
 verbose_lattice_creation = is_verbose("lattice_creation")
@@ -53,6 +54,7 @@ def unpack_cache_key(key):
 class lattice(factor):
     __array_priority__ = 1000000
     cache = {}
+    foundation = foundation
 
     def __init__(self, first, second=None, third=None):
         self.metadata = {}
diff --git a/lib/gpt/core/operator/unary.py b/lib/gpt/core/operator/unary.py
index c882aa0e..9cbd11a9 100644
--- a/lib/gpt/core/operator/unary.py
+++ b/lib/gpt/core/operator/unary.py
@@ -96,6 +96,8 @@ def trace(l, t=None):
         t = gpt.expr_unary.BIT_SPINTRACE | gpt.expr_unary.BIT_COLORTRACE
     if isinstance(l, gpt.tensor):
         return l.trace(t)
+    elif gpt.util.is_num(l):
+        return l
     return gpt.expr(l, t)
 
 
diff --git a/lib/gpt/core/quadruple_precision/qcomplex_array.py b/lib/gpt/core/quadruple_precision/qcomplex_array.py
index 4e4cfe2e..57702fa3 100644
--- a/lib/gpt/core/quadruple_precision/qcomplex_array.py
+++ b/lib/gpt/core/quadruple_precision/qcomplex_array.py
@@ -48,6 +48,9 @@ def __repr__(self):
     def __array__(self, dtype=None):
         return NotImplemented
 
+    def __getitem__(self, index):
+        return g.qcomplex(self.real[index], self.imag[index])
+
     def __array_ufunc__(self, ufunc, method, *inputs, **kwargs):
         if ufunc not in HANDLED_FUNCTIONS or method != "__call__":
             return NotImplemented
diff --git a/lib/gpt/core/quadruple_precision/qfloat_array.py b/lib/gpt/core/quadruple_precision/qfloat_array.py
index 88ebcd48..8eb47139 100644
--- a/lib/gpt/core/quadruple_precision/qfloat_array.py
+++ b/lib/gpt/core/quadruple_precision/qfloat_array.py
@@ -54,10 +54,7 @@ def __array__(self, dtype=None):
         return NotImplemented
 
     def __getitem__(self, index):
-        if isinstance(index, int):
-            return g.qfloat(self.x[index], self.y[index])
-        else:
-            raise NotImplementedError(f"Array slicing not yet implemented: {index}")
+        return g.qfloat(self.x[index], self.y[index])
 
     def __float__(self):
         if self.x.shape != (1,):
diff --git a/lib/gpt/core/tensor.py b/lib/gpt/core/tensor.py
index e516e635..67ff4621 100644
--- a/lib/gpt/core/tensor.py
+++ b/lib/gpt/core/tensor.py
@@ -19,9 +19,12 @@
 import cgpt
 import gpt
 import numpy as np
+from gpt.core.foundation import tensor as foundation
 
 
 class tensor:
+    foundation = foundation
+
     def __init__(self, first, second=None):
         if second is not None:
             array, otype = first, second
diff --git a/lib/gpt/core/transform.py b/lib/gpt/core/transform.py
index de9e7e5d..8ff304d3 100644
--- a/lib/gpt/core/transform.py
+++ b/lib/gpt/core/transform.py
@@ -59,50 +59,47 @@ def copy(first, second=None):
     return t
 
 
-def rank_inner_product(a, b, use_accelerator=True):
+def call_binary_aa_num(functional, a, b):
     return_list = (isinstance(a, list)) or (isinstance(b, list))
     a = gpt.util.to_list(a)
     b = gpt.util.to_list(b)
-    if isinstance(a[0], gpt.tensor) and isinstance(b[0], gpt.tensor):
-        res = numpy.array([[gpt.adj(x) * y for y in b] for x in a], dtype=numpy.complex128)
-    else:
-        a = [gpt.eval(x) for x in a]
-        b = [gpt.eval(x) for x in b]
-        otype = a[0].otype
-        assert len(otype.v_idx) == len(b[0].otype.v_idx)
-        res = cgpt.lattice_rank_inner_product(a, b, use_accelerator)
+    res = functional(a, b)
     if return_list:
         return res
     return gpt.util.to_num(res[0, 0])
 
 
-def inner_product(a, b):
-    if isinstance(a, gpt.tensor):
-        return gpt.trace(gpt.adj(a) * b)
-    grid = gpt.util.to_list(a)[0].grid
-    return grid.globalsum(rank_inner_product(a, b))
+def rank_inner_product(a, b, use_accelerator=True):
+    return call_binary_aa_num(
+        lambda la, lb: la[0].__class__.foundation.rank_inner_product(la, lb, use_accelerator), a, b
+    )
+
+
+def inner_product(a, b, use_accelerator=True):
+    return call_binary_aa_num(
+        lambda la, lb: la[0].__class__.foundation.inner_product(la, lb, use_accelerator), a, b
+    )
 
 
 def norm2(l):
     l = gpt.eval(l)
     return_list = isinstance(l, list)
     l = gpt.util.to_list(l)
-    l_lattices = [(i, l[i]) for i in range(len(l)) if isinstance(l[i], gpt.lattice)]
-    l_tensors = [(i, l[i]) for i in range(len(l)) if isinstance(l[i], gpt.tensor)]
-    if len(l_lattices) > 0:
-        ip_l = (
-            l_lattices[0][1]
-            .grid.globalsum(
-                numpy.array([rank_inner_product(x, x) for i, x in l_lattices], dtype=numpy.complex128)
-            )
-            .real
-        )
-    ip_t = [x.norm2().real for i, x in l_tensors]
+    objects = {}
+    indices = {}
+    for n, x in enumerate(l):
+        fnd = x.foundation
+        if fnd not in objects:
+            objects[fnd] = []
+            indices[fnd] = []
+        objects[fnd].append(x)
+        indices[fnd].append(n)
     ip = numpy.ndarray(dtype=numpy.float64, shape=(len(l),))
-    for i, j in enumerate(l_lattices):
-        ip[j[0]] = ip_l[i]
-    for i, j in enumerate(l_tensors):
-        ip[j[0]] = ip_t[i]
+    for fnd in objects:
+        obj = objects[fnd]
+        ip_o = fnd.inner_product(obj, obj, True).real
+        for n, i in enumerate(indices[fnd]):
+            ip[i] = ip_o[n, n]
     if return_list:
         return ip
     return gpt.util.to_num(ip[0])
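
Illustration of the resulting call pattern (a minimal sketch, not part of the
patch): after this change, inner_product and norm2 no longer branch on
isinstance(gpt.lattice)/isinstance(gpt.tensor) but dispatch through the
`foundation` class attribute attached to both classes above. The sketch
assumes the public gpt API (g.grid, g.complex, g.random(...).cnormal,
g.vspincolor as a tensor constructor when given a value instead of a grid,
g.inner_product, g.norm2); the grid size and seed are arbitrary choices.

    import gpt as g

    grid = g.grid([4, 4, 4, 8], g.double)
    rng = g.random("example-seed")

    # two complex lattice fields with Gaussian-random entries
    a = g.complex(grid)
    b = g.complex(grid)
    rng.cnormal([a, b])

    # lattice arguments: lattice.foundation is gpt.core.foundation.lattice,
    # so this ends up in cgpt.lattice_rank_inner_product plus a grid-global sum
    ip_lat = g.inner_product(a, b)

    # tensor arguments: tensor.foundation is gpt.core.foundation.tensor,
    # so the same call uses the numpy/trace fallback; for a vector tensor
    # adj(x) * y is already a scalar, which is why trace() learns to pass
    # plain numbers through unchanged (the operator/unary.py hunk above)
    t = g.vspincolor([[1, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0]])
    ip_ten = g.inner_product(t, t)

    # norm2 groups a list by foundation, computes each group's inner
    # products in one batched call, and reassembles the results in the
    # original order
    n2 = g.norm2([a, b])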