# -*- coding: utf-8 -*-
"""
Created on Fri Apr 27 13:43:11 2018
@author: galetzka
"""
import openturns as ot
import numpy as np
import scipy.linalg as sp
from idx_admissibility import admissible_neighbors
from tools import get_ed, transform_multi_index_set, get_design_matrix, power_iteration
def alsace(func, N, jpdf, tol=1e-22, sample_type='R', limit_cond=100,
           max_fcalls=1000, seed=123, ed_file=None, ed_fevals_file=None,
           verbose=True, pce_dict=None):
"""
ALSACE - Approximations via Lower-Set and Least-Squares-based Adaptive Chaos Expansions
func: function to be approximated.
N: number of parameters.
jpdf: joint probability density function.
limit_cond: maximum allowed condition number of design matrix D
sample_type: 'R'-random, 'L'-LHS
seed: sampling seed
tol, max_fcalls: exit criteria, self-explanatory.
ed_file, ed_fevals_file: experimental design and corresponding evaluations
'act': activated, i.e. already part of the approximation.
'adm': admissible, i.e. candidates for the approximation's expansion.
"""
    if not pce_dict:  # no previous expansion data --> cold start
idx_act = []
idx_act.append([0]*N) # start with 0 multi-index
idx_adm = []
# set seed
ot.RandomGenerator.SetSeed(seed)
        ed_size = 2*N  # initial number of samples
        # initial experimental design and corresponding evaluations
ed, ed_fevals = get_ed(func, jpdf, ed_size, sample_type=sample_type,
knots=[], values=[], ed_file=ed_file,
ed_fevals_file=ed_fevals_file)
        global_error_indicator = 1.0  # arbitrary value, large enough to enter the adaptive loop
# get the distribution type of each random variable
dist_types = []
for i in range(N):
dist_type = jpdf.getMarginal(i).getName()
dist_types.append(dist_type)
# create orthogonal univariate bases
poly_collection = ot.PolynomialFamilyCollection(N)
for i in range(N):
pdf = jpdf.getDistributionCollection()[i]
algo = ot.AdaptiveStieltjesAlgorithm(pdf)
poly_collection[i] = ot.StandardDistributionPolynomialFactory(algo)
# create multivariate basis
mv_basis = ot.OrthogonalProductPolynomialFactory(
poly_collection,
ot.LinearEnumerateFunction(N)
)
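        # The product factory tensorizes the univariate polynomials into a basis
        # that is orthonormal with respect to jpdf (assuming independent marginals);
        # LinearEnumerateFunction maps scalar basis indices to multi-indices
        # ordered by total degree.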
# get enumerate function (multi-index handling)
enum_func = mv_basis.getEnumerateFunction()
else: # get data from dictionary
idx_act = pce_dict['idx_act']
idx_adm = pce_dict['idx_adm']
pce_coeff_act = pce_dict['pce_coeff_act']
pce_coeff_adm = pce_dict['pce_coeff_adm']
ed = pce_dict['ed']
ed_fevals = pce_dict['ed_fevals']
ed_size = len(ed_fevals)
# compute local and global error indicators
global_error_indicator = np.sum(np.array(pce_coeff_adm)**2)
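        # for an orthonormal basis, this sum equals the variance contributed by the
        # admissible (candidate) terms and is used as the global error estimate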
enum_func = pce_dict['enum_func']
mv_basis = pce_dict['mv_basis']
#
while ed_size < max_fcalls and global_error_indicator > tol:
# the index added last to the activated set is the one to be refined
last_act_idx = idx_act[-1][:]
# get admissible neighbors of the lastly added index
adm_neighbors = admissible_neighbors(last_act_idx, idx_act)
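        # (forward neighbors whose predecessors are all activated, so that the
        # expanded multi-index set remains downward closed, i.e. a lower set)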
# update admissible indices
idx_adm = idx_adm + adm_neighbors
# get polynomial basis for the LS problem
idx_ls = idx_act + idx_adm
idx_ls_single = transform_multi_index_set(idx_ls, enum_func)
ls_basis = mv_basis.getSubBasis(idx_ls_single)
ls_basis_size = len(ls_basis)
# construct the design matrix D and compute its QR decomposition and its
# condition number
D = get_design_matrix(ls_basis, ed)
Q, R = sp.qr(D, mode='economic')
condD = np.linalg.cond(R)
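        # cond(R) equals cond(D) because Q has orthonormal columns, so the QR
        # factors serve both the conditioning check and the LS solve below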
# get largest eigenvalue of A^-1
A = np.matmul(D.T, D) / ed_size
# lambda_max=power_iteration(A,1000)
# lambda_min=power_iteration(A-lambda_max*np.eye(A.shape[0]),10000)+lambda_max
#
# print('--------- power iteration ----------')
# print('lambda max= ', lambda_max)
# print('lambda min= ', lambda_min)
# print('lambda max inv= ', 1./lambda_min)
# print('--------- numpy ----------')
# print('lambda max= ', max(np.linalg.eig(A)[0]))
# print('lambda min= ', min(np.linalg.eig(A)[0]))
# print('lambda max inv lambda= ', 1./min(np.linalg.eig(A)[0]))
# print('lambda max inv A= ', max(np.linalg.eig(np.linalg.inv(A))[0]))
# print('')
# print('')
        eigA = 1. / np.linalg.eigvalsh(A).min()  # 1 / smallest eigenvalue of the symmetric matrix A
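        # 1/lambda_min(A) is the largest eigenvalue of A^-1; bounding it is an
        # E-optimality-type stability criterion for the least-squares problem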
# If condD becomes too large, enrich the ED until condD becomes acceptable
# or until ed_size reaches max_fcalls
while (eigA > limit_cond and ed_size < max_fcalls) or ed_size < ls_basis_size:
# inform user
if verbose:
                print('WARNING: condition(D) =', condD)
                print('WARNING: lambda_max(A^-1) =', eigA)
                print("")
# select new size for the ED
if ls_basis_size > ed_size:
ed_size = ls_basis_size + N
elif ed_size + N > max_fcalls:
ed_size = max_fcalls
else:
ed_size = ed_size + N
# expand ED
ed, ed_fevals = get_ed(func, jpdf, ed_size, sample_type=sample_type,
knots=ed, values=ed_fevals, ed_file=ed_file,
ed_fevals_file=ed_fevals_file)
# construct the design matrix D and compute its QR decomposition and its
# condition number
D = get_design_matrix(ls_basis, ed)
            Q, R = sp.qr(D, mode='economic')
condD = np.linalg.cond(R)
A = np.matmul(D.T, D) / ed_size
            eigA = 1. / np.linalg.eigvalsh(A).min()
# solve LS problem
c = Q.T.dot(ed_fevals)
pce_coeff_ls = sp.solve_triangular(R, c)
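        # with D = Q R, minimizing ||D x - y||_2 reduces to the triangular system
        # R x = Q^T y, which is what the two lines above solve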
# find the multi-index with the largest contribution, add it to idx_act
# and delete it from idx_adm
pce_coeff_act = pce_coeff_ls[:len(idx_act)].tolist()
pce_coeff_adm = pce_coeff_ls[-len(idx_adm):].tolist()
help_idx = np.argmax(np.abs(pce_coeff_adm))
idx_add = idx_adm.pop(help_idx)
pce_coeff_add = pce_coeff_adm.pop(help_idx)
        idx_act.append(idx_add)
        pce_coeff_act.append(pce_coeff_add)
        # update the global error indicator with the variance of the remaining admissible terms
        global_error_indicator = np.sum(np.array(pce_coeff_adm)**2)
# store expansion data in dictionary
pce_dict = {}
pce_dict['idx_act'] = idx_act
pce_dict['idx_adm'] = idx_adm
pce_dict['pce_coeff_act'] = pce_coeff_act
pce_dict['pce_coeff_adm'] = pce_coeff_adm
pce_dict['ed'] = ed
pce_dict['ed_fevals'] = ed_fevals
pce_dict['enum_func'] = enum_func
pce_dict['mv_basis'] = mv_basis
return pce_dict
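

# Minimal usage sketch, assuming that `func` is a plain Python callable which
# tools.get_ed evaluates once per sample point; the test function and the input
# distribution below are illustrative placeholders, not part of the method itself.
if __name__ == '__main__':
    N_test = 3
    # independent uniform inputs on [-1, 1]
    jpdf_test = ot.ComposedDistribution([ot.Uniform(-1.0, 1.0)] * N_test)

    def test_func(x):
        # simple smooth test function of the N_test inputs (illustrative only)
        x = np.asarray(x)
        return np.sum(x**2) + 0.5 * np.prod(x)

    # run the adaptive expansion and inspect the activated multi-indices
    result = alsace(test_func, N_test, jpdf_test, max_fcalls=200, verbose=False)
    print('activated multi-indices:', result['idx_act'])
    print('corresponding coefficients:', result['pce_coeff_act'])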